body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
ebbe7ec65fbaf1e02f85e057488ca4fa4ceb5ee0791f039443eb921b27b470a4
@pytest.fixture
def common_fixtures(
    loop,
    storage_v0_service_mock,
    mock_download_file,
    mock_upload_file,
    this_node_file: Path,
    another_node_file: Path,
    download_file_folder: Path,
):
    """This module's main fixture: points node config at the mocked storage."""
    node_config.STORAGE_ENDPOINT = 'storage:8080'
this module main fixture
packages/simcore-sdk/tests/unit/test_node_ports_v2_port.py
common_fixtures
colinRawlings/osparc-simcore
25
python
@pytest.fixture def common_fixtures(loop, storage_v0_service_mock, mock_download_file, mock_upload_file, this_node_file: Path, another_node_file: Path, download_file_folder: Path): node_config.STORAGE_ENDPOINT = 'storage:8080'
@pytest.fixture def common_fixtures(loop, storage_v0_service_mock, mock_download_file, mock_upload_file, this_node_file: Path, another_node_file: Path, download_file_folder: Path): node_config.STORAGE_ENDPOINT = 'storage:8080'<|docstring|>this module main fixture<|endoftext|>
0375e20f9d1dd1cff34bfe08e19c5546b6073de43855617b12234454217907a9
def rotate(self, nums: List[int], k: int) -> None:
    """
    Do not return anything, modify nums in-place instead.

    Rotates ``nums`` right by ``k`` positions via the triple-reversal
    trick: reverse the whole list, then reverse the first ``k`` items
    and the remaining ``n - k`` items.
    """
    def _flip(lo: int, hi: int) -> None:
        # Two-pointer in-place reversal of nums[lo..hi].
        while lo < hi:
            nums[lo], nums[hi] = nums[hi], nums[lo]
            lo += 1
            hi -= 1

    n = len(nums)
    k %= n
    _flip(0, n - 1)
    _flip(0, k - 1)
    _flip(k, n - 1)
Do not return anything, modify nums in-place instead.
leetcode-algorithms/189. Rotate Array/solution.py
rotate
joyfeel/leetcode
3
python
def rotate(self, nums: List[int], k: int) -> None: '\n \n ' def numReverse(start, end): while (start < end): (nums[start], nums[end]) = (nums[end], nums[start]) start += 1 end -= 1 n = len(nums) k %= n numReverse(0, (n - 1)) numReverse(0, (k - 1)) numReverse(k, (n - 1))
def rotate(self, nums: List[int], k: int) -> None: '\n \n ' def numReverse(start, end): while (start < end): (nums[start], nums[end]) = (nums[end], nums[start]) start += 1 end -= 1 n = len(nums) k %= n numReverse(0, (n - 1)) numReverse(0, (k - 1)) numReverse(k, (n - 1))<|docstring|>Do not return anything, modify nums in-place instead.<|endoftext|>
8aaf8f4659505f63c53fe1b9f63f6496f55ada2cd381431326589deb01cb9d92
def _add_simple_procparser(subparsers, name, helpstr, func, defname='proc'):
    """Add a sub parser that can do a simple thing with no arguments."""
    simple_parser = _add_procparser(subparsers, name, helpstr, func,
                                    defname=defname)
    # Attach the shared filename/output arguments every processor needs.
    _add_def_args(simple_parser)
    return simple_parser
Add a sub parser that can do a simple thing with no arguments.
Src/PulseEKKO/impdar/bin/apdar.py
_add_simple_procparser
rdrews-dev/ReMeltRadar
0
python
def _add_simple_procparser(subparsers, name, helpstr, func, defname='proc'): parser = _add_procparser(subparsers, name, helpstr, func, defname=defname) _add_def_args(parser) return parser
def _add_simple_procparser(subparsers, name, helpstr, func, defname='proc'): parser = _add_procparser(subparsers, name, helpstr, func, defname=defname) _add_def_args(parser) return parser<|docstring|>Add a sub parser that can do a simple thing with no arguments.<|endoftext|>
3a6d3402f0af0327dbe419b597574669bcad27c0d69a9eae9dba5946c18f9951
def _add_procparser(subparsers, name, helpstr, func, defname='proc'): 'Wrap adding subparser because we mostly want the same args.' parser = subparsers.add_parser(name, help=helpstr) parser.set_defaults(func=func, name=defname) return parser
Wrap adding subparser because we mostly want the same args.
Src/PulseEKKO/impdar/bin/apdar.py
_add_procparser
rdrews-dev/ReMeltRadar
0
python
def _add_procparser(subparsers, name, helpstr, func, defname='proc'): parser = subparsers.add_parser(name, help=helpstr) parser.set_defaults(func=func, name=defname) return parser
def _add_procparser(subparsers, name, helpstr, func, defname='proc'): parser = subparsers.add_parser(name, help=helpstr) parser.set_defaults(func=func, name=defname) return parser<|docstring|>Wrap adding subparser because we mostly want the same args.<|endoftext|>
b8b9158f65207e6f924e2f6b1945c74ae5b31f9cd2baa6be0b8a86f0bb3cf99f
def _add_def_args(parser): 'Set some default arguments common to the different processing types.' parser.add_argument('fns', type=str, nargs='+', help='The files to process') parser.add_argument('-o', type=str, help='Output to this file (folder if multiple inputs)')
Set some default arguments common to the different processing types.
Src/PulseEKKO/impdar/bin/apdar.py
_add_def_args
rdrews-dev/ReMeltRadar
0
python
def _add_def_args(parser): parser.add_argument('fns', type=str, nargs='+', help='The files to process') parser.add_argument('-o', type=str, help='Output to this file (folder if multiple inputs)')
def _add_def_args(parser): parser.add_argument('fns', type=str, nargs='+', help='The files to process') parser.add_argument('-o', type=str, help='Output to this file (folder if multiple inputs)')<|docstring|>Set some default arguments common to the different processing types.<|endoftext|>
ea042164d2c68a627b10cfbf70c0cc53e2aa2164893ccb40c5f63ee00d5d032d
def main():
    """Get arguments, process data, handle saving.

    Parses the CLI, loads each requested ApRES file, applies the selected
    processing function (skipped when the action is plain 'load'), then
    saves every result either beside its input or under the '-o' target.
    """
    parser = _get_args()
    args = parser.parse_args(sys.argv[1:])
    # No sub-command selected: re-parse with '-h' so argparse prints help.
    if (not hasattr(args, 'func')):
        parser.parse_args(['-h'])
    # Load all inputs up front.
    apres_data = [load_apres.load_apres_single_file(fn) for fn in args.fns]
    if (args.name == 'load'):
        # 'load' means no processing step -- just save below.
        pass
    else:
        for dat in apres_data:
            args.func(dat, **vars(args))
    # Suffix used in the output filename ('load' results are tagged 'raw').
    if (args.name == 'load'):
        name = 'raw'
    else:
        name = args.name
    if (args.o is not None):
        # Several inputs, or a trailing '/': treat '-o' as a folder.
        if ((len(apres_data) > 1) or (args.o[(- 1)] == '/')):
            for (d, f) in zip(apres_data, args.fns):
                bn = os.path.split(os.path.splitext(f)[0])[1]
                # Strip any pre-existing '_raw' suffix so it is not doubled.
                if (bn[(- 4):] == '_raw'):
                    bn = bn[:(- 4)]
                out_fn = os.path.join(args.o, (bn + '_{:s}.h5'.format(name)))
                d.save(out_fn)
        else:
            # Single input with an explicit output file name.
            out_fn = args.o
            apres_data[0].save(out_fn)
    else:
        # No '-o': save each result next to its input file.
        for (d, f) in zip(apres_data, args.fns):
            bn = os.path.splitext(f)[0]
            if (bn[(- 4):] == '_raw'):
                bn = bn[:(- 4)]
            out_fn = (bn + '_{:s}.h5'.format(name))
            d.save(out_fn)
Get arguments, process data, handle saving.
Src/PulseEKKO/impdar/bin/apdar.py
main
rdrews-dev/ReMeltRadar
0
python
def main(): parser = _get_args() args = parser.parse_args(sys.argv[1:]) if (not hasattr(args, 'func')): parser.parse_args(['-h']) apres_data = [load_apres.load_apres_single_file(fn) for fn in args.fns] if (args.name == 'load'): pass else: for dat in apres_data: args.func(dat, **vars(args)) if (args.name == 'load'): name = 'raw' else: name = args.name if (args.o is not None): if ((len(apres_data) > 1) or (args.o[(- 1)] == '/')): for (d, f) in zip(apres_data, args.fns): bn = os.path.split(os.path.splitext(f)[0])[1] if (bn[(- 4):] == '_raw'): bn = bn[:(- 4)] out_fn = os.path.join(args.o, (bn + '_{:s}.h5'.format(name))) d.save(out_fn) else: out_fn = args.o apres_data[0].save(out_fn) else: for (d, f) in zip(apres_data, args.fns): bn = os.path.splitext(f)[0] if (bn[(- 4):] == '_raw'): bn = bn[:(- 4)] out_fn = (bn + '_{:s}.h5'.format(name)) d.save(out_fn)
def main(): parser = _get_args() args = parser.parse_args(sys.argv[1:]) if (not hasattr(args, 'func')): parser.parse_args(['-h']) apres_data = [load_apres.load_apres_single_file(fn) for fn in args.fns] if (args.name == 'load'): pass else: for dat in apres_data: args.func(dat, **vars(args)) if (args.name == 'load'): name = 'raw' else: name = args.name if (args.o is not None): if ((len(apres_data) > 1) or (args.o[(- 1)] == '/')): for (d, f) in zip(apres_data, args.fns): bn = os.path.split(os.path.splitext(f)[0])[1] if (bn[(- 4):] == '_raw'): bn = bn[:(- 4)] out_fn = os.path.join(args.o, (bn + '_{:s}.h5'.format(name))) d.save(out_fn) else: out_fn = args.o apres_data[0].save(out_fn) else: for (d, f) in zip(apres_data, args.fns): bn = os.path.splitext(f)[0] if (bn[(- 4):] == '_raw'): bn = bn[:(- 4)] out_fn = (bn + '_{:s}.h5'.format(name)) d.save(out_fn)<|docstring|>Get arguments, process data, handle saving.<|endoftext|>
0229732920be02c7b6e4146f259cf577c7c0c6fe84c2130273ab1b6c4622934d
def crop(dat, lim=0, top_or_bottom='top', dimension='snum', **kwargs):
    """Crop in the vertical.

    Extra keyword arguments are accepted (and ignored) so this can serve
    as a generic processing callback.
    """
    crop_kwargs = {'top_or_bottom': top_or_bottom, 'dimension': dimension}
    dat.crop(lim, **crop_kwargs)
Crop in the vertical.
Src/PulseEKKO/impdar/bin/apdar.py
crop
rdrews-dev/ReMeltRadar
0
python
def crop(dat, lim=0, top_or_bottom='top', dimension='snum', **kwargs): dat.crop(lim, top_or_bottom=top_or_bottom, dimension=dimension)
def crop(dat, lim=0, top_or_bottom='top', dimension='snum', **kwargs): dat.crop(lim, top_or_bottom=top_or_bottom, dimension=dimension)<|docstring|>Crop in the vertical.<|endoftext|>
182a2f0545807bd28b5ccf989c97f45001e13f19803de93a0a229c20629e1c35
def readout(module, action, variable=None, show=False, numeric=True):
    """
    Generic readout function, that wraps values in a json-compliant way.
    :module: TR-064 sub-modules, such as 'WANIPConn1'
    :action: Calls an action, e.g. 'GetStatusInfo', as defined by TR-064 specs (cf. https://avm.de/service/schnittstellen/)
    :variable: (optional) a specific variable out of this set to extract
    :show: print variable name
    :numeric: cast value to numeric
    """
    try:
        answer_dict = fc.call_action(module, action)
    except BaseException:
        # Log which query failed, then re-raise so the caller still sees it.
        print(f'Could not query {module} with action {action}')
        raise
    # These 64-bit counters arrive as strings; convert them to ints.
    if (action == 'GetAddonInfos'):
        answer_dict['NewX_AVM_DE_TotalBytesSent64'] = int(answer_dict['NewX_AVM_DE_TotalBytesSent64'])
        answer_dict['NewX_AVM_DE_TotalBytesReceived64'] = int(answer_dict['NewX_AVM_DE_TotalBytesReceived64'])
    if variable:
        # Single-variable mode: emit just this value, optionally quoted/labelled.
        answer_dict = str(answer_dict[variable])
        if (not numeric):
            answer_dict = (('"' + answer_dict) + '"')
        if show:
            answer_dict = ((('"' + variable) + '": ') + answer_dict)
    else:
        # Whole-dict mode: drop noisy/sensitive keys, then serialize the rest.
        entitiesToRemove = ('NewAllowedCharsSSID', 'NewDNSServer1', 'NewDNSServer2', 'NewVoipDNSServer1', 'NewVoipDNSServer2', 'NewATURVendor', 'NewATURCountry', 'NewDeviceLog')
        # NOTE(review): comprehension used only for the pop() side effect;
        # the resulting list of removed values is never read.
        entitiesToRemove = [answer_dict.pop(k, None) for k in entitiesToRemove]
        # Strip the surrounding '{' and '}' of the dict repr.
        answer_dict = str(answer_dict)[1:(- 1)]
        # Harmonize counter names with the 64-bit totals converted above.
        answer_dict = answer_dict.replace('NewBytes', 'NewTotalBytes')
        answer_dict = answer_dict.replace('NewPackets', 'NewTotalPackets')
    # Make the Python repr JSON-compliant: double quotes, lowercase booleans.
    flattened_string = answer_dict.replace("'", '"').replace('True', 'true').replace('False', 'false')
    return flattened_string
Generic readout function, that wraps values in a json-compliant way. :module: TR-064 sub-modules, such as 'WANIPConn1' :action: Calls an action, e.g. 'GetStatusInfo', as defined by TR-04 (cf. https://avm.de/service/schnittstellen/) :variable: (optional) a specific variable out of this set to extract :show: print variable name :numeric: cast value to numeric
checkfritz.py
readout
blackw1ng/FritzBox-monitor
42
python
def readout(module, action, variable=None, show=False, numeric=True): "\n Generic readout function, that wraps values in a json-compliant way.\n :module: TR-064 sub-modules, such as 'WANIPConn1'\n :action: Calls an action, e.g. 'GetStatusInfo', as defined by TR-04 (cf. https://avm.de/service/schnittstellen/)\n :variable: (optional) a specific variable out of this set to extract\n :show: print variable name\n :numeric: cast value to numeric\n " try: answer_dict = fc.call_action(module, action) except BaseException: print(f'Could not query {module} with action {action}') raise if (action == 'GetAddonInfos'): answer_dict['NewX_AVM_DE_TotalBytesSent64'] = int(answer_dict['NewX_AVM_DE_TotalBytesSent64']) answer_dict['NewX_AVM_DE_TotalBytesReceived64'] = int(answer_dict['NewX_AVM_DE_TotalBytesReceived64']) if variable: answer_dict = str(answer_dict[variable]) if (not numeric): answer_dict = (('"' + answer_dict) + '"') if show: answer_dict = ((('"' + variable) + '": ') + answer_dict) else: entitiesToRemove = ('NewAllowedCharsSSID', 'NewDNSServer1', 'NewDNSServer2', 'NewVoipDNSServer1', 'NewVoipDNSServer2', 'NewATURVendor', 'NewATURCountry', 'NewDeviceLog') entitiesToRemove = [answer_dict.pop(k, None) for k in entitiesToRemove] answer_dict = str(answer_dict)[1:(- 1)] answer_dict = answer_dict.replace('NewBytes', 'NewTotalBytes') answer_dict = answer_dict.replace('NewPackets', 'NewTotalPackets') flattened_string = answer_dict.replace("'", '"').replace('True', 'true').replace('False', 'false') return flattened_string
def readout(module, action, variable=None, show=False, numeric=True): "\n Generic readout function, that wraps values in a json-compliant way.\n :module: TR-064 sub-modules, such as 'WANIPConn1'\n :action: Calls an action, e.g. 'GetStatusInfo', as defined by TR-04 (cf. https://avm.de/service/schnittstellen/)\n :variable: (optional) a specific variable out of this set to extract\n :show: print variable name\n :numeric: cast value to numeric\n " try: answer_dict = fc.call_action(module, action) except BaseException: print(f'Could not query {module} with action {action}') raise if (action == 'GetAddonInfos'): answer_dict['NewX_AVM_DE_TotalBytesSent64'] = int(answer_dict['NewX_AVM_DE_TotalBytesSent64']) answer_dict['NewX_AVM_DE_TotalBytesReceived64'] = int(answer_dict['NewX_AVM_DE_TotalBytesReceived64']) if variable: answer_dict = str(answer_dict[variable]) if (not numeric): answer_dict = (('"' + answer_dict) + '"') if show: answer_dict = ((('"' + variable) + '": ') + answer_dict) else: entitiesToRemove = ('NewAllowedCharsSSID', 'NewDNSServer1', 'NewDNSServer2', 'NewVoipDNSServer1', 'NewVoipDNSServer2', 'NewATURVendor', 'NewATURCountry', 'NewDeviceLog') entitiesToRemove = [answer_dict.pop(k, None) for k in entitiesToRemove] answer_dict = str(answer_dict)[1:(- 1)] answer_dict = answer_dict.replace('NewBytes', 'NewTotalBytes') answer_dict = answer_dict.replace('NewPackets', 'NewTotalPackets') flattened_string = answer_dict.replace("'", '"').replace('True', 'true').replace('False', 'false') return flattened_string<|docstring|>Generic readout function, that wraps values in a json-compliant way. :module: TR-064 sub-modules, such as 'WANIPConn1' :action: Calls an action, e.g. 'GetStatusInfo', as defined by TR-04 (cf. https://avm.de/service/schnittstellen/) :variable: (optional) a specific variable out of this set to extract :show: print variable name :numeric: cast value to numeric<|endoftext|>
2dee0b2efbd37d7b2ea850b4a98624eaba8cb876e6ebda81e7c806da39bec285
@staticmethod
def execute(list_download_order: Iterable[DownloadOrder], *, limit: int=5, media_filter: Optional[MediaFilter]=None, allow_http_status: List[int]=None) -> List[MediaDownloadResult]:
    """Executes parallel media downloading.

    Builds the download coroutine and drives it to completion on the
    current event loop, returning the per-media results.
    """
    coroutine = ParallelMediaDownloadCoroutine.execute(
        list_download_order,
        limit=limit,
        media_filter=media_filter,
        allow_http_status=allow_http_status,
    )
    event_loop = asyncio.get_event_loop()
    return event_loop.run_until_complete(coroutine)
Executes parallel media downloading.
parallelmediadownloader/parallel_media_downloader.py
execute
yukihiko-shinoda/parallel-media-downloader
1
python
@staticmethod def execute(list_download_order: Iterable[DownloadOrder], *, limit: int=5, media_filter: Optional[MediaFilter]=None, allow_http_status: List[int]=None) -> List[MediaDownloadResult]: return asyncio.get_event_loop().run_until_complete(ParallelMediaDownloadCoroutine.execute(list_download_order, limit=limit, media_filter=media_filter, allow_http_status=allow_http_status))
@staticmethod def execute(list_download_order: Iterable[DownloadOrder], *, limit: int=5, media_filter: Optional[MediaFilter]=None, allow_http_status: List[int]=None) -> List[MediaDownloadResult]: return asyncio.get_event_loop().run_until_complete(ParallelMediaDownloadCoroutine.execute(list_download_order, limit=limit, media_filter=media_filter, allow_http_status=allow_http_status))<|docstring|>Executes parallel media downloading.<|endoftext|>
99cee1b8b49227d5cdfd2069fa22c4a0e049eecd8c861f70d8e0fc2533125098
def smooth(x, window_len=5, window='hanning'):
    """Smooth the data using a window with requested size.

    The signal is convolved with a scaled window after being padded at both
    ends with reflected copies (window_len - 1 samples each side), so that
    transient effects at the edges are minimized.

    input:
        x: 1-D input signal (numpy array)
        window_len: dimension of the smoothing window; should be an odd integer
        window: one of "flat", "hanning", "hamming", "bartlett", "blackman";
            a flat window produces a moving average.

    output:
        smoothed signal with len(output) == len(input)

    raises:
        ValueError: if x is not 1-D, shorter than window_len, or the window
            name is unknown.

    example:
        t = np.linspace(-2, 2, 40)
        y = smooth(np.sin(t) + np.random.randn(len(t)) * 0.1)

    see also:
        np.hanning, np.hamming, np.bartlett, np.blackman, np.convolve,
        scipy.signal.lfilter
    """
    # Fixed: original did `raise (ValueError, 'msg')`, which raises a tuple
    # (a TypeError in Python 3) instead of the intended ValueError.
    if x.ndim != 1:
        raise ValueError('smooth only accepts 1 dimension arrays.')
    if x.size < window_len:
        raise ValueError('Input vector needs to be bigger than window size.')
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError("Window is 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    if window_len < 3:
        return x
    # Reflect-pad both ends by window_len - 1 samples to damp edge transients.
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-1 - window_len:-1]]
    if window == 'flat':
        # Moving average.
        w = np.ones(window_len, 'd')
    else:
        # Look the window function up by name instead of eval() (safer,
        # same behavior for the whitelisted names above).
        w = getattr(np, window)(window_len)
    y = np.convolve(w / w.sum(), s, mode='valid')
    # Trim the padding so len(y) == len(x) for ANY window_len.
    # (Original hard-coded [2:-2], which is only correct for window_len == 5.)
    lo = (window_len - 1) // 2
    hi = window_len // 2
    return y[lo:-hi]
smooth the data using a window with requested size. This method is based on the convolution of a scaled window with the signal. The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the begining and end part of the output signal. input: x: input signal window_len: dimension of the smoothing window; should be an odd integer window: type of window from "flat", "hanning", "hamming", "bartlett", "blackman" flat window will produce a moving average smoothing. output: smoothed signal example: t=linspace(-2,2,0.1) x=sin(t)+randn(len(t))*0.1 y=smooth(x) see also: np.hanning, np.hamming, np.bartlett, np.blackman, np.convolve scipy.signal.lfilter TODO: the window parameter could be the window itself if an array instead of a string NOTE: length(output) != length(input) to correct: return y[(window_len/2-1):-(window_len/2)] instead of y.
flypy/responsetools/sampling.py
smooth
ikestar99/flypy
0
python
def smooth(x, window_len=5, window='hanning'): 'smooth the data using a window with requested size.\n \n This method is based on the convolution of a scaled window with the signal.\n The signal is prepared by introducing reflected copies of the signal \n (with the window size) in both ends so that transient parts are minimized\n in the begining and end part of the output signal.\n \n input:\n x: input signal \n window_len: dimension of the smoothing window; should be an odd integer\n window: type of window from "flat", "hanning", "hamming", "bartlett",\n "blackman"\n flat window will produce a moving average smoothing.\n\n output:\n smoothed signal\n \n example:\n t=linspace(-2,2,0.1)\n x=sin(t)+randn(len(t))*0.1\n y=smooth(x)\n \n see also: \n np.hanning, np.hamming, np.bartlett, np.blackman, np.convolve\n scipy.signal.lfilter\n \n TODO: the window parameter could be the window itself if an array instead\n of a string\n NOTE: length(output) != length(input)\n to correct: return y[(window_len/2-1):-(window_len/2)] instead of y.\n ' if (x.ndim != 1): raise (ValueError, 'smooth only accepts 1 dimension arrays.') elif (x.size < window_len): raise (ValueError, 'Input vector needs to be bigger than window size.') elif (window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman')): raise (ValueError, "Window is 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'") if (window_len < 3): return x s = np.r_[(x[(window_len - 1):0:(- 1)], x, x[(- 2):((- 1) - window_len):(- 1)])] if (window == 'flat'): w = np.ones(window_len, 'd') else: w = eval('np.{}(window_len)'.format(window)) y = np.convolve((w / w.sum()), s, mode='valid')[2:(- 2)] return y
def smooth(x, window_len=5, window='hanning'): 'smooth the data using a window with requested size.\n \n This method is based on the convolution of a scaled window with the signal.\n The signal is prepared by introducing reflected copies of the signal \n (with the window size) in both ends so that transient parts are minimized\n in the begining and end part of the output signal.\n \n input:\n x: input signal \n window_len: dimension of the smoothing window; should be an odd integer\n window: type of window from "flat", "hanning", "hamming", "bartlett",\n "blackman"\n flat window will produce a moving average smoothing.\n\n output:\n smoothed signal\n \n example:\n t=linspace(-2,2,0.1)\n x=sin(t)+randn(len(t))*0.1\n y=smooth(x)\n \n see also: \n np.hanning, np.hamming, np.bartlett, np.blackman, np.convolve\n scipy.signal.lfilter\n \n TODO: the window parameter could be the window itself if an array instead\n of a string\n NOTE: length(output) != length(input)\n to correct: return y[(window_len/2-1):-(window_len/2)] instead of y.\n ' if (x.ndim != 1): raise (ValueError, 'smooth only accepts 1 dimension arrays.') elif (x.size < window_len): raise (ValueError, 'Input vector needs to be bigger than window size.') elif (window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman')): raise (ValueError, "Window is 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'") if (window_len < 3): return x s = np.r_[(x[(window_len - 1):0:(- 1)], x, x[(- 2):((- 1) - window_len):(- 1)])] if (window == 'flat'): w = np.ones(window_len, 'd') else: w = eval('np.{}(window_len)'.format(window)) y = np.convolve((w / w.sum()), s, mode='valid')[2:(- 2)] return y<|docstring|>smooth the data using a window with requested size. This method is based on the convolution of a scaled window with the signal. 
The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the begining and end part of the output signal. input: x: input signal window_len: dimension of the smoothing window; should be an odd integer window: type of window from "flat", "hanning", "hamming", "bartlett", "blackman" flat window will produce a moving average smoothing. output: smoothed signal example: t=linspace(-2,2,0.1) x=sin(t)+randn(len(t))*0.1 y=smooth(x) see also: np.hanning, np.hamming, np.bartlett, np.blackman, np.convolve scipy.signal.lfilter TODO: the window parameter could be the window itself if an array instead of a string NOTE: length(output) != length(input) to correct: return y[(window_len/2-1):-(window_len/2)] instead of y.<|endoftext|>
4d998de67fe79cae55ab45819681f46220d0cf25c5448996e5ef9c698b27afcc
def __init__(self, pp, cellarea, cmask, flowacc, slope, S_initial=None, outputs=False):
    """
    sets up Topmodel for the catchment assuming homogenous
    effective soil depth 'm' and sat. hydr. conductivity 'ko'.
    This is the 'classic' version of Topmodel where hydrologic similarity index is TWI = log(a / tan(b)).

    Args:
        pp - parameter dict with keys:
            dt - timestep [s]
            ko - soil transmissivity at saturation [m/s]
            m - effective soil depth (m), i.e. decay factor of Ksat with depth
            twi_cutoff - max allowed twi -index
            so - initial catchment average saturation deficit (m)
        cmask - catchment mask, 1 = catchment_cell
        cellarea - gridcell area [m2]
        flowacc - flow accumulation per unit contour length (m)
        slope - local slope (deg)
        S_initial - initial storage deficit, overrides that in 'pp'
        outputs - True stores outputs after each timestep into dictionary
    """
    # Fall back to the parameter-dict initial deficit when none is given.
    if (not S_initial):
        S_initial = pp['so']
    self.dt = float(pp['dt'])  # timestep [s]
    self.cmask = cmask
    self.CellArea = cellarea  # grid cell area [m2]
    dx = (cellarea ** 0.5)  # cell side length [m], assumes square cells
    self.CatchmentArea = (np.size(cmask[(cmask == 1)]) * self.CellArea)
    self.a = (flowacc * cmask)  # flow accumulation masked to the catchment
    self.slope = (slope * cmask)  # local slope [deg] masked to the catchment
    self.M = pp['m']  # effective soil depth / Ksat decay factor [m]
    self.To = (pp['ko'] * self.dt)  # transmissivity integrated over one timestep
    " \n    local and catchment average hydrologic similarity indices (xi, X).\n    Set xi > twi_cutoff equal to cutoff value to remove tail of twi-distribution.\n    This concerns mainly the stream network cells. 'Outliers' in twi-distribution are\n    problem for streamflow prediction\n    "
    slope_rad = np.radians(self.slope)
    # TWI: log(a / tan(beta)); eps avoids division by zero on flat cells.
    xi = np.log(((self.a / dx) / (np.tan(slope_rad) + eps)))
    # Clip the upper tail of the TWI distribution at the given percentile.
    clim = np.percentile(xi[(xi > 0)], pp['twi_cutoff'])
    xi[(xi > clim)] = clim
    self.xi = xi
    # Catchment-average similarity index (area-weighted mean, NaNs ignored).
    self.X = ((1.0 / self.CatchmentArea) * np.nansum((self.xi * self.CellArea)))
    # Discharge scale factor at zero average saturation deficit.
    self.Qo = (self.To * np.exp((- self.X)))
    # Initialize the catchment-average deficit from the local deficits,
    # truncating oversaturated (negative) cells to zero.
    s = self.local_s(S_initial)
    s[(s < 0)] = 0.0
    self.S = np.nanmean(s)
    if outputs:
        # Per-timestep result accumulators, filled by run_timestep.
        self.results = {'S': [], 'Qb': [], 'Qr': [], 'Qt': [], 'qr': [], 'fsat': [], 'Mbe': [], 'R': []}
sets up Topmodel for the catchment assuming homogenous effective soil depth 'm' and sat. hydr. conductivity 'ko'. This is the 'classic' version of Topmodel where hydrologic similarity index is TWI = log(a / tan(b)). Args: pp - parameter dict with keys: dt - timestep [s] ko - soil transmissivity at saturation [m/s] m - effective soil depth (m), i.e. decay factor of Ksat with depth twi_cutoff - max allowed twi -index so - initial catchment average saturation deficit (m) cmask - catchment mask, 1 = catchment_cell cellarea - gridcell area [m2] flowacc - flow accumulation per unit contour length (m) slope - local slope (deg) S_initial - initial storage deficit, overrides that in 'pp' outputs - True stores outputs after each timestep into dictionary
topmodel.py
__init__
slauniainen/SpaFHy_v1
3
python
def __init__(self, pp, cellarea, cmask, flowacc, slope, S_initial=None, outputs=False): "\n sets up Topmodel for the catchment assuming homogenous\n effective soil depth 'm' and sat. hydr. conductivity 'ko'.\n This is the 'classic' version of Topmodel where hydrologic similarity index is TWI = log(a / tan(b)).\n \n Args:\n pp - parameter dict with keys:\n dt - timestep [s]\n ko - soil transmissivity at saturation [m/s]\n m - effective soil depth (m), i.e. decay factor of Ksat with depth\n twi_cutoff - max allowed twi -index\n so - initial catchment average saturation deficit (m)\n cmask - catchment mask, 1 = catchment_cell\n cellarea - gridcell area [m2]\n flowacc - flow accumulation per unit contour length (m)\n slope - local slope (deg)\n S_initial - initial storage deficit, overrides that in 'pp'\n outputs - True stores outputs after each timestep into dictionary\n " if (not S_initial): S_initial = pp['so'] self.dt = float(pp['dt']) self.cmask = cmask self.CellArea = cellarea dx = (cellarea ** 0.5) self.CatchmentArea = (np.size(cmask[(cmask == 1)]) * self.CellArea) self.a = (flowacc * cmask) self.slope = (slope * cmask) self.M = pp['m'] self.To = (pp['ko'] * self.dt) " \n local and catchment average hydrologic similarity indices (xi, X).\n Set xi > twi_cutoff equal to cutoff value to remove tail of twi-distribution.\n This concerns mainly the stream network cells. 'Outliers' in twi-distribution are\n problem for streamflow prediction\n " slope_rad = np.radians(self.slope) xi = np.log(((self.a / dx) / (np.tan(slope_rad) + eps))) clim = np.percentile(xi[(xi > 0)], pp['twi_cutoff']) xi[(xi > clim)] = clim self.xi = xi self.X = ((1.0 / self.CatchmentArea) * np.nansum((self.xi * self.CellArea))) self.Qo = (self.To * np.exp((- self.X))) s = self.local_s(S_initial) s[(s < 0)] = 0.0 self.S = np.nanmean(s) if outputs: self.results = {'S': [], 'Qb': [], 'Qr': [], 'Qt': [], 'qr': [], 'fsat': [], 'Mbe': [], 'R': []}
def __init__(self, pp, cellarea, cmask, flowacc, slope, S_initial=None, outputs=False): "\n sets up Topmodel for the catchment assuming homogenous\n effective soil depth 'm' and sat. hydr. conductivity 'ko'.\n This is the 'classic' version of Topmodel where hydrologic similarity index is TWI = log(a / tan(b)).\n \n Args:\n pp - parameter dict with keys:\n dt - timestep [s]\n ko - soil transmissivity at saturation [m/s]\n m - effective soil depth (m), i.e. decay factor of Ksat with depth\n twi_cutoff - max allowed twi -index\n so - initial catchment average saturation deficit (m)\n cmask - catchment mask, 1 = catchment_cell\n cellarea - gridcell area [m2]\n flowacc - flow accumulation per unit contour length (m)\n slope - local slope (deg)\n S_initial - initial storage deficit, overrides that in 'pp'\n outputs - True stores outputs after each timestep into dictionary\n " if (not S_initial): S_initial = pp['so'] self.dt = float(pp['dt']) self.cmask = cmask self.CellArea = cellarea dx = (cellarea ** 0.5) self.CatchmentArea = (np.size(cmask[(cmask == 1)]) * self.CellArea) self.a = (flowacc * cmask) self.slope = (slope * cmask) self.M = pp['m'] self.To = (pp['ko'] * self.dt) " \n local and catchment average hydrologic similarity indices (xi, X).\n Set xi > twi_cutoff equal to cutoff value to remove tail of twi-distribution.\n This concerns mainly the stream network cells. 
'Outliers' in twi-distribution are\n problem for streamflow prediction\n " slope_rad = np.radians(self.slope) xi = np.log(((self.a / dx) / (np.tan(slope_rad) + eps))) clim = np.percentile(xi[(xi > 0)], pp['twi_cutoff']) xi[(xi > clim)] = clim self.xi = xi self.X = ((1.0 / self.CatchmentArea) * np.nansum((self.xi * self.CellArea))) self.Qo = (self.To * np.exp((- self.X))) s = self.local_s(S_initial) s[(s < 0)] = 0.0 self.S = np.nanmean(s) if outputs: self.results = {'S': [], 'Qb': [], 'Qr': [], 'Qt': [], 'qr': [], 'fsat': [], 'Mbe': [], 'R': []}<|docstring|>sets up Topmodel for the catchment assuming homogenous effective soil depth 'm' and sat. hydr. conductivity 'ko'. This is the 'classic' version of Topmodel where hydrologic similarity index is TWI = log(a / tan(b)). Args: pp - parameter dict with keys: dt - timestep [s] ko - soil transmissivity at saturation [m/s] m - effective soil depth (m), i.e. decay factor of Ksat with depth twi_cutoff - max allowed twi -index so - initial catchment average saturation deficit (m) cmask - catchment mask, 1 = catchment_cell cellarea - gridcell area [m2] flowacc - flow accumulation per unit contour length (m) slope - local slope (deg) S_initial - initial storage deficit, overrides that in 'pp' outputs - True stores outputs after each timestep into dictionary<|endoftext|>
5a71ec90489f6743adaab9ffeba0fd80c74ce331a60e5e91e29a6e2e21f43e83
def local_s(self, Smean):
    """computes local storage deficit s [m] from catchment average

    Larger-than-average TWI (xi > X) lowers the local deficit.
    """
    deficit = Smean + self.M * (self.X - self.xi)
    return deficit
computes local storage deficit s [m] from catchment average
topmodel.py
local_s
slauniainen/SpaFHy_v1
3
python
def local_s(self, Smean): '\n \n ' s = (Smean + (self.M * (self.X - self.xi))) return s
def local_s(self, Smean): '\n \n ' s = (Smean + (self.M * (self.X - self.xi))) return s<|docstring|>computes local storage deficit s [m] from catchment average<|endoftext|>
80da63775dfa58475005f20f2b3f65b582733bf95033e2779d5bdcecffcae2d4
def subsurfaceflow(self):
    """subsurface flow to stream network (per unit catchment area)

    Exponential recession of baseflow with the average saturation
    deficit S; eps guards against a zero soil-depth parameter M.
    """
    return self.Qo * np.exp(-self.S / (self.M + eps))
subsurface flow to stream network (per unit catchment area)
topmodel.py
subsurfaceflow
slauniainen/SpaFHy_v1
3
python
def subsurfaceflow(self): Qb = (self.Qo * np.exp(((- self.S) / (self.M + eps)))) return Qb
def subsurfaceflow(self): Qb = (self.Qo * np.exp(((- self.S) / (self.M + eps)))) return Qb<|docstring|>subsurface flow to stream network (per unit catchment area)<|endoftext|>
e19eab9081151fde34acfd762c793e05f0d24f96a4cada68cf395692dbb4bb21
def run_timestep(self, R): '\n runs a timestep, updates saturation deficit and returns fluxes\n Args:\n R - recharge [m per unit catchment area] during timestep\n OUT:\n Qb - baseflow [m per unit area]\n Qr - returnflow [m per unit area]\n qr - distributed returnflow [m]\n fsat - saturated area fraction [-]\n Note: \n R is the mean drainage [m] from bucketgrid.\n ' So = self.S s = self.local_s(So) Qb = self.subsurfaceflow() S = ((So + Qb) - R) s = self.local_s(S) qr = (- s) qr[(qr < 0)] = 0.0 Qr = ((np.nansum(qr) * self.CellArea) / self.CatchmentArea) S = (S + Qr) self.S = S ix = np.where((s <= 0)) fsat = ((len(ix[0]) * self.CellArea) / self.CatchmentArea) del ix dS = (So - self.S) dF = ((R - Qb) - Qr) mbe = (dS - dF) if hasattr(self, 'results'): self.results['R'].append(R) self.results['S'].append(self.S) self.results['Qb'].append(Qb) self.results['Qr'].append(Qr) self.results['qr'].append(qr) self.results['fsat'].append(fsat) self.results['Mbe'].append(mbe) return (Qb, Qr, qr, fsat)
runs a timestep, updates saturation deficit and returns fluxes Args: R - recharge [m per unit catchment area] during timestep OUT: Qb - baseflow [m per unit area] Qr - returnflow [m per unit area] qr - distributed returnflow [m] fsat - saturated area fraction [-] Note: R is the mean drainage [m] from bucketgrid.
topmodel.py
run_timestep
slauniainen/SpaFHy_v1
3
python
def run_timestep(self, R): '\n runs a timestep, updates saturation deficit and returns fluxes\n Args:\n R - recharge [m per unit catchment area] during timestep\n OUT:\n Qb - baseflow [m per unit area]\n Qr - returnflow [m per unit area]\n qr - distributed returnflow [m]\n fsat - saturated area fraction [-]\n Note: \n R is the mean drainage [m] from bucketgrid.\n ' So = self.S s = self.local_s(So) Qb = self.subsurfaceflow() S = ((So + Qb) - R) s = self.local_s(S) qr = (- s) qr[(qr < 0)] = 0.0 Qr = ((np.nansum(qr) * self.CellArea) / self.CatchmentArea) S = (S + Qr) self.S = S ix = np.where((s <= 0)) fsat = ((len(ix[0]) * self.CellArea) / self.CatchmentArea) del ix dS = (So - self.S) dF = ((R - Qb) - Qr) mbe = (dS - dF) if hasattr(self, 'results'): self.results['R'].append(R) self.results['S'].append(self.S) self.results['Qb'].append(Qb) self.results['Qr'].append(Qr) self.results['qr'].append(qr) self.results['fsat'].append(fsat) self.results['Mbe'].append(mbe) return (Qb, Qr, qr, fsat)
def run_timestep(self, R): '\n runs a timestep, updates saturation deficit and returns fluxes\n Args:\n R - recharge [m per unit catchment area] during timestep\n OUT:\n Qb - baseflow [m per unit area]\n Qr - returnflow [m per unit area]\n qr - distributed returnflow [m]\n fsat - saturated area fraction [-]\n Note: \n R is the mean drainage [m] from bucketgrid.\n ' So = self.S s = self.local_s(So) Qb = self.subsurfaceflow() S = ((So + Qb) - R) s = self.local_s(S) qr = (- s) qr[(qr < 0)] = 0.0 Qr = ((np.nansum(qr) * self.CellArea) / self.CatchmentArea) S = (S + Qr) self.S = S ix = np.where((s <= 0)) fsat = ((len(ix[0]) * self.CellArea) / self.CatchmentArea) del ix dS = (So - self.S) dF = ((R - Qb) - Qr) mbe = (dS - dF) if hasattr(self, 'results'): self.results['R'].append(R) self.results['S'].append(self.S) self.results['Qb'].append(Qb) self.results['Qr'].append(Qr) self.results['qr'].append(qr) self.results['fsat'].append(fsat) self.results['Mbe'].append(mbe) return (Qb, Qr, qr, fsat)<|docstring|>runs a timestep, updates saturation deficit and returns fluxes Args: R - recharge [m per unit catchment area] during timestep OUT: Qb - baseflow [m per unit area] Qr - returnflow [m per unit area] qr - distributed returnflow [m] fsat - saturated area fraction [-] Note: R is the mean drainage [m] from bucketgrid.<|endoftext|>
83a9c24c54842ed4325e0ddfdf6a418d51d00131293abb4b9a5430770dc9d7a8
def train_classifiers(data_file, cache_dir=os.path.curdir): '\n This function...\n\n :param data_file:\n :param cache_dir:\n ' data = read_data(data_file) for matter in ['WM', 'GM']: classifier_file = os.path.join(cache_dir, 'Classifier', '{0}_matter_classifier.pkl'.format(matter)) if (not os.path.exists(os.path.dirname(classifier_file))): os.makedirs(os.path.dirname(classifier_file)) train_classifier(data['Features'].values, data['Truth'][matter].values, classifier_file)
This function... :param data_file: :param cache_dir:
AutoWorkup/logismosb/maclearn/train_classifiers.py
train_classifiers
pnlbwh/BRAINSTools
89
python
def train_classifiers(data_file, cache_dir=os.path.curdir): '\n This function...\n\n :param data_file:\n :param cache_dir:\n ' data = read_data(data_file) for matter in ['WM', 'GM']: classifier_file = os.path.join(cache_dir, 'Classifier', '{0}_matter_classifier.pkl'.format(matter)) if (not os.path.exists(os.path.dirname(classifier_file))): os.makedirs(os.path.dirname(classifier_file)) train_classifier(data['Features'].values, data['Truth'][matter].values, classifier_file)
def train_classifiers(data_file, cache_dir=os.path.curdir): '\n This function...\n\n :param data_file:\n :param cache_dir:\n ' data = read_data(data_file) for matter in ['WM', 'GM']: classifier_file = os.path.join(cache_dir, 'Classifier', '{0}_matter_classifier.pkl'.format(matter)) if (not os.path.exists(os.path.dirname(classifier_file))): os.makedirs(os.path.dirname(classifier_file)) train_classifier(data['Features'].values, data['Truth'][matter].values, classifier_file)<|docstring|>This function... :param data_file: :param cache_dir:<|endoftext|>
416c8464972a1a18c722fc5d148602bc6f81919c6f72f6cb9af1bf1deaac87bc
def initialize(self, **kwargs): 'Initialize.' super().initialize(impl=PyExt.EvPoll(), **kwargs)
Initialize.
tests/TestCompile.py
initialize
pzread/judge
25
python
def initialize(self, **kwargs): super().initialize(impl=PyExt.EvPoll(), **kwargs)
def initialize(self, **kwargs): super().initialize(impl=PyExt.EvPoll(), **kwargs)<|docstring|>Initialize.<|endoftext|>
b004a3ba23d10323c27ea8d1e14486b2b7c6187ee09640ed5eb92cf936d2cff5
@testing.gen_test(timeout=60) def test_stdchal(self): 'Test g++, A + B problems.' chal = StdChal(1, 'tests/testdata/testce.cpp', 'g++', 'diff', 'tests/testdata/res', ([{'in': 'tests/testdata/res/testdata/0.in', 'ans': 'tests/testdata/res/testdata/0.out', 'timelimit': 10000, 'memlimit': ((256 * 1024) * 1024)}] * 4), {}) result_list = (yield chal.start()) self.assertEqual(len(result_list), 4) for result in result_list: (_, _, status, verdict) = result self.assertNotEqual(verdict, '') self.assertEqual(status, STATUS_CE)
Test g++, A + B problems.
tests/TestCompile.py
test_stdchal
pzread/judge
25
python
@testing.gen_test(timeout=60) def test_stdchal(self): chal = StdChal(1, 'tests/testdata/testce.cpp', 'g++', 'diff', 'tests/testdata/res', ([{'in': 'tests/testdata/res/testdata/0.in', 'ans': 'tests/testdata/res/testdata/0.out', 'timelimit': 10000, 'memlimit': ((256 * 1024) * 1024)}] * 4), {}) result_list = (yield chal.start()) self.assertEqual(len(result_list), 4) for result in result_list: (_, _, status, verdict) = result self.assertNotEqual(verdict, ) self.assertEqual(status, STATUS_CE)
@testing.gen_test(timeout=60) def test_stdchal(self): chal = StdChal(1, 'tests/testdata/testce.cpp', 'g++', 'diff', 'tests/testdata/res', ([{'in': 'tests/testdata/res/testdata/0.in', 'ans': 'tests/testdata/res/testdata/0.out', 'timelimit': 10000, 'memlimit': ((256 * 1024) * 1024)}] * 4), {}) result_list = (yield chal.start()) self.assertEqual(len(result_list), 4) for result in result_list: (_, _, status, verdict) = result self.assertNotEqual(verdict, ) self.assertEqual(status, STATUS_CE)<|docstring|>Test g++, A + B problems.<|endoftext|>
0b3b990d4df27f2082700c87760ed3bdc6805923a817afd7eea9137ba1bf516e
def parse_events(csvfile): '\n Read list of events from the given CSV file.\n\n CSV columns: date,date_comment,end_time,category,comment\n\n The `date` is expected to appear only when the time crosses midnight. The\n `date_comment` is ignored. The `comment` column is optional. The logical\n `begin_time` of an event is the `end_time` of the event above it.\n\n Returns a list of Event objects. The events are in chronological order,\n cover a contiguous stretch of time, and do not overlap.\n ' reader = csv.reader(csvfile) header = reader.next() ref_header = ['date', 'date_comment', 'end_time', 'category', 'comment'] if (header != ref_header): raise ValueError('Unexpected header line: expected {} but found {}'.format(ref_header, header)) prev_row = parse_csv_line(2, reader.next()) if (not prev_row.date): raise ValueError('first row must specify date') if (prev_row.end_time is None): raise ValueError('first row must specify end_time') prev_event_end = datetime.combine(prev_row.date, prev_row.end_time) events = [] one_day = timedelta(days=1) for (lineno, fields) in enumerate(reader, 3): row = parse_csv_line(lineno, fields) if row.date: if (row.date != (prev_row.date + one_day)): raise ValueError('line {}: expected the day after, but got a different date'.format(lineno)) if (row.end_time >= prev_row.end_time): raise ValueError('line {}: got date but did not expect one'.format(lineno)) else: if (row.end_time <= prev_row.end_time): raise ValueError('line {}: expected date, but did not get one'.format(lineno)) row.date = prev_row.date if (row.end_time is None): raise ValueError('line {}: expected end_time'.format(lineno)) if (not row.category): raise ValueError('line {}: expected category'.format(lineno)) event = Event() event.begin = prev_event_end event.end = datetime.combine(row.date, row.end_time) event.category = row.category event.comment = row.comment events.append(event) prev_row = row prev_event_end = event.end return events
Read list of events from the given CSV file. CSV columns: date,date_comment,end_time,category,comment The `date` is expected to appear only when the time crosses midnight. The `date_comment` is ignored. The `comment` column is optional. The logical `begin_time` of an event is the `end_time` of the event above it. Returns a list of Event objects. The events are in chronological order, cover a contiguous stretch of time, and do not overlap.
tools/events.py
parse_events
cberzan/ticktockman
0
python
def parse_events(csvfile): '\n Read list of events from the given CSV file.\n\n CSV columns: date,date_comment,end_time,category,comment\n\n The `date` is expected to appear only when the time crosses midnight. The\n `date_comment` is ignored. The `comment` column is optional. The logical\n `begin_time` of an event is the `end_time` of the event above it.\n\n Returns a list of Event objects. The events are in chronological order,\n cover a contiguous stretch of time, and do not overlap.\n ' reader = csv.reader(csvfile) header = reader.next() ref_header = ['date', 'date_comment', 'end_time', 'category', 'comment'] if (header != ref_header): raise ValueError('Unexpected header line: expected {} but found {}'.format(ref_header, header)) prev_row = parse_csv_line(2, reader.next()) if (not prev_row.date): raise ValueError('first row must specify date') if (prev_row.end_time is None): raise ValueError('first row must specify end_time') prev_event_end = datetime.combine(prev_row.date, prev_row.end_time) events = [] one_day = timedelta(days=1) for (lineno, fields) in enumerate(reader, 3): row = parse_csv_line(lineno, fields) if row.date: if (row.date != (prev_row.date + one_day)): raise ValueError('line {}: expected the day after, but got a different date'.format(lineno)) if (row.end_time >= prev_row.end_time): raise ValueError('line {}: got date but did not expect one'.format(lineno)) else: if (row.end_time <= prev_row.end_time): raise ValueError('line {}: expected date, but did not get one'.format(lineno)) row.date = prev_row.date if (row.end_time is None): raise ValueError('line {}: expected end_time'.format(lineno)) if (not row.category): raise ValueError('line {}: expected category'.format(lineno)) event = Event() event.begin = prev_event_end event.end = datetime.combine(row.date, row.end_time) event.category = row.category event.comment = row.comment events.append(event) prev_row = row prev_event_end = event.end return events
def parse_events(csvfile): '\n Read list of events from the given CSV file.\n\n CSV columns: date,date_comment,end_time,category,comment\n\n The `date` is expected to appear only when the time crosses midnight. The\n `date_comment` is ignored. The `comment` column is optional. The logical\n `begin_time` of an event is the `end_time` of the event above it.\n\n Returns a list of Event objects. The events are in chronological order,\n cover a contiguous stretch of time, and do not overlap.\n ' reader = csv.reader(csvfile) header = reader.next() ref_header = ['date', 'date_comment', 'end_time', 'category', 'comment'] if (header != ref_header): raise ValueError('Unexpected header line: expected {} but found {}'.format(ref_header, header)) prev_row = parse_csv_line(2, reader.next()) if (not prev_row.date): raise ValueError('first row must specify date') if (prev_row.end_time is None): raise ValueError('first row must specify end_time') prev_event_end = datetime.combine(prev_row.date, prev_row.end_time) events = [] one_day = timedelta(days=1) for (lineno, fields) in enumerate(reader, 3): row = parse_csv_line(lineno, fields) if row.date: if (row.date != (prev_row.date + one_day)): raise ValueError('line {}: expected the day after, but got a different date'.format(lineno)) if (row.end_time >= prev_row.end_time): raise ValueError('line {}: got date but did not expect one'.format(lineno)) else: if (row.end_time <= prev_row.end_time): raise ValueError('line {}: expected date, but did not get one'.format(lineno)) row.date = prev_row.date if (row.end_time is None): raise ValueError('line {}: expected end_time'.format(lineno)) if (not row.category): raise ValueError('line {}: expected category'.format(lineno)) event = Event() event.begin = prev_event_end event.end = datetime.combine(row.date, row.end_time) event.category = row.category event.comment = row.comment events.append(event) prev_row = row prev_event_end = event.end return events<|docstring|>Read list of events from the 
given CSV file. CSV columns: date,date_comment,end_time,category,comment The `date` is expected to appear only when the time crosses midnight. The `date_comment` is ignored. The `comment` column is optional. The logical `begin_time` of an event is the `end_time` of the event above it. Returns a list of Event objects. The events are in chronological order, cover a contiguous stretch of time, and do not overlap.<|endoftext|>
993c221493672cbb6023a166835449126cf2a1f8f0243deff90954f17b8d6649
def parse_csv_line(lineno, fields): '\n Parse a CSV line (`fields` is a list of string) into a RawLine object.\n ' line = RawLine() if (len(fields) != 5): raise ValueError('line {}: expected {} fields, but found {}'.format(lineno, 5, len(fields))) if fields[0]: try: line.date = dateutil.parser.parse(fields[0]).date() except ValueError: raise ValueError('line {}: could not parse date'.format(lineno)) if fields[1]: line.date_comment = fields[1] if fields[2]: try: line.end_time = dateutil.parser.parse(fields[2]).time() except ValueError: raise ValueError('line {}: could not parse time'.format(lineno)) if fields[3]: line.category = fields[3] if fields[4]: line.comment = fields[4] return line
Parse a CSV line (`fields` is a list of string) into a RawLine object.
tools/events.py
parse_csv_line
cberzan/ticktockman
0
python
def parse_csv_line(lineno, fields): '\n \n ' line = RawLine() if (len(fields) != 5): raise ValueError('line {}: expected {} fields, but found {}'.format(lineno, 5, len(fields))) if fields[0]: try: line.date = dateutil.parser.parse(fields[0]).date() except ValueError: raise ValueError('line {}: could not parse date'.format(lineno)) if fields[1]: line.date_comment = fields[1] if fields[2]: try: line.end_time = dateutil.parser.parse(fields[2]).time() except ValueError: raise ValueError('line {}: could not parse time'.format(lineno)) if fields[3]: line.category = fields[3] if fields[4]: line.comment = fields[4] return line
def parse_csv_line(lineno, fields): '\n \n ' line = RawLine() if (len(fields) != 5): raise ValueError('line {}: expected {} fields, but found {}'.format(lineno, 5, len(fields))) if fields[0]: try: line.date = dateutil.parser.parse(fields[0]).date() except ValueError: raise ValueError('line {}: could not parse date'.format(lineno)) if fields[1]: line.date_comment = fields[1] if fields[2]: try: line.end_time = dateutil.parser.parse(fields[2]).time() except ValueError: raise ValueError('line {}: could not parse time'.format(lineno)) if fields[3]: line.category = fields[3] if fields[4]: line.comment = fields[4] return line<|docstring|>Parse a CSV line (`fields` is a list of string) into a RawLine object.<|endoftext|>
070b8a63c71e4b3ef19e5eeada26c8ec2b4871f71a1d3aa2c8ac697de1283e53
def grep_process_exist(instance_ids: List[str]=None, process_name: str=None, configuration: Configuration=None, secrets: Secrets=None) -> List[AWSResponse]: '\n Grep pid of process name\n\n Parameters\n ----------\n instance_ids : List[str]\n Filter the virtual machines. If the filter is omitted all machines in\n the subscription will be selected as potential chaos candidates.\n process_name : str\n Name of the process to be killed\n configuration : Configuration\n Chaostoolkit Configuration\n secrets : Secrets\n Chaostoolkit Secrets\n ' logger.debug("Start network_latency: configuration='{}', instance_ids='{}'".format(configuration, instance_ids)) response = [] try: for instance in instance_ids: param = dict() param['duration'] = '1' param['instance_id'] = instance param['process_name'] = process_name response.append(__simple_ssm_helper(instance_id=instance, configuration=configuration, secrets=secrets, action=GREP_PROCESS, parameters=param)) return response except Exception as x: raise FailedActivity('failed issuing a execute of shell script via AWS SSM {}'.format(str(x)))
Grep pid of process name Parameters ---------- instance_ids : List[str] Filter the virtual machines. If the filter is omitted all machines in the subscription will be selected as potential chaos candidates. process_name : str Name of the process to be killed configuration : Configuration Chaostoolkit Configuration secrets : Secrets Chaostoolkit Secrets
chaosaws/ec2_os/probes.py
grep_process_exist
xpdable/chaostoolkit-aws
0
python
def grep_process_exist(instance_ids: List[str]=None, process_name: str=None, configuration: Configuration=None, secrets: Secrets=None) -> List[AWSResponse]: '\n Grep pid of process name\n\n Parameters\n ----------\n instance_ids : List[str]\n Filter the virtual machines. If the filter is omitted all machines in\n the subscription will be selected as potential chaos candidates.\n process_name : str\n Name of the process to be killed\n configuration : Configuration\n Chaostoolkit Configuration\n secrets : Secrets\n Chaostoolkit Secrets\n ' logger.debug("Start network_latency: configuration='{}', instance_ids='{}'".format(configuration, instance_ids)) response = [] try: for instance in instance_ids: param = dict() param['duration'] = '1' param['instance_id'] = instance param['process_name'] = process_name response.append(__simple_ssm_helper(instance_id=instance, configuration=configuration, secrets=secrets, action=GREP_PROCESS, parameters=param)) return response except Exception as x: raise FailedActivity('failed issuing a execute of shell script via AWS SSM {}'.format(str(x)))
def grep_process_exist(instance_ids: List[str]=None, process_name: str=None, configuration: Configuration=None, secrets: Secrets=None) -> List[AWSResponse]: '\n Grep pid of process name\n\n Parameters\n ----------\n instance_ids : List[str]\n Filter the virtual machines. If the filter is omitted all machines in\n the subscription will be selected as potential chaos candidates.\n process_name : str\n Name of the process to be killed\n configuration : Configuration\n Chaostoolkit Configuration\n secrets : Secrets\n Chaostoolkit Secrets\n ' logger.debug("Start network_latency: configuration='{}', instance_ids='{}'".format(configuration, instance_ids)) response = [] try: for instance in instance_ids: param = dict() param['duration'] = '1' param['instance_id'] = instance param['process_name'] = process_name response.append(__simple_ssm_helper(instance_id=instance, configuration=configuration, secrets=secrets, action=GREP_PROCESS, parameters=param)) return response except Exception as x: raise FailedActivity('failed issuing a execute of shell script via AWS SSM {}'.format(str(x)))<|docstring|>Grep pid of process name Parameters ---------- instance_ids : List[str] Filter the virtual machines. If the filter is omitted all machines in the subscription will be selected as potential chaos candidates. process_name : str Name of the process to be killed configuration : Configuration Chaostoolkit Configuration secrets : Secrets Chaostoolkit Secrets<|endoftext|>
235d4974fe2975ba8f2f04c0592a88026e92e4ed3b7dd3d0a4ad19488c608d57
def _get_storage_subdict(param, json_storage_params): 'Get the JSON configuration subdictionary where the current parameter must be stored.' parent_section = param.owner_section sections_path = [] while parent_section: sections_path.insert(0, parent_section) parent_section = parent_section.parent_section current_dict = json_storage_params for section in sections_path: dict_key = ((section.key + '_settings') if (section.max_resources > 1) else section.key) section_key_dict = current_dict.get(dict_key, None) if (not section_key_dict): section_key_dict = OrderedDict({}) if (section.max_resources == 1): section_key_dict['label'] = section.label current_dict[dict_key] = section_key_dict current_dict = section_key_dict if (section.max_resources > 1): section_label_dict = current_dict.get(section.label, None) if (not section_label_dict): section_label_dict = OrderedDict({}) current_dict[section.label] = section_label_dict current_dict = section_label_dict return current_dict
Get the JSON configuration subdictionary where the current parameter must be stored.
cli/pcluster/config/json_param_types.py
_get_storage_subdict
gkao123/aws-parallelcluster
0
python
def _get_storage_subdict(param, json_storage_params): parent_section = param.owner_section sections_path = [] while parent_section: sections_path.insert(0, parent_section) parent_section = parent_section.parent_section current_dict = json_storage_params for section in sections_path: dict_key = ((section.key + '_settings') if (section.max_resources > 1) else section.key) section_key_dict = current_dict.get(dict_key, None) if (not section_key_dict): section_key_dict = OrderedDict({}) if (section.max_resources == 1): section_key_dict['label'] = section.label current_dict[dict_key] = section_key_dict current_dict = section_key_dict if (section.max_resources > 1): section_label_dict = current_dict.get(section.label, None) if (not section_label_dict): section_label_dict = OrderedDict({}) current_dict[section.label] = section_label_dict current_dict = section_label_dict return current_dict
def _get_storage_subdict(param, json_storage_params): parent_section = param.owner_section sections_path = [] while parent_section: sections_path.insert(0, parent_section) parent_section = parent_section.parent_section current_dict = json_storage_params for section in sections_path: dict_key = ((section.key + '_settings') if (section.max_resources > 1) else section.key) section_key_dict = current_dict.get(dict_key, None) if (not section_key_dict): section_key_dict = OrderedDict({}) if (section.max_resources == 1): section_key_dict['label'] = section.label current_dict[dict_key] = section_key_dict current_dict = section_key_dict if (section.max_resources > 1): section_label_dict = current_dict.get(section.label, None) if (not section_label_dict): section_label_dict = OrderedDict({}) current_dict[section.label] = section_label_dict current_dict = section_label_dict return current_dict<|docstring|>Get the JSON configuration subdictionary where the current parameter must be stored.<|endoftext|>
5e325356e1174293d07c193fbf616a7c544f7cb75d9866ca57da32106f4edbe6
def get_value_type(self): 'Return the type of the value managed by the Param.' return str
Return the type of the value managed by the Param.
cli/pcluster/config/json_param_types.py
get_value_type
gkao123/aws-parallelcluster
0
python
def get_value_type(self): return str
def get_value_type(self): return str<|docstring|>Return the type of the value managed by the Param.<|endoftext|>
7f29f67623417029c483e26ab1c951d3dc8a162d46a71b9d8a0313575109e000
def from_file(self, config_parser): 'Load the param value from configuration file.' section_name = utils.get_file_section_name(self.section_key, self.section_label) if config_parser.has_option(section_name, self.key): try: self.value = self._parse_value(config_parser, section_name) self._check_allowed_values() except ValueError: self.pcluster_config.error("Configuration parameter '{0}' must be of '{1}' type".format(self.key, self.get_value_type().__name__)) return self
Load the param value from configuration file.
cli/pcluster/config/json_param_types.py
from_file
gkao123/aws-parallelcluster
0
python
def from_file(self, config_parser): section_name = utils.get_file_section_name(self.section_key, self.section_label) if config_parser.has_option(section_name, self.key): try: self.value = self._parse_value(config_parser, section_name) self._check_allowed_values() except ValueError: self.pcluster_config.error("Configuration parameter '{0}' must be of '{1}' type".format(self.key, self.get_value_type().__name__)) return self
def from_file(self, config_parser): section_name = utils.get_file_section_name(self.section_key, self.section_label) if config_parser.has_option(section_name, self.key): try: self.value = self._parse_value(config_parser, section_name) self._check_allowed_values() except ValueError: self.pcluster_config.error("Configuration parameter '{0}' must be of '{1}' type".format(self.key, self.get_value_type().__name__)) return self<|docstring|>Load the param value from configuration file.<|endoftext|>
8c0dbfcf5396342964b2e8e030b9d2addbb6226a3306b7676ed19490844ab4ce
def from_storage(self, storage_params): 'Load the param from the provided Json storage params dict.' storage_value = _get_storage_subdict(self, storage_params.json_params).get(self.get_storage_key(), self.get_default_value()) self.value = storage_value return self
Load the param from the provided Json storage params dict.
cli/pcluster/config/json_param_types.py
from_storage
gkao123/aws-parallelcluster
0
python
def from_storage(self, storage_params): storage_value = _get_storage_subdict(self, storage_params.json_params).get(self.get_storage_key(), self.get_default_value()) self.value = storage_value return self
def from_storage(self, storage_params): storage_value = _get_storage_subdict(self, storage_params.json_params).get(self.get_storage_key(), self.get_default_value()) self.value = storage_value return self<|docstring|>Load the param from the provided Json storage params dict.<|endoftext|>
6589702076aeab0e082dbaf20f8761ebadda22e3dbc3137789c7a44d217a3ee3
def to_storage(self, storage_params): 'Store the param into the provided Json storage params dict.' _get_storage_subdict(self, storage_params.json_params)[self.get_storage_key()] = self.value
Store the param into the provided Json storage params dict.
cli/pcluster/config/json_param_types.py
to_storage
gkao123/aws-parallelcluster
0
python
def to_storage(self, storage_params): _get_storage_subdict(self, storage_params.json_params)[self.get_storage_key()] = self.value
def to_storage(self, storage_params): _get_storage_subdict(self, storage_params.json_params)[self.get_storage_key()] = self.value<|docstring|>Store the param into the provided Json storage params dict.<|endoftext|>
45ee91841520b31ce2b8e9dd0a3d15a16b22a19b6f0731d07cde1dd7c728e51e
def _parse_value(self, config_parser, section_name): 'Parse the value from config file, converting to the needed type for the specific param.' return config_parser.get(section_name, self.key)
Parse the value from config file, converting to the needed type for the specific param.
cli/pcluster/config/json_param_types.py
_parse_value
gkao123/aws-parallelcluster
0
python
def _parse_value(self, config_parser, section_name): return config_parser.get(section_name, self.key)
def _parse_value(self, config_parser, section_name): return config_parser.get(section_name, self.key)<|docstring|>Parse the value from config file, converting to the needed type for the specific param.<|endoftext|>
e373a9c87284b4166cc0702e05a5c8ff03f1ce83c8be1a168edcfc75bfa74166
def get_value_type(self): 'Return the type of the value managed by the Param.' return int
Return the type of the value managed by the Param.
cli/pcluster/config/json_param_types.py
get_value_type
gkao123/aws-parallelcluster
0
python
def get_value_type(self): return int
def get_value_type(self): return int<|docstring|>Return the type of the value managed by the Param.<|endoftext|>
b7a5fa7bdb66d323f794ef3333a3d3e8177dd8470efca659aa56d0794056a49b
def _parse_value(self, config_parser, section_name): 'Parse the value from config file, converting to the needed type for the specific param.' return config_parser.getint(section_name, self.key)
Parse the value from config file, converting to the needed type for the specific param.
cli/pcluster/config/json_param_types.py
_parse_value
gkao123/aws-parallelcluster
0
python
def _parse_value(self, config_parser, section_name): return config_parser.getint(section_name, self.key)
def _parse_value(self, config_parser, section_name): return config_parser.getint(section_name, self.key)<|docstring|>Parse the value from config file, converting to the needed type for the specific param.<|endoftext|>
6b8bb72536e80575ec7ad83742aedff1159b90e157a54dde698bbfef47a024fd
def get_value_type(self): 'Return the type of the value managed by the Param.' return bool
Return the type of the value managed by the Param.
cli/pcluster/config/json_param_types.py
get_value_type
gkao123/aws-parallelcluster
0
python
def get_value_type(self): return bool
def get_value_type(self): return bool<|docstring|>Return the type of the value managed by the Param.<|endoftext|>
116210e838859c118facdf38413b8fbb5f10a0f0eed1a17e702e3f99fbeb84cd
def _parse_value(self, config_parser, section_name): 'Parse the value from config file, converting to the needed type for the specific param.' return config_parser.getboolean(section_name, self.key)
Parse the value from config file, converting to the needed type for the specific param.
cli/pcluster/config/json_param_types.py
_parse_value
gkao123/aws-parallelcluster
0
python
def _parse_value(self, config_parser, section_name): return config_parser.getboolean(section_name, self.key)
def _parse_value(self, config_parser, section_name): return config_parser.getboolean(section_name, self.key)<|docstring|>Parse the value from config file, converting to the needed type for the specific param.<|endoftext|>
43cd5101d1e6918cb4ae413689406b62b80ae88e99a0ee34a46abeeab75da07e
def get_string_value(self): 'Convert internal representation into string.' return (self.get_default_value().lower() if (self.value is None) else str(bool(self.value)).lower())
Convert internal representation into string.
cli/pcluster/config/json_param_types.py
get_string_value
gkao123/aws-parallelcluster
0
python
def get_string_value(self): return (self.get_default_value().lower() if (self.value is None) else str(bool(self.value)).lower())
def get_string_value(self): return (self.get_default_value().lower() if (self.value is None) else str(bool(self.value)).lower())<|docstring|>Convert internal representation into string.<|endoftext|>
53a1fd5d01b00a7e2b76c2f5b6913956345c084a4c725f57af0b92721116bc5a
def get_value_type(self): 'Return the type of the value managed by the Param.' return float
Return the type of the value managed by the Param.
cli/pcluster/config/json_param_types.py
get_value_type
gkao123/aws-parallelcluster
0
python
def get_value_type(self): return float
def get_value_type(self): return float<|docstring|>Return the type of the value managed by the Param.<|endoftext|>
43b20d87cd1bfa947bda9be5063a149a6eff28bc248a7df7874a5fb3734760f4
def _parse_value(self, config_parser, section_name): 'Parse the value from config file, converting to the needed type for the specific param.' return config_parser.getfloat(section_name, self.key)
Parse the value from config file, converting to the needed type for the specific param.
cli/pcluster/config/json_param_types.py
_parse_value
gkao123/aws-parallelcluster
0
python
def _parse_value(self, config_parser, section_name): return config_parser.getfloat(section_name, self.key)
def _parse_value(self, config_parser, section_name): return config_parser.getfloat(section_name, self.key)<|docstring|>Parse the value from config file, converting to the needed type for the specific param.<|endoftext|>
4843b395a957d07f83a420d1b4b5962eb29651ea1334eb0ac6d497bef869c8fe
def refresh(self): 'Take the value from the scaledown_idletime cfn parameter.' self.value = self.owner_section.get_param('scaledown_idletime').value
Take the value from the scaledown_idletime cfn parameter.
cli/pcluster/config/json_param_types.py
refresh
gkao123/aws-parallelcluster
0
python
def refresh(self): self.value = self.owner_section.get_param('scaledown_idletime').value
def refresh(self): self.value = self.owner_section.get_param('scaledown_idletime').value<|docstring|>Take the value from the scaledown_idletime cfn parameter.<|endoftext|>
c9648bb0fcfda28c941f2060aaf54d8f6e9c8bae86d7e21556efab930e157178
def get_storage_key(self): 'Return the key by which the current param must be stored in the JSON.' return 'scaledown_idletime'
Return the key by which the current param must be stored in the JSON.
cli/pcluster/config/json_param_types.py
get_storage_key
gkao123/aws-parallelcluster
0
python
def get_storage_key(self): return 'scaledown_idletime'
def get_storage_key(self): return 'scaledown_idletime'<|docstring|>Return the key by which the current param must be stored in the JSON.<|endoftext|>
8cffd0d7c2e291800bec1eee7e51cec2970e0dfee69f00c7156649b83108d468
def refresh(self): 'Take the label of the first queue as value.' queue_settings_param = self.pcluster_config.get_section('cluster').get_param('queue_settings') if queue_settings_param: queue_settings_param_value = queue_settings_param.value if queue_settings_param_value: self.value = queue_settings_param_value.split(',')[0].strip()
Take the label of the first queue as value.
cli/pcluster/config/json_param_types.py
refresh
gkao123/aws-parallelcluster
0
python
def refresh(self): queue_settings_param = self.pcluster_config.get_section('cluster').get_param('queue_settings') if queue_settings_param: queue_settings_param_value = queue_settings_param.value if queue_settings_param_value: self.value = queue_settings_param_value.split(',')[0].strip()
def refresh(self): queue_settings_param = self.pcluster_config.get_section('cluster').get_param('queue_settings') if queue_settings_param: queue_settings_param_value = queue_settings_param.value if queue_settings_param_value: self.value = queue_settings_param_value.split(',')[0].strip()<|docstring|>Take the label of the first queue as value.<|endoftext|>
80bdbc376702f2dcccd275c23a0eaa80c3597b59d195ff5136f9ea6285170186
def to_storage(self, storage_params): '\n Convert the referred sections into the json storage representation.\n\n For each label, a subdictionary is created is generated under the param key, with the section label as key and\n the related section as value.\n Example of storage conversion:\n config file:\n queue_settings = queue1, queue2\n\n json config:\n "cluster": {\n ...\n "queue_settings": {\n "queue1": {...},\n "queue2": {...}\n }\n }\n ' if self.value: labels = self.referred_section_labels for label in labels: section = self.pcluster_config.get_section(self.referred_section_key, label.strip()) section.to_storage(storage_params)
Convert the referred sections into the json storage representation. For each label, a subdictionary is created is generated under the param key, with the section label as key and the related section as value. Example of storage conversion: config file: queue_settings = queue1, queue2 json config: "cluster": { ... "queue_settings": { "queue1": {...}, "queue2": {...} } }
cli/pcluster/config/json_param_types.py
to_storage
gkao123/aws-parallelcluster
0
python
def to_storage(self, storage_params): '\n Convert the referred sections into the json storage representation.\n\n For each label, a subdictionary is created is generated under the param key, with the section label as key and\n the related section as value.\n Example of storage conversion:\n config file:\n queue_settings = queue1, queue2\n\n json config:\n "cluster": {\n ...\n "queue_settings": {\n "queue1": {...},\n "queue2": {...}\n }\n }\n ' if self.value: labels = self.referred_section_labels for label in labels: section = self.pcluster_config.get_section(self.referred_section_key, label.strip()) section.to_storage(storage_params)
def to_storage(self, storage_params): '\n Convert the referred sections into the json storage representation.\n\n For each label, a subdictionary is created is generated under the param key, with the section label as key and\n the related section as value.\n Example of storage conversion:\n config file:\n queue_settings = queue1, queue2\n\n json config:\n "cluster": {\n ...\n "queue_settings": {\n "queue1": {...},\n "queue2": {...}\n }\n }\n ' if self.value: labels = self.referred_section_labels for label in labels: section = self.pcluster_config.get_section(self.referred_section_key, label.strip()) section.to_storage(storage_params)<|docstring|>Convert the referred sections into the json storage representation. For each label, a subdictionary is created is generated under the param key, with the section label as key and the related section as value. Example of storage conversion: config file: queue_settings = queue1, queue2 json config: "cluster": { ... "queue_settings": { "queue1": {...}, "queue2": {...} } }<|endoftext|>
b6ec49a23260896f2f1d58ef942b521a107039c8a442d04f72f6b21634464929
def from_storage(self, storage_params): '\n Load the referred sections from storage representation.\n\n This method rebuilds the settings labels by iterating through all subsections of the related section;\n then each subsection is loaded from storage as well.\n ' json_params = storage_params.json_params json_subdict = _get_storage_subdict(self, json_params).get(self.key) if json_subdict: labels = [label for label in json_subdict.keys()] self.value = ','.join(labels) for label in labels: section = self.referred_section_type(self.referred_section_definition, self.pcluster_config, section_label=label, parent_section=self.owner_section).from_storage(storage_params) self.pcluster_config.add_section(section) return self
Load the referred sections from storage representation. This method rebuilds the settings labels by iterating through all subsections of the related section; then each subsection is loaded from storage as well.
cli/pcluster/config/json_param_types.py
from_storage
gkao123/aws-parallelcluster
0
python
def from_storage(self, storage_params): '\n Load the referred sections from storage representation.\n\n This method rebuilds the settings labels by iterating through all subsections of the related section;\n then each subsection is loaded from storage as well.\n ' json_params = storage_params.json_params json_subdict = _get_storage_subdict(self, json_params).get(self.key) if json_subdict: labels = [label for label in json_subdict.keys()] self.value = ','.join(labels) for label in labels: section = self.referred_section_type(self.referred_section_definition, self.pcluster_config, section_label=label, parent_section=self.owner_section).from_storage(storage_params) self.pcluster_config.add_section(section) return self
def from_storage(self, storage_params): '\n Load the referred sections from storage representation.\n\n This method rebuilds the settings labels by iterating through all subsections of the related section;\n then each subsection is loaded from storage as well.\n ' json_params = storage_params.json_params json_subdict = _get_storage_subdict(self, json_params).get(self.key) if json_subdict: labels = [label for label in json_subdict.keys()] self.value = ','.join(labels) for label in labels: section = self.referred_section_type(self.referred_section_definition, self.pcluster_config, section_label=label, parent_section=self.owner_section).from_storage(storage_params) self.pcluster_config.add_section(section) return self<|docstring|>Load the referred sections from storage representation. This method rebuilds the settings labels by iterating through all subsections of the related section; then each subsection is loaded from storage as well.<|endoftext|>
89419186fe78827c451743135d6dd31ec77630f574c16d94c69dd0d8dc49a80f
def from_storage(self, storage_params): 'Load the section from storage params.' for (param_key, param_definition) in self.definition.get('params').items(): param_type = param_definition.get('type', Param) param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config, owner_section=self).from_storage(storage_params) self.add_param(param) return self
Load the section from storage params.
cli/pcluster/config/json_param_types.py
from_storage
gkao123/aws-parallelcluster
0
python
def from_storage(self, storage_params): for (param_key, param_definition) in self.definition.get('params').items(): param_type = param_definition.get('type', Param) param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config, owner_section=self).from_storage(storage_params) self.add_param(param) return self
def from_storage(self, storage_params): for (param_key, param_definition) in self.definition.get('params').items(): param_type = param_definition.get('type', Param) param = param_type(self.key, self.label, param_key, param_definition, self.pcluster_config, owner_section=self).from_storage(storage_params) self.add_param(param) return self<|docstring|>Load the section from storage params.<|endoftext|>
5b858eec0ad8efce95ca07e39c3e34cdbba0b9f55fc988b3ceaf5e08823054c1
def to_storage(self, storage_params): 'Write the section into storage params.' for (param_key, _) in self.definition.get('params').items(): param = self.get_param(param_key) if param: param.to_storage(storage_params)
Write the section into storage params.
cli/pcluster/config/json_param_types.py
to_storage
gkao123/aws-parallelcluster
0
python
def to_storage(self, storage_params): for (param_key, _) in self.definition.get('params').items(): param = self.get_param(param_key) if param: param.to_storage(storage_params)
def to_storage(self, storage_params): for (param_key, _) in self.definition.get('params').items(): param = self.get_param(param_key) if param: param.to_storage(storage_params)<|docstring|>Write the section into storage params.<|endoftext|>
c502505fefcdf63e328b7e2c7a8cc3b0d58c07c9486be5649c33166da830708d
def has_metadata(self): 'No metadata must be stored in CloudFormation for Json Sections.' return False
No metadata must be stored in CloudFormation for Json Sections.
cli/pcluster/config/json_param_types.py
has_metadata
gkao123/aws-parallelcluster
0
python
def has_metadata(self): return False
def has_metadata(self): return False<|docstring|>No metadata must be stored in CloudFormation for Json Sections.<|endoftext|>
1bebd8cbd2d522ecc28e9872c7f871a13e4cf51738b96f72a421cf2ba73f4698
def get_default_param_type(self): 'Get the default Param type managed by the Section type.' return JsonParam
Get the default Param type managed by the Section type.
cli/pcluster/config/json_param_types.py
get_default_param_type
gkao123/aws-parallelcluster
0
python
def get_default_param_type(self): return JsonParam
def get_default_param_type(self): return JsonParam<|docstring|>Get the default Param type managed by the Section type.<|endoftext|>
e54a82699cf2847f7014339133a08c5ba90e39778c8b20f6aff225573d182036
def refresh(self): 'Refresh the Json section.' self.refresh_section() super(JsonSection, self).refresh()
Refresh the Json section.
cli/pcluster/config/json_param_types.py
refresh
gkao123/aws-parallelcluster
0
python
def refresh(self): self.refresh_section() super(JsonSection, self).refresh()
def refresh(self): self.refresh_section() super(JsonSection, self).refresh()<|docstring|>Refresh the Json section.<|endoftext|>
ed1dedc5377a6292337d5b74674ea765d0fe33d3738d4cbfa7c95f21177dfc10
def refresh_section(self): 'Perform custom refresh operations.' pass
Perform custom refresh operations.
cli/pcluster/config/json_param_types.py
refresh_section
gkao123/aws-parallelcluster
0
python
def refresh_section(self): pass
def refresh_section(self): pass<|docstring|>Perform custom refresh operations.<|endoftext|>
c68472e7cf32e8a2912bf183686161919e938a4a7aca74741021673a63cd3d0f
def refresh_section(self): 'Take values of disable_hyperthreading and enable_efa from cluster section if not specified.' if (self.get_param_value('disable_hyperthreading') is None): cluster_disable_hyperthreading = self.pcluster_config.get_section('cluster').get_param_value('disable_hyperthreading') self.get_param('disable_hyperthreading').value = (cluster_disable_hyperthreading is True) if (self.get_param_value('enable_efa') is None): cluster_enable_efa = self.pcluster_config.get_section('cluster').get_param_value('enable_efa') self.get_param('enable_efa').value = (cluster_enable_efa == 'compute') compute_resource_labels = self.get_param('compute_resource_settings').referred_section_labels if compute_resource_labels: for compute_resource_label in compute_resource_labels: compute_resource_section = self.pcluster_config.get_section('compute_resource', compute_resource_label) self.refresh_compute_resource(compute_resource_section)
Take values of disable_hyperthreading and enable_efa from cluster section if not specified.
cli/pcluster/config/json_param_types.py
refresh_section
gkao123/aws-parallelcluster
0
python
def refresh_section(self): if (self.get_param_value('disable_hyperthreading') is None): cluster_disable_hyperthreading = self.pcluster_config.get_section('cluster').get_param_value('disable_hyperthreading') self.get_param('disable_hyperthreading').value = (cluster_disable_hyperthreading is True) if (self.get_param_value('enable_efa') is None): cluster_enable_efa = self.pcluster_config.get_section('cluster').get_param_value('enable_efa') self.get_param('enable_efa').value = (cluster_enable_efa == 'compute') compute_resource_labels = self.get_param('compute_resource_settings').referred_section_labels if compute_resource_labels: for compute_resource_label in compute_resource_labels: compute_resource_section = self.pcluster_config.get_section('compute_resource', compute_resource_label) self.refresh_compute_resource(compute_resource_section)
def refresh_section(self): if (self.get_param_value('disable_hyperthreading') is None): cluster_disable_hyperthreading = self.pcluster_config.get_section('cluster').get_param_value('disable_hyperthreading') self.get_param('disable_hyperthreading').value = (cluster_disable_hyperthreading is True) if (self.get_param_value('enable_efa') is None): cluster_enable_efa = self.pcluster_config.get_section('cluster').get_param_value('enable_efa') self.get_param('enable_efa').value = (cluster_enable_efa == 'compute') compute_resource_labels = self.get_param('compute_resource_settings').referred_section_labels if compute_resource_labels: for compute_resource_label in compute_resource_labels: compute_resource_section = self.pcluster_config.get_section('compute_resource', compute_resource_label) self.refresh_compute_resource(compute_resource_section)<|docstring|>Take values of disable_hyperthreading and enable_efa from cluster section if not specified.<|endoftext|>
9a76f177010ec4388a236506b81063b0b03470925748265d74bb9594792d2300
def refresh_compute_resource(self, compute_resource_section): '\n Populate additional settings needed for the linked compute resource like vcpus, gpus etc.\n\n These parameters are set according to queue settings and instance type capabilities.\n ' instance_type_param = compute_resource_section.get_param('instance_type') if instance_type_param.value: instance_type = utils.get_instance_type(instance_type_param.value) ht_disabled = self.get_param_value('disable_hyperthreading') vcpus_info = instance_type.get('VCpuInfo') default_threads_per_core = utils.get_default_threads_per_core(instance_type_param.value, instance_type) vcpus = ((vcpus_info.get('DefaultVCpus') // default_threads_per_core) if ht_disabled else vcpus_info.get('DefaultVCpus')) compute_resource_section.get_param('vcpus').value = vcpus gpu_info = instance_type.get('GpuInfo', None) if gpu_info: compute_resource_section.get_param('gpus').value = sum([gpus.get('Count') for gpus in gpu_info.get('Gpus')]) enable_efa = self.get_param_value('enable_efa') compute_resource_section.get_param('enable_efa').value = (enable_efa and instance_type.get('NetworkInfo').get('EfaSupported')) compute_resource_section.get_param('disable_hyperthreading').value = (ht_disabled and (default_threads_per_core != 1)) compute_resource_section.get_param('disable_hyperthreading_via_cpu_options').value = (compute_resource_section.get_param('disable_hyperthreading').value and utils.disable_ht_via_cpu_options(instance_type_param.value, utils.get_default_threads_per_core(instance_type_param.value, instance_type))) initial_count_param = compute_resource_section.get_param('initial_count') if (initial_count_param.value is None): initial_count_param.value = compute_resource_section.get_param_value('min_count') if (enable_efa and (not compute_resource_section.get_param_value('enable_efa'))): self.pcluster_config.warn("EFA was enabled on queue '{0}', but instance type '{1}' does not support EFA.".format(self.label, instance_type_param.value))
Populate additional settings needed for the linked compute resource like vcpus, gpus etc. These parameters are set according to queue settings and instance type capabilities.
cli/pcluster/config/json_param_types.py
refresh_compute_resource
gkao123/aws-parallelcluster
0
python
def refresh_compute_resource(self, compute_resource_section): '\n Populate additional settings needed for the linked compute resource like vcpus, gpus etc.\n\n These parameters are set according to queue settings and instance type capabilities.\n ' instance_type_param = compute_resource_section.get_param('instance_type') if instance_type_param.value: instance_type = utils.get_instance_type(instance_type_param.value) ht_disabled = self.get_param_value('disable_hyperthreading') vcpus_info = instance_type.get('VCpuInfo') default_threads_per_core = utils.get_default_threads_per_core(instance_type_param.value, instance_type) vcpus = ((vcpus_info.get('DefaultVCpus') // default_threads_per_core) if ht_disabled else vcpus_info.get('DefaultVCpus')) compute_resource_section.get_param('vcpus').value = vcpus gpu_info = instance_type.get('GpuInfo', None) if gpu_info: compute_resource_section.get_param('gpus').value = sum([gpus.get('Count') for gpus in gpu_info.get('Gpus')]) enable_efa = self.get_param_value('enable_efa') compute_resource_section.get_param('enable_efa').value = (enable_efa and instance_type.get('NetworkInfo').get('EfaSupported')) compute_resource_section.get_param('disable_hyperthreading').value = (ht_disabled and (default_threads_per_core != 1)) compute_resource_section.get_param('disable_hyperthreading_via_cpu_options').value = (compute_resource_section.get_param('disable_hyperthreading').value and utils.disable_ht_via_cpu_options(instance_type_param.value, utils.get_default_threads_per_core(instance_type_param.value, instance_type))) initial_count_param = compute_resource_section.get_param('initial_count') if (initial_count_param.value is None): initial_count_param.value = compute_resource_section.get_param_value('min_count') if (enable_efa and (not compute_resource_section.get_param_value('enable_efa'))): self.pcluster_config.warn("EFA was enabled on queue '{0}', but instance type '{1}' does not support EFA.".format(self.label, instance_type_param.value))
def refresh_compute_resource(self, compute_resource_section): '\n Populate additional settings needed for the linked compute resource like vcpus, gpus etc.\n\n These parameters are set according to queue settings and instance type capabilities.\n ' instance_type_param = compute_resource_section.get_param('instance_type') if instance_type_param.value: instance_type = utils.get_instance_type(instance_type_param.value) ht_disabled = self.get_param_value('disable_hyperthreading') vcpus_info = instance_type.get('VCpuInfo') default_threads_per_core = utils.get_default_threads_per_core(instance_type_param.value, instance_type) vcpus = ((vcpus_info.get('DefaultVCpus') // default_threads_per_core) if ht_disabled else vcpus_info.get('DefaultVCpus')) compute_resource_section.get_param('vcpus').value = vcpus gpu_info = instance_type.get('GpuInfo', None) if gpu_info: compute_resource_section.get_param('gpus').value = sum([gpus.get('Count') for gpus in gpu_info.get('Gpus')]) enable_efa = self.get_param_value('enable_efa') compute_resource_section.get_param('enable_efa').value = (enable_efa and instance_type.get('NetworkInfo').get('EfaSupported')) compute_resource_section.get_param('disable_hyperthreading').value = (ht_disabled and (default_threads_per_core != 1)) compute_resource_section.get_param('disable_hyperthreading_via_cpu_options').value = (compute_resource_section.get_param('disable_hyperthreading').value and utils.disable_ht_via_cpu_options(instance_type_param.value, utils.get_default_threads_per_core(instance_type_param.value, instance_type))) initial_count_param = compute_resource_section.get_param('initial_count') if (initial_count_param.value is None): initial_count_param.value = compute_resource_section.get_param_value('min_count') if (enable_efa and (not compute_resource_section.get_param_value('enable_efa'))): self.pcluster_config.warn("EFA was enabled on queue '{0}', but instance type '{1}' does not support EFA.".format(self.label, 
instance_type_param.value))<|docstring|>Populate additional settings needed for the linked compute resource like vcpus, gpus etc. These parameters are set according to queue settings and instance type capabilities.<|endoftext|>
9f44491025f58fbe8ac32262ecae4eeb91a1b9e15dc17c464eee6bcbab6a800f
def plot_static_mapper_graph(pipeline, data, layout='kamada_kawai', layout_dim=2, color_variable=None, node_color_statistic=None, color_by_columns_dropdown=False, clone_pipeline=True, n_sig_figs=3, node_scale=12, plotly_params=None, labels=None): 'Plot Mapper graphs without interactivity on pipeline parameters.\n\n The output graph is a rendition of the :class:`igraph.Graph` object\n computed by calling the :meth:`fit_transform` method of the\n :class:`~gtda.mapper.pipeline.MapperPipeline` instance `pipeline` on the\n input `data`. The graph\'s nodes correspond to subsets of elements (rows) in\n `data`; these subsets are clusters in larger portions of `data` called\n "pullback (cover) sets", which are computed by means of the `pipeline`\'s\n "filter function" and "cover" and correspond to the differently-colored\n portions in `this diagram <../../../../_images/mapper_pipeline.svg>`_.\n Two clusters from different pullback cover sets can overlap; if they do, an\n edge between the corresponding nodes in the graph may be drawn.\n\n Nodes are colored according to `color_variable` and `node_color_statistic`\n and are sized according to the number of elements they represent. The\n hovertext on each node displays, in this order:\n\n - a globally unique ID for the node, which can be used to retrieve\n node information from the :class:`igraph.Graph` object, see\n :class:`~gtda.mapper.nerve.Nerve`;\n - the label of the pullback (cover) set which the node\'s elements\n form a cluster in;\n - a label identifying the node as a cluster within that pullback set;\n - the number of elements of `data` associated with the node;\n - the value of the summary statistic which determines the node\'s color.\n\n Parameters\n ----------\n pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object\n Mapper pipeline to act onto data.\n\n data : array-like of shape (n_samples, n_features)\n Data used to generate the Mapper graph. 
Can be a pandas dataframe.\n\n layout : None, str or callable, optional, default: ``"kamada-kawai"``\n Layout algorithm for the graph. Can be any accepted value for the\n ``layout`` parameter in the :meth:`layout` method of\n :class:`igraph.Graph` [1]_.\n\n layout_dim : int, default: ``2``\n The number of dimensions for the layout. Can be 2 or 3.\n\n color_variable : object or None, optional, default: ``None``\n Specifies a feature of interest to be used, together with\n `node_color_statistic`, to determine node colors.\n\n 1. If a numpy array or pandas dataframe, it must have the same\n length as `data`.\n 2. ``None`` is equivalent to passing `data`.\n 3. If an object implementing :meth:`transform` or\n :meth:`fit_transform`, it is applied to `data` to generate the\n feature of interest.\n 4. If an index or string, or list of indices/strings, it is\n equivalent to selecting a column or subset of columns from\n `data`.\n\n node_color_statistic : None, callable, or ndarray of shape (n_nodes,) or (n_nodes, 1), optional, default: ``None``\n If a callable, node colors will be computed as summary statistics from\n the feature array ``Y`` determined by `color_variable` – specifically,\n the color of a node representing the entries of `data` whose row\n indices are in ``I`` will be ``node_color_statistic(Y[I])``. ``None``\n is equivalent to passing :func:`numpy.mean`. 
If a numpy array, it must\n have the same length as the number of nodes in the Mapper graph and its\n values are used directly as node colors (`color_variable` is ignored).\n\n color_by_columns_dropdown : bool, optional, default: ``False``\n If ``True``, a dropdown widget is generated which allows the user to\n color Mapper nodes according to any column in `data` (still using\n `node_color_statistic`) in addition to `color_variable`.\n\n clone_pipeline : bool, optional, default: ``True``\n If ``True``, the input `pipeline` is cloned before computing the\n Mapper graph to prevent unexpected side effects from in-place\n parameter updates.\n\n n_sig_figs : int or None, optional, default: ``3``\n If not ``None``, number of significant figures to which to round node\n summary statistics. If ``None``, no rounding is performed.\n\n node_scale : int or float, optional, default: ``12``\n Sets the scale factor used to determine the rendered size of the\n nodes. Increase for larger nodes. Implements a formula in the\n `Plotly documentation <https://plotly.com/python/bubble-charts/#scaling-the-size-of-bubble -charts>`_.\n\n plotly_params : dict or None, optional, default: ``None``\n Custom parameters to configure the plotly figure. 
Allowed keys are\n ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the\n corresponding values should be dictionaries containing keyword\n arguments as would be fed to the :meth:`update_traces` and\n :meth:`update_layout` methods of :class:`plotly.graph_objects.Figure`.\n\n Returns\n -------\n fig : :class:`plotly.graph_objects.Figure` object\n Figure representing the Mapper graph with appropriate node colouring\n and size.\n\n Examples\n --------\n Setting a colorscale different from the default one:\n\n >>> import numpy as np\n >>> np.random.seed(1)\n >>> from gtda.mapper import make_mapper_pipeline, plot_static_mapper_graph\n >>> pipeline = make_mapper_pipeline()\n >>> data = np.random.random((100, 3))\n >>> plotly_params = {"node_trace": {"marker_colorscale": "Blues"}}\n >>> fig = plot_static_mapper_graph(pipeline, data,\n ... plotly_params=plotly_params)\n\n Inspect the composition of a node with "Node ID" displayed as 0 in the\n hovertext:\n\n >>> graph = pipeline.fit_transform(data)\n >>> graph.vs[0]["node_elements"]\n array([70])\n\n See also\n --------\n plot_interactive_mapper_graph, gtda.mapper.make_mapper_pipeline\n\n References\n ----------\n .. 
[1] `igraph.Graph.layout\n <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_\n documentation.\n\n ' _pipeline = (clone(pipeline) if clone_pipeline else pipeline) is_node_color_statistic_ndarray = hasattr(node_color_statistic, 'dtype') if (not (is_node_color_statistic_ndarray or callable(node_color_statistic))): raise ValueError('`node_color_statistic` must be a callable or ndarray.') if is_node_color_statistic_ndarray: _node_color_statistic = node_color_statistic else: _node_color_statistic = (node_color_statistic or np.mean) is_data_dataframe = hasattr(data, 'columns') (edge_trace, node_trace, node_elements, node_colors_color_variable) = _calculate_graph_data(_pipeline, data, is_data_dataframe, layout, layout_dim, color_variable, _node_color_statistic, n_sig_figs, node_scale, labels) layout_options = go.Layout(**PLOT_OPTIONS_LAYOUT_DEFAULTS['common'], **PLOT_OPTIONS_LAYOUT_DEFAULTS[layout_dim]) fig = go.FigureWidget(data=[edge_trace, node_trace], layout=layout_options) _plotly_params = deepcopy(plotly_params) colorscale_for_hoverlabel = None if (layout_dim == 3): compute_hoverlabel_bgcolor = True if _plotly_params: if ('node_trace' in _plotly_params): if ('hoverlabel_bgcolor' in _plotly_params['node_trace']): fig.update_traces(hoverlabel_bgcolor=_plotly_params['node_trace'].pop('hoverlabel_bgcolor'), selector={'name': 'node_trace'}) compute_hoverlabel_bgcolor = False if ('marker_colorscale' in _plotly_params['node_trace']): fig.update_traces(marker_colorscale=_plotly_params['node_trace'].pop('marker_colorscale'), selector={'name': 'node_trace'}) if compute_hoverlabel_bgcolor: colorscale_for_hoverlabel = fig.data[1].marker.colorscale node_colors_color_variable = np.asarray(node_colors_color_variable) min_col = np.min(node_colors_color_variable) max_col = np.max(node_colors_color_variable) try: hoverlabel_bgcolor = _get_colors_for_vals(node_colors_color_variable, min_col, max_col, colorscale_for_hoverlabel) except Exception as e: if (e.args[0] == 
'This colorscale is not supported.'): warn('Data-dependent background hoverlabel colors cannot be generated with this choice of colorscale. Please use a standard hex- or RGB-formatted colorscale.', RuntimeWarning) else: warn('Something went wrong in generating data-dependent background hoverlabel colors. All background hoverlabel colors will be set to white.', RuntimeWarning) hoverlabel_bgcolor = 'white' colorscale_for_hoverlabel = None fig.update_traces(hoverlabel_bgcolor=hoverlabel_bgcolor, selector={'name': 'node_trace'}) if color_by_columns_dropdown: hovertext_color_variable = node_trace.hovertext column_color_buttons = _get_column_color_buttons(data, is_data_dataframe, node_elements, node_colors_color_variable, _node_color_statistic, hovertext_color_variable, colorscale_for_hoverlabel, n_sig_figs) column_color_buttons[0]['args'][0]['hoverlabel.bgcolor'] = [None, fig.data[1].hoverlabel.bgcolor] else: column_color_buttons = None button_height = 1.1 fig.update_layout(updatemenus=[go.layout.Updatemenu(buttons=column_color_buttons, direction='down', pad={'r': 10, 't': 10}, showactive=True, x=0.11, xanchor='left', y=button_height, yanchor='top')]) if color_by_columns_dropdown: fig.add_annotation(go.layout.Annotation(text='Color by:', x=0, xref='paper', y=(button_height - 0.045), yref='paper', align='left', showarrow=False)) if _plotly_params: for key in ['node_trace', 'edge_trace']: fig.update_traces(_plotly_params.pop(key, None), selector={'name': key}) fig.update_layout(_plotly_params.pop('layout', None)) return fig
Plot Mapper graphs without interactivity on pipeline parameters. The output graph is a rendition of the :class:`igraph.Graph` object computed by calling the :meth:`fit_transform` method of the :class:`~gtda.mapper.pipeline.MapperPipeline` instance `pipeline` on the input `data`. The graph's nodes correspond to subsets of elements (rows) in `data`; these subsets are clusters in larger portions of `data` called "pullback (cover) sets", which are computed by means of the `pipeline`'s "filter function" and "cover" and correspond to the differently-colored portions in `this diagram <../../../../_images/mapper_pipeline.svg>`_. Two clusters from different pullback cover sets can overlap; if they do, an edge between the corresponding nodes in the graph may be drawn. Nodes are colored according to `color_variable` and `node_color_statistic` and are sized according to the number of elements they represent. The hovertext on each node displays, in this order: - a globally unique ID for the node, which can be used to retrieve node information from the :class:`igraph.Graph` object, see :class:`~gtda.mapper.nerve.Nerve`; - the label of the pullback (cover) set which the node's elements form a cluster in; - a label identifying the node as a cluster within that pullback set; - the number of elements of `data` associated with the node; - the value of the summary statistic which determines the node's color. Parameters ---------- pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object Mapper pipeline to act onto data. data : array-like of shape (n_samples, n_features) Data used to generate the Mapper graph. Can be a pandas dataframe. layout : None, str or callable, optional, default: ``"kamada-kawai"`` Layout algorithm for the graph. Can be any accepted value for the ``layout`` parameter in the :meth:`layout` method of :class:`igraph.Graph` [1]_. layout_dim : int, default: ``2`` The number of dimensions for the layout. Can be 2 or 3. 
color_variable : object or None, optional, default: ``None`` Specifies a feature of interest to be used, together with `node_color_statistic`, to determine node colors. 1. If a numpy array or pandas dataframe, it must have the same length as `data`. 2. ``None`` is equivalent to passing `data`. 3. If an object implementing :meth:`transform` or :meth:`fit_transform`, it is applied to `data` to generate the feature of interest. 4. If an index or string, or list of indices/strings, it is equivalent to selecting a column or subset of columns from `data`. node_color_statistic : None, callable, or ndarray of shape (n_nodes,) or (n_nodes, 1), optional, default: ``None`` If a callable, node colors will be computed as summary statistics from the feature array ``Y`` determined by `color_variable` – specifically, the color of a node representing the entries of `data` whose row indices are in ``I`` will be ``node_color_statistic(Y[I])``. ``None`` is equivalent to passing :func:`numpy.mean`. If a numpy array, it must have the same length as the number of nodes in the Mapper graph and its values are used directly as node colors (`color_variable` is ignored). color_by_columns_dropdown : bool, optional, default: ``False`` If ``True``, a dropdown widget is generated which allows the user to color Mapper nodes according to any column in `data` (still using `node_color_statistic`) in addition to `color_variable`. clone_pipeline : bool, optional, default: ``True`` If ``True``, the input `pipeline` is cloned before computing the Mapper graph to prevent unexpected side effects from in-place parameter updates. n_sig_figs : int or None, optional, default: ``3`` If not ``None``, number of significant figures to which to round node summary statistics. If ``None``, no rounding is performed. node_scale : int or float, optional, default: ``12`` Sets the scale factor used to determine the rendered size of the nodes. Increase for larger nodes. 
Implements a formula in the `Plotly documentation <https://plotly.com/python/bubble-charts/#scaling-the-size-of-bubble -charts>`_. plotly_params : dict or None, optional, default: ``None`` Custom parameters to configure the plotly figure. Allowed keys are ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the corresponding values should be dictionaries containing keyword arguments as would be fed to the :meth:`update_traces` and :meth:`update_layout` methods of :class:`plotly.graph_objects.Figure`. Returns ------- fig : :class:`plotly.graph_objects.Figure` object Figure representing the Mapper graph with appropriate node colouring and size. Examples -------- Setting a colorscale different from the default one: >>> import numpy as np >>> np.random.seed(1) >>> from gtda.mapper import make_mapper_pipeline, plot_static_mapper_graph >>> pipeline = make_mapper_pipeline() >>> data = np.random.random((100, 3)) >>> plotly_params = {"node_trace": {"marker_colorscale": "Blues"}} >>> fig = plot_static_mapper_graph(pipeline, data, ... plotly_params=plotly_params) Inspect the composition of a node with "Node ID" displayed as 0 in the hovertext: >>> graph = pipeline.fit_transform(data) >>> graph.vs[0]["node_elements"] array([70]) See also -------- plot_interactive_mapper_graph, gtda.mapper.make_mapper_pipeline References ---------- .. [1] `igraph.Graph.layout <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_ documentation.
files/gtda/mapper/visualization.py
plot_static_mapper_graph
Snopoff/Mapper-experiments
0
python
def plot_static_mapper_graph(pipeline, data, layout='kamada_kawai', layout_dim=2, color_variable=None, node_color_statistic=None, color_by_columns_dropdown=False, clone_pipeline=True, n_sig_figs=3, node_scale=12, plotly_params=None, labels=None): 'Plot Mapper graphs without interactivity on pipeline parameters.\n\n The output graph is a rendition of the :class:`igraph.Graph` object\n computed by calling the :meth:`fit_transform` method of the\n :class:`~gtda.mapper.pipeline.MapperPipeline` instance `pipeline` on the\n input `data`. The graph\'s nodes correspond to subsets of elements (rows) in\n `data`; these subsets are clusters in larger portions of `data` called\n "pullback (cover) sets", which are computed by means of the `pipeline`\'s\n "filter function" and "cover" and correspond to the differently-colored\n portions in `this diagram <../../../../_images/mapper_pipeline.svg>`_.\n Two clusters from different pullback cover sets can overlap; if they do, an\n edge between the corresponding nodes in the graph may be drawn.\n\n Nodes are colored according to `color_variable` and `node_color_statistic`\n and are sized according to the number of elements they represent. The\n hovertext on each node displays, in this order:\n\n - a globally unique ID for the node, which can be used to retrieve\n node information from the :class:`igraph.Graph` object, see\n :class:`~gtda.mapper.nerve.Nerve`;\n - the label of the pullback (cover) set which the node\'s elements\n form a cluster in;\n - a label identifying the node as a cluster within that pullback set;\n - the number of elements of `data` associated with the node;\n - the value of the summary statistic which determines the node\'s color.\n\n Parameters\n ----------\n pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object\n Mapper pipeline to act onto data.\n\n data : array-like of shape (n_samples, n_features)\n Data used to generate the Mapper graph. 
Can be a pandas dataframe.\n\n layout : None, str or callable, optional, default: ``"kamada-kawai"``\n Layout algorithm for the graph. Can be any accepted value for the\n ``layout`` parameter in the :meth:`layout` method of\n :class:`igraph.Graph` [1]_.\n\n layout_dim : int, default: ``2``\n The number of dimensions for the layout. Can be 2 or 3.\n\n color_variable : object or None, optional, default: ``None``\n Specifies a feature of interest to be used, together with\n `node_color_statistic`, to determine node colors.\n\n 1. If a numpy array or pandas dataframe, it must have the same\n length as `data`.\n 2. ``None`` is equivalent to passing `data`.\n 3. If an object implementing :meth:`transform` or\n :meth:`fit_transform`, it is applied to `data` to generate the\n feature of interest.\n 4. If an index or string, or list of indices/strings, it is\n equivalent to selecting a column or subset of columns from\n `data`.\n\n node_color_statistic : None, callable, or ndarray of shape (n_nodes,) or (n_nodes, 1), optional, default: ``None``\n If a callable, node colors will be computed as summary statistics from\n the feature array ``Y`` determined by `color_variable` – specifically,\n the color of a node representing the entries of `data` whose row\n indices are in ``I`` will be ``node_color_statistic(Y[I])``. ``None``\n is equivalent to passing :func:`numpy.mean`. 
If a numpy array, it must\n have the same length as the number of nodes in the Mapper graph and its\n values are used directly as node colors (`color_variable` is ignored).\n\n color_by_columns_dropdown : bool, optional, default: ``False``\n If ``True``, a dropdown widget is generated which allows the user to\n color Mapper nodes according to any column in `data` (still using\n `node_color_statistic`) in addition to `color_variable`.\n\n clone_pipeline : bool, optional, default: ``True``\n If ``True``, the input `pipeline` is cloned before computing the\n Mapper graph to prevent unexpected side effects from in-place\n parameter updates.\n\n n_sig_figs : int or None, optional, default: ``3``\n If not ``None``, number of significant figures to which to round node\n summary statistics. If ``None``, no rounding is performed.\n\n node_scale : int or float, optional, default: ``12``\n Sets the scale factor used to determine the rendered size of the\n nodes. Increase for larger nodes. Implements a formula in the\n `Plotly documentation <https://plotly.com/python/bubble-charts/#scaling-the-size-of-bubble -charts>`_.\n\n plotly_params : dict or None, optional, default: ``None``\n Custom parameters to configure the plotly figure. 
Allowed keys are\n ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the\n corresponding values should be dictionaries containing keyword\n arguments as would be fed to the :meth:`update_traces` and\n :meth:`update_layout` methods of :class:`plotly.graph_objects.Figure`.\n\n Returns\n -------\n fig : :class:`plotly.graph_objects.Figure` object\n Figure representing the Mapper graph with appropriate node colouring\n and size.\n\n Examples\n --------\n Setting a colorscale different from the default one:\n\n >>> import numpy as np\n >>> np.random.seed(1)\n >>> from gtda.mapper import make_mapper_pipeline, plot_static_mapper_graph\n >>> pipeline = make_mapper_pipeline()\n >>> data = np.random.random((100, 3))\n >>> plotly_params = {"node_trace": {"marker_colorscale": "Blues"}}\n >>> fig = plot_static_mapper_graph(pipeline, data,\n ... plotly_params=plotly_params)\n\n Inspect the composition of a node with "Node ID" displayed as 0 in the\n hovertext:\n\n >>> graph = pipeline.fit_transform(data)\n >>> graph.vs[0]["node_elements"]\n array([70])\n\n See also\n --------\n plot_interactive_mapper_graph, gtda.mapper.make_mapper_pipeline\n\n References\n ----------\n .. 
[1] `igraph.Graph.layout\n <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_\n documentation.\n\n ' _pipeline = (clone(pipeline) if clone_pipeline else pipeline) is_node_color_statistic_ndarray = hasattr(node_color_statistic, 'dtype') if (not (is_node_color_statistic_ndarray or callable(node_color_statistic))): raise ValueError('`node_color_statistic` must be a callable or ndarray.') if is_node_color_statistic_ndarray: _node_color_statistic = node_color_statistic else: _node_color_statistic = (node_color_statistic or np.mean) is_data_dataframe = hasattr(data, 'columns') (edge_trace, node_trace, node_elements, node_colors_color_variable) = _calculate_graph_data(_pipeline, data, is_data_dataframe, layout, layout_dim, color_variable, _node_color_statistic, n_sig_figs, node_scale, labels) layout_options = go.Layout(**PLOT_OPTIONS_LAYOUT_DEFAULTS['common'], **PLOT_OPTIONS_LAYOUT_DEFAULTS[layout_dim]) fig = go.FigureWidget(data=[edge_trace, node_trace], layout=layout_options) _plotly_params = deepcopy(plotly_params) colorscale_for_hoverlabel = None if (layout_dim == 3): compute_hoverlabel_bgcolor = True if _plotly_params: if ('node_trace' in _plotly_params): if ('hoverlabel_bgcolor' in _plotly_params['node_trace']): fig.update_traces(hoverlabel_bgcolor=_plotly_params['node_trace'].pop('hoverlabel_bgcolor'), selector={'name': 'node_trace'}) compute_hoverlabel_bgcolor = False if ('marker_colorscale' in _plotly_params['node_trace']): fig.update_traces(marker_colorscale=_plotly_params['node_trace'].pop('marker_colorscale'), selector={'name': 'node_trace'}) if compute_hoverlabel_bgcolor: colorscale_for_hoverlabel = fig.data[1].marker.colorscale node_colors_color_variable = np.asarray(node_colors_color_variable) min_col = np.min(node_colors_color_variable) max_col = np.max(node_colors_color_variable) try: hoverlabel_bgcolor = _get_colors_for_vals(node_colors_color_variable, min_col, max_col, colorscale_for_hoverlabel) except Exception as e: if (e.args[0] == 
'This colorscale is not supported.'): warn('Data-dependent background hoverlabel colors cannot be generated with this choice of colorscale. Please use a standard hex- or RGB-formatted colorscale.', RuntimeWarning) else: warn('Something went wrong in generating data-dependent background hoverlabel colors. All background hoverlabel colors will be set to white.', RuntimeWarning) hoverlabel_bgcolor = 'white' colorscale_for_hoverlabel = None fig.update_traces(hoverlabel_bgcolor=hoverlabel_bgcolor, selector={'name': 'node_trace'}) if color_by_columns_dropdown: hovertext_color_variable = node_trace.hovertext column_color_buttons = _get_column_color_buttons(data, is_data_dataframe, node_elements, node_colors_color_variable, _node_color_statistic, hovertext_color_variable, colorscale_for_hoverlabel, n_sig_figs) column_color_buttons[0]['args'][0]['hoverlabel.bgcolor'] = [None, fig.data[1].hoverlabel.bgcolor] else: column_color_buttons = None button_height = 1.1 fig.update_layout(updatemenus=[go.layout.Updatemenu(buttons=column_color_buttons, direction='down', pad={'r': 10, 't': 10}, showactive=True, x=0.11, xanchor='left', y=button_height, yanchor='top')]) if color_by_columns_dropdown: fig.add_annotation(go.layout.Annotation(text='Color by:', x=0, xref='paper', y=(button_height - 0.045), yref='paper', align='left', showarrow=False)) if _plotly_params: for key in ['node_trace', 'edge_trace']: fig.update_traces(_plotly_params.pop(key, None), selector={'name': key}) fig.update_layout(_plotly_params.pop('layout', None)) return fig
def plot_static_mapper_graph(pipeline, data, layout='kamada_kawai', layout_dim=2, color_variable=None, node_color_statistic=None, color_by_columns_dropdown=False, clone_pipeline=True, n_sig_figs=3, node_scale=12, plotly_params=None, labels=None): 'Plot Mapper graphs without interactivity on pipeline parameters.\n\n The output graph is a rendition of the :class:`igraph.Graph` object\n computed by calling the :meth:`fit_transform` method of the\n :class:`~gtda.mapper.pipeline.MapperPipeline` instance `pipeline` on the\n input `data`. The graph\'s nodes correspond to subsets of elements (rows) in\n `data`; these subsets are clusters in larger portions of `data` called\n "pullback (cover) sets", which are computed by means of the `pipeline`\'s\n "filter function" and "cover" and correspond to the differently-colored\n portions in `this diagram <../../../../_images/mapper_pipeline.svg>`_.\n Two clusters from different pullback cover sets can overlap; if they do, an\n edge between the corresponding nodes in the graph may be drawn.\n\n Nodes are colored according to `color_variable` and `node_color_statistic`\n and are sized according to the number of elements they represent. The\n hovertext on each node displays, in this order:\n\n - a globally unique ID for the node, which can be used to retrieve\n node information from the :class:`igraph.Graph` object, see\n :class:`~gtda.mapper.nerve.Nerve`;\n - the label of the pullback (cover) set which the node\'s elements\n form a cluster in;\n - a label identifying the node as a cluster within that pullback set;\n - the number of elements of `data` associated with the node;\n - the value of the summary statistic which determines the node\'s color.\n\n Parameters\n ----------\n pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object\n Mapper pipeline to act onto data.\n\n data : array-like of shape (n_samples, n_features)\n Data used to generate the Mapper graph. 
Can be a pandas dataframe.\n\n layout : None, str or callable, optional, default: ``"kamada-kawai"``\n Layout algorithm for the graph. Can be any accepted value for the\n ``layout`` parameter in the :meth:`layout` method of\n :class:`igraph.Graph` [1]_.\n\n layout_dim : int, default: ``2``\n The number of dimensions for the layout. Can be 2 or 3.\n\n color_variable : object or None, optional, default: ``None``\n Specifies a feature of interest to be used, together with\n `node_color_statistic`, to determine node colors.\n\n 1. If a numpy array or pandas dataframe, it must have the same\n length as `data`.\n 2. ``None`` is equivalent to passing `data`.\n 3. If an object implementing :meth:`transform` or\n :meth:`fit_transform`, it is applied to `data` to generate the\n feature of interest.\n 4. If an index or string, or list of indices/strings, it is\n equivalent to selecting a column or subset of columns from\n `data`.\n\n node_color_statistic : None, callable, or ndarray of shape (n_nodes,) or (n_nodes, 1), optional, default: ``None``\n If a callable, node colors will be computed as summary statistics from\n the feature array ``Y`` determined by `color_variable` – specifically,\n the color of a node representing the entries of `data` whose row\n indices are in ``I`` will be ``node_color_statistic(Y[I])``. ``None``\n is equivalent to passing :func:`numpy.mean`. 
If a numpy array, it must\n have the same length as the number of nodes in the Mapper graph and its\n values are used directly as node colors (`color_variable` is ignored).\n\n color_by_columns_dropdown : bool, optional, default: ``False``\n If ``True``, a dropdown widget is generated which allows the user to\n color Mapper nodes according to any column in `data` (still using\n `node_color_statistic`) in addition to `color_variable`.\n\n clone_pipeline : bool, optional, default: ``True``\n If ``True``, the input `pipeline` is cloned before computing the\n Mapper graph to prevent unexpected side effects from in-place\n parameter updates.\n\n n_sig_figs : int or None, optional, default: ``3``\n If not ``None``, number of significant figures to which to round node\n summary statistics. If ``None``, no rounding is performed.\n\n node_scale : int or float, optional, default: ``12``\n Sets the scale factor used to determine the rendered size of the\n nodes. Increase for larger nodes. Implements a formula in the\n `Plotly documentation <https://plotly.com/python/bubble-charts/#scaling-the-size-of-bubble -charts>`_.\n\n plotly_params : dict or None, optional, default: ``None``\n Custom parameters to configure the plotly figure. 
Allowed keys are\n ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the\n corresponding values should be dictionaries containing keyword\n arguments as would be fed to the :meth:`update_traces` and\n :meth:`update_layout` methods of :class:`plotly.graph_objects.Figure`.\n\n Returns\n -------\n fig : :class:`plotly.graph_objects.Figure` object\n Figure representing the Mapper graph with appropriate node colouring\n and size.\n\n Examples\n --------\n Setting a colorscale different from the default one:\n\n >>> import numpy as np\n >>> np.random.seed(1)\n >>> from gtda.mapper import make_mapper_pipeline, plot_static_mapper_graph\n >>> pipeline = make_mapper_pipeline()\n >>> data = np.random.random((100, 3))\n >>> plotly_params = {"node_trace": {"marker_colorscale": "Blues"}}\n >>> fig = plot_static_mapper_graph(pipeline, data,\n ... plotly_params=plotly_params)\n\n Inspect the composition of a node with "Node ID" displayed as 0 in the\n hovertext:\n\n >>> graph = pipeline.fit_transform(data)\n >>> graph.vs[0]["node_elements"]\n array([70])\n\n See also\n --------\n plot_interactive_mapper_graph, gtda.mapper.make_mapper_pipeline\n\n References\n ----------\n .. 
[1] `igraph.Graph.layout\n <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_\n documentation.\n\n ' _pipeline = (clone(pipeline) if clone_pipeline else pipeline) is_node_color_statistic_ndarray = hasattr(node_color_statistic, 'dtype') if (not (is_node_color_statistic_ndarray or callable(node_color_statistic))): raise ValueError('`node_color_statistic` must be a callable or ndarray.') if is_node_color_statistic_ndarray: _node_color_statistic = node_color_statistic else: _node_color_statistic = (node_color_statistic or np.mean) is_data_dataframe = hasattr(data, 'columns') (edge_trace, node_trace, node_elements, node_colors_color_variable) = _calculate_graph_data(_pipeline, data, is_data_dataframe, layout, layout_dim, color_variable, _node_color_statistic, n_sig_figs, node_scale, labels) layout_options = go.Layout(**PLOT_OPTIONS_LAYOUT_DEFAULTS['common'], **PLOT_OPTIONS_LAYOUT_DEFAULTS[layout_dim]) fig = go.FigureWidget(data=[edge_trace, node_trace], layout=layout_options) _plotly_params = deepcopy(plotly_params) colorscale_for_hoverlabel = None if (layout_dim == 3): compute_hoverlabel_bgcolor = True if _plotly_params: if ('node_trace' in _plotly_params): if ('hoverlabel_bgcolor' in _plotly_params['node_trace']): fig.update_traces(hoverlabel_bgcolor=_plotly_params['node_trace'].pop('hoverlabel_bgcolor'), selector={'name': 'node_trace'}) compute_hoverlabel_bgcolor = False if ('marker_colorscale' in _plotly_params['node_trace']): fig.update_traces(marker_colorscale=_plotly_params['node_trace'].pop('marker_colorscale'), selector={'name': 'node_trace'}) if compute_hoverlabel_bgcolor: colorscale_for_hoverlabel = fig.data[1].marker.colorscale node_colors_color_variable = np.asarray(node_colors_color_variable) min_col = np.min(node_colors_color_variable) max_col = np.max(node_colors_color_variable) try: hoverlabel_bgcolor = _get_colors_for_vals(node_colors_color_variable, min_col, max_col, colorscale_for_hoverlabel) except Exception as e: if (e.args[0] == 
'This colorscale is not supported.'): warn('Data-dependent background hoverlabel colors cannot be generated with this choice of colorscale. Please use a standard hex- or RGB-formatted colorscale.', RuntimeWarning) else: warn('Something went wrong in generating data-dependent background hoverlabel colors. All background hoverlabel colors will be set to white.', RuntimeWarning) hoverlabel_bgcolor = 'white' colorscale_for_hoverlabel = None fig.update_traces(hoverlabel_bgcolor=hoverlabel_bgcolor, selector={'name': 'node_trace'}) if color_by_columns_dropdown: hovertext_color_variable = node_trace.hovertext column_color_buttons = _get_column_color_buttons(data, is_data_dataframe, node_elements, node_colors_color_variable, _node_color_statistic, hovertext_color_variable, colorscale_for_hoverlabel, n_sig_figs) column_color_buttons[0]['args'][0]['hoverlabel.bgcolor'] = [None, fig.data[1].hoverlabel.bgcolor] else: column_color_buttons = None button_height = 1.1 fig.update_layout(updatemenus=[go.layout.Updatemenu(buttons=column_color_buttons, direction='down', pad={'r': 10, 't': 10}, showactive=True, x=0.11, xanchor='left', y=button_height, yanchor='top')]) if color_by_columns_dropdown: fig.add_annotation(go.layout.Annotation(text='Color by:', x=0, xref='paper', y=(button_height - 0.045), yref='paper', align='left', showarrow=False)) if _plotly_params: for key in ['node_trace', 'edge_trace']: fig.update_traces(_plotly_params.pop(key, None), selector={'name': key}) fig.update_layout(_plotly_params.pop('layout', None)) return fig<|docstring|>Plot Mapper graphs without interactivity on pipeline parameters. The output graph is a rendition of the :class:`igraph.Graph` object computed by calling the :meth:`fit_transform` method of the :class:`~gtda.mapper.pipeline.MapperPipeline` instance `pipeline` on the input `data`. 
The graph's nodes correspond to subsets of elements (rows) in `data`; these subsets are clusters in larger portions of `data` called "pullback (cover) sets", which are computed by means of the `pipeline`'s "filter function" and "cover" and correspond to the differently-colored portions in `this diagram <../../../../_images/mapper_pipeline.svg>`_. Two clusters from different pullback cover sets can overlap; if they do, an edge between the corresponding nodes in the graph may be drawn. Nodes are colored according to `color_variable` and `node_color_statistic` and are sized according to the number of elements they represent. The hovertext on each node displays, in this order: - a globally unique ID for the node, which can be used to retrieve node information from the :class:`igraph.Graph` object, see :class:`~gtda.mapper.nerve.Nerve`; - the label of the pullback (cover) set which the node's elements form a cluster in; - a label identifying the node as a cluster within that pullback set; - the number of elements of `data` associated with the node; - the value of the summary statistic which determines the node's color. Parameters ---------- pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object Mapper pipeline to act onto data. data : array-like of shape (n_samples, n_features) Data used to generate the Mapper graph. Can be a pandas dataframe. layout : None, str or callable, optional, default: ``"kamada-kawai"`` Layout algorithm for the graph. Can be any accepted value for the ``layout`` parameter in the :meth:`layout` method of :class:`igraph.Graph` [1]_. layout_dim : int, default: ``2`` The number of dimensions for the layout. Can be 2 or 3. color_variable : object or None, optional, default: ``None`` Specifies a feature of interest to be used, together with `node_color_statistic`, to determine node colors. 1. If a numpy array or pandas dataframe, it must have the same length as `data`. 2. ``None`` is equivalent to passing `data`. 3. 
If an object implementing :meth:`transform` or :meth:`fit_transform`, it is applied to `data` to generate the feature of interest. 4. If an index or string, or list of indices/strings, it is equivalent to selecting a column or subset of columns from `data`. node_color_statistic : None, callable, or ndarray of shape (n_nodes,) or (n_nodes, 1), optional, default: ``None`` If a callable, node colors will be computed as summary statistics from the feature array ``Y`` determined by `color_variable` – specifically, the color of a node representing the entries of `data` whose row indices are in ``I`` will be ``node_color_statistic(Y[I])``. ``None`` is equivalent to passing :func:`numpy.mean`. If a numpy array, it must have the same length as the number of nodes in the Mapper graph and its values are used directly as node colors (`color_variable` is ignored). color_by_columns_dropdown : bool, optional, default: ``False`` If ``True``, a dropdown widget is generated which allows the user to color Mapper nodes according to any column in `data` (still using `node_color_statistic`) in addition to `color_variable`. clone_pipeline : bool, optional, default: ``True`` If ``True``, the input `pipeline` is cloned before computing the Mapper graph to prevent unexpected side effects from in-place parameter updates. n_sig_figs : int or None, optional, default: ``3`` If not ``None``, number of significant figures to which to round node summary statistics. If ``None``, no rounding is performed. node_scale : int or float, optional, default: ``12`` Sets the scale factor used to determine the rendered size of the nodes. Increase for larger nodes. Implements a formula in the `Plotly documentation <https://plotly.com/python/bubble-charts/#scaling-the-size-of-bubble -charts>`_. plotly_params : dict or None, optional, default: ``None`` Custom parameters to configure the plotly figure. 
Allowed keys are ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the corresponding values should be dictionaries containing keyword arguments as would be fed to the :meth:`update_traces` and :meth:`update_layout` methods of :class:`plotly.graph_objects.Figure`. Returns ------- fig : :class:`plotly.graph_objects.Figure` object Figure representing the Mapper graph with appropriate node colouring and size. Examples -------- Setting a colorscale different from the default one: >>> import numpy as np >>> np.random.seed(1) >>> from gtda.mapper import make_mapper_pipeline, plot_static_mapper_graph >>> pipeline = make_mapper_pipeline() >>> data = np.random.random((100, 3)) >>> plotly_params = {"node_trace": {"marker_colorscale": "Blues"}} >>> fig = plot_static_mapper_graph(pipeline, data, ... plotly_params=plotly_params) Inspect the composition of a node with "Node ID" displayed as 0 in the hovertext: >>> graph = pipeline.fit_transform(data) >>> graph.vs[0]["node_elements"] array([70]) See also -------- plot_interactive_mapper_graph, gtda.mapper.make_mapper_pipeline References ---------- .. [1] `igraph.Graph.layout <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_ documentation.<|endoftext|>
7aaee96966f8d04542554ee0b282b9a6e00b43175c4224a259fbacb3584aa123
def plot_interactive_mapper_graph(pipeline, data, layout='kamada_kawai',
                                  layout_dim=2, color_variable=None,
                                  node_color_statistic=None,
                                  clone_pipeline=True,
                                  color_by_columns_dropdown=False,
                                  n_sig_figs=3, node_scale=12,
                                  plotly_params=None):
    """Plot Mapper graphs with interactivity on pipeline parameters.

    Extends :func:`~gtda.mapper.visualization.plot_static_mapper_graph` by
    providing functionality to interactively update parameters from the
    cover, clustering and graph construction steps defined in `pipeline`.

    Parameters
    ----------
    pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object
        Mapper pipeline to act on data.

    data : array-like of shape (n_samples, n_features)
        Data used to generate the Mapper graph. Can be a pandas dataframe.

    layout : None, str or callable, optional, default: ``"kamada-kawai"``
        Layout algorithm for the graph. Can be any accepted value for the
        ``layout`` parameter in the :meth:`layout` method of
        :class:`igraph.Graph` [1]_.

    layout_dim : int, default: ``2``
        The number of dimensions for the layout. Can be 2 or 3.

    color_variable : object or None, optional, default: ``None``
        Specifies a feature of interest to be used, together with
        `node_color_statistic`, to determine node colors.

        1. If a numpy array or pandas dataframe, it must have the same
           length as `data`.
        2. ``None`` is equivalent to passing `data`.
        3. If an object implementing :meth:`transform` or
           :meth:`fit_transform`, it is applied to `data` to generate the
           feature of interest.
        4. If an index or string, or list of indices/strings, it is
           equivalent to selecting a column or subset of columns from
           `data`.

    node_color_statistic : callable or None, optional, default: ``None``
        If a callable, node colors will be computed as summary statistics
        from the feature array ``Y`` determined by `color_variable` --
        specifically, the color of a node representing the entries of
        `data` whose row indices are in ``I`` will be
        ``node_color_statistic(Y[I])``. ``None`` is equivalent to passing
        :func:`numpy.mean`.

    clone_pipeline : bool, optional, default: ``True``
        If ``True``, the input `pipeline` is cloned before computing the
        Mapper graph to prevent unexpected side effects from in-place
        parameter updates.

    color_by_columns_dropdown : bool, optional, default: ``False``
        If ``True``, a dropdown widget is generated which allows the user
        to color Mapper nodes according to any column in `data` (still
        using `node_color_statistic`) in addition to `color_variable`.

    n_sig_figs : int or None, optional, default: ``3``
        If not ``None``, number of significant figures to which to round
        node summary statistics. If ``None``, no rounding is performed.

    node_scale : int or float, optional, default: ``12``
        Sets the scale factor used to determine the rendered size of the
        nodes. Increase for larger nodes.

    plotly_params : dict or None, optional, default: ``None``
        Custom parameters to configure the plotly figure. Allowed keys are
        ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the
        corresponding values should be dictionaries containing keyword
        arguments as would be fed to the :meth:`update_traces` and
        :meth:`update_layout` methods of
        :class:`plotly.graph_objects.Figure`.

    Returns
    -------
    box : :class:`ipywidgets.VBox` object
        A box containing the following widgets: parameters of the
        clustering algorithm, parameters for the covering scheme, a Mapper
        graph arising from those parameters, a validation box, and logs.

    See also
    --------
    plot_static_mapper_graph, gtda.mapper.pipeline.make_mapper_pipeline

    References
    ----------
    .. [1] `igraph.Graph.layout
           <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_
           documentation.

    """
    # Work on a clone unless the caller explicitly wants in-place updates.
    _pipeline = clone(pipeline) if clone_pipeline else pipeline
    _node_color_statistic = node_color_statistic or np.mean

    def get_widgets_per_param(params):
        # Yield (parameter name, ipywidget) pairs, choosing the widget
        # type from the parameter's current value type. Note: bool must be
        # tested before int, since bool is a subclass of int.
        for key, value in params.items():
            style = {'description_width': 'initial'}
            description = key.split('__')[1] if '__' in key else key
            if isinstance(value, float):
                yield (key, widgets.FloatText(
                    value=value, step=0.05, description=description,
                    continuous_update=False, disabled=False,
                    layout=Layout(width='90%'), style=style))
            elif isinstance(value, bool):
                yield (key, widgets.ToggleButton(
                    value=value, description=description, disabled=False,
                    layout=Layout(width='90%'), style=style))
            elif isinstance(value, int):
                yield (key, widgets.IntText(
                    value=value, step=1, description=description,
                    continuous_update=False, disabled=False,
                    layout=Layout(width='90%'), style=style))
            elif isinstance(value, str):
                yield (key, widgets.Text(
                    value=value, description=description,
                    continuous_update=False, disabled=False,
                    layout=Layout(width='90%'), style=style))

    def on_parameter_change(change):
        # Push current widget values into the pipeline, recompute the
        # Mapper graph and refresh the figure in one batched update.
        handler.clear_logs()
        try:
            for param, value in cover_params.items():
                if isinstance(value, (int, float, str)):
                    _pipeline.set_params(
                        **{param: cover_params_widgets[param].value})
            for param, value in cluster_params.items():
                if isinstance(value, (int, float, str)):
                    _pipeline.set_params(
                        **{param: cluster_params_widgets[param].value})
            for param, value in nerve_params.items():
                if isinstance(value, (int, bool)):
                    _pipeline.set_params(
                        **{param: nerve_params_widgets[param].value})

            logger.info('Updating figure...')
            with fig.batch_update():
                (edge_trace, node_trace, node_elements,
                 node_colors_color_variable) = _calculate_graph_data(
                    _pipeline, data, is_data_dataframe, layout, layout_dim,
                    color_variable, _node_color_statistic, n_sig_figs,
                    node_scale)
                # In 3D, recompute data-dependent hoverlabel backgrounds.
                if colorscale_for_hoverlabel is not None:
                    node_colors_color_variable = np.asarray(
                        node_colors_color_variable)
                    min_col = np.min(node_colors_color_variable)
                    max_col = np.max(node_colors_color_variable)
                    hoverlabel_bgcolor = _get_colors_for_vals(
                        node_colors_color_variable, min_col, max_col,
                        colorscale_for_hoverlabel)
                    fig.update_traces(
                        hoverlabel_bgcolor=hoverlabel_bgcolor,
                        selector={'name': 'node_trace'})

                fig.update_traces(
                    x=node_trace.x, y=node_trace.y,
                    marker_color=node_trace.marker.color,
                    marker_size=node_trace.marker.size,
                    marker_sizeref=node_trace.marker.sizeref,
                    hovertext=node_trace.hovertext,
                    **({'z': node_trace.z} if layout_dim == 3 else dict()),
                    selector={'name': 'node_trace'})
                fig.update_traces(
                    x=edge_trace.x, y=edge_trace.y,
                    **({'z': edge_trace.z} if layout_dim == 3 else dict()),
                    selector={'name': 'edge_trace'})

                # Refresh the dropdown so column-based coloring reflects
                # the newly computed graph.
                if color_by_columns_dropdown:
                    hovertext_color_variable = node_trace.hovertext
                    column_color_buttons = _get_column_color_buttons(
                        data, is_data_dataframe, node_elements,
                        node_colors_color_variable, _node_color_statistic,
                        hovertext_color_variable,
                        colorscale_for_hoverlabel, n_sig_figs)
                    if colorscale_for_hoverlabel is not None:
                        column_color_buttons[0]['args'][0][
                            'hoverlabel.bgcolor'] = [None,
                                                     hoverlabel_bgcolor]
                else:
                    column_color_buttons = None

                button_height = 1.1
                fig.update_layout(
                    updatemenus=[go.layout.Updatemenu(
                        buttons=column_color_buttons, direction='down',
                        pad={'r': 10, 't': 10}, showactive=True, x=0.11,
                        xanchor='left', y=button_height, yanchor='top')])

            valid.value = True
        except Exception:
            # Surface the error in the logs widget and flag the parameters
            # as invalid instead of raising inside the widget callback.
            exception_data = traceback.format_exc().splitlines()
            logger.exception(exception_data[-1])
            valid.value = False

    def observe_widgets(params, params_widgets):
        # Register the change handler on every numeric/string widget.
        # (Parameter renamed from `widgets` to avoid shadowing the
        # ipywidgets module.)
        for param, value in params.items():
            if isinstance(value, (int, float, str)):
                params_widgets[param].observe(on_parameter_change,
                                              names='value')

    out = widgets.Output()

    @out.capture()
    def click_box(change):
        # Toggle display of the captured log records.
        if logs_box.value:
            out.clear_output()
            handler.show_logs()
        else:
            out.clear_output()

    # Logger redirecting records to the output widget.
    logger = logging.getLogger(__name__)
    handler = OutputWidgetHandler()
    handler.setFormatter(
        logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    # Split the pipeline's Mapper parameters by pipeline step and create
    # one widget per tunable parameter.
    mapper_params_items = _pipeline.get_mapper_params().items()
    cover_params = {key: value for key, value in mapper_params_items
                    if key.startswith('cover__')}
    cover_params_widgets = dict(get_widgets_per_param(cover_params))
    cluster_params = {key: value for key, value in mapper_params_items
                      if key.startswith('clusterer__')}
    cluster_params_widgets = dict(get_widgets_per_param(cluster_params))
    nerve_params = {key: value for key, value in mapper_params_items
                    if key in ['min_intersection', 'contract_nodes']}
    nerve_params_widgets = dict(get_widgets_per_param(nerve_params))

    valid = widgets.Valid(value=True, description='Valid parameters',
                          style={'description_width': '100px'})
    logs_box = widgets.Checkbox(description='Show logs: ', value=False,
                                indent=False)

    # Initial figure; the pipeline was already cloned above, so do not
    # clone again inside plot_static_mapper_graph.
    fig = plot_static_mapper_graph(
        _pipeline, data, layout=layout, layout_dim=layout_dim,
        color_variable=color_variable,
        node_color_statistic=_node_color_statistic,
        color_by_columns_dropdown=color_by_columns_dropdown,
        clone_pipeline=False, n_sig_figs=n_sig_figs, node_scale=node_scale,
        plotly_params=plotly_params)

    is_data_dataframe = hasattr(data, 'columns')

    # In 3D, keep the colorscale around so hoverlabel backgrounds can be
    # recomputed on each update -- unless the user pinned a fixed color.
    colorscale_for_hoverlabel = None
    if layout_dim == 3:
        is_bgcolor_not_white = fig.data[1].hoverlabel.bgcolor != 'white'
        user_hoverlabel_bgcolor = False
        if plotly_params:
            if 'node_trace' in plotly_params:
                if 'hoverlabel_bgcolor' in plotly_params['node_trace']:
                    user_hoverlabel_bgcolor = True
        if is_bgcolor_not_white and not user_hoverlabel_bgcolor:
            colorscale_for_hoverlabel = fig.data[1].marker.colorscale

    observe_widgets(cover_params, cover_params_widgets)
    observe_widgets(cluster_params, cluster_params_widgets)
    observe_widgets(nerve_params, nerve_params_widgets)
    logs_box.observe(click_box, names='value')

    # Assemble the final layout: parameter columns on top, then the
    # figure, the validity indicator and the log controls.
    cover_title = HTML(value='<b>Cover parameters</b>')
    container_cover = widgets.VBox(
        children=[cover_title] + list(cover_params_widgets.values()))
    container_cover.layout.align_items = 'center'
    cluster_title = HTML(value='<b>Clusterer parameters</b>')
    container_cluster = widgets.VBox(
        children=[cluster_title] + list(cluster_params_widgets.values()))
    container_cluster.layout.align_items = 'center'
    nerve_title = HTML(value='<b>Nerve parameters</b>')
    container_nerve = widgets.VBox(
        children=[nerve_title] + list(nerve_params_widgets.values()))
    container_nerve.layout.align_items = 'center'
    container_parameters = widgets.HBox(
        children=[container_cover, container_cluster, container_nerve])

    box = widgets.VBox([container_parameters, fig, valid, logs_box, out])
    return box
Plot Mapper graphs with interactivity on pipeline parameters. Extends :func:`~gtda.mapper.visualization.plot_static_mapper_graph` by providing functionality to interactively update parameters from the cover, clustering and graph construction steps defined in `pipeline`. Parameters ---------- pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object Mapper pipeline to act on data. data : array-like of shape (n_samples, n_features) Data used to generate the Mapper graph. Can be a pandas dataframe. layout : None, str or callable, optional, default: ``"kamada-kawai"`` Layout algorithm for the graph. Can be any accepted value for the ``layout`` parameter in the :meth:`layout` method of :class:`igraph.Graph` [1]_. layout_dim : int, default: ``2`` The number of dimensions for the layout. Can be 2 or 3. color_variable : object or None, optional, default: ``None`` Specifies a feature of interest to be used, together with `node_color_statistic`, to determine node colors. 1. If a numpy array or pandas dataframe, it must have the same length as `data`. 2. ``None`` is equivalent to passing `data`. 3. If an object implementing :meth:`transform` or :meth:`fit_transform`, it is applied to `data` to generate the feature of interest. 4. If an index or string, or list of indices/strings, it is equivalent to selecting a column or subset of columns from `data`. node_color_statistic : callable or None, optional, default: ``None`` If a callable, node colors will be computed as summary statistics from the feature array ``Y`` determined by `color_variable` – specifically, the color of a node representing the entries of `data` whose row indices are in ``I`` will be ``node_color_statistic(Y[I])``. ``None`` is equivalent to passing :func:`numpy.mean`. 
color_by_columns_dropdown : bool, optional, default: ``False`` If ``True``, a dropdown widget is generated which allows the user to color Mapper nodes according to any column in `data` (still using `node_color_statistic`) in addition to `color_variable`. clone_pipeline : bool, optional, default: ``True`` If ``True``, the input `pipeline` is cloned before computing the Mapper graph to prevent unexpected side effects from in-place parameter updates. n_sig_figs : int or None, optional, default: ``3`` If not ``None``, number of significant figures to which to round node summary statistics. If ``None``, no rounding is performed. node_scale : int or float, optional, default: ``12`` Sets the scale factor used to determine the rendered size of the nodes. Increase for larger nodes. Implements a formula in the `Plotly documentation <plotly.com/python/bubble-charts/#scaling-the-size-of-bubble-charts>`_. plotly_params : dict or None, optional, default: ``None`` Custom parameters to configure the plotly figure. Allowed keys are ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the corresponding values should be dictionaries containing keyword arguments as would be fed to the :meth:`update_traces` and :meth:`update_layout` methods of :class:`plotly.graph_objects.Figure`. Returns ------- box : :class:`ipywidgets.VBox` object A box containing the following widgets: parameters of the clustering algorithm, parameters for the covering scheme, a Mapper graph arising from those parameters, a validation box, and logs. See also -------- plot_static_mapper_graph, gtda.mapper.pipeline.make_mapper_pipeline References ---------- .. [1] `igraph.Graph.layout <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_ documentation.
files/gtda/mapper/visualization.py
plot_interactive_mapper_graph
Snopoff/Mapper-experiments
0
python
def plot_interactive_mapper_graph(pipeline, data, layout='kamada_kawai',
                                  layout_dim=2, color_variable=None,
                                  node_color_statistic=None,
                                  clone_pipeline=True,
                                  color_by_columns_dropdown=False,
                                  n_sig_figs=3, node_scale=12,
                                  plotly_params=None):
    """Plot Mapper graphs with interactivity on pipeline parameters.

    Behaves like :func:`~gtda.mapper.visualization.plot_static_mapper_graph`
    but additionally exposes widgets which let the user tune the cover,
    clustering and nerve parameters of `pipeline` and watch the Mapper
    graph update in place.

    Parameters
    ----------
    pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object
        Mapper pipeline to act on data.

    data : array-like of shape (n_samples, n_features)
        Data used to generate the Mapper graph. Can be a pandas dataframe.

    layout : None, str or callable, optional, default: ``"kamada-kawai"``
        Layout algorithm for the graph, as accepted by the ``layout``
        parameter of :meth:`igraph.Graph.layout`.

    layout_dim : int, default: ``2``
        Dimension of the graph layout. Can be 2 or 3.

    color_variable : object or None, optional, default: ``None``
        Feature of interest determining node colors together with
        `node_color_statistic`; accepts the same values as in
        :func:`~gtda.mapper.visualization.plot_static_mapper_graph`.

    node_color_statistic : callable or None, optional, default: ``None``
        Summary statistic applied to the `color_variable` values
        associated to each node. ``None`` is equivalent to passing
        :func:`numpy.mean`.

    clone_pipeline : bool, optional, default: ``True``
        If ``True``, work on a clone of `pipeline` so interactive
        parameter updates do not mutate the caller's object.

    color_by_columns_dropdown : bool, optional, default: ``False``
        If ``True``, add a dropdown which recolors nodes according to any
        column of `data` (still using `node_color_statistic`).

    n_sig_figs : int or None, optional, default: ``3``
        Number of significant figures used when rounding node summary
        statistics; ``None`` disables rounding.

    node_scale : int or float, optional, default: ``12``
        Scale factor for the rendered node sizes.

    plotly_params : dict or None, optional, default: ``None``
        Custom plotly configuration with allowed keys ``"node_trace"``,
        ``"edge_trace"`` and ``"layout"``, forwarded to the
        :meth:`update_traces` and :meth:`update_layout` methods of
        :class:`plotly.graph_objects.Figure`.

    Returns
    -------
    box : :class:`ipywidgets.VBox` object
        Box containing the parameter widgets, the Mapper figure, a
        validity indicator, a log-display toggle, and the log output.

    See also
    --------
    plot_static_mapper_graph, gtda.mapper.pipeline.make_mapper_pipeline

    """
    _pipeline = clone(pipeline) if clone_pipeline else pipeline
    _node_color_statistic = node_color_statistic or np.mean

    def _widgets_per_param(params):
        # Build one input widget per tunable parameter, dispatching on the
        # type of its current value (bool checked before int on purpose).
        for name, current in params.items():
            label = name.split('__')[1] if '__' in name else name
            common = dict(value=current, description=label, disabled=False,
                          layout=Layout(width='90%'),
                          style={'description_width': 'initial'})
            if isinstance(current, float):
                yield name, widgets.FloatText(
                    step=0.05, continuous_update=False, **common)
            elif isinstance(current, bool):
                yield name, widgets.ToggleButton(**common)
            elif isinstance(current, int):
                yield name, widgets.IntText(
                    step=1, continuous_update=False, **common)
            elif isinstance(current, str):
                yield name, widgets.Text(continuous_update=False, **common)

    def _on_change(change):
        # Apply widget values to the pipeline, then redraw the figure.
        handler.clear_logs()
        try:
            for group, group_widgets, allowed in (
                    (cover_params, cover_params_widgets,
                     (int, float, str)),
                    (cluster_params, cluster_params_widgets,
                     (int, float, str)),
                    (nerve_params, nerve_params_widgets, (int, bool))):
                for name, current in group.items():
                    if isinstance(current, allowed):
                        _pipeline.set_params(
                            **{name: group_widgets[name].value})

            logger.info('Updating figure...')
            with fig.batch_update():
                (edge_trace, node_trace, node_elements,
                 node_colors_color_variable) = _calculate_graph_data(
                    _pipeline, data, is_data_dataframe, layout, layout_dim,
                    color_variable, _node_color_statistic, n_sig_figs,
                    node_scale)
                if colorscale_for_hoverlabel is not None:
                    node_colors_color_variable = np.asarray(
                        node_colors_color_variable)
                    hoverlabel_bgcolor = _get_colors_for_vals(
                        node_colors_color_variable,
                        np.min(node_colors_color_variable),
                        np.max(node_colors_color_variable),
                        colorscale_for_hoverlabel)
                    fig.update_traces(
                        hoverlabel_bgcolor=hoverlabel_bgcolor,
                        selector={'name': 'node_trace'})

                extra_node = {'z': node_trace.z} if layout_dim == 3 else {}
                extra_edge = {'z': edge_trace.z} if layout_dim == 3 else {}
                fig.update_traces(
                    x=node_trace.x, y=node_trace.y,
                    marker_color=node_trace.marker.color,
                    marker_size=node_trace.marker.size,
                    marker_sizeref=node_trace.marker.sizeref,
                    hovertext=node_trace.hovertext, **extra_node,
                    selector={'name': 'node_trace'})
                fig.update_traces(x=edge_trace.x, y=edge_trace.y,
                                  **extra_edge,
                                  selector={'name': 'edge_trace'})

                if color_by_columns_dropdown:
                    column_color_buttons = _get_column_color_buttons(
                        data, is_data_dataframe, node_elements,
                        node_colors_color_variable, _node_color_statistic,
                        node_trace.hovertext, colorscale_for_hoverlabel,
                        n_sig_figs)
                    if colorscale_for_hoverlabel is not None:
                        column_color_buttons[0]['args'][0][
                            'hoverlabel.bgcolor'] = [None,
                                                     hoverlabel_bgcolor]
                else:
                    column_color_buttons = None

                fig.update_layout(
                    updatemenus=[go.layout.Updatemenu(
                        buttons=column_color_buttons, direction='down',
                        pad={'r': 10, 't': 10}, showactive=True, x=0.11,
                        xanchor='left', y=1.1, yanchor='top')])

            valid.value = True
        except Exception:
            # Report the failure in the log widget rather than raising
            # from inside the widget callback.
            logger.exception(traceback.format_exc().splitlines()[-1])
            valid.value = False

    def _observe(params, params_widgets):
        # Hook the refresh callback onto each numeric/string widget.
        for name, current in params.items():
            if isinstance(current, (int, float, str)):
                params_widgets[name].observe(_on_change, names='value')

    out = widgets.Output()

    @out.capture()
    def _toggle_logs(change):
        # Show or hide the captured log records.
        out.clear_output()
        if logs_box.value:
            handler.show_logs()

    # Route log records into the output widget.
    logger = logging.getLogger(__name__)
    handler = OutputWidgetHandler()
    handler.setFormatter(
        logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    # Group tunable Mapper parameters by pipeline step.
    mapper_params_items = _pipeline.get_mapper_params().items()
    cover_params = {k: v for k, v in mapper_params_items
                    if k.startswith('cover__')}
    cluster_params = {k: v for k, v in mapper_params_items
                      if k.startswith('clusterer__')}
    nerve_params = {k: v for k, v in mapper_params_items
                    if k in ['min_intersection', 'contract_nodes']}
    cover_params_widgets = dict(_widgets_per_param(cover_params))
    cluster_params_widgets = dict(_widgets_per_param(cluster_params))
    nerve_params_widgets = dict(_widgets_per_param(nerve_params))

    valid = widgets.Valid(value=True, description='Valid parameters',
                          style={'description_width': '100px'})
    logs_box = widgets.Checkbox(description='Show logs: ', value=False,
                                indent=False)

    # Initial figure; cloning already happened above, so skip it here.
    fig = plot_static_mapper_graph(
        _pipeline, data, layout=layout, layout_dim=layout_dim,
        color_variable=color_variable,
        node_color_statistic=_node_color_statistic,
        color_by_columns_dropdown=color_by_columns_dropdown,
        clone_pipeline=False, n_sig_figs=n_sig_figs,
        node_scale=node_scale, plotly_params=plotly_params)
    is_data_dataframe = hasattr(data, 'columns')

    # In 3D, remember the colorscale so hoverlabel backgrounds can be
    # regenerated on updates, unless the user fixed a background color.
    colorscale_for_hoverlabel = None
    if layout_dim == 3:
        user_set_bgcolor = bool(
            plotly_params
            and 'node_trace' in plotly_params
            and 'hoverlabel_bgcolor' in plotly_params['node_trace'])
        if fig.data[1].hoverlabel.bgcolor != 'white' \
                and not user_set_bgcolor:
            colorscale_for_hoverlabel = fig.data[1].marker.colorscale

    _observe(cover_params, cover_params_widgets)
    _observe(cluster_params, cluster_params_widgets)
    _observe(nerve_params, nerve_params_widgets)
    logs_box.observe(_toggle_logs, names='value')

    # Compose the widget layout: one titled column per parameter group.
    columns = []
    for title, group_widgets in (
            ('<b>Cover parameters</b>', cover_params_widgets),
            ('<b>Clusterer parameters</b>', cluster_params_widgets),
            ('<b>Nerve parameters</b>', nerve_params_widgets)):
        column = widgets.VBox(
            children=[HTML(value=title)] + list(group_widgets.values()))
        column.layout.align_items = 'center'
        columns.append(column)
    container_parameters = widgets.HBox(children=columns)

    return widgets.VBox([container_parameters, fig, valid, logs_box, out])
def plot_interactive_mapper_graph(pipeline, data, layout='kamada_kawai', layout_dim=2, color_variable=None, node_color_statistic=None, clone_pipeline=True, color_by_columns_dropdown=False, n_sig_figs=3, node_scale=12, plotly_params=None): 'Plot Mapper graphs with interactivity on pipeline parameters.\n\n Extends :func:`~gtda.mapper.visualization.plot_static_mapper_graph` by\n providing functionality to interactively update parameters from the cover,\n clustering and graph construction steps defined in `pipeline`.\n\n Parameters\n ----------\n pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object\n Mapper pipeline to act on to data.\n\n data : array-like of shape (n_samples, n_features)\n Data used to generate the Mapper graph. Can be a pandas dataframe.\n\n layout : None, str or callable, optional, default: ``"kamada-kawai"``\n Layout algorithm for the graph. Can be any accepted value for the\n ``layout`` parameter in the :meth:`layout` method of\n :class:`igraph.Graph` [1]_.\n\n layout_dim : int, default: ``2``\n The number of dimensions for the layout. Can be 2 or 3.\n\n color_variable : object or None, optional, default: ``None``\n Specifies a feature of interest to be used, together with\n `node_color_statistic`, to determine node colors.\n\n 1. If a numpy array or pandas dataframe, it must have the same\n length as `data`.\n 2. ``None`` is equivalent to passing `data`.\n 3. If an object implementing :meth:`transform` or\n :meth:`fit_transform`, it is applied to `data` to generate the\n feature of interest.\n 4. 
If an index or string, or list of indices/strings, it is\n equivalent to selecting a column or subset of columns from\n `data`.\n\n node_color_statistic : callable or None, optional, default: ``None``\n If a callable, node colors will be computed as summary statistics from\n the feature array ``Y`` determined by `color_variable` – specifically,\n the color of a node representing the entries of `data` whose row\n indices are in ``I`` will be ``node_color_statistic(Y[I])``. ``None``\n is equivalent to passing :func:`numpy.mean`.\n\n color_by_columns_dropdown : bool, optional, default: ``False``\n If ``True``, a dropdown widget is generated which allows the user to\n color Mapper nodes according to any column in `data` (still using\n `node_color_statistic`) in addition to `color_variable`.\n\n clone_pipeline : bool, optional, default: ``True``\n If ``True``, the input `pipeline` is cloned before computing the\n Mapper graph to prevent unexpected side effects from in-place\n parameter updates.\n\n n_sig_figs : int or None, optional, default: ``3``\n If not ``None``, number of significant figures to which to round node\n summary statistics. If ``None``, no rounding is performed.\n\n node_scale : int or float, optional, default: ``12``\n Sets the scale factor used to determine the rendered size of the\n nodes. Increase for larger nodes. Implements a formula in the\n `Plotly documentation <plotly.com/python/bubble-charts/#scaling-the-size-of-bubble-charts>`_.\n\n plotly_params : dict or None, optional, default: ``None``\n Custom parameters to configure the plotly figure. 
Allowed keys are\n ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the\n corresponding values should be dictionaries containing keyword\n arguments as would be fed to the :meth:`update_traces` and\n :meth:`update_layout` methods of :class:`plotly.graph_objects.Figure`.\n\n Returns\n -------\n box : :class:`ipywidgets.VBox` object\n A box containing the following widgets: parameters of the clustering\n algorithm, parameters for the covering scheme, a Mapper graph arising\n from those parameters, a validation box, and logs.\n\n See also\n --------\n plot_static_mapper_graph, gtda.mapper.pipeline.make_mapper_pipeline\n\n References\n ----------\n .. [1] `igraph.Graph.layout\n <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_\n documentation.\n\n ' _pipeline = (clone(pipeline) if clone_pipeline else pipeline) _node_color_statistic = (node_color_statistic or np.mean) def get_widgets_per_param(params): for (key, value) in params.items(): style = {'description_width': 'initial'} description = (key.split('__')[1] if ('__' in key) else key) if isinstance(value, float): (yield (key, widgets.FloatText(value=value, step=0.05, description=description, continuous_update=False, disabled=False, layout=Layout(width='90%'), style=style))) elif isinstance(value, bool): (yield (key, widgets.ToggleButton(value=value, description=description, disabled=False, layout=Layout(width='90%'), style=style))) elif isinstance(value, int): (yield (key, widgets.IntText(value=value, step=1, description=description, continuous_update=False, disabled=False, layout=Layout(width='90%'), style=style))) elif isinstance(value, str): (yield (key, widgets.Text(value=value, description=description, continuous_update=False, disabled=False, layout=Layout(width='90%'), style=style))) def on_parameter_change(change): handler.clear_logs() try: for (param, value) in cover_params.items(): if isinstance(value, (int, float, str)): _pipeline.set_params(**{param: 
cover_params_widgets[param].value}) for (param, value) in cluster_params.items(): if isinstance(value, (int, float, str)): _pipeline.set_params(**{param: cluster_params_widgets[param].value}) for (param, value) in nerve_params.items(): if isinstance(value, (int, bool)): _pipeline.set_params(**{param: nerve_params_widgets[param].value}) logger.info('Updating figure...') with fig.batch_update(): (edge_trace, node_trace, node_elements, node_colors_color_variable) = _calculate_graph_data(_pipeline, data, is_data_dataframe, layout, layout_dim, color_variable, _node_color_statistic, n_sig_figs, node_scale) if (colorscale_for_hoverlabel is not None): node_colors_color_variable = np.asarray(node_colors_color_variable) min_col = np.min(node_colors_color_variable) max_col = np.max(node_colors_color_variable) hoverlabel_bgcolor = _get_colors_for_vals(node_colors_color_variable, min_col, max_col, colorscale_for_hoverlabel) fig.update_traces(hoverlabel_bgcolor=hoverlabel_bgcolor, selector={'name': 'node_trace'}) fig.update_traces(x=node_trace.x, y=node_trace.y, marker_color=node_trace.marker.color, marker_size=node_trace.marker.size, marker_sizeref=node_trace.marker.sizeref, hovertext=node_trace.hovertext, **({'z': node_trace.z} if (layout_dim == 3) else dict()), selector={'name': 'node_trace'}) fig.update_traces(x=edge_trace.x, y=edge_trace.y, **({'z': edge_trace.z} if (layout_dim == 3) else dict()), selector={'name': 'edge_trace'}) if color_by_columns_dropdown: hovertext_color_variable = node_trace.hovertext column_color_buttons = _get_column_color_buttons(data, is_data_dataframe, node_elements, node_colors_color_variable, _node_color_statistic, hovertext_color_variable, colorscale_for_hoverlabel, n_sig_figs) if (colorscale_for_hoverlabel is not None): column_color_buttons[0]['args'][0]['hoverlabel.bgcolor'] = [None, hoverlabel_bgcolor] else: column_color_buttons = None button_height = 1.1 fig.update_layout(updatemenus=[go.layout.Updatemenu(buttons=column_color_buttons, 
direction='down', pad={'r': 10, 't': 10}, showactive=True, x=0.11, xanchor='left', y=button_height, yanchor='top')]) valid.value = True except Exception: exception_data = traceback.format_exc().splitlines() logger.exception(exception_data[(- 1)]) valid.value = False def observe_widgets(params, widgets): for (param, value) in params.items(): if isinstance(value, (int, float, str)): widgets[param].observe(on_parameter_change, names='value') out = widgets.Output() @out.capture() def click_box(change): if logs_box.value: out.clear_output() handler.show_logs() else: out.clear_output() logger = logging.getLogger(__name__) handler = OutputWidgetHandler() handler.setFormatter(logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s')) logger.addHandler(handler) logger.setLevel(logging.INFO) mapper_params_items = _pipeline.get_mapper_params().items() cover_params = {key: value for (key, value) in mapper_params_items if key.startswith('cover__')} cover_params_widgets = dict(get_widgets_per_param(cover_params)) cluster_params = {key: value for (key, value) in mapper_params_items if key.startswith('clusterer__')} cluster_params_widgets = dict(get_widgets_per_param(cluster_params)) nerve_params = {key: value for (key, value) in mapper_params_items if (key in ['min_intersection', 'contract_nodes'])} nerve_params_widgets = dict(get_widgets_per_param(nerve_params)) valid = widgets.Valid(value=True, description='Valid parameters', style={'description_width': '100px'}) logs_box = widgets.Checkbox(description='Show logs: ', value=False, indent=False) fig = plot_static_mapper_graph(_pipeline, data, layout=layout, layout_dim=layout_dim, color_variable=color_variable, node_color_statistic=_node_color_statistic, color_by_columns_dropdown=color_by_columns_dropdown, clone_pipeline=False, n_sig_figs=n_sig_figs, node_scale=node_scale, plotly_params=plotly_params) is_data_dataframe = hasattr(data, 'columns') colorscale_for_hoverlabel = None if (layout_dim == 3): is_bgcolor_not_white = 
(fig.data[1].hoverlabel.bgcolor != 'white') user_hoverlabel_bgcolor = False if plotly_params: if ('node_trace' in plotly_params): if ('hoverlabel_bgcolor' in plotly_params['node_trace']): user_hoverlabel_bgcolor = True if (is_bgcolor_not_white and (not user_hoverlabel_bgcolor)): colorscale_for_hoverlabel = fig.data[1].marker.colorscale observe_widgets(cover_params, cover_params_widgets) observe_widgets(cluster_params, cluster_params_widgets) observe_widgets(nerve_params, nerve_params_widgets) logs_box.observe(click_box, names='value') cover_title = HTML(value='<b>Cover parameters</b>') container_cover = widgets.VBox(children=([cover_title] + list(cover_params_widgets.values()))) container_cover.layout.align_items = 'center' cluster_title = HTML(value='<b>Clusterer parameters</b>') container_cluster = widgets.VBox(children=([cluster_title] + list(cluster_params_widgets.values()))) container_cluster.layout.align_items = 'center' nerve_title = HTML(value='<b>Nerve parameters</b>') container_nerve = widgets.VBox(children=([nerve_title] + list(nerve_params_widgets.values()))) container_nerve.layout.align_items = 'center' container_parameters = widgets.HBox(children=[container_cover, container_cluster, container_nerve]) box = widgets.VBox([container_parameters, fig, valid, logs_box, out]) return box<|docstring|>Plot Mapper graphs with interactivity on pipeline parameters. Extends :func:`~gtda.mapper.visualization.plot_static_mapper_graph` by providing functionality to interactively update parameters from the cover, clustering and graph construction steps defined in `pipeline`. Parameters ---------- pipeline : :class:`~gtda.mapper.pipeline.MapperPipeline` object Mapper pipeline to act on to data. data : array-like of shape (n_samples, n_features) Data used to generate the Mapper graph. Can be a pandas dataframe. layout : None, str or callable, optional, default: ``"kamada-kawai"`` Layout algorithm for the graph. 
Can be any accepted value for the ``layout`` parameter in the :meth:`layout` method of :class:`igraph.Graph` [1]_. layout_dim : int, default: ``2`` The number of dimensions for the layout. Can be 2 or 3. color_variable : object or None, optional, default: ``None`` Specifies a feature of interest to be used, together with `node_color_statistic`, to determine node colors. 1. If a numpy array or pandas dataframe, it must have the same length as `data`. 2. ``None`` is equivalent to passing `data`. 3. If an object implementing :meth:`transform` or :meth:`fit_transform`, it is applied to `data` to generate the feature of interest. 4. If an index or string, or list of indices/strings, it is equivalent to selecting a column or subset of columns from `data`. node_color_statistic : callable or None, optional, default: ``None`` If a callable, node colors will be computed as summary statistics from the feature array ``Y`` determined by `color_variable` – specifically, the color of a node representing the entries of `data` whose row indices are in ``I`` will be ``node_color_statistic(Y[I])``. ``None`` is equivalent to passing :func:`numpy.mean`. color_by_columns_dropdown : bool, optional, default: ``False`` If ``True``, a dropdown widget is generated which allows the user to color Mapper nodes according to any column in `data` (still using `node_color_statistic`) in addition to `color_variable`. clone_pipeline : bool, optional, default: ``True`` If ``True``, the input `pipeline` is cloned before computing the Mapper graph to prevent unexpected side effects from in-place parameter updates. n_sig_figs : int or None, optional, default: ``3`` If not ``None``, number of significant figures to which to round node summary statistics. If ``None``, no rounding is performed. node_scale : int or float, optional, default: ``12`` Sets the scale factor used to determine the rendered size of the nodes. Increase for larger nodes. 
Implements a formula in the `Plotly documentation <plotly.com/python/bubble-charts/#scaling-the-size-of-bubble-charts>`_. plotly_params : dict or None, optional, default: ``None`` Custom parameters to configure the plotly figure. Allowed keys are ``"node_trace"``, ``"edge_trace"`` and ``"layout"``, and the corresponding values should be dictionaries containing keyword arguments as would be fed to the :meth:`update_traces` and :meth:`update_layout` methods of :class:`plotly.graph_objects.Figure`. Returns ------- box : :class:`ipywidgets.VBox` object A box containing the following widgets: parameters of the clustering algorithm, parameters for the covering scheme, a Mapper graph arising from those parameters, a validation box, and logs. See also -------- plot_static_mapper_graph, gtda.mapper.pipeline.make_mapper_pipeline References ---------- .. [1] `igraph.Graph.layout <https://igraph.org/python/doc/igraph.Graph-class.html#layout>`_ documentation.<|endoftext|>
0fbaed30969b0b5f99acedd43d6b56c8add45b932873e7e2cb25f9cdbbf06004
def __init__(self, name=None, version=None, description=None, last_modified=None, alias_urn=None, additional_version_weights=None): 'UpdateVersionAliasResponse - a model defined in huaweicloud sdk' super(UpdateVersionAliasResponse, self).__init__() self._name = None self._version = None self._description = None self._last_modified = None self._alias_urn = None self._additional_version_weights = None self.discriminator = None if (name is not None): self.name = name if (version is not None): self.version = version if (description is not None): self.description = description if (last_modified is not None): self.last_modified = last_modified if (alias_urn is not None): self.alias_urn = alias_urn if (additional_version_weights is not None): self.additional_version_weights = additional_version_weights
UpdateVersionAliasResponse - a model defined in huaweicloud sdk
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
__init__
huaweicloud/huaweicloud-sdk-python-v3
64
python
def __init__(self, name=None, version=None, description=None, last_modified=None, alias_urn=None, additional_version_weights=None): super(UpdateVersionAliasResponse, self).__init__() self._name = None self._version = None self._description = None self._last_modified = None self._alias_urn = None self._additional_version_weights = None self.discriminator = None if (name is not None): self.name = name if (version is not None): self.version = version if (description is not None): self.description = description if (last_modified is not None): self.last_modified = last_modified if (alias_urn is not None): self.alias_urn = alias_urn if (additional_version_weights is not None): self.additional_version_weights = additional_version_weights
def __init__(self, name=None, version=None, description=None, last_modified=None, alias_urn=None, additional_version_weights=None): super(UpdateVersionAliasResponse, self).__init__() self._name = None self._version = None self._description = None self._last_modified = None self._alias_urn = None self._additional_version_weights = None self.discriminator = None if (name is not None): self.name = name if (version is not None): self.version = version if (description is not None): self.description = description if (last_modified is not None): self.last_modified = last_modified if (alias_urn is not None): self.alias_urn = alias_urn if (additional_version_weights is not None): self.additional_version_weights = additional_version_weights<|docstring|>UpdateVersionAliasResponse - a model defined in huaweicloud sdk<|endoftext|>
cfdd11d73c9048b197e6cc3929a7ff693a864cae3d6bd8818a92bf32e98232a9
@property def name(self): 'Gets the name of this UpdateVersionAliasResponse.\n\n 要获取的别名名称。\n\n :return: The name of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._name
Gets the name of this UpdateVersionAliasResponse. 要获取的别名名称。 :return: The name of this UpdateVersionAliasResponse. :rtype: str
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
name
huaweicloud/huaweicloud-sdk-python-v3
64
python
@property def name(self): 'Gets the name of this UpdateVersionAliasResponse.\n\n 要获取的别名名称。\n\n :return: The name of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._name
@property def name(self): 'Gets the name of this UpdateVersionAliasResponse.\n\n 要获取的别名名称。\n\n :return: The name of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._name<|docstring|>Gets the name of this UpdateVersionAliasResponse. 要获取的别名名称。 :return: The name of this UpdateVersionAliasResponse. :rtype: str<|endoftext|>
d06de1386930a687ae8c35ed76dc26829de6cb3cb4388116c28eab3382a888a5
@name.setter def name(self, name): 'Sets the name of this UpdateVersionAliasResponse.\n\n 要获取的别名名称。\n\n :param name: The name of this UpdateVersionAliasResponse.\n :type: str\n ' self._name = name
Sets the name of this UpdateVersionAliasResponse. 要获取的别名名称。 :param name: The name of this UpdateVersionAliasResponse. :type: str
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
name
huaweicloud/huaweicloud-sdk-python-v3
64
python
@name.setter def name(self, name): 'Sets the name of this UpdateVersionAliasResponse.\n\n 要获取的别名名称。\n\n :param name: The name of this UpdateVersionAliasResponse.\n :type: str\n ' self._name = name
@name.setter def name(self, name): 'Sets the name of this UpdateVersionAliasResponse.\n\n 要获取的别名名称。\n\n :param name: The name of this UpdateVersionAliasResponse.\n :type: str\n ' self._name = name<|docstring|>Sets the name of this UpdateVersionAliasResponse. 要获取的别名名称。 :param name: The name of this UpdateVersionAliasResponse. :type: str<|endoftext|>
5438cc307a13a24c6528bbc4af5b7b5930a9af3994c6411b4ca3580a38a7a035
@property def version(self): 'Gets the version of this UpdateVersionAliasResponse.\n\n 别名对应的版本名称。\n\n :return: The version of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._version
Gets the version of this UpdateVersionAliasResponse. 别名对应的版本名称。 :return: The version of this UpdateVersionAliasResponse. :rtype: str
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
version
huaweicloud/huaweicloud-sdk-python-v3
64
python
@property def version(self): 'Gets the version of this UpdateVersionAliasResponse.\n\n 别名对应的版本名称。\n\n :return: The version of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._version
@property def version(self): 'Gets the version of this UpdateVersionAliasResponse.\n\n 别名对应的版本名称。\n\n :return: The version of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._version<|docstring|>Gets the version of this UpdateVersionAliasResponse. 别名对应的版本名称。 :return: The version of this UpdateVersionAliasResponse. :rtype: str<|endoftext|>
ac61e395fda98f9ed6788a5d4f490c7b0e1d70c4bc4154b98fb7c1d1010e677a
@version.setter def version(self, version): 'Sets the version of this UpdateVersionAliasResponse.\n\n 别名对应的版本名称。\n\n :param version: The version of this UpdateVersionAliasResponse.\n :type: str\n ' self._version = version
Sets the version of this UpdateVersionAliasResponse. 别名对应的版本名称。 :param version: The version of this UpdateVersionAliasResponse. :type: str
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
version
huaweicloud/huaweicloud-sdk-python-v3
64
python
@version.setter def version(self, version): 'Sets the version of this UpdateVersionAliasResponse.\n\n 别名对应的版本名称。\n\n :param version: The version of this UpdateVersionAliasResponse.\n :type: str\n ' self._version = version
@version.setter def version(self, version): 'Sets the version of this UpdateVersionAliasResponse.\n\n 别名对应的版本名称。\n\n :param version: The version of this UpdateVersionAliasResponse.\n :type: str\n ' self._version = version<|docstring|>Sets the version of this UpdateVersionAliasResponse. 别名对应的版本名称。 :param version: The version of this UpdateVersionAliasResponse. :type: str<|endoftext|>
2a74d1eece5594492de2342ade267f3e7539e623b164cf7f6bb2b82564dcad86
@property def description(self): 'Gets the description of this UpdateVersionAliasResponse.\n\n 别名描述信息。\n\n :return: The description of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._description
Gets the description of this UpdateVersionAliasResponse. 别名描述信息。 :return: The description of this UpdateVersionAliasResponse. :rtype: str
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
description
huaweicloud/huaweicloud-sdk-python-v3
64
python
@property def description(self): 'Gets the description of this UpdateVersionAliasResponse.\n\n 别名描述信息。\n\n :return: The description of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._description
@property def description(self): 'Gets the description of this UpdateVersionAliasResponse.\n\n 别名描述信息。\n\n :return: The description of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._description<|docstring|>Gets the description of this UpdateVersionAliasResponse. 别名描述信息。 :return: The description of this UpdateVersionAliasResponse. :rtype: str<|endoftext|>
1529cc1bc90f47fa92f7e96e4b40a5e2655083e494ef6bc10299c28f5cbfca68
@description.setter def description(self, description): 'Sets the description of this UpdateVersionAliasResponse.\n\n 别名描述信息。\n\n :param description: The description of this UpdateVersionAliasResponse.\n :type: str\n ' self._description = description
Sets the description of this UpdateVersionAliasResponse. 别名描述信息。 :param description: The description of this UpdateVersionAliasResponse. :type: str
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
description
huaweicloud/huaweicloud-sdk-python-v3
64
python
@description.setter def description(self, description): 'Sets the description of this UpdateVersionAliasResponse.\n\n 别名描述信息。\n\n :param description: The description of this UpdateVersionAliasResponse.\n :type: str\n ' self._description = description
@description.setter def description(self, description): 'Sets the description of this UpdateVersionAliasResponse.\n\n 别名描述信息。\n\n :param description: The description of this UpdateVersionAliasResponse.\n :type: str\n ' self._description = description<|docstring|>Sets the description of this UpdateVersionAliasResponse. 别名描述信息。 :param description: The description of this UpdateVersionAliasResponse. :type: str<|endoftext|>
7338c036cfa76ce19b655028cf70abb49054a6d1afdf40d440fee391d6c5ee46
@property def last_modified(self): 'Gets the last_modified of this UpdateVersionAliasResponse.\n\n 别名最后修改时间。\n\n :return: The last_modified of this UpdateVersionAliasResponse.\n :rtype: datetime\n ' return self._last_modified
Gets the last_modified of this UpdateVersionAliasResponse. 别名最后修改时间。 :return: The last_modified of this UpdateVersionAliasResponse. :rtype: datetime
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
last_modified
huaweicloud/huaweicloud-sdk-python-v3
64
python
@property def last_modified(self): 'Gets the last_modified of this UpdateVersionAliasResponse.\n\n 别名最后修改时间。\n\n :return: The last_modified of this UpdateVersionAliasResponse.\n :rtype: datetime\n ' return self._last_modified
@property def last_modified(self): 'Gets the last_modified of this UpdateVersionAliasResponse.\n\n 别名最后修改时间。\n\n :return: The last_modified of this UpdateVersionAliasResponse.\n :rtype: datetime\n ' return self._last_modified<|docstring|>Gets the last_modified of this UpdateVersionAliasResponse. 别名最后修改时间。 :return: The last_modified of this UpdateVersionAliasResponse. :rtype: datetime<|endoftext|>
15d3b667685e5ccb4bd4807a8a9183c7d81d2b2844d2087c6c0f8db5f947c324
@last_modified.setter def last_modified(self, last_modified): 'Sets the last_modified of this UpdateVersionAliasResponse.\n\n 别名最后修改时间。\n\n :param last_modified: The last_modified of this UpdateVersionAliasResponse.\n :type: datetime\n ' self._last_modified = last_modified
Sets the last_modified of this UpdateVersionAliasResponse. 别名最后修改时间。 :param last_modified: The last_modified of this UpdateVersionAliasResponse. :type: datetime
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
last_modified
huaweicloud/huaweicloud-sdk-python-v3
64
python
@last_modified.setter def last_modified(self, last_modified): 'Sets the last_modified of this UpdateVersionAliasResponse.\n\n 别名最后修改时间。\n\n :param last_modified: The last_modified of this UpdateVersionAliasResponse.\n :type: datetime\n ' self._last_modified = last_modified
@last_modified.setter def last_modified(self, last_modified): 'Sets the last_modified of this UpdateVersionAliasResponse.\n\n 别名最后修改时间。\n\n :param last_modified: The last_modified of this UpdateVersionAliasResponse.\n :type: datetime\n ' self._last_modified = last_modified<|docstring|>Sets the last_modified of this UpdateVersionAliasResponse. 别名最后修改时间。 :param last_modified: The last_modified of this UpdateVersionAliasResponse. :type: datetime<|endoftext|>
06009cab372b5e25bf2a95548736961582cf8c9f3131421d66820a837033dccc
@property def alias_urn(self): 'Gets the alias_urn of this UpdateVersionAliasResponse.\n\n 版本别名唯一标识。\n\n :return: The alias_urn of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._alias_urn
Gets the alias_urn of this UpdateVersionAliasResponse. 版本别名唯一标识。 :return: The alias_urn of this UpdateVersionAliasResponse. :rtype: str
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
alias_urn
huaweicloud/huaweicloud-sdk-python-v3
64
python
@property def alias_urn(self): 'Gets the alias_urn of this UpdateVersionAliasResponse.\n\n 版本别名唯一标识。\n\n :return: The alias_urn of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._alias_urn
@property def alias_urn(self): 'Gets the alias_urn of this UpdateVersionAliasResponse.\n\n 版本别名唯一标识。\n\n :return: The alias_urn of this UpdateVersionAliasResponse.\n :rtype: str\n ' return self._alias_urn<|docstring|>Gets the alias_urn of this UpdateVersionAliasResponse. 版本别名唯一标识。 :return: The alias_urn of this UpdateVersionAliasResponse. :rtype: str<|endoftext|>
33d6b57d513d568ac56d3e234018513188ae5dc810ec97d772919133b2ada7b2
@alias_urn.setter def alias_urn(self, alias_urn): 'Sets the alias_urn of this UpdateVersionAliasResponse.\n\n 版本别名唯一标识。\n\n :param alias_urn: The alias_urn of this UpdateVersionAliasResponse.\n :type: str\n ' self._alias_urn = alias_urn
Sets the alias_urn of this UpdateVersionAliasResponse. 版本别名唯一标识。 :param alias_urn: The alias_urn of this UpdateVersionAliasResponse. :type: str
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
alias_urn
huaweicloud/huaweicloud-sdk-python-v3
64
python
@alias_urn.setter def alias_urn(self, alias_urn): 'Sets the alias_urn of this UpdateVersionAliasResponse.\n\n 版本别名唯一标识。\n\n :param alias_urn: The alias_urn of this UpdateVersionAliasResponse.\n :type: str\n ' self._alias_urn = alias_urn
@alias_urn.setter def alias_urn(self, alias_urn): 'Sets the alias_urn of this UpdateVersionAliasResponse.\n\n 版本别名唯一标识。\n\n :param alias_urn: The alias_urn of this UpdateVersionAliasResponse.\n :type: str\n ' self._alias_urn = alias_urn<|docstring|>Sets the alias_urn of this UpdateVersionAliasResponse. 版本别名唯一标识。 :param alias_urn: The alias_urn of this UpdateVersionAliasResponse. :type: str<|endoftext|>
6bb581d94ce5a4488e75d0ec2273325f45f39886537eee749cf29bde23c9cecc
@property def additional_version_weights(self): 'Gets the additional_version_weights of this UpdateVersionAliasResponse.\n\n 灰度版本信息\n\n :return: The additional_version_weights of this UpdateVersionAliasResponse.\n :rtype: dict(str, int)\n ' return self._additional_version_weights
Gets the additional_version_weights of this UpdateVersionAliasResponse. 灰度版本信息 :return: The additional_version_weights of this UpdateVersionAliasResponse. :rtype: dict(str, int)
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
additional_version_weights
huaweicloud/huaweicloud-sdk-python-v3
64
python
@property def additional_version_weights(self): 'Gets the additional_version_weights of this UpdateVersionAliasResponse.\n\n 灰度版本信息\n\n :return: The additional_version_weights of this UpdateVersionAliasResponse.\n :rtype: dict(str, int)\n ' return self._additional_version_weights
@property def additional_version_weights(self): 'Gets the additional_version_weights of this UpdateVersionAliasResponse.\n\n 灰度版本信息\n\n :return: The additional_version_weights of this UpdateVersionAliasResponse.\n :rtype: dict(str, int)\n ' return self._additional_version_weights<|docstring|>Gets the additional_version_weights of this UpdateVersionAliasResponse. 灰度版本信息 :return: The additional_version_weights of this UpdateVersionAliasResponse. :rtype: dict(str, int)<|endoftext|>
89781b89740018517af62b8703d20def5396d47d9a2824fbbf31319ccf7a334c
@additional_version_weights.setter def additional_version_weights(self, additional_version_weights): 'Sets the additional_version_weights of this UpdateVersionAliasResponse.\n\n 灰度版本信息\n\n :param additional_version_weights: The additional_version_weights of this UpdateVersionAliasResponse.\n :type: dict(str, int)\n ' self._additional_version_weights = additional_version_weights
Sets the additional_version_weights of this UpdateVersionAliasResponse. 灰度版本信息 :param additional_version_weights: The additional_version_weights of this UpdateVersionAliasResponse. :type: dict(str, int)
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
additional_version_weights
huaweicloud/huaweicloud-sdk-python-v3
64
python
@additional_version_weights.setter def additional_version_weights(self, additional_version_weights): 'Sets the additional_version_weights of this UpdateVersionAliasResponse.\n\n 灰度版本信息\n\n :param additional_version_weights: The additional_version_weights of this UpdateVersionAliasResponse.\n :type: dict(str, int)\n ' self._additional_version_weights = additional_version_weights
@additional_version_weights.setter def additional_version_weights(self, additional_version_weights): 'Sets the additional_version_weights of this UpdateVersionAliasResponse.\n\n 灰度版本信息\n\n :param additional_version_weights: The additional_version_weights of this UpdateVersionAliasResponse.\n :type: dict(str, int)\n ' self._additional_version_weights = additional_version_weights<|docstring|>Sets the additional_version_weights of this UpdateVersionAliasResponse. 灰度版本信息 :param additional_version_weights: The additional_version_weights of this UpdateVersionAliasResponse. :type: dict(str, int)<|endoftext|>
23795442a46e2cd10dec98fded44ed9172a29971e98983a30ad89baa6c9c0a03
def to_dict(self):
    """Return the model properties as a dict.

    Nested models are converted recursively via their own ``to_dict``;
    attributes named in ``self.sensitive_list`` are masked as '****'.

    Improvement over the generated original: drops the Python-2-era
    ``six.iteritems`` dependency and replaces the lambda/map pipelines
    with comprehensions (behavior unchanged on Python 3).
    """
    result = {}
    for attr in self.openapi_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                item.to_dict() if hasattr(item, 'to_dict') else item
                for item in value
            ]
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                k: v.to_dict() if hasattr(v, 'to_dict') else v
                for k, v in value.items()
            }
        elif attr in self.sensitive_list:
            # Never leak sensitive attribute values in serialized form.
            result[attr] = '****'
        else:
            result[attr] = value
    return result
Returns the model properties as a dict
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
to_dict
huaweicloud/huaweicloud-sdk-python-v3
64
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) elif (attr in self.sensitive_list): result[attr] = '****' else: result[attr] = value return result
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) elif (attr in self.sensitive_list): result[attr] = '****' else: result[attr] = value return result<|docstring|>Returns the model properties as a dict<|endoftext|>
a85eb2dd57daf3998acb705f217af08ef0b14fd68fee87605500331b1a5f2987
def to_str(self):
    """Return the string representation of the model.

    Serializes the sanitized model as JSON with non-ASCII characters kept
    as-is. Falls back to the standard-library ``json`` module when
    ``simplejson`` is not installed.

    Improvement over the generated original: removes the Python-2-only
    ``reload(sys); sys.setdefaultencoding('utf-8')`` hack, which is a
    well-known anti-pattern and dead code on Python 3.
    """
    try:
        import simplejson as json
    except ImportError:
        import json
    return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
Returns the string representation of the model
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
to_str
huaweicloud/huaweicloud-sdk-python-v3
64
python
def to_str(self): import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding('utf-8') return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def to_str(self): import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding('utf-8') return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)<|docstring|>Returns the string representation of the model<|endoftext|>
122cefd5382ee9078015a8ccdeba1aa42a0625442bf0dcfc7748dc07a3e45d3f
def __repr__(self):
    """For `print`: delegate to the model's JSON string form."""
    text = self.to_str()
    return text
For `print`
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
__repr__
huaweicloud/huaweicloud-sdk-python-v3
64
python
def __repr__(self): return self.to_str()
def __repr__(self): return self.to_str()<|docstring|>For `print`<|endoftext|>
2fdc431bb6c99ebe3c5f3ce9b8ae54319d6dc7bb87884271ba454def1cda1afa
def __eq__(self, other):
    """Return True when ``other`` is the same type with equal attributes."""
    same_type = isinstance(other, UpdateVersionAliasResponse)
    return same_type and self.__dict__ == other.__dict__
Returns true if both objects are equal
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
__eq__
huaweicloud/huaweicloud-sdk-python-v3
64
python
def __eq__(self, other): if (not isinstance(other, UpdateVersionAliasResponse)): return False return (self.__dict__ == other.__dict__)
def __eq__(self, other): if (not isinstance(other, UpdateVersionAliasResponse)): return False return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|>
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42
def __ne__(self, other):
    """Return True when both objects are not equal (negation of ``==``)."""
    equal = (self == other)
    return not equal
Returns true if both objects are not equal
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/update_version_alias_response.py
__ne__
huaweicloud/huaweicloud-sdk-python-v3
64
python
def __ne__(self, other): return (not (self == other))
def __ne__(self, other): return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|>
d7dc5c674c41b3fa71647cf626daa4d8cd475560683ba128bc66deafd017a583
def find_all_get_param_in_yml(yml):
    """Recursively collect every parameter referenced via ``get_param`` in a
    parsed yaml body, excluding pseudo parameters; returns a set."""
    collector = ParameterCollector()
    traverse(yml, 'get_param', collector)
    params = set()
    for param in collector.params:
        if not is_pseudo_param(param):
            params.add(param)
    return params
Recursively find all referenced parameters in a parsed yaml body and return a list of parameters
ice_validator/tests/utils/nested_iterables.py
find_all_get_param_in_yml
rohitagarwal0910/vvp-cnf-validation-scripts
1
python
def find_all_get_param_in_yml(yml): '\n Recursively find all referenced parameters in a parsed yaml body\n and return a list of parameters\n ' collector = ParameterCollector() traverse(yml, 'get_param', collector) return {p for p in collector.params if (not is_pseudo_param(p))}
def find_all_get_param_in_yml(yml): '\n Recursively find all referenced parameters in a parsed yaml body\n and return a list of parameters\n ' collector = ParameterCollector() traverse(yml, 'get_param', collector) return {p for p in collector.params if (not is_pseudo_param(p))}<|docstring|>Recursively find all referenced parameters in a parsed yaml body and return a list of parameters<|endoftext|>
8e3d4a9e8d050bfb13a8ff07c9697f2cc343a949c23eb7b9a6053fad22ac6382
def find_all_get_resource_in_yml(yml):
    """Recursively collect the ids of all resources referenced via
    ``get_resource`` in a parsed yaml body."""
    resource_collector = ParameterCollector()
    traverse(yml, 'get_resource', resource_collector)
    return resource_collector.params
Recursively find all referenced resources in a parsed yaml body and return a list of resource ids
ice_validator/tests/utils/nested_iterables.py
find_all_get_resource_in_yml
rohitagarwal0910/vvp-cnf-validation-scripts
1
python
def find_all_get_resource_in_yml(yml): '\n Recursively find all referenced resources\n in a parsed yaml body and return a list of resource ids\n ' collector = ParameterCollector() traverse(yml, 'get_resource', collector) return collector.params
def find_all_get_resource_in_yml(yml): '\n Recursively find all referenced resources\n in a parsed yaml body and return a list of resource ids\n ' collector = ParameterCollector() traverse(yml, 'get_resource', collector) return collector.params<|docstring|>Recursively find all referenced resources in a parsed yaml body and return a list of resource ids<|endoftext|>
9c86c77c5bbaf0accf3df781594aa45cd3c0088066d159a43eb2690964a318e5
def find_all_get_file_in_yml(yml):
    """Recursively collect every ``get_file`` reference (files/urls) in a
    parsed yaml body."""
    file_collector = ParameterCollector()
    traverse(yml, 'get_file', file_collector)
    return file_collector.params
Recursively find all get_file in a parsed yaml body and return the list of referenced files/urls
ice_validator/tests/utils/nested_iterables.py
find_all_get_file_in_yml
rohitagarwal0910/vvp-cnf-validation-scripts
1
python
def find_all_get_file_in_yml(yml): '\n Recursively find all get_file in a parsed yaml body\n and return the list of referenced files/urls\n ' collector = ParameterCollector() traverse(yml, 'get_file', collector) return collector.params
def find_all_get_file_in_yml(yml): '\n Recursively find all get_file in a parsed yaml body\n and return the list of referenced files/urls\n ' collector = ParameterCollector() traverse(yml, 'get_file', collector) return collector.params<|docstring|>Recursively find all get_file in a parsed yaml body and return the list of referenced files/urls<|endoftext|>
1ec3064d06846f0d40b4cd82fd124cc1127daa7b13200f335f7014e6bb0175cb
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_adafactor():
    """
    Feature: AdaFactor
    Description: Test AdaFactor
    Expectation: Run success
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    net = Net()
    grad = Tensor(np.ones(param_shape), mstype.float32)
    net((1e-30, 0.001), 1.0, 0.9, 0.8, 0.01, 0.03, grad)
    # One optimizer step from an all-ones param should land near 0.97.
    delta = net.param.asnumpy() - np.ones(param_shape) * 0.97
    assert np.all(delta < 0.001)
Feature: AdaFactor Description: Test AdaFactor Expectation: Run success
tests/st/ops/cpu/test_fused_ada_factor_op.py
test_adafactor
Aaron911/mindspore
1
python
@pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_adafactor(): '\n Feature: AdaFactor\n Description: Test AdaFactor\n Expectation: Run success\n ' context.set_context(mode=context.GRAPH_MODE, device_target='CPU') net = Net() gradient = Tensor(np.ones(param_shape), mstype.float32) net((1e-30, 0.001), 1.0, 0.9, 0.8, 0.01, 0.03, gradient) diff = (net.param.asnumpy() - (np.ones(param_shape) * 0.97)) assert np.all((diff < 0.001))
@pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_adafactor(): '\n Feature: AdaFactor\n Description: Test AdaFactor\n Expectation: Run success\n ' context.set_context(mode=context.GRAPH_MODE, device_target='CPU') net = Net() gradient = Tensor(np.ones(param_shape), mstype.float32) net((1e-30, 0.001), 1.0, 0.9, 0.8, 0.01, 0.03, gradient) diff = (net.param.asnumpy() - (np.ones(param_shape) * 0.97)) assert np.all((diff < 0.001))<|docstring|>Feature: AdaFactor Description: Test AdaFactor Expectation: Run success<|endoftext|>
2a3dd7bb59e04bdd06757df1fb6d1d567bf0f13785db2a4200e3a9b84d586447
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_adafactor_with_global_norm():
    """
    Feature: AdaFactor
    Description: Test AdaFactor
    Expectation: Run success
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    net = NetWithGlobalNorm()
    grad = Tensor(np.ones(param_shape), mstype.float32)
    net((1e-30, 0.001), 1.0, 0.9, 0.8, 0.01, 0.03, grad, 10.0)
    # One optimizer step from an all-ones param should land near 0.97.
    delta = net.param.asnumpy() - np.ones(param_shape) * 0.97
    assert np.all(delta < 0.001)
Feature: AdaFactor Description: Test AdaFactor Expectation: Run success
tests/st/ops/cpu/test_fused_ada_factor_op.py
test_adafactor_with_global_norm
Aaron911/mindspore
1
python
@pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_adafactor_with_global_norm(): '\n Feature: AdaFactor\n Description: Test AdaFactor\n Expectation: Run success\n ' context.set_context(mode=context.GRAPH_MODE, device_target='CPU') net = NetWithGlobalNorm() gradient = Tensor(np.ones(param_shape), mstype.float32) net((1e-30, 0.001), 1.0, 0.9, 0.8, 0.01, 0.03, gradient, 10.0) diff = (net.param.asnumpy() - (np.ones(param_shape) * 0.97)) assert np.all((diff < 0.001))
@pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_adafactor_with_global_norm(): '\n Feature: AdaFactor\n Description: Test AdaFactor\n Expectation: Run success\n ' context.set_context(mode=context.GRAPH_MODE, device_target='CPU') net = NetWithGlobalNorm() gradient = Tensor(np.ones(param_shape), mstype.float32) net((1e-30, 0.001), 1.0, 0.9, 0.8, 0.01, 0.03, gradient, 10.0) diff = (net.param.asnumpy() - (np.ones(param_shape) * 0.97)) assert np.all((diff < 0.001))<|docstring|>Feature: AdaFactor Description: Test AdaFactor Expectation: Run success<|endoftext|>
b244a33f7eaccd76bfb02653603238b2b1c41a9419512aba00ad014903a9374e
def setupWorld():
    """Create the world, the scenes that can be visited, the objects in the
    scenes, and the player."""
    world = worldFactory(name='Game World')
    # Scenes.
    bedroom = world.addScene('The Bedroom')
    bedroom.setSkin(Skin(imageLoader.load('bedroom.png')))
    lounge = world.addScene('The Lounge')
    lounge.setSkin(Skin(imageLoader.load('lounge.png')))
    # The player, with a directional walking animation.
    ian_curtis = bedroom.addPlayer(
        name='Ian Curtis', location=[90, 90, 100], size=[14, 14, 50],
        velocityModifier=2)
    south_facing = imageLoader.load(
        ['player/ian_curtis1.png', 'player/ian_curtis2.png',
         'player/ian_curtis3.png'])
    east_facing = imageLoader.load(
        ['player/ian_curtis4.png', 'player/ian_curtis5.png',
         'player/ian_curtis6.png'])
    ian_curtis.setSkin(DirectedAnimatedSkin(
        south_facing, east_facing, frameSequence=[0, 2, 2, 1, 1, 2, 2, 0]))
    # Geometry shared by both rooms: the floor plus four bounding walls.
    ground = PhysicalThing('ground', [-1000, -1000, -100], [2000, 2000, 100])
    wall0 = PhysicalThing('wall', [180, 0, -20], [20, 180, 120])
    wall1 = PhysicalThing('wall', [0, 180, -20], [180, 20, 120])
    wall2 = PhysicalThing('wall', [0, -20, -20], [180, 20, 120])
    wall3 = PhysicalThing('wall', [-20, 0, -20], [20, 180, 120])
    # Bedroom contents; its door leads east into the lounge.
    bedroom_door = Portal(name='door', location=[180, 105, 0],
                          size=[10, 30, 56], toScene=lounge,
                          toLocation=[10, 115, 0])
    bedroom_door.setSkin(Skin(imageLoader.load(['door.png'])))
    bed = MovableThing(name='bed', location=[0, 100, 0], size=[70, 52, 28],
                       fixed=False)
    bed.setSkin(Skin(imageLoader.load(['bed.png'])))
    guitar = PortableThing(name='guitar', location=[60, 0, 40],
                           size=[20, 12, 20])
    guitar.setSkin(Skin(imageLoader.load(['guitar.png'])))
    guitar.text.setPickedUp('You feel your hands vibrate with anticiation as you pick up the guitar.')
    guitar.text.setUsed('You strum the guitar and begin to rock out hard.')
    guitar.text.setDropped('The guitar makes a startling, clanging sound when you drop it.')
    bedroom.addObjects([ground, wall0, wall1, wall2, wall3, bedroom_door,
                        bed, guitar, ian_curtis])
    # Lounge contents; its door leads back west into the bedroom.
    lounge_door = Portal(name='door', location=[0, 105, 0], size=[10, 30, 56],
                         toScene=bedroom, toLocation=[160, 115, 0])
    lounge_door.setSkin(Skin(imageLoader.load(['door.png'])))
    sofa = PhysicalThing(name='sofa', location=[0, 0, 0], size=[39, 66, 37],
                         fixed=False)
    sofa.setSkin(Skin(imageLoader.load(['sofa.png'])))
    amp = PortableThing(name='amp', location=[60, 0, 25], size=[16, 10, 18])
    amp.setSkin(Skin(imageLoader.load(['amp.png'])))
    amp.text.setUsed('The amp crackles and pops and you turn it up to 11.')
    lounge.addObjects([ground, wall0, wall1, wall2, wall3, lounge_door, sofa,
                       amp])
    return world
Create the world, the scenes that can be visited, the objects in the scenes, and the player.
examples/TwoRooms/tworooms.py
setupWorld
dave-leblanc/isomyr
0
python
def setupWorld(): '\n Create the world, the scenes that can be visited, the objects in the\n scenes, and the player.\n ' world = worldFactory(name='Game World') bedroom = world.addScene('The Bedroom') bedroom.setSkin(Skin(imageLoader.load('bedroom.png'))) lounge = world.addScene('The Lounge') lounge.setSkin(Skin(imageLoader.load('lounge.png'))) ian_curtis = bedroom.addPlayer(name='Ian Curtis', location=[90, 90, 100], size=[14, 14, 50], velocityModifier=2) south_facing = imageLoader.load(['player/ian_curtis1.png', 'player/ian_curtis2.png', 'player/ian_curtis3.png']) east_facing = imageLoader.load(['player/ian_curtis4.png', 'player/ian_curtis5.png', 'player/ian_curtis6.png']) ian_curtis.setSkin(DirectedAnimatedSkin(south_facing, east_facing, frameSequence=[0, 2, 2, 1, 1, 2, 2, 0])) ground = PhysicalThing('ground', [(- 1000), (- 1000), (- 100)], [2000, 2000, 100]) wall0 = PhysicalThing('wall', [180, 0, (- 20)], [20, 180, 120]) wall1 = PhysicalThing('wall', [0, 180, (- 20)], [180, 20, 120]) wall2 = PhysicalThing('wall', [0, (- 20), (- 20)], [180, 20, 120]) wall3 = PhysicalThing('wall', [(- 20), 0, (- 20)], [20, 180, 120]) door = Portal(name='door', location=[180, 105, 0], size=[10, 30, 56], toScene=lounge, toLocation=[10, 115, 0]) door.setSkin(Skin(imageLoader.load(['door.png']))) bed = MovableThing(name='bed', location=[0, 100, 0], size=[70, 52, 28], fixed=False) bed.setSkin(Skin(imageLoader.load(['bed.png']))) guitar = PortableThing(name='guitar', location=[60, 0, 40], size=[20, 12, 20]) guitar.setSkin(Skin(imageLoader.load(['guitar.png']))) guitar.text.setPickedUp('You feel your hands vibrate with anticiation as you pick up the guitar.') guitar.text.setUsed('You strum the guitar and begin to rock out hard.') guitar.text.setDropped('The guitar makes a startling, clanging sound when you drop it.') bedroom.addObjects([ground, wall0, wall1, wall2, wall3, door, bed, guitar, ian_curtis]) door = Portal(name='door', location=[0, 105, 0], size=[10, 30, 56], toScene=bedroom, 
toLocation=[160, 115, 0]) door.setSkin(Skin(imageLoader.load(['door.png']))) sofa = PhysicalThing(name='sofa', location=[0, 0, 0], size=[39, 66, 37], fixed=False) sofa.setSkin(Skin(imageLoader.load(['sofa.png']))) amp = PortableThing(name='amp', location=[60, 0, 25], size=[16, 10, 18]) amp.setSkin(Skin(imageLoader.load(['amp.png']))) amp.text.setUsed('The amp crackles and pops and you turn it up to 11.') lounge.addObjects([ground, wall0, wall1, wall2, wall3, door, sofa, amp]) return world
def setupWorld(): '\n Create the world, the scenes that can be visited, the objects in the\n scenes, and the player.\n ' world = worldFactory(name='Game World') bedroom = world.addScene('The Bedroom') bedroom.setSkin(Skin(imageLoader.load('bedroom.png'))) lounge = world.addScene('The Lounge') lounge.setSkin(Skin(imageLoader.load('lounge.png'))) ian_curtis = bedroom.addPlayer(name='Ian Curtis', location=[90, 90, 100], size=[14, 14, 50], velocityModifier=2) south_facing = imageLoader.load(['player/ian_curtis1.png', 'player/ian_curtis2.png', 'player/ian_curtis3.png']) east_facing = imageLoader.load(['player/ian_curtis4.png', 'player/ian_curtis5.png', 'player/ian_curtis6.png']) ian_curtis.setSkin(DirectedAnimatedSkin(south_facing, east_facing, frameSequence=[0, 2, 2, 1, 1, 2, 2, 0])) ground = PhysicalThing('ground', [(- 1000), (- 1000), (- 100)], [2000, 2000, 100]) wall0 = PhysicalThing('wall', [180, 0, (- 20)], [20, 180, 120]) wall1 = PhysicalThing('wall', [0, 180, (- 20)], [180, 20, 120]) wall2 = PhysicalThing('wall', [0, (- 20), (- 20)], [180, 20, 120]) wall3 = PhysicalThing('wall', [(- 20), 0, (- 20)], [20, 180, 120]) door = Portal(name='door', location=[180, 105, 0], size=[10, 30, 56], toScene=lounge, toLocation=[10, 115, 0]) door.setSkin(Skin(imageLoader.load(['door.png']))) bed = MovableThing(name='bed', location=[0, 100, 0], size=[70, 52, 28], fixed=False) bed.setSkin(Skin(imageLoader.load(['bed.png']))) guitar = PortableThing(name='guitar', location=[60, 0, 40], size=[20, 12, 20]) guitar.setSkin(Skin(imageLoader.load(['guitar.png']))) guitar.text.setPickedUp('You feel your hands vibrate with anticiation as you pick up the guitar.') guitar.text.setUsed('You strum the guitar and begin to rock out hard.') guitar.text.setDropped('The guitar makes a startling, clanging sound when you drop it.') bedroom.addObjects([ground, wall0, wall1, wall2, wall3, door, bed, guitar, ian_curtis]) door = Portal(name='door', location=[0, 105, 0], size=[10, 30, 56], toScene=bedroom, 
toLocation=[160, 115, 0]) door.setSkin(Skin(imageLoader.load(['door.png']))) sofa = PhysicalThing(name='sofa', location=[0, 0, 0], size=[39, 66, 37], fixed=False) sofa.setSkin(Skin(imageLoader.load(['sofa.png']))) amp = PortableThing(name='amp', location=[60, 0, 25], size=[16, 10, 18]) amp.setSkin(Skin(imageLoader.load(['amp.png']))) amp.text.setUsed('The amp crackles and pops and you turn it up to 11.') lounge.addObjects([ground, wall0, wall1, wall2, wall3, door, sofa, amp]) return world<|docstring|>Create the world, the scenes that can be visited, the objects in the scenes, and the player.<|endoftext|>
91dac5b7c63365e35923a97d6948898bfcaf6a4a5a512c1dae64e24a089bf10e
def euclidean_dist(x, y):
    """Compute all pairwise Euclidean distances between two sets of vectors.

    Args:
        x: pytorch tensor with shape [m, d]
        y: pytorch tensor with shape [n, d]
    Returns:
        dist: pytorch tensor with shape [m, n]
    """
    m, n = x.size(0), y.size(0)
    # Broadcast ||x||^2 and ||y||^2 to [m, n].
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # dist = ||x||^2 + ||y||^2 - 2 * x @ y.T. Keyword beta/alpha replaces
    # the deprecated positional addmm_(beta, alpha, mat1, mat2) signature.
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    # Clamp before sqrt for numerical stability (avoids sqrt of tiny
    # negative values caused by floating-point cancellation).
    dist = dist.clamp(min=1e-12).sqrt()
    return dist
Args: x: pytorch Variable, with shape [m, d] y: pytorch Variable, with shape [n, d] Returns: dist: pytorch Variable, with shape [m, n]
reid/utils/reid_metric.py
euclidean_dist
raoyongming/CAL
58
python
def euclidean_dist(x, y): '\n Args:\n x: pytorch Variable, with shape [m, d]\n y: pytorch Variable, with shape [n, d]\n Returns:\n dist: pytorch Variable, with shape [m, n]\n ' (m, n) = (x.size(0), y.size(0)) xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() dist = (xx + yy) dist.addmm_(1, (- 2), x, y.t()) dist = dist.clamp(min=1e-12).sqrt() return dist
def euclidean_dist(x, y): '\n Args:\n x: pytorch Variable, with shape [m, d]\n y: pytorch Variable, with shape [n, d]\n Returns:\n dist: pytorch Variable, with shape [m, n]\n ' (m, n) = (x.size(0), y.size(0)) xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() dist = (xx + yy) dist.addmm_(1, (- 2), x, y.t()) dist = dist.clamp(min=1e-12).sqrt() return dist<|docstring|>Args: x: pytorch Variable, with shape [m, d] y: pytorch Variable, with shape [n, d] Returns: dist: pytorch Variable, with shape [m, n]<|endoftext|>
a9c8796b43bdc3370e701a07e8c5551bfdea87e5c73e3627dc9b3ebda559efcc
def cos_dist(x, y):
    """Compute all pairwise cosine distances between two sets of vectors.

    Args:
        x: pytorch tensor with shape [m, d]
        y: pytorch tensor with shape [n, d]
    Returns:
        dist: pytorch tensor with shape [m, n]
    """
    # Normalize each row to unit length, then 1 - cosine similarity.
    x_unit = x / x.norm(dim=1)[:, None]
    y_unit = y / y.norm(dim=1)[:, None]
    similarity = torch.mm(x_unit, y_unit.t())
    return 1 - similarity
Args: x: pytorch Variable, with shape [m, d] y: pytorch Variable, with shape [n, d] Returns: dist: pytorch Variable, with shape [m, n]
reid/utils/reid_metric.py
cos_dist
raoyongming/CAL
58
python
def cos_dist(x, y): '\n Args:\n x: pytorch Variable, with shape [m, d]\n y: pytorch Variable, with shape [n, d]\n Returns:\n dist: pytorch Variable, with shape [m, n]\n ' xx = (x / x.norm(dim=1)[(:, None)]) yy = (y / y.norm(dim=1)[(:, None)]) dist = torch.mm(xx, yy.t()) return (1 - dist)
def cos_dist(x, y): '\n Args:\n x: pytorch Variable, with shape [m, d]\n y: pytorch Variable, with shape [n, d]\n Returns:\n dist: pytorch Variable, with shape [m, n]\n ' xx = (x / x.norm(dim=1)[(:, None)]) yy = (y / y.norm(dim=1)[(:, None)]) dist = torch.mm(xx, yy.t()) return (1 - dist)<|docstring|>Args: x: pytorch Variable, with shape [m, d] y: pytorch Variable, with shape [n, d] Returns: dist: pytorch Variable, with shape [m, n]<|endoftext|>
d9ce10a930983a4c4115fa7338b419207d341b6e31952b073f02cb51329e13c5
def is_ace(card):
    """Boolean evaluation of whether `card` is an Ace."""
    value = card[VALUE]
    return value == ACE
Boolean evaluation of whether `card` is an Ace
group_validation_sample.py
is_ace
HesterLim/Card_Game
0
python
def is_ace(card): return (card[VALUE] == ACE)
def is_ace(card): return (card[VALUE] == ACE)<|docstring|>Boolean evaluation of whether `card` is an Ace<|endoftext|>
72233f1bbeb17911675687ee85653abc9134c756d78ef3c73066665f0103d652
def get_score(card):
    """Return the score of `card`, looked up from its value."""
    value = card[VALUE]
    return card_score[value]
return the score of `card`, based on its value
group_validation_sample.py
get_score
HesterLim/Card_Game
0
python
def get_score(card): return card_score[card[VALUE]]
def get_score(card): return card_score[card[VALUE]]<|docstring|>return the score of `card`, based on its value<|endoftext|>
f9ea02e12b08eb3fe087d4f72fc2474291096970fa328d5c76299cc469da7a09
def get_colour(card):
    """Return the colour of `card` (`RED` or `BLACK`), based on its suit."""
    return RED if card[SUIT] in RED_SUITS else BLACK
Return the colour of `card` (`RED` or `BLACK`)
group_validation_sample.py
get_colour
HesterLim/Card_Game
0
python
def get_colour(card): if (card[SUIT] in RED_SUITS): return RED else: return BLACK
def get_colour(card): if (card[SUIT] in RED_SUITS): return RED else: return BLACK<|docstring|>Return the colour of `card` (`RED` or `BLACK`)<|endoftext|>
2e8fa1ca3dd30cf02b8e17e50a48c1a5be03e15adafb6a6accc6e0d177286628
def comp10001go_score_group(cards):
    """Validate/score a group of cards (order unimportant), supplied as a
    list of cards (each a string); return the positive score of the group if
    valid, and negative score otherwise. Note, assumes that all cards are
    valid, and unique."""
    card_values = sorted(get_score(card) for card in cards)

    # N-of-a-kind: all values equal, enough cards, and not built from Aces.
    if (len(set(card_values)) == 1 and len(cards) >= MIN_CARDS_NKIND
            and not is_ace(cards[0])):
        return factorial(len(cards)) * card_score[cards[0][VALUE]]

    # Split aces (wildcards) from the rest; non-aces sorted ascending.
    non_aces = sorted((card for card in cards if not is_ace(card)),
                      key=get_score)
    aces = list(set(cards) - set(non_aces))

    if len(non_aces) >= MIN_NONACE_RUN:
        run_valid = True
        last_value = last_colour = None
        run_score = 0
        for card in non_aces:
            if last_value is None:
                # First card seeds the run.
                run_score = last_value = get_score(card)
                last_colour = get_colour(card)
            elif get_score(card) - last_value == 1:
                # Consecutive value: colours must alternate.
                if get_colour(card) != last_colour:
                    last_value = get_score(card)
                    last_colour = get_colour(card)
                    run_score += last_value
                else:
                    run_valid = False
                    break
            elif get_score(card) == last_value:
                # Duplicate value can never appear in a run.
                run_valid = False
                break
            else:
                # Gap in values: try to bridge it with aces of
                # alternating colour.
                gap = get_score(card) - last_value - 1
                filled = False
                while run_valid and gap and len(aces) >= gap:
                    filled = False
                    for idx, ace in enumerate(aces):
                        if get_colour(ace) != last_colour:
                            aces.pop(idx)
                            last_value += 1
                            last_colour = get_colour(ace)
                            run_score += last_value
                            gap -= 1
                            filled = True
                            break
                    if not filled:
                        run_valid = False
                if run_valid and filled and get_colour(card) != last_colour:
                    last_value = get_score(card)
                    last_colour = get_colour(card)
                    run_score += last_value
                else:
                    run_valid = False
        # A valid run must use every card, including all the aces.
        if run_valid and len(cards) >= MIN_RUN and not aces:
            return run_score

    # Invalid group: negative sum of the card scores.
    return -sum(card_values)
Validate/score a group of cards (order unimportant), supplied as a list of cards (each a string); return the positive score of the group if valid, and negative score otherwise. Note, assumes that all cards are valid, and unique.
group_validation_sample.py
comp10001go_score_group
HesterLim/Card_Game
0
python
def comp10001go_score_group(cards): 'Validate/score a group of cards (order unimportant), supplied as a \n list of cards (each a string); return the positive score of the group if \n valid, and negative score otherwise. Note, assumes that all cards are \n valid, and unique.' values = sorted([get_score(card) for card in cards]) if ((len(set(values)) == 1) and (len(cards) >= MIN_CARDS_NKIND) and (not is_ace(cards[0]))): return (factorial(len(cards)) * card_score[cards[0][VALUE]]) nonace_cards = sorted([card for card in cards if (not is_ace(card))], key=(lambda x: get_score(x))) ace_cards = list((set(cards) - set(nonace_cards))) if (len(nonace_cards) >= MIN_NONACE_RUN): is_run = True prev_val = prev_colour = None score = 0 for card in nonace_cards: if (prev_val is None): score = prev_val = get_score(card) prev_colour = get_colour(card) elif ((get_score(card) - prev_val) == 1): if (get_colour(card) != prev_colour): prev_val = get_score(card) prev_colour = get_colour(card) score += prev_val else: is_run = False break elif (get_score(card) == prev_val): is_run = False break else: gap = ((get_score(card) - prev_val) - 1) gap_filled = False while (is_run and gap and (len(ace_cards) >= gap)): gap_filled = False for (i, ace) in enumerate(ace_cards): if (get_colour(ace) != prev_colour): ace_cards.pop(i) prev_val += 1 prev_colour = get_colour(ace) score += prev_val gap -= 1 gap_filled = True break if (not gap_filled): is_run = False if (is_run and gap_filled and (get_colour(card) != prev_colour)): prev_val = get_score(card) prev_colour = get_colour(card) score += prev_val else: is_run = False if (is_run and (len(cards) >= MIN_RUN) and (not ace_cards)): return score return (- sum(values))
def comp10001go_score_group(cards): 'Validate/score a group of cards (order unimportant), supplied as a \n list of cards (each a string); return the positive score of the group if \n valid, and negative score otherwise. Note, assumes that all cards are \n valid, and unique.' values = sorted([get_score(card) for card in cards]) if ((len(set(values)) == 1) and (len(cards) >= MIN_CARDS_NKIND) and (not is_ace(cards[0]))): return (factorial(len(cards)) * card_score[cards[0][VALUE]]) nonace_cards = sorted([card for card in cards if (not is_ace(card))], key=(lambda x: get_score(x))) ace_cards = list((set(cards) - set(nonace_cards))) if (len(nonace_cards) >= MIN_NONACE_RUN): is_run = True prev_val = prev_colour = None score = 0 for card in nonace_cards: if (prev_val is None): score = prev_val = get_score(card) prev_colour = get_colour(card) elif ((get_score(card) - prev_val) == 1): if (get_colour(card) != prev_colour): prev_val = get_score(card) prev_colour = get_colour(card) score += prev_val else: is_run = False break elif (get_score(card) == prev_val): is_run = False break else: gap = ((get_score(card) - prev_val) - 1) gap_filled = False while (is_run and gap and (len(ace_cards) >= gap)): gap_filled = False for (i, ace) in enumerate(ace_cards): if (get_colour(ace) != prev_colour): ace_cards.pop(i) prev_val += 1 prev_colour = get_colour(ace) score += prev_val gap -= 1 gap_filled = True break if (not gap_filled): is_run = False if (is_run and gap_filled and (get_colour(card) != prev_colour)): prev_val = get_score(card) prev_colour = get_colour(card) score += prev_val else: is_run = False if (is_run and (len(cards) >= MIN_RUN) and (not ace_cards)): return score return (- sum(values))<|docstring|>Validate/score a group of cards (order unimportant), supplied as a list of cards (each a string); return the positive score of the group if valid, and negative score otherwise. Note, assumes that all cards are valid, and unique.<|endoftext|>
38cd885584b6d7f85f0aacc3bf9841462ecc37b74ef2f41948d5c74febd805b4
def _get_relationships_from_consul(consul_handle, service_name):
    """Fetch the relationship information from Consul for a service by service
    name. Returns a list of service names. Blocks, retrying every 5s, until
    the key appears."""
    rel_key = '{0}:rel'.format(service_name)
    index = None
    while True:
        index, data = consul_handle.kv.get(rel_key, index=index)
        if not data:
            _logger.warn('No relationships found for {0}. Try again in a bit.'.format(service_name))
            time.sleep(5)
            continue
        return json.loads(data['Value'].decode('utf-8'))
Fetch the relationship information from Consul for a service by service name. Returns a list of service names.
python-discovery-client/discovery_client/discovery.py
_get_relationships_from_consul
onap/dcaegen2-utils
2
python
def _get_relationships_from_consul(consul_handle, service_name): 'Fetch the relationship information from Consul for a service by service\n name. Returns a list of service names.' index = None rel_key = '{0}:rel'.format(service_name) while True: (index, data) = consul_handle.kv.get(rel_key, index=index) if data: return json.loads(data['Value'].decode('utf-8')) else: _logger.warn('No relationships found for {0}. Try again in a bit.'.format(service_name)) time.sleep(5)
def _get_relationships_from_consul(consul_handle, service_name): 'Fetch the relationship information from Consul for a service by service\n name. Returns a list of service names.' index = None rel_key = '{0}:rel'.format(service_name) while True: (index, data) = consul_handle.kv.get(rel_key, index=index) if data: return json.loads(data['Value'].decode('utf-8')) else: _logger.warn('No relationships found for {0}. Try again in a bit.'.format(service_name)) time.sleep(5)<|docstring|>Fetch the relationship information from Consul for a service by service name. Returns a list of service names.<|endoftext|>
770d718e73878942d5d37ccb2d408104bba1ff19ff93fb1839384f36b0b224bb
def _get_configuration_resolved_from_cbs(consul_handle, service_name): '\n This is what a minimal python client library that wraps the CBS would look like.\n POSSIBLE TODO: break this out into pypi repo\n\n This call does not raise an exception if Consul or the CBS cannot complete the request.\n It logs an error and returns {} if the config is not bindable. \n It could be a temporary network outage. Call me again later. \n\n It will raise an exception if the necessary env parameters were not set because that is irrecoverable.\n This function is called in my /heatlhcheck, so this will be caught early.\n ' config = {} results = _lookup_with_consul(consul_handle, 'config_binding_service', max_attempts=5) if (results is None): logger.error('Cannot bind config at this time, cbs is unreachable') else: cbs_hostname = results[0]['ServiceAddress'] cbs_port = results[0]['ServicePort'] cbs_url = 'http://{hostname}:{port}'.format(hostname=cbs_hostname, port=cbs_port) my_config_endpoint = '{0}/service_component/{1}'.format(cbs_url, service_name) res = requests.get(my_config_endpoint) try: res.raise_for_status() config = res.json() _logger.info('get_config returned the following configuration: {0}'.format(json.dumps(config))) except: _logger.error('in get_config, the config binding service endpoint {0} blew up on me. Error code: {1}, Error text: {2}'.format(my_config_endpoint, res.status_code, res.text)) return config
This is what a minimal python client library that wraps the CBS would look like. POSSIBLE TODO: break this out into pypi repo This call does not raise an exception if Consul or the CBS cannot complete the request. It logs an error and returns {} if the config is not bindable. It could be a temporary network outage. Call me again later. It will raise an exception if the necessary env parameters were not set because that is irrecoverable. This function is called in my /heatlhcheck, so this will be caught early.
python-discovery-client/discovery_client/discovery.py
_get_configuration_resolved_from_cbs
onap/dcaegen2-utils
2
python
def _get_configuration_resolved_from_cbs(consul_handle, service_name): '\n This is what a minimal python client library that wraps the CBS would look like.\n POSSIBLE TODO: break this out into pypi repo\n\n This call does not raise an exception if Consul or the CBS cannot complete the request.\n It logs an error and returns {} if the config is not bindable. \n It could be a temporary network outage. Call me again later. \n\n It will raise an exception if the necessary env parameters were not set because that is irrecoverable.\n This function is called in my /heatlhcheck, so this will be caught early.\n ' config = {} results = _lookup_with_consul(consul_handle, 'config_binding_service', max_attempts=5) if (results is None): logger.error('Cannot bind config at this time, cbs is unreachable') else: cbs_hostname = results[0]['ServiceAddress'] cbs_port = results[0]['ServicePort'] cbs_url = 'http://{hostname}:{port}'.format(hostname=cbs_hostname, port=cbs_port) my_config_endpoint = '{0}/service_component/{1}'.format(cbs_url, service_name) res = requests.get(my_config_endpoint) try: res.raise_for_status() config = res.json() _logger.info('get_config returned the following configuration: {0}'.format(json.dumps(config))) except: _logger.error('in get_config, the config binding service endpoint {0} blew up on me. Error code: {1}, Error text: {2}'.format(my_config_endpoint, res.status_code, res.text)) return config
def _get_configuration_resolved_from_cbs(consul_handle, service_name): '\n This is what a minimal python client library that wraps the CBS would look like.\n POSSIBLE TODO: break this out into pypi repo\n\n This call does not raise an exception if Consul or the CBS cannot complete the request.\n It logs an error and returns {} if the config is not bindable. \n It could be a temporary network outage. Call me again later. \n\n It will raise an exception if the necessary env parameters were not set because that is irrecoverable.\n This function is called in my /heatlhcheck, so this will be caught early.\n ' config = {} results = _lookup_with_consul(consul_handle, 'config_binding_service', max_attempts=5) if (results is None): logger.error('Cannot bind config at this time, cbs is unreachable') else: cbs_hostname = results[0]['ServiceAddress'] cbs_port = results[0]['ServicePort'] cbs_url = 'http://{hostname}:{port}'.format(hostname=cbs_hostname, port=cbs_port) my_config_endpoint = '{0}/service_component/{1}'.format(cbs_url, service_name) res = requests.get(my_config_endpoint) try: res.raise_for_status() config = res.json() _logger.info('get_config returned the following configuration: {0}'.format(json.dumps(config))) except: _logger.error('in get_config, the config binding service endpoint {0} blew up on me. Error code: {1}, Error text: {2}'.format(my_config_endpoint, res.status_code, res.text)) return config<|docstring|>This is what a minimal python client library that wraps the CBS would look like. POSSIBLE TODO: break this out into pypi repo This call does not raise an exception if Consul or the CBS cannot complete the request. It logs an error and returns {} if the config is not bindable. It could be a temporary network outage. Call me again later. It will raise an exception if the necessary env parameters were not set because that is irrecoverable. This function is called in my /heatlhcheck, so this will be caught early.<|endoftext|>
db5f9723aa7e7f770aa63e8cc16a963f371a23235076c86faf3a713f4d74c446
def _get_connection_types(config): 'Get all the connection types for a given configuration json\n\n Crawls through the entire config dict recursively and returns the entries\n that have been identified as service connections in the form of a list of tuples -\n\n [(config key, component type), ..]\n\n where "config key" is a compound key in the form of a tuple. Each entry in\n the compound key is a key to a level within the json data structure.' def grab_component_type(v): if isinstance(v, six.string_types): result = re.match('^{{\\s*([-_.\\w]*)\\s*}}', v) return (result.group(1) if result else None) def crawl(config, parent_key=()): if isinstance(config, dict): rels = [crawl(value, (parent_key + (key,))) for (key, value) in config.items()] rels = chain(*rels) elif isinstance(config, list): rels = [crawl(config[index], (parent_key + (index,))) for index in range(0, len(config))] rels = chain(*rels) else: rels = [(parent_key, grab_component_type(config))] rels = [(key, rel) for (key, rel) in rels if rel] return rels return crawl(config)
Get all the connection types for a given configuration json Crawls through the entire config dict recursively and returns the entries that have been identified as service connections in the form of a list of tuples - [(config key, component type), ..] where "config key" is a compound key in the form of a tuple. Each entry in the compound key is a key to a level within the json data structure.
python-discovery-client/discovery_client/discovery.py
_get_connection_types
onap/dcaegen2-utils
2
python
def _get_connection_types(config): 'Get all the connection types for a given configuration json\n\n Crawls through the entire config dict recursively and returns the entries\n that have been identified as service connections in the form of a list of tuples -\n\n [(config key, component type), ..]\n\n where "config key" is a compound key in the form of a tuple. Each entry in\n the compound key is a key to a level within the json data structure.' def grab_component_type(v): if isinstance(v, six.string_types): result = re.match('^{{\\s*([-_.\\w]*)\\s*}}', v) return (result.group(1) if result else None) def crawl(config, parent_key=()): if isinstance(config, dict): rels = [crawl(value, (parent_key + (key,))) for (key, value) in config.items()] rels = chain(*rels) elif isinstance(config, list): rels = [crawl(config[index], (parent_key + (index,))) for index in range(0, len(config))] rels = chain(*rels) else: rels = [(parent_key, grab_component_type(config))] rels = [(key, rel) for (key, rel) in rels if rel] return rels return crawl(config)
def _get_connection_types(config): 'Get all the connection types for a given configuration json\n\n Crawls through the entire config dict recursively and returns the entries\n that have been identified as service connections in the form of a list of tuples -\n\n [(config key, component type), ..]\n\n where "config key" is a compound key in the form of a tuple. Each entry in\n the compound key is a key to a level within the json data structure.' def grab_component_type(v): if isinstance(v, six.string_types): result = re.match('^{{\\s*([-_.\\w]*)\\s*}}', v) return (result.group(1) if result else None) def crawl(config, parent_key=()): if isinstance(config, dict): rels = [crawl(value, (parent_key + (key,))) for (key, value) in config.items()] rels = chain(*rels) elif isinstance(config, list): rels = [crawl(config[index], (parent_key + (index,))) for index in range(0, len(config))] rels = chain(*rels) else: rels = [(parent_key, grab_component_type(config))] rels = [(key, rel) for (key, rel) in rels if rel] return rels return crawl(config)<|docstring|>Get all the connection types for a given configuration json Crawls through the entire config dict recursively and returns the entries that have been identified as service connections in the form of a list of tuples - [(config key, component type), ..] where "config key" is a compound key in the form of a tuple. Each entry in the compound key is a key to a level within the json data structure.<|endoftext|>
8274e1eead6c2506407872073070100fc86245d9ecaf3ac0b579cb245ef81e44
def _resolve_name(lookup_func, service_name): 'Resolves the service component name to detailed connection information\n\n Currently this is grouped into two ways:\n 1. CDAP applications take a two step approach - call Consul then call the\n CDAP broker\n 2. All other applications just call Consul to get IP and port\n\n Args:\n ----\n lookup_func: fn(string) -> list of dicts\n The function should return a list of dicts that have "ServiceAddress" and\n "ServicePort" key value entries\n service_name: (string) service name to lookup\n\n Return depends upon the connection type:\n 1. CDAP applications return a dict\n 2. All other applications return a string\n ' def handle_result(result): ip = result['ServiceAddress'] port = result['ServicePort'] if (not (ip and port)): raise DiscoveryResolvingNameError('Failed to resolve name for {0}: ip, port not set'.format(service_name)) if ('cdap' in service_name): redirectish_url = 'http://{0}:{1}/application/{2}'.format(ip, port, service_name) r = requests.get(redirectish_url) r.raise_for_status() details = r.json() return {key: details[key] for key in ['connectionurl', 'serviceendpoints']} else: return '{0}:{1}'.format(ip, port) try: results = lookup_func(service_name) return [handle_result(result) for result in results] except Exception as e: raise DiscoveryResolvingNameError('Failed to resolve name for {0}: {1}'.format(service_name, e))
Resolves the service component name to detailed connection information Currently this is grouped into two ways: 1. CDAP applications take a two step approach - call Consul then call the CDAP broker 2. All other applications just call Consul to get IP and port Args: ---- lookup_func: fn(string) -> list of dicts The function should return a list of dicts that have "ServiceAddress" and "ServicePort" key value entries service_name: (string) service name to lookup Return depends upon the connection type: 1. CDAP applications return a dict 2. All other applications return a string
python-discovery-client/discovery_client/discovery.py
_resolve_name
onap/dcaegen2-utils
2
python
def _resolve_name(lookup_func, service_name): 'Resolves the service component name to detailed connection information\n\n Currently this is grouped into two ways:\n 1. CDAP applications take a two step approach - call Consul then call the\n CDAP broker\n 2. All other applications just call Consul to get IP and port\n\n Args:\n ----\n lookup_func: fn(string) -> list of dicts\n The function should return a list of dicts that have "ServiceAddress" and\n "ServicePort" key value entries\n service_name: (string) service name to lookup\n\n Return depends upon the connection type:\n 1. CDAP applications return a dict\n 2. All other applications return a string\n ' def handle_result(result): ip = result['ServiceAddress'] port = result['ServicePort'] if (not (ip and port)): raise DiscoveryResolvingNameError('Failed to resolve name for {0}: ip, port not set'.format(service_name)) if ('cdap' in service_name): redirectish_url = 'http://{0}:{1}/application/{2}'.format(ip, port, service_name) r = requests.get(redirectish_url) r.raise_for_status() details = r.json() return {key: details[key] for key in ['connectionurl', 'serviceendpoints']} else: return '{0}:{1}'.format(ip, port) try: results = lookup_func(service_name) return [handle_result(result) for result in results] except Exception as e: raise DiscoveryResolvingNameError('Failed to resolve name for {0}: {1}'.format(service_name, e))
def _resolve_name(lookup_func, service_name): 'Resolves the service component name to detailed connection information\n\n Currently this is grouped into two ways:\n 1. CDAP applications take a two step approach - call Consul then call the\n CDAP broker\n 2. All other applications just call Consul to get IP and port\n\n Args:\n ----\n lookup_func: fn(string) -> list of dicts\n The function should return a list of dicts that have "ServiceAddress" and\n "ServicePort" key value entries\n service_name: (string) service name to lookup\n\n Return depends upon the connection type:\n 1. CDAP applications return a dict\n 2. All other applications return a string\n ' def handle_result(result): ip = result['ServiceAddress'] port = result['ServicePort'] if (not (ip and port)): raise DiscoveryResolvingNameError('Failed to resolve name for {0}: ip, port not set'.format(service_name)) if ('cdap' in service_name): redirectish_url = 'http://{0}:{1}/application/{2}'.format(ip, port, service_name) r = requests.get(redirectish_url) r.raise_for_status() details = r.json() return {key: details[key] for key in ['connectionurl', 'serviceendpoints']} else: return '{0}:{1}'.format(ip, port) try: results = lookup_func(service_name) return [handle_result(result) for result in results] except Exception as e: raise DiscoveryResolvingNameError('Failed to resolve name for {0}: {1}'.format(service_name, e))<|docstring|>Resolves the service component name to detailed connection information Currently this is grouped into two ways: 1. CDAP applications take a two step approach - call Consul then call the CDAP broker 2. All other applications just call Consul to get IP and port Args: ---- lookup_func: fn(string) -> list of dicts The function should return a list of dicts that have "ServiceAddress" and "ServicePort" key value entries service_name: (string) service name to lookup Return depends upon the connection type: 1. CDAP applications return a dict 2. 
All other applications return a string<|endoftext|>
208b387e1b38812c6540fe7ab4357cb9dc2642509ec7540f2c67ade436e891ff
def _resolve_configuration_dict(ch, service_name, config): '\n Helper used by both resolve_configuration_dict and get_configuration\n ' if _has_connections(config): rels = _get_relationships_from_consul(ch, service_name) connection_types = _get_connection_types(config) connection_names = _resolve_connection_types(service_name, connection_types, rels) for (key, conn) in [(key, [_resolve_name(partial(_lookup_with_consul, ch), name)[0] for name in names]) for (key, names) in connection_names]: config = util.update_json(config, key, conn) _logger.info('Generated config: {0}'.format(config)) return config
Helper used by both resolve_configuration_dict and get_configuration
python-discovery-client/discovery_client/discovery.py
_resolve_configuration_dict
onap/dcaegen2-utils
2
python
def _resolve_configuration_dict(ch, service_name, config): '\n \n ' if _has_connections(config): rels = _get_relationships_from_consul(ch, service_name) connection_types = _get_connection_types(config) connection_names = _resolve_connection_types(service_name, connection_types, rels) for (key, conn) in [(key, [_resolve_name(partial(_lookup_with_consul, ch), name)[0] for name in names]) for (key, names) in connection_names]: config = util.update_json(config, key, conn) _logger.info('Generated config: {0}'.format(config)) return config
def _resolve_configuration_dict(ch, service_name, config): '\n \n ' if _has_connections(config): rels = _get_relationships_from_consul(ch, service_name) connection_types = _get_connection_types(config) connection_names = _resolve_connection_types(service_name, connection_types, rels) for (key, conn) in [(key, [_resolve_name(partial(_lookup_with_consul, ch), name)[0] for name in names]) for (key, names) in connection_names]: config = util.update_json(config, key, conn) _logger.info('Generated config: {0}'.format(config)) return config<|docstring|>Helper used by both resolve_configuration_dict and get_configuration<|endoftext|>
8002f06f576da2045761737154c5e358735da3a52c5be263baadee140078323d
def get_consul_hostname(consul_hostname_override=None): 'Get the Consul hostname' try: return (consul_hostname_override if consul_hostname_override else os.environ['CONSUL_HOST']) except: raise DiscoveryInitError('CONSUL_HOST variable has not been set!')
Get the Consul hostname
python-discovery-client/discovery_client/discovery.py
get_consul_hostname
onap/dcaegen2-utils
2
python
def get_consul_hostname(consul_hostname_override=None): try: return (consul_hostname_override if consul_hostname_override else os.environ['CONSUL_HOST']) except: raise DiscoveryInitError('CONSUL_HOST variable has not been set!')
def get_consul_hostname(consul_hostname_override=None): try: return (consul_hostname_override if consul_hostname_override else os.environ['CONSUL_HOST']) except: raise DiscoveryInitError('CONSUL_HOST variable has not been set!')<|docstring|>Get the Consul hostname<|endoftext|>
d62628c82944300de18253deb93a34d3e5c085ab131e9f9ac3a29e6c7062dc18
def get_service_name(): 'Get the full service name\n\n This is expected to be given from whatever entity is starting this service\n and given by an environment variable called "HOSTNAME".' try: return os.environ['HOSTNAME'] except: raise DiscoveryInitError('HOSTNAME variable has not been set!')
Get the full service name This is expected to be given from whatever entity is starting this service and given by an environment variable called "HOSTNAME".
python-discovery-client/discovery_client/discovery.py
get_service_name
onap/dcaegen2-utils
2
python
def get_service_name(): 'Get the full service name\n\n This is expected to be given from whatever entity is starting this service\n and given by an environment variable called "HOSTNAME".' try: return os.environ['HOSTNAME'] except: raise DiscoveryInitError('HOSTNAME variable has not been set!')
def get_service_name(): 'Get the full service name\n\n This is expected to be given from whatever entity is starting this service\n and given by an environment variable called "HOSTNAME".' try: return os.environ['HOSTNAME'] except: raise DiscoveryInitError('HOSTNAME variable has not been set!')<|docstring|>Get the full service name This is expected to be given from whatever entity is starting this service and given by an environment variable called "HOSTNAME".<|endoftext|>
18a10528371b66bceedf311d72c1af5c5c929161b9b46c2a2088dd42232629d4
def resolve_name(consul_host, service_name, max_attempts=3): 'Resolve the service name\n\n Do a service discovery lookup from Consul and return back the detailed connection\n information.\n\n Returns:\n --------\n For CDAP apps, returns a dict. All others a string with the format "<ip>:<port>"\n ' ch = consul.Consul(host=consul_host) lookup_func = partial(_lookup_with_consul, ch, max_attempts=max_attempts) return _resolve_name(lookup_func, service_name)
Resolve the service name Do a service discovery lookup from Consul and return back the detailed connection information. Returns: -------- For CDAP apps, returns a dict. All others a string with the format "<ip>:<port>"
python-discovery-client/discovery_client/discovery.py
resolve_name
onap/dcaegen2-utils
2
python
def resolve_name(consul_host, service_name, max_attempts=3): 'Resolve the service name\n\n Do a service discovery lookup from Consul and return back the detailed connection\n information.\n\n Returns:\n --------\n For CDAP apps, returns a dict. All others a string with the format "<ip>:<port>"\n ' ch = consul.Consul(host=consul_host) lookup_func = partial(_lookup_with_consul, ch, max_attempts=max_attempts) return _resolve_name(lookup_func, service_name)
def resolve_name(consul_host, service_name, max_attempts=3): 'Resolve the service name\n\n Do a service discovery lookup from Consul and return back the detailed connection\n information.\n\n Returns:\n --------\n For CDAP apps, returns a dict. All others a string with the format "<ip>:<port>"\n ' ch = consul.Consul(host=consul_host) lookup_func = partial(_lookup_with_consul, ch, max_attempts=max_attempts) return _resolve_name(lookup_func, service_name)<|docstring|>Resolve the service name Do a service discovery lookup from Consul and return back the detailed connection information. Returns: -------- For CDAP apps, returns a dict. All others a string with the format "<ip>:<port>"<|endoftext|>
c26fb4f1a89dac837b4c581900440f90be531e336b989729cae7321d6e466d19
def resolve_configuration_dict(consul_host, service_name, config): '\n Utility method for taking a given service_name, and config dict, and resolving it\n ' ch = consul.Consul(host=consul_host) return _resolve_configuration_dict(ch, service_name, config)
Utility method for taking a given service_name, and config dict, and resolving it
python-discovery-client/discovery_client/discovery.py
resolve_configuration_dict
onap/dcaegen2-utils
2
python
def resolve_configuration_dict(consul_host, service_name, config): '\n \n ' ch = consul.Consul(host=consul_host) return _resolve_configuration_dict(ch, service_name, config)
def resolve_configuration_dict(consul_host, service_name, config): '\n \n ' ch = consul.Consul(host=consul_host) return _resolve_configuration_dict(ch, service_name, config)<|docstring|>Utility method for taking a given service_name, and config dict, and resolving it<|endoftext|>
1f904afafc96caabc3d61a4a0c1a1825c68748a95222df5fd95cda1b7a78e971
def get_configuration(override_consul_hostname=None, override_service_name=None, from_cbs=True): "Provides this service component's configuration information fully resolved\n\n This method can either resolve the configuration locally here or make a\n remote call to the config binding service. The default is to use the config\n binding service.\n\n Args:\n -----\n override_consul_hostname (string): Consul hostname to use rather than the one\n set by the environment variable CONSUL_HOST\n override_service_name (string): Use this name over the name set on the\n HOSTNAME environment variable. Default is None.\n from_cbs (boolean): True (default) means use the config binding service otherwise\n set to False to have the config pulled and resolved by this library\n\n Returns the fully resolved service component configuration as a dict\n " consul_hostname = get_consul_hostname(override_consul_hostname) ch = consul.Consul(host=consul_hostname) service_name = (override_service_name if override_service_name else get_service_name()) _logger.info('service name: {0}'.format(service_name)) if from_cbs: return _get_configuration_resolved_from_cbs(ch, service_name) else: config = _get_configuration_from_consul(ch, service_name) return _resolve_configuration_dict(ch, service_name, config)
Provides this service component's configuration information fully resolved This method can either resolve the configuration locally here or make a remote call to the config binding service. The default is to use the config binding service. Args: ----- override_consul_hostname (string): Consul hostname to use rather than the one set by the environment variable CONSUL_HOST override_service_name (string): Use this name over the name set on the HOSTNAME environment variable. Default is None. from_cbs (boolean): True (default) means use the config binding service otherwise set to False to have the config pulled and resolved by this library Returns the fully resolved service component configuration as a dict
python-discovery-client/discovery_client/discovery.py
get_configuration
onap/dcaegen2-utils
2
python
def get_configuration(override_consul_hostname=None, override_service_name=None, from_cbs=True): "Provides this service component's configuration information fully resolved\n\n This method can either resolve the configuration locally here or make a\n remote call to the config binding service. The default is to use the config\n binding service.\n\n Args:\n -----\n override_consul_hostname (string): Consul hostname to use rather than the one\n set by the environment variable CONSUL_HOST\n override_service_name (string): Use this name over the name set on the\n HOSTNAME environment variable. Default is None.\n from_cbs (boolean): True (default) means use the config binding service otherwise\n set to False to have the config pulled and resolved by this library\n\n Returns the fully resolved service component configuration as a dict\n " consul_hostname = get_consul_hostname(override_consul_hostname) ch = consul.Consul(host=consul_hostname) service_name = (override_service_name if override_service_name else get_service_name()) _logger.info('service name: {0}'.format(service_name)) if from_cbs: return _get_configuration_resolved_from_cbs(ch, service_name) else: config = _get_configuration_from_consul(ch, service_name) return _resolve_configuration_dict(ch, service_name, config)
def get_configuration(override_consul_hostname=None, override_service_name=None, from_cbs=True): "Provides this service component's configuration information fully resolved\n\n This method can either resolve the configuration locally here or make a\n remote call to the config binding service. The default is to use the config\n binding service.\n\n Args:\n -----\n override_consul_hostname (string): Consul hostname to use rather than the one\n set by the environment variable CONSUL_HOST\n override_service_name (string): Use this name over the name set on the\n HOSTNAME environment variable. Default is None.\n from_cbs (boolean): True (default) means use the config binding service otherwise\n set to False to have the config pulled and resolved by this library\n\n Returns the fully resolved service component configuration as a dict\n " consul_hostname = get_consul_hostname(override_consul_hostname) ch = consul.Consul(host=consul_hostname) service_name = (override_service_name if override_service_name else get_service_name()) _logger.info('service name: {0}'.format(service_name)) if from_cbs: return _get_configuration_resolved_from_cbs(ch, service_name) else: config = _get_configuration_from_consul(ch, service_name) return _resolve_configuration_dict(ch, service_name, config)<|docstring|>Provides this service component's configuration information fully resolved This method can either resolve the configuration locally here or make a remote call to the config binding service. The default is to use the config binding service. Args: ----- override_consul_hostname (string): Consul hostname to use rather than the one set by the environment variable CONSUL_HOST override_service_name (string): Use this name over the name set on the HOSTNAME environment variable. Default is None. 
from_cbs (boolean): True (default) means use the config binding service otherwise set to False to have the config pulled and resolved by this library Returns the fully resolved service component configuration as a dict<|endoftext|>
f9dc0e7e06ae2ea686d27d782ed6f806f0fe8f59ed1fae48159dbaf1f8fc137d
def register_for_discovery(consul_host, service_ip, service_port): 'Register the service component for service discovery\n\n This is required in order for other services to "discover" you so that you\n can service their requests.\n\n NOTE: Applications may not need to make this call depending upon if the\n environment is using Registrator.\n ' ch = consul.Consul(host=consul_host) service_name = get_service_name() if _register_with_consul(ch, service_name, service_ip, service_port, 'health'): _logger.info('Registered to consul: {0}'.format(service_name)) else: _logger.error('Failed to register to consul: {0}'.format(service_name)) raise DiscoveryRegistrationError()
Register the service component for service discovery This is required in order for other services to "discover" you so that you can service their requests. NOTE: Applications may not need to make this call depending upon if the environment is using Registrator.
python-discovery-client/discovery_client/discovery.py
register_for_discovery
onap/dcaegen2-utils
2
python
def register_for_discovery(consul_host, service_ip, service_port): 'Register the service component for service discovery\n\n This is required in order for other services to "discover" you so that you\n can service their requests.\n\n NOTE: Applications may not need to make this call depending upon if the\n environment is using Registrator.\n ' ch = consul.Consul(host=consul_host) service_name = get_service_name() if _register_with_consul(ch, service_name, service_ip, service_port, 'health'): _logger.info('Registered to consul: {0}'.format(service_name)) else: _logger.error('Failed to register to consul: {0}'.format(service_name)) raise DiscoveryRegistrationError()
def register_for_discovery(consul_host, service_ip, service_port): 'Register the service component for service discovery\n\n This is required in order for other services to "discover" you so that you\n can service their requests.\n\n NOTE: Applications may not need to make this call depending upon if the\n environment is using Registrator.\n ' ch = consul.Consul(host=consul_host) service_name = get_service_name() if _register_with_consul(ch, service_name, service_ip, service_port, 'health'): _logger.info('Registered to consul: {0}'.format(service_name)) else: _logger.error('Failed to register to consul: {0}'.format(service_name)) raise DiscoveryRegistrationError()<|docstring|>Register the service component for service discovery This is required in order for other services to "discover" you so that you can service their requests. NOTE: Applications may not need to make this call depending upon if the environment is using Registrator.<|endoftext|>
e4236f91b66995c44c57dfbdffd9f7ed8443213f18dafefd76f2ea433277177a
async def close(self) -> None: 'This method is to close the sockets opened by the client.\n It need not be used when using with a context manager.\n ' (await self._client.close())
This method is to close the sockets opened by the client. It need not be used when using with a context manager.
sdk/tables/azure-data-tables/azure/data/tables/aio/_base_client_async.py
close
automagically/azure-sdk-for-python
1
python
async def close(self) -> None: 'This method is to close the sockets opened by the client.\n It need not be used when using with a context manager.\n ' (await self._client.close())
async def close(self) -> None: 'This method is to close the sockets opened by the client.\n It need not be used when using with a context manager.\n ' (await self._client.close())<|docstring|>This method is to close the sockets opened by the client. It need not be used when using with a context manager.<|endoftext|>
9f5ff04c9be232c7a8ac87c5b80ae51116ede2b4440b9864cb2cd9f58c4c1ddb
async def _batch_send(self, *reqs: 'HttpRequest', **kwargs) -> List[Mapping[(str, Any)]]: 'Given a series of request, do a Storage batch call.' policies = [StorageHeadersPolicy()] changeset = HttpRequest('POST', None) changeset.set_multipart_mixed(*reqs, policies=policies, boundary='changeset_{}'.format(uuid4())) request = self._client._client.post(url='{}://{}/$batch'.format(self.scheme, self._primary_hostname), headers={'x-ms-version': self.api_version, 'DataServiceVersion': '3.0', 'MaxDataServiceVersion': '3.0;NetFx', 'Content-Type': 'application/json', 'Accept': 'application/json'}) request.set_multipart_mixed(changeset, policies=policies, enforce_https=False, boundary='batch_{}'.format(uuid4())) pipeline_response = (await self._client._client._pipeline.run(request, **kwargs)) response = pipeline_response.http_response if (response.status_code == 413): raise _decode_error(response, error_message='The transaction request was too large', error_type=RequestTooLargeError) if (response.status_code != 202): raise _decode_error(response) parts_iter = response.parts() parts = [] async for p in parts_iter: parts.append(p) error_parts = [p for p in parts if (not (200 <= p.status_code < 300))] if any(error_parts): if (error_parts[0].status_code == 413): raise _decode_error(response, error_message='The transaction request was too large', error_type=RequestTooLargeError) raise _decode_error(response=error_parts[0], error_type=TableTransactionError) return [extract_batch_part_metadata(p) for p in parts]
Given a series of request, do a Storage batch call.
sdk/tables/azure-data-tables/azure/data/tables/aio/_base_client_async.py
_batch_send
automagically/azure-sdk-for-python
1
python
async def _batch_send(self, *reqs: 'HttpRequest', **kwargs) -> List[Mapping[(str, Any)]]: policies = [StorageHeadersPolicy()] changeset = HttpRequest('POST', None) changeset.set_multipart_mixed(*reqs, policies=policies, boundary='changeset_{}'.format(uuid4())) request = self._client._client.post(url='{}://{}/$batch'.format(self.scheme, self._primary_hostname), headers={'x-ms-version': self.api_version, 'DataServiceVersion': '3.0', 'MaxDataServiceVersion': '3.0;NetFx', 'Content-Type': 'application/json', 'Accept': 'application/json'}) request.set_multipart_mixed(changeset, policies=policies, enforce_https=False, boundary='batch_{}'.format(uuid4())) pipeline_response = (await self._client._client._pipeline.run(request, **kwargs)) response = pipeline_response.http_response if (response.status_code == 413): raise _decode_error(response, error_message='The transaction request was too large', error_type=RequestTooLargeError) if (response.status_code != 202): raise _decode_error(response) parts_iter = response.parts() parts = [] async for p in parts_iter: parts.append(p) error_parts = [p for p in parts if (not (200 <= p.status_code < 300))] if any(error_parts): if (error_parts[0].status_code == 413): raise _decode_error(response, error_message='The transaction request was too large', error_type=RequestTooLargeError) raise _decode_error(response=error_parts[0], error_type=TableTransactionError) return [extract_batch_part_metadata(p) for p in parts]
async def _batch_send(self, *reqs: 'HttpRequest', **kwargs) -> List[Mapping[(str, Any)]]: policies = [StorageHeadersPolicy()] changeset = HttpRequest('POST', None) changeset.set_multipart_mixed(*reqs, policies=policies, boundary='changeset_{}'.format(uuid4())) request = self._client._client.post(url='{}://{}/$batch'.format(self.scheme, self._primary_hostname), headers={'x-ms-version': self.api_version, 'DataServiceVersion': '3.0', 'MaxDataServiceVersion': '3.0;NetFx', 'Content-Type': 'application/json', 'Accept': 'application/json'}) request.set_multipart_mixed(changeset, policies=policies, enforce_https=False, boundary='batch_{}'.format(uuid4())) pipeline_response = (await self._client._client._pipeline.run(request, **kwargs)) response = pipeline_response.http_response if (response.status_code == 413): raise _decode_error(response, error_message='The transaction request was too large', error_type=RequestTooLargeError) if (response.status_code != 202): raise _decode_error(response) parts_iter = response.parts() parts = [] async for p in parts_iter: parts.append(p) error_parts = [p for p in parts if (not (200 <= p.status_code < 300))] if any(error_parts): if (error_parts[0].status_code == 413): raise _decode_error(response, error_message='The transaction request was too large', error_type=RequestTooLargeError) raise _decode_error(response=error_parts[0], error_type=TableTransactionError) return [extract_batch_part_metadata(p) for p in parts]<|docstring|>Given a series of request, do a Storage batch call.<|endoftext|>
3ad42d8d4677fdd4f2a975ba4248fe98b06ee87fc7d9a03a6862780503be7caf
def ParetoCdf(x, alpha, xmin): 'Evaluates CDF of the Pareto distribution with parameters alpha, xmin.' if (x < xmin): return 0 return (1 - pow((x / xmin), (- alpha)))
Evaluates CDF of the Pareto distribution with parameters alpha, xmin.
DSC 530 - Data Exploration and Analysis/ThinkStats2/solutions/pareto_world.py
ParetoCdf
Hakuna-Patata/BU_MSDS_PTW
0
python
def ParetoCdf(x, alpha, xmin): if (x < xmin): return 0 return (1 - pow((x / xmin), (- alpha)))
def ParetoCdf(x, alpha, xmin): if (x < xmin): return 0 return (1 - pow((x / xmin), (- alpha)))<|docstring|>Evaluates CDF of the Pareto distribution with parameters alpha, xmin.<|endoftext|>
c81f091fdfc09bb56fc9a34e35745bb0d309c0197ac5b2ff80a8bc8bc25d7be7
def ParetoMedian(xmin, alpha): 'Computes the median of a Pareto distribution.' return (xmin * pow(2, (1 / alpha)))
Computes the median of a Pareto distribution.
DSC 530 - Data Exploration and Analysis/ThinkStats2/solutions/pareto_world.py
ParetoMedian
Hakuna-Patata/BU_MSDS_PTW
0
python
def ParetoMedian(xmin, alpha): return (xmin * pow(2, (1 / alpha)))
def ParetoMedian(xmin, alpha): return (xmin * pow(2, (1 / alpha)))<|docstring|>Computes the median of a Pareto distribution.<|endoftext|>
8e0a155f2be90e07c5fd1ce2e01733fbc07bde3d0ac9f22e0572a173307b254e
def MakeParetoCdf(): 'Generates a plot of the CDF of height in Pareto World.' n = 50 max = 1000.0 xs = [((max * i) / n) for i in range(n)] xmin = 100 alpha = 1.7 ps = [ParetoCdf(x, alpha, xmin) for x in xs] print('Median', ParetoMedian(xmin, alpha)) pyplot.clf() pyplot.plot(xs, ps, linewidth=2) myplot.Save('pareto_world1', title='Pareto CDF', xlabel='height (cm)', ylabel='CDF', legend=False)
Generates a plot of the CDF of height in Pareto World.
DSC 530 - Data Exploration and Analysis/ThinkStats2/solutions/pareto_world.py
MakeParetoCdf
Hakuna-Patata/BU_MSDS_PTW
0
python
def MakeParetoCdf(): n = 50 max = 1000.0 xs = [((max * i) / n) for i in range(n)] xmin = 100 alpha = 1.7 ps = [ParetoCdf(x, alpha, xmin) for x in xs] print('Median', ParetoMedian(xmin, alpha)) pyplot.clf() pyplot.plot(xs, ps, linewidth=2) myplot.Save('pareto_world1', title='Pareto CDF', xlabel='height (cm)', ylabel='CDF', legend=False)
def MakeParetoCdf(): n = 50 max = 1000.0 xs = [((max * i) / n) for i in range(n)] xmin = 100 alpha = 1.7 ps = [ParetoCdf(x, alpha, xmin) for x in xs] print('Median', ParetoMedian(xmin, alpha)) pyplot.clf() pyplot.plot(xs, ps, linewidth=2) myplot.Save('pareto_world1', title='Pareto CDF', xlabel='height (cm)', ylabel='CDF', legend=False)<|docstring|>Generates a plot of the CDF of height in Pareto World.<|endoftext|>
f873227626a3fd92ad84dec7b6b8482fb3bf9f914d2dd7f921351d45dfbd5802
def MakeFigure(xmin=100, alpha=1.7, mu=150, sigma=25): 'Makes a figure showing the CDF of height in ParetoWorld.\n\n Compared to a normal distribution.\n\n xmin: parameter of the Pareto distribution\n alpha: parameter of the Pareto distribution\n mu: parameter of the Normal distribution\n sigma: parameter of the Normal distribution\n ' t1 = [(xmin * random.paretovariate(alpha)) for i in range(10000)] cdf1 = Cdf.MakeCdfFromList(t1, name='pareto') t2 = [random.normalvariate(mu, sigma) for i in range(10000)] cdf2 = Cdf.MakeCdfFromList(t2, name='normal') myplot.Clf() myplot.Cdfs([cdf1, cdf2]) myplot.Save(root='pareto_world2', title='Pareto World', xlabel='height (cm)', ylabel='CDF')
Makes a figure showing the CDF of height in ParetoWorld. Compared to a normal distribution. xmin: parameter of the Pareto distribution alpha: parameter of the Pareto distribution mu: parameter of the Normal distribution sigma: parameter of the Normal distribution
DSC 530 - Data Exploration and Analysis/ThinkStats2/solutions/pareto_world.py
MakeFigure
Hakuna-Patata/BU_MSDS_PTW
0
python
def MakeFigure(xmin=100, alpha=1.7, mu=150, sigma=25): 'Makes a figure showing the CDF of height in ParetoWorld.\n\n Compared to a normal distribution.\n\n xmin: parameter of the Pareto distribution\n alpha: parameter of the Pareto distribution\n mu: parameter of the Normal distribution\n sigma: parameter of the Normal distribution\n ' t1 = [(xmin * random.paretovariate(alpha)) for i in range(10000)] cdf1 = Cdf.MakeCdfFromList(t1, name='pareto') t2 = [random.normalvariate(mu, sigma) for i in range(10000)] cdf2 = Cdf.MakeCdfFromList(t2, name='normal') myplot.Clf() myplot.Cdfs([cdf1, cdf2]) myplot.Save(root='pareto_world2', title='Pareto World', xlabel='height (cm)', ylabel='CDF')
def MakeFigure(xmin=100, alpha=1.7, mu=150, sigma=25): 'Makes a figure showing the CDF of height in ParetoWorld.\n\n Compared to a normal distribution.\n\n xmin: parameter of the Pareto distribution\n alpha: parameter of the Pareto distribution\n mu: parameter of the Normal distribution\n sigma: parameter of the Normal distribution\n ' t1 = [(xmin * random.paretovariate(alpha)) for i in range(10000)] cdf1 = Cdf.MakeCdfFromList(t1, name='pareto') t2 = [random.normalvariate(mu, sigma) for i in range(10000)] cdf2 = Cdf.MakeCdfFromList(t2, name='normal') myplot.Clf() myplot.Cdfs([cdf1, cdf2]) myplot.Save(root='pareto_world2', title='Pareto World', xlabel='height (cm)', ylabel='CDF')<|docstring|>Makes a figure showing the CDF of height in ParetoWorld. Compared to a normal distribution. xmin: parameter of the Pareto distribution alpha: parameter of the Pareto distribution mu: parameter of the Normal distribution sigma: parameter of the Normal distribution<|endoftext|>
3004eb372d3a1fd4b2262f4e6e03a42ef3f7152b1b9b0531f80b276d912c9131
def TallestPareto(iters=2, n=10000, xmin=100, alpha=1.7): 'Find the tallest person in Pareto World.\n\n iters: how many samples to generate\n n: how many in each sample\n xmin: parameter of the Pareto distribution\n alpha: parameter of the Pareto distribution\n ' tallest = 0 for i in range(iters): t = [(xmin * random.paretovariate(alpha)) for i in range(n)] tallest = max(max(t), tallest) return tallest
Find the tallest person in Pareto World. iters: how many samples to generate n: how many in each sample xmin: parameter of the Pareto distribution alpha: parameter of the Pareto distribution
DSC 530 - Data Exploration and Analysis/ThinkStats2/solutions/pareto_world.py
TallestPareto
Hakuna-Patata/BU_MSDS_PTW
0
python
def TallestPareto(iters=2, n=10000, xmin=100, alpha=1.7): 'Find the tallest person in Pareto World.\n\n iters: how many samples to generate\n n: how many in each sample\n xmin: parameter of the Pareto distribution\n alpha: parameter of the Pareto distribution\n ' tallest = 0 for i in range(iters): t = [(xmin * random.paretovariate(alpha)) for i in range(n)] tallest = max(max(t), tallest) return tallest
def TallestPareto(iters=2, n=10000, xmin=100, alpha=1.7): 'Find the tallest person in Pareto World.\n\n iters: how many samples to generate\n n: how many in each sample\n xmin: parameter of the Pareto distribution\n alpha: parameter of the Pareto distribution\n ' tallest = 0 for i in range(iters): t = [(xmin * random.paretovariate(alpha)) for i in range(n)] tallest = max(max(t), tallest) return tallest<|docstring|>Find the tallest person in Pareto World. iters: how many samples to generate n: how many in each sample xmin: parameter of the Pareto distribution alpha: parameter of the Pareto distribution<|endoftext|>
5b4ec42c4b404a45c152b91f90732d309761ef984ab1df67ccaf2d86afae1120
def __init__(self, username=None, first_name=None, last_name=None, email=None, phone=None, company=None, timezone=None): 'UpdateCurrentUserInputObject - a model defined in Swagger' self._username = None self._first_name = None self._last_name = None self._email = None self._phone = None self._company = None self._timezone = None self.discriminator = None if (username is not None): self.username = username if (first_name is not None): self.first_name = first_name if (last_name is not None): self.last_name = last_name if (email is not None): self.email = email if (phone is not None): self.phone = phone if (company is not None): self.company = company if (timezone is not None): self.timezone = timezone
UpdateCurrentUserInputObject - a model defined in Swagger
TextMagic/models/update_current_user_input_object.py
__init__
textmagic/textmagic-rest-python-v2
2
python
def __init__(self, username=None, first_name=None, last_name=None, email=None, phone=None, company=None, timezone=None): self._username = None self._first_name = None self._last_name = None self._email = None self._phone = None self._company = None self._timezone = None self.discriminator = None if (username is not None): self.username = username if (first_name is not None): self.first_name = first_name if (last_name is not None): self.last_name = last_name if (email is not None): self.email = email if (phone is not None): self.phone = phone if (company is not None): self.company = company if (timezone is not None): self.timezone = timezone
def __init__(self, username=None, first_name=None, last_name=None, email=None, phone=None, company=None, timezone=None): self._username = None self._first_name = None self._last_name = None self._email = None self._phone = None self._company = None self._timezone = None self.discriminator = None if (username is not None): self.username = username if (first_name is not None): self.first_name = first_name if (last_name is not None): self.last_name = last_name if (email is not None): self.email = email if (phone is not None): self.phone = phone if (company is not None): self.company = company if (timezone is not None): self.timezone = timezone<|docstring|>UpdateCurrentUserInputObject - a model defined in Swagger<|endoftext|>
2c0786b72098da259115e0fb6b9f5c56ee9e6cde9cbb965c5283d8126d762d57
@property def username(self): 'Gets the username of this UpdateCurrentUserInputObject. # noqa: E501\n\n\n :return: The username of this UpdateCurrentUserInputObject. # noqa: E501\n :rtype: str\n ' return self._username
Gets the username of this UpdateCurrentUserInputObject. # noqa: E501 :return: The username of this UpdateCurrentUserInputObject. # noqa: E501 :rtype: str
TextMagic/models/update_current_user_input_object.py
username
textmagic/textmagic-rest-python-v2
2
python
@property def username(self): 'Gets the username of this UpdateCurrentUserInputObject. # noqa: E501\n\n\n :return: The username of this UpdateCurrentUserInputObject. # noqa: E501\n :rtype: str\n ' return self._username
@property def username(self): 'Gets the username of this UpdateCurrentUserInputObject. # noqa: E501\n\n\n :return: The username of this UpdateCurrentUserInputObject. # noqa: E501\n :rtype: str\n ' return self._username<|docstring|>Gets the username of this UpdateCurrentUserInputObject. # noqa: E501 :return: The username of this UpdateCurrentUserInputObject. # noqa: E501 :rtype: str<|endoftext|>
2ea9f4c153f0a420234755b9eb82dc546adfd9ae79949577013943e991adee00
@username.setter def username(self, username): 'Sets the username of this UpdateCurrentUserInputObject.\n\n\n :param username: The username of this UpdateCurrentUserInputObject. # noqa: E501\n :type: str\n ' self._username = username
Sets the username of this UpdateCurrentUserInputObject. :param username: The username of this UpdateCurrentUserInputObject. # noqa: E501 :type: str
TextMagic/models/update_current_user_input_object.py
username
textmagic/textmagic-rest-python-v2
2
python
@username.setter def username(self, username): 'Sets the username of this UpdateCurrentUserInputObject.\n\n\n :param username: The username of this UpdateCurrentUserInputObject. # noqa: E501\n :type: str\n ' self._username = username
@username.setter def username(self, username): 'Sets the username of this UpdateCurrentUserInputObject.\n\n\n :param username: The username of this UpdateCurrentUserInputObject. # noqa: E501\n :type: str\n ' self._username = username<|docstring|>Sets the username of this UpdateCurrentUserInputObject. :param username: The username of this UpdateCurrentUserInputObject. # noqa: E501 :type: str<|endoftext|>
fd9463d2e3777084dcc9a2f30e24b175c67a99739ec28247aafec57f5fa07e3b
@property def first_name(self): 'Gets the first_name of this UpdateCurrentUserInputObject. # noqa: E501\n\n Account first name. # noqa: E501\n\n :return: The first_name of this UpdateCurrentUserInputObject. # noqa: E501\n :rtype: str\n ' return self._first_name
Gets the first_name of this UpdateCurrentUserInputObject. # noqa: E501 Account first name. # noqa: E501 :return: The first_name of this UpdateCurrentUserInputObject. # noqa: E501 :rtype: str
TextMagic/models/update_current_user_input_object.py
first_name
textmagic/textmagic-rest-python-v2
2
python
@property def first_name(self): 'Gets the first_name of this UpdateCurrentUserInputObject. # noqa: E501\n\n Account first name. # noqa: E501\n\n :return: The first_name of this UpdateCurrentUserInputObject. # noqa: E501\n :rtype: str\n ' return self._first_name
@property def first_name(self): 'Gets the first_name of this UpdateCurrentUserInputObject. # noqa: E501\n\n Account first name. # noqa: E501\n\n :return: The first_name of this UpdateCurrentUserInputObject. # noqa: E501\n :rtype: str\n ' return self._first_name<|docstring|>Gets the first_name of this UpdateCurrentUserInputObject. # noqa: E501 Account first name. # noqa: E501 :return: The first_name of this UpdateCurrentUserInputObject. # noqa: E501 :rtype: str<|endoftext|>