column            type            range
----------------  --------------  ------------
repo              stringlengths   7 - 55
path              stringlengths   4 - 223
func_name         stringlengths   1 - 134
original_string   stringlengths   75 - 104k
language          stringclasses   1 value
code              stringlengths   75 - 104k
code_tokens       listlengths     19 - 28.4k
docstring         stringlengths   1 - 46.9k
docstring_tokens  listlengths     1 - 1.97k
sha               stringlengths   40 - 40
url               stringlengths   87 - 315
partition         stringclasses   1 value
apple/turicreate
src/external/xgboost/python-package/xgboost/core.py
Booster.predict
def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False):
    """
    Predict with data.

    NOTE: This function is not thread safe.
          For each booster object, predict can only be called from one thread.
          If you want to run prediction using multiple threads, call
          bst.copy() to make copies of the model object and then call predict.

    Parameters
    ----------
    data : DMatrix
        The dmatrix storing the input.

    output_margin : bool
        Whether to output the raw untransformed margin value.

    ntree_limit : int
        Limit number of trees in the prediction; defaults to 0 (use all trees).

    pred_leaf : bool
        When this option is on, the output will be a matrix of (nsample,
        ntrees) with each record indicating the predicted leaf index of each
        sample in each tree. Note that the leaf index of a tree is unique per
        tree, so you may find leaf 1 in both tree 1 and tree 0.

    Returns
    -------
    prediction : numpy array
    """
    option_mask = 0x00
    if output_margin:
        option_mask |= 0x01
    if pred_leaf:
        option_mask |= 0x02

    self._validate_features(data)

    length = ctypes.c_ulong()
    preds = ctypes.POINTER(ctypes.c_float)()
    _check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
                                      option_mask, ntree_limit,
                                      ctypes.byref(length),
                                      ctypes.byref(preds)))
    preds = ctypes2numpy(preds, length.value, np.float32)
    if pred_leaf:
        preds = preds.astype(np.int32)
    nrow = data.num_row()
    if preds.size != nrow and preds.size % nrow == 0:
        # integer division keeps the reshape argument an int on Python 3
        preds = preds.reshape(nrow, preds.size // nrow)
    return preds
python
def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False):
    """
    Predict with data.

    NOTE: This function is not thread safe.
          For each booster object, predict can only be called from one thread.
          If you want to run prediction using multiple threads, call
          bst.copy() to make copies of the model object and then call predict.

    Parameters
    ----------
    data : DMatrix
        The dmatrix storing the input.

    output_margin : bool
        Whether to output the raw untransformed margin value.

    ntree_limit : int
        Limit number of trees in the prediction; defaults to 0 (use all trees).

    pred_leaf : bool
        When this option is on, the output will be a matrix of (nsample,
        ntrees) with each record indicating the predicted leaf index of each
        sample in each tree. Note that the leaf index of a tree is unique per
        tree, so you may find leaf 1 in both tree 1 and tree 0.

    Returns
    -------
    prediction : numpy array
    """
    option_mask = 0x00
    if output_margin:
        option_mask |= 0x01
    if pred_leaf:
        option_mask |= 0x02

    self._validate_features(data)

    length = ctypes.c_ulong()
    preds = ctypes.POINTER(ctypes.c_float)()
    _check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
                                      option_mask, ntree_limit,
                                      ctypes.byref(length),
                                      ctypes.byref(preds)))
    preds = ctypes2numpy(preds, length.value, np.float32)
    if pred_leaf:
        preds = preds.astype(np.int32)
    nrow = data.num_row()
    if preds.size != nrow and preds.size % nrow == 0:
        # integer division keeps the reshape argument an int on Python 3
        preds = preds.reshape(nrow, preds.size // nrow)
    return preds
[ "def", "predict", "(", "self", ",", "data", ",", "output_margin", "=", "False", ",", "ntree_limit", "=", "0", ",", "pred_leaf", "=", "False", ")", ":", "option_mask", "=", "0x00", "if", "output_margin", ":", "option_mask", "|=", "0x01", "if", "pred_leaf", ":", "option_mask", "|=", "0x02", "self", ".", "_validate_features", "(", "data", ")", "length", "=", "ctypes", ".", "c_ulong", "(", ")", "preds", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_float", ")", "(", ")", "_check_call", "(", "_LIB", ".", "XGBoosterPredict", "(", "self", ".", "handle", ",", "data", ".", "handle", ",", "option_mask", ",", "ntree_limit", ",", "ctypes", ".", "byref", "(", "length", ")", ",", "ctypes", ".", "byref", "(", "preds", ")", ")", ")", "preds", "=", "ctypes2numpy", "(", "preds", ",", "length", ".", "value", ",", "np", ".", "float32", ")", "if", "pred_leaf", ":", "preds", "=", "preds", ".", "astype", "(", "np", ".", "int32", ")", "nrow", "=", "data", ".", "num_row", "(", ")", "if", "preds", ".", "size", "!=", "nrow", "and", "preds", ".", "size", "%", "nrow", "==", "0", ":", "preds", "=", "preds", ".", "reshape", "(", "nrow", ",", "preds", ".", "size", "/", "nrow", ")", "return", "preds" ]
Predict with data.

NOTE: This function is not thread safe.
      For each booster object, predict can only be called from one thread.
      If you want to run prediction using multiple threads, call bst.copy()
      to make copies of the model object and then call predict.

Parameters
----------
data : DMatrix
    The dmatrix storing the input.

output_margin : bool
    Whether to output the raw untransformed margin value.

ntree_limit : int
    Limit number of trees in the prediction; defaults to 0 (use all trees).

pred_leaf : bool
    When this option is on, the output will be a matrix of (nsample, ntrees)
    with each record indicating the predicted leaf index of each sample in
    each tree. Note that the leaf index of a tree is unique per tree, so you
    may find leaf 1 in both tree 1 and tree 0.

Returns
-------
prediction : numpy array
[ "Predict", "with", "data", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L774-L824
train
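As a quick illustration of the predict() record above, here is a minimal sketch against the standalone xgboost Python package, which exposes the same call as this vendored copy; the toy data, parameters, and boosting-round count are invented for the example.

# Toy sketch: default predictions vs. per-tree leaf indices.
import numpy as np
import xgboost as xgb

X = np.random.rand(100, 5)
y = (X[:, 0] > 0.5).astype(int)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({'objective': 'binary:logistic'}, dtrain, num_boost_round=10)

scores = bst.predict(dtrain)                  # one probability per row
leaves = bst.predict(dtrain, pred_leaf=True)  # (100, 10) integer leaf indices
print(scores.shape, leaves.shape)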
apple/turicreate
src/external/xgboost/python-package/xgboost/core.py
Booster.save_raw
def save_raw(self):
    """
    Save the model to an in-memory buffer representation.

    Returns
    -------
    An in-memory buffer representation of the model.
    """
    length = ctypes.c_ulong()
    cptr = ctypes.POINTER(ctypes.c_char)()
    _check_call(_LIB.XGBoosterGetModelRaw(self.handle,
                                          ctypes.byref(length),
                                          ctypes.byref(cptr)))
    return ctypes2buffer(cptr, length.value)
python
def save_raw(self):
    """
    Save the model to an in-memory buffer representation.

    Returns
    -------
    An in-memory buffer representation of the model.
    """
    length = ctypes.c_ulong()
    cptr = ctypes.POINTER(ctypes.c_char)()
    _check_call(_LIB.XGBoosterGetModelRaw(self.handle,
                                          ctypes.byref(length),
                                          ctypes.byref(cptr)))
    return ctypes2buffer(cptr, length.value)
[ "def", "save_raw", "(", "self", ")", ":", "length", "=", "ctypes", ".", "c_ulong", "(", ")", "cptr", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char", ")", "(", ")", "_check_call", "(", "_LIB", ".", "XGBoosterGetModelRaw", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "length", ")", ",", "ctypes", ".", "byref", "(", "cptr", ")", ")", ")", "return", "ctypes2buffer", "(", "cptr", ",", "length", ".", "value", ")" ]
Save the model to an in-memory buffer representation.

Returns
-------
An in-memory buffer representation of the model.
[ "Save", "the", "model", "to", "a", "in", "memory", "buffer", "represetation" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L840-L853
train
apple/turicreate
src/external/xgboost/python-package/xgboost/core.py
Booster.load_model
def load_model(self, fname):
    """
    Load the model from a file.

    Parameters
    ----------
    fname : string or a memory buffer
        Input file name or memory buffer (see also save_raw).
    """
    if isinstance(fname, STRING_TYPES):  # assume file name
        if os.path.exists(fname):
            # check the C API return code, as the other booster calls do
            _check_call(_LIB.XGBoosterLoadModel(self.handle, c_str(fname)))
        else:
            raise ValueError("No such file: {0}".format(fname))
    else:
        buf = fname
        length = ctypes.c_ulong(len(buf))
        ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
        _check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length))
python
def load_model(self, fname):
    """
    Load the model from a file.

    Parameters
    ----------
    fname : string or a memory buffer
        Input file name or memory buffer (see also save_raw).
    """
    if isinstance(fname, STRING_TYPES):  # assume file name
        if os.path.exists(fname):
            # check the C API return code, as the other booster calls do
            _check_call(_LIB.XGBoosterLoadModel(self.handle, c_str(fname)))
        else:
            raise ValueError("No such file: {0}".format(fname))
    else:
        buf = fname
        length = ctypes.c_ulong(len(buf))
        ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
        _check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length))
[ "def", "load_model", "(", "self", ",", "fname", ")", ":", "if", "isinstance", "(", "fname", ",", "STRING_TYPES", ")", ":", "# assume file name", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "_LIB", ".", "XGBoosterLoadModel", "(", "self", ".", "handle", ",", "c_str", "(", "fname", ")", ")", "else", ":", "raise", "ValueError", "(", "\"No such file: {0}\"", ".", "format", "(", "fname", ")", ")", "else", ":", "buf", "=", "fname", "length", "=", "ctypes", ".", "c_ulong", "(", "len", "(", "buf", ")", ")", "ptr", "=", "(", "ctypes", ".", "c_char", "*", "len", "(", "buf", ")", ")", ".", "from_buffer", "(", "buf", ")", "_check_call", "(", "_LIB", ".", "XGBoosterLoadModelFromBuffer", "(", "self", ".", "handle", ",", "ptr", ",", "length", ")", ")" ]
Load the model from a file.

Parameters
----------
fname : string or a memory buffer
    Input file name or memory buffer (see also save_raw).
[ "Load", "the", "model", "from", "a", "file", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L855-L873
train
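The save_raw and load_model records above pair naturally: one serializes to an in-memory buffer, the other accepts either a file path or such a buffer. A hedged round-trip sketch against the standalone xgboost package follows; the training data and the 'model.bin' filename are invented, and the bytearray copy reflects the from_buffer() call above, which needs a writable buffer.

# Round-trip a booster through the raw in-memory representation.
import numpy as np
import xgboost as xgb

X, y = np.random.rand(50, 3), np.random.randint(0, 2, 50)
bst = xgb.train({'objective': 'binary:logistic'}, xgb.DMatrix(X, label=y), 5)

raw = bst.save_raw()                 # in-memory buffer
bst2 = xgb.Booster()
bst2.load_model(bytearray(raw))      # writable buffer, per from_buffer()

bst.save_model('model.bin')          # or round-trip through the filesystem
bst3 = xgb.Booster()
bst3.load_model('model.bin')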
apple/turicreate
src/external/xgboost/python-package/xgboost/core.py
Booster.dump_model
def dump_model(self, fout, fmap='', with_stats=False):
    """
    Dump model into a text file.

    Parameters
    ----------
    fout : string
        Output file name.
    fmap : string, optional
        Name of the file containing feature map names.
    with_stats : bool, optional
        Controls whether the split statistics are output.
    """
    if isinstance(fout, STRING_TYPES):
        fout = open(fout, 'w')
        need_close = True
    else:
        need_close = False
    ret = self.get_dump(fmap, with_stats)
    for i in range(len(ret)):
        fout.write('booster[{}]:\n'.format(i))
        fout.write(ret[i])
    if need_close:
        fout.close()
python
def dump_model(self, fout, fmap='', with_stats=False):
    """
    Dump model into a text file.

    Parameters
    ----------
    fout : string
        Output file name.
    fmap : string, optional
        Name of the file containing feature map names.
    with_stats : bool, optional
        Controls whether the split statistics are output.
    """
    if isinstance(fout, STRING_TYPES):
        fout = open(fout, 'w')
        need_close = True
    else:
        need_close = False
    ret = self.get_dump(fmap, with_stats)
    for i in range(len(ret)):
        fout.write('booster[{}]:\n'.format(i))
        fout.write(ret[i])
    if need_close:
        fout.close()
[ "def", "dump_model", "(", "self", ",", "fout", ",", "fmap", "=", "''", ",", "with_stats", "=", "False", ")", ":", "if", "isinstance", "(", "fout", ",", "STRING_TYPES", ")", ":", "fout", "=", "open", "(", "fout", ",", "'w'", ")", "need_close", "=", "True", "else", ":", "need_close", "=", "False", "ret", "=", "self", ".", "get_dump", "(", "fmap", ",", "with_stats", ")", "for", "i", "in", "range", "(", "len", "(", "ret", ")", ")", ":", "fout", ".", "write", "(", "'booster[{}]:\\n'", ".", "format", "(", "i", ")", ")", "fout", ".", "write", "(", "ret", "[", "i", "]", ")", "if", "need_close", ":", "fout", ".", "close", "(", ")" ]
Dump model into a text file.

Parameters
----------
fout : string
    Output file name.
fmap : string, optional
    Name of the file containing feature map names.
with_stats : bool, optional
    Controls whether the split statistics are output.
[ "Dump", "model", "into", "a", "text", "file", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L875-L898
train
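A short sketch of dump_model(), which also exercises get_dump() from the record that follows; the toy data and the 'dump.txt' filename are invented, and the file-object branch shown reflects this vendored implementation specifically.

# Dump the trained trees as readable text.
import numpy as np
import xgboost as xgb

X, y = np.random.rand(50, 3), np.random.randint(0, 2, 50)
bst = xgb.train({'objective': 'binary:logistic'}, xgb.DMatrix(X, label=y), 3)

# One "booster[i]:" section per tree, with split statistics included.
bst.dump_model('dump.txt', with_stats=True)

# get_dump() returns the same sections as a list of strings.
print(bst.get_dump()[0])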
apple/turicreate
src/external/xgboost/python-package/xgboost/core.py
Booster.get_dump
def get_dump(self, fmap='', with_stats=False):
    """
    Returns the model dump as a list of strings.
    """
    length = ctypes.c_ulong()
    sarr = ctypes.POINTER(ctypes.c_char_p)()
    if self.feature_names is not None and fmap == '':
        flen = int(len(self.feature_names))
        fname = from_pystr_to_cstr(self.feature_names)

        if self.feature_types is None:
            # use quantitative as default
            # {'q': quantitative, 'i': indicator}
            ftype = from_pystr_to_cstr(['q'] * flen)
        else:
            ftype = from_pystr_to_cstr(self.feature_types)
        _check_call(_LIB.XGBoosterDumpModelWithFeatures(self.handle,
                                                        flen,
                                                        fname,
                                                        ftype,
                                                        int(with_stats),
                                                        ctypes.byref(length),
                                                        ctypes.byref(sarr)))
    else:
        if fmap != '' and not os.path.exists(fmap):
            raise ValueError("No such file: {0}".format(fmap))
        _check_call(_LIB.XGBoosterDumpModel(self.handle,
                                            c_str(fmap),
                                            int(with_stats),
                                            ctypes.byref(length),
                                            ctypes.byref(sarr)))
    res = from_cstr_to_pystr(sarr, length)
    return res
python
def get_dump(self, fmap='', with_stats=False):
    """
    Returns the model dump as a list of strings.
    """
    length = ctypes.c_ulong()
    sarr = ctypes.POINTER(ctypes.c_char_p)()
    if self.feature_names is not None and fmap == '':
        flen = int(len(self.feature_names))
        fname = from_pystr_to_cstr(self.feature_names)

        if self.feature_types is None:
            # use quantitative as default
            # {'q': quantitative, 'i': indicator}
            ftype = from_pystr_to_cstr(['q'] * flen)
        else:
            ftype = from_pystr_to_cstr(self.feature_types)
        _check_call(_LIB.XGBoosterDumpModelWithFeatures(self.handle,
                                                        flen,
                                                        fname,
                                                        ftype,
                                                        int(with_stats),
                                                        ctypes.byref(length),
                                                        ctypes.byref(sarr)))
    else:
        if fmap != '' and not os.path.exists(fmap):
            raise ValueError("No such file: {0}".format(fmap))
        _check_call(_LIB.XGBoosterDumpModel(self.handle,
                                            c_str(fmap),
                                            int(with_stats),
                                            ctypes.byref(length),
                                            ctypes.byref(sarr)))
    res = from_cstr_to_pystr(sarr, length)
    return res
[ "def", "get_dump", "(", "self", ",", "fmap", "=", "''", ",", "with_stats", "=", "False", ")", ":", "length", "=", "ctypes", ".", "c_ulong", "(", ")", "sarr", "=", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char_p", ")", "(", ")", "if", "self", ".", "feature_names", "is", "not", "None", "and", "fmap", "==", "''", ":", "flen", "=", "int", "(", "len", "(", "self", ".", "feature_names", ")", ")", "fname", "=", "from_pystr_to_cstr", "(", "self", ".", "feature_names", ")", "if", "self", ".", "feature_types", "is", "None", ":", "# use quantitative as default", "# {'q': quantitative, 'i': indicator}", "ftype", "=", "from_pystr_to_cstr", "(", "[", "'q'", "]", "*", "flen", ")", "else", ":", "ftype", "=", "from_pystr_to_cstr", "(", "self", ".", "feature_types", ")", "_check_call", "(", "_LIB", ".", "XGBoosterDumpModelWithFeatures", "(", "self", ".", "handle", ",", "flen", ",", "fname", ",", "ftype", ",", "int", "(", "with_stats", ")", ",", "ctypes", ".", "byref", "(", "length", ")", ",", "ctypes", ".", "byref", "(", "sarr", ")", ")", ")", "else", ":", "if", "fmap", "!=", "''", "and", "not", "os", ".", "path", ".", "exists", "(", "fmap", ")", ":", "raise", "ValueError", "(", "\"No such file: {0}\"", ".", "format", "(", "fmap", ")", ")", "_check_call", "(", "_LIB", ".", "XGBoosterDumpModel", "(", "self", ".", "handle", ",", "c_str", "(", "fmap", ")", ",", "int", "(", "with_stats", ")", ",", "ctypes", ".", "byref", "(", "length", ")", ",", "ctypes", ".", "byref", "(", "sarr", ")", ")", ")", "res", "=", "from_cstr_to_pystr", "(", "sarr", ",", "length", ")", "return", "res" ]
Returns the model dump as a list of strings.
[ "Returns", "the", "dump", "the", "model", "as", "a", "list", "of", "strings", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L900-L934
train
apple/turicreate
src/external/xgboost/python-package/xgboost/core.py
Booster.get_fscore
def get_fscore(self, fmap=''):
    """Get feature importance of each feature.

    Parameters
    ----------
    fmap : str, optional
        The name of the feature map file.
    """
    trees = self.get_dump(fmap)
    # count how often each feature id appears as a split in the dump
    # (renamed from 'fmap' to avoid shadowing the parameter)
    counts = {}
    for tree in trees:
        for line in tree.split('\n'):
            arr = line.split('[')
            if len(arr) == 1:
                continue
            fid = arr[1].split(']')[0]
            fid = fid.split('<')[0]
            if fid not in counts:
                counts[fid] = 1
            else:
                counts[fid] += 1
    return counts
python
def get_fscore(self, fmap=''):
    """Get feature importance of each feature.

    Parameters
    ----------
    fmap : str, optional
        The name of the feature map file.
    """
    trees = self.get_dump(fmap)
    # count how often each feature id appears as a split in the dump
    # (renamed from 'fmap' to avoid shadowing the parameter)
    counts = {}
    for tree in trees:
        for line in tree.split('\n'):
            arr = line.split('[')
            if len(arr) == 1:
                continue
            fid = arr[1].split(']')[0]
            fid = fid.split('<')[0]
            if fid not in counts:
                counts[fid] = 1
            else:
                counts[fid] += 1
    return counts
[ "def", "get_fscore", "(", "self", ",", "fmap", "=", "''", ")", ":", "trees", "=", "self", ".", "get_dump", "(", "fmap", ")", "fmap", "=", "{", "}", "for", "tree", "in", "trees", ":", "for", "line", "in", "tree", ".", "split", "(", "'\\n'", ")", ":", "arr", "=", "line", ".", "split", "(", "'['", ")", "if", "len", "(", "arr", ")", "==", "1", ":", "continue", "fid", "=", "arr", "[", "1", "]", ".", "split", "(", "']'", ")", "[", "0", "]", "fid", "=", "fid", ".", "split", "(", "'<'", ")", "[", "0", "]", "if", "fid", "not", "in", "fmap", ":", "fmap", "[", "fid", "]", "=", "1", "else", ":", "fmap", "[", "fid", "]", "+=", "1", "return", "fmap" ]
Get feature importance of each feature.

Parameters
----------
fmap : str, optional
    The name of the feature map file.
[ "Get", "feature", "importance", "of", "each", "feature", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L936-L957
train
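get_fscore() derives importance purely by counting how often each feature id appears inside the [feature<threshold] brackets of the text dump. Here is a self-contained replay of that parsing logic; the two dump strings are fabricated for illustration, not taken from a real model.

# Stand-alone replay of get_fscore()'s counting logic on a fake dump.
trees = [
    "0:[f0<0.5] yes=1,no=2\n\t1:[f2<1.5] yes=3,no=4\n\t2:leaf=0.1",
    "0:[f0<0.7] yes=1,no=2\n\t1:leaf=-0.2\n\t2:leaf=0.3",
]
counts = {}
for tree in trees:
    for line in tree.split('\n'):
        arr = line.split('[')
        if len(arr) == 1:          # leaf lines have no '[' and are skipped
            continue
        fid = arr[1].split(']')[0].split('<')[0]
        counts[fid] = counts.get(fid, 0) + 1
print(counts)  # {'f0': 2, 'f2': 1}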
apple/turicreate
src/external/xgboost/python-package/xgboost/core.py
Booster._validate_features
def _validate_features(self, data):
    """
    Validate Booster and data's feature_names are identical.
    Set feature_names and feature_types from DMatrix.
    """
    if self.feature_names is None:
        self.feature_names = data.feature_names
        self.feature_types = data.feature_types
    else:
        # Booster can't accept data with different feature names
        if self.feature_names != data.feature_names:
            msg = 'feature_names mismatch: {0} {1}'
            raise ValueError(msg.format(self.feature_names,
                                        data.feature_names))
python
def _validate_features(self, data):
    """
    Validate Booster and data's feature_names are identical.
    Set feature_names and feature_types from DMatrix.
    """
    if self.feature_names is None:
        self.feature_names = data.feature_names
        self.feature_types = data.feature_types
    else:
        # Booster can't accept data with different feature names
        if self.feature_names != data.feature_names:
            msg = 'feature_names mismatch: {0} {1}'
            raise ValueError(msg.format(self.feature_names,
                                        data.feature_names))
[ "def", "_validate_features", "(", "self", ",", "data", ")", ":", "if", "self", ".", "feature_names", "is", "None", ":", "self", ".", "feature_names", "=", "data", ".", "feature_names", "self", ".", "feature_types", "=", "data", ".", "feature_types", "else", ":", "# Booster can't accept data with different feature names", "if", "self", ".", "feature_names", "!=", "data", ".", "feature_names", ":", "msg", "=", "'feature_names mismatch: {0} {1}'", "raise", "ValueError", "(", "msg", ".", "format", "(", "self", ".", "feature_names", ",", "data", ".", "feature_names", ")", ")" ]
Validate Booster and data's feature_names are identical.
Set feature_names and feature_types from DMatrix.
[ "Validate", "Booster", "and", "data", "s", "feature_names", "are", "identical", ".", "Set", "feature_names", "and", "feature_types", "from", "DMatrix" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L959-L972
train
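_validate_features() is what surfaces as the user-visible error when predicting with mismatched columns. A small sketch provoking it through the public xgboost API; the toy data and feature names are invented.

# Trigger the feature_names mismatch check via predict().
import numpy as np
import xgboost as xgb

d1 = xgb.DMatrix(np.random.rand(10, 2), label=np.zeros(10),
                 feature_names=['a', 'b'])
d2 = xgb.DMatrix(np.random.rand(10, 2), feature_names=['a', 'c'])

bst = xgb.train({}, d1, num_boost_round=1)
try:
    bst.predict(d2)
except ValueError as e:
    print(e)  # feature_names mismatch: ['a', 'b'] ['a', 'c']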
apple/turicreate
src/unity/python/turicreate/meta/bytecodetools/disassembler_.py
disassembler
def disassembler(co, lasti=-1):
    """Disassemble a code object.

    :param co: code object
    :param lasti: internal
    :yields: Instructions.
    """
    code = co.co_code
    labels = dis.findlabels(code)
    linestarts = dict(dis.findlinestarts(co))
    i = 0
    extended_arg = 0
    lineno = 0
    free = None
    for i, op, oparg in _walk_ops(co):
        if i in linestarts:
            lineno = linestarts[i]
        instr = Instruction(i=i, op=op, lineno=lineno)
        instr.linestart = i in linestarts
        instr.lasti = (i == lasti)
        instr.label = (i in labels)
        instr.oparg = oparg
        extended_arg = 0
        if op == dis.EXTENDED_ARG:
            extended_arg = oparg * 65536
        instr.extended_arg = extended_arg
        if op >= dis.HAVE_ARGUMENT:
            if op in dis.hasconst:
                instr.arg = co.co_consts[oparg]
            elif op in dis.hasname:
                instr.arg = co.co_names[oparg]
            elif op in dis.hasjrel:
                instr.arg = i + oparg
            elif op in dis.haslocal:
                instr.arg = co.co_varnames[oparg]
            elif op in dis.hascompare:
                instr.arg = dis.cmp_op[oparg]
            elif op in dis.hasfree:
                if free is None:
                    free = co.co_cellvars + co.co_freevars
                instr.arg = free[oparg]
        yield instr
python
def disassembler(co, lasti=-1):
    """Disassemble a code object.

    :param co: code object
    :param lasti: internal
    :yields: Instructions.
    """
    code = co.co_code
    labels = dis.findlabels(code)
    linestarts = dict(dis.findlinestarts(co))
    i = 0
    extended_arg = 0
    lineno = 0
    free = None
    for i, op, oparg in _walk_ops(co):
        if i in linestarts:
            lineno = linestarts[i]
        instr = Instruction(i=i, op=op, lineno=lineno)
        instr.linestart = i in linestarts
        instr.lasti = (i == lasti)
        instr.label = (i in labels)
        instr.oparg = oparg
        extended_arg = 0
        if op == dis.EXTENDED_ARG:
            extended_arg = oparg * 65536
        instr.extended_arg = extended_arg
        if op >= dis.HAVE_ARGUMENT:
            if op in dis.hasconst:
                instr.arg = co.co_consts[oparg]
            elif op in dis.hasname:
                instr.arg = co.co_names[oparg]
            elif op in dis.hasjrel:
                instr.arg = i + oparg
            elif op in dis.haslocal:
                instr.arg = co.co_varnames[oparg]
            elif op in dis.hascompare:
                instr.arg = dis.cmp_op[oparg]
            elif op in dis.hasfree:
                if free is None:
                    free = co.co_cellvars + co.co_freevars
                instr.arg = free[oparg]
        yield instr
[ "def", "disassembler", "(", "co", ",", "lasti", "=", "-", "1", ")", ":", "code", "=", "co", ".", "co_code", "labels", "=", "dis", ".", "findlabels", "(", "code", ")", "linestarts", "=", "dict", "(", "dis", ".", "findlinestarts", "(", "co", ")", ")", "i", "=", "0", "extended_arg", "=", "0", "lineno", "=", "0", "free", "=", "None", "for", "i", ",", "op", ",", "oparg", "in", "_walk_ops", "(", "co", ")", ":", "if", "i", "in", "linestarts", ":", "lineno", "=", "linestarts", "[", "i", "]", "instr", "=", "Instruction", "(", "i", "=", "i", ",", "op", "=", "op", ",", "lineno", "=", "lineno", ")", "instr", ".", "linestart", "=", "i", "in", "linestarts", "if", "i", "==", "lasti", ":", "instr", ".", "lasti", "=", "True", "else", ":", "instr", ".", "lasti", "=", "False", "if", "i", "in", "labels", ":", "instr", ".", "label", "=", "True", "else", ":", "instr", ".", "label", "=", "False", "instr", ".", "oparg", "=", "oparg", "extended_arg", "=", "0", "if", "op", "==", "dis", ".", "EXTENDED_ARG", ":", "extended_arg", "=", "oparg", "*", "65536", "instr", ".", "extended_arg", "=", "extended_arg", "if", "op", ">=", "dis", ".", "HAVE_ARGUMENT", ":", "if", "op", "in", "dis", ".", "hasconst", ":", "instr", ".", "arg", "=", "co", ".", "co_consts", "[", "oparg", "]", "elif", "op", "in", "dis", ".", "hasname", ":", "instr", ".", "arg", "=", "co", ".", "co_names", "[", "oparg", "]", "elif", "op", "in", "dis", ".", "hasjrel", ":", "instr", ".", "arg", "=", "i", "+", "oparg", "elif", "op", "in", "dis", ".", "haslocal", ":", "instr", ".", "arg", "=", "co", ".", "co_varnames", "[", "oparg", "]", "elif", "op", "in", "dis", ".", "hascompare", ":", "instr", ".", "arg", "=", "dis", ".", "cmp_op", "[", "oparg", "]", "elif", "op", "in", "dis", ".", "hasfree", ":", "if", "free", "is", "None", ":", "free", "=", "co", ".", "co_cellvars", "+", "co", ".", "co_freevars", "instr", ".", "arg", "=", "free", "[", "oparg", "]", "yield", "instr" ]
Disassemble a code object. :param co: code object :param lasti: internal :yields: Instructions.
[ "Disassemble", "a", "code", "object", ".", ":", "param", "co", ":", "code", "object", ":", "param", "lasti", ":", "internal", ":", "yields", ":", "Instructions", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/bytecodetools/disassembler_.py#L58-L111
train
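This disassembler() record predates dis.get_instructions, but the standard library now exposes much of the same information directly. A rough stdlib-only equivalent for comparison; the field names differ from the Instruction objects this generator yields, and exact attributes vary slightly across Python versions.

# Stdlib-only look at the same bytecode facts disassembler() collects:
# offset, opname, resolved argument, line starts, and jump targets.
import dis

def f(x):
    return x + 1

for ins in dis.get_instructions(f):
    print(ins.offset, ins.opname, ins.argval,
          ins.starts_line, ins.is_jump_target)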
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/util/regex.py
transform
def transform(list, pattern, indices=[1]):
    """ Matches all elements of 'list' against the 'pattern' and returns
        a list of the elements indicated by indices of all successful
        matches. If 'indices' is omitted, returns a list of the first
        parenthesized groups of all successful matches.
    """
    result = []

    for e in list:
        m = re.match(pattern, e)

        if m:
            for i in indices:
                result.append(m.group(i))

    return result
python
def transform(list, pattern, indices=[1]):
    """ Matches all elements of 'list' against the 'pattern' and returns
        a list of the elements indicated by indices of all successful
        matches. If 'indices' is omitted, returns a list of the first
        parenthesized groups of all successful matches.
    """
    result = []

    for e in list:
        m = re.match(pattern, e)

        if m:
            for i in indices:
                result.append(m.group(i))

    return result
[ "def", "transform", "(", "list", ",", "pattern", ",", "indices", "=", "[", "1", "]", ")", ":", "result", "=", "[", "]", "for", "e", "in", "list", ":", "m", "=", "re", ".", "match", "(", "pattern", ",", "e", ")", "if", "m", ":", "for", "i", "in", "indices", ":", "result", ".", "append", "(", "m", ".", "group", "(", "i", ")", ")", "return", "result" ]
Matches all elements of 'list' against the 'pattern' and returns a list of the elements indicated by indices of all successful matches. If 'indices' is omitted, returns a list of the first parenthesized groups of all successful matches.
[ "Matches", "all", "elements", "of", "list", "agains", "the", "pattern", "and", "returns", "a", "list", "of", "the", "elements", "indicated", "by", "indices", "of", "all", "successfull", "matches", ".", "If", "indices", "is", "omitted", "returns", "a", "list", "of", "first", "paranthethised", "groups", "of", "all", "successfull", "matches", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/regex.py#L11-L27
train
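A quick check of transform()'s semantics: it keeps only the elements that match and, for each match, emits the requested groups. The function is re-defined locally here (with the builtin-shadowing parameter name 'list' renamed) rather than imported from the Boost.Build tree; the sample inputs are invented.

import re

def transform(items, pattern, indices=[1]):
    # same logic as the record above, parameter renamed for clarity
    result = []
    for e in items:
        m = re.match(pattern, e)
        if m:
            for i in indices:
                result.append(m.group(i))
    return result

print(transform(['a.cpp', 'b.h', 'c.cpp'], r'(.*)\.cpp'))
# ['a', 'c'] -- the non-matching 'b.h' is dropped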
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/util/regex.py
replace
def replace(s, pattern, replacement):
    """Replaces occurrences of a match string in a given
    string and returns the new string. The match string
    can be a regex expression.

    Args:
        s (str): the string to modify
        pattern (str): the search expression
        replacement (str): the string to replace each match with
    """
    # the replacement string may contain invalid backreferences (like \1 or \g)
    # which will cause python's regex to blow up. Since this should emulate
    # the jam version exactly and the jam version didn't support
    # backreferences, this version shouldn't either. re.sub
    # allows replacement to be a callable; this is being used
    # to simply return the replacement string and avoid the hassle
    # of worrying about backreferences within the string.
    def _replacement(matchobj):
        return replacement
    return re.sub(pattern, _replacement, s)
python
def replace(s, pattern, replacement):
    """Replaces occurrences of a match string in a given
    string and returns the new string. The match string
    can be a regex expression.

    Args:
        s (str): the string to modify
        pattern (str): the search expression
        replacement (str): the string to replace each match with
    """
    # the replacement string may contain invalid backreferences (like \1 or \g)
    # which will cause python's regex to blow up. Since this should emulate
    # the jam version exactly and the jam version didn't support
    # backreferences, this version shouldn't either. re.sub
    # allows replacement to be a callable; this is being used
    # to simply return the replacement string and avoid the hassle
    # of worrying about backreferences within the string.
    def _replacement(matchobj):
        return replacement
    return re.sub(pattern, _replacement, s)
[ "def", "replace", "(", "s", ",", "pattern", ",", "replacement", ")", ":", "# the replacement string may contain invalid backreferences (like \\1 or \\g)", "# which will cause python's regex to blow up. Since this should emulate", "# the jam version exactly and the jam version didn't support", "# backreferences, this version shouldn't either. re.sub", "# allows replacement to be a callable; this is being used", "# to simply return the replacement string and avoid the hassle", "# of worrying about backreferences within the string.", "def", "_replacement", "(", "matchobj", ")", ":", "return", "replacement", "return", "re", ".", "sub", "(", "pattern", ",", "_replacement", ",", "s", ")" ]
Replaces occurrences of a match string in a given string and returns
the new string. The match string can be a regex expression.

Args:
    s (str): the string to modify
    pattern (str): the search expression
    replacement (str): the string to replace each match with
[ "Replaces", "occurrences", "of", "a", "match", "string", "in", "a", "given", "string", "and", "returns", "the", "new", "string", ".", "The", "match", "string", "can", "be", "a", "regex", "expression", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/regex.py#L31-L50
train
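The callable trick in replace() matters when the replacement text contains backslashes; a direct re.sub would parse them as backreference escapes. A small standalone sketch contrasting the two behaviors, with the function re-defined locally and the sample strings invented:

import re

def replace(s, pattern, replacement):
    # same callable trick as the record above: the replacement is
    # returned verbatim, never parsed for backreferences
    return re.sub(pattern, lambda m: replacement, s)

print(replace('path/to/file', '/', '\\'))   # path\to\file
# re.sub('/', '\\', 'path/to/file') would raise re.error instead,
# because a lone backslash is an invalid replacement template.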
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/util/regex.py
replace_list
def replace_list(items, match, replacement):
    """Replaces occurrences of a match string in a given list of strings and returns
    a list of new strings. The match string can be a regex expression.

    Args:
        items (list): the list of strings to modify.
        match (str): the search expression.
        replacement (str): the string to replace with.
    """
    return [replace(item, match, replacement) for item in items]
python
def replace_list(items, match, replacement):
    """Replaces occurrences of a match string in a given list of strings and returns
    a list of new strings. The match string can be a regex expression.

    Args:
        items (list): the list of strings to modify.
        match (str): the search expression.
        replacement (str): the string to replace with.
    """
    return [replace(item, match, replacement) for item in items]
[ "def", "replace_list", "(", "items", ",", "match", ",", "replacement", ")", ":", "return", "[", "replace", "(", "item", ",", "match", ",", "replacement", ")", "for", "item", "in", "items", "]" ]
Replaces occurrences of a match string in a given list of strings and returns
a list of new strings. The match string can be a regex expression.

Args:
    items (list): the list of strings to modify.
    match (str): the search expression.
    replacement (str): the string to replace with.
[ "Replaces", "occurrences", "of", "a", "match", "string", "in", "a", "given", "list", "of", "strings", "and", "returns", "a", "list", "of", "new", "strings", ".", "The", "match", "string", "can", "be", "a", "regex", "expression", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/regex.py#L54-L63
train
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
create
def create(dataset,
           num_topics=10,
           initial_topics=None,
           alpha=None,
           beta=.1,
           num_iterations=10,
           num_burnin=5,
           associations=None,
           verbose=False,
           print_interval=10,
           validation_set=None,
           method='auto'):
    """
    Create a topic model from the given data set. A topic model assumes each
    document is a mixture of a set of topics, where for each topic some words
    are more likely than others. One statistical approach to do this is called
    a "topic model". This method learns a topic model for the given document
    collection.

    Parameters
    ----------
    dataset : SArray of type dict or SFrame with a single column of type dict
        A bag of words representation of a document corpus.
        Each element is a dictionary representing a single document, where
        the keys are words and the values are the number of times that word
        occurs in that document.

    num_topics : int, optional
        The number of topics to learn.

    initial_topics : SFrame, optional
        An SFrame with a column of unique words representing the vocabulary
        and a column of dense vectors representing the probability of that
        word given each topic. When provided, these values are used to
        initialize the algorithm.

    alpha : float, optional
        Hyperparameter that controls the diversity of topics in a document.
        Smaller values encourage fewer topics per document.
        Provided value must be positive. Default value is 50/num_topics.

    beta : float, optional
        Hyperparameter that controls the diversity of words in a topic.
        Smaller values encourage fewer words per topic. Provided value
        must be positive.

    num_iterations : int, optional
        The number of iterations to perform.

    num_burnin : int, optional
        The number of iterations to perform when inferring the topics for
        documents at prediction time.

    verbose : bool, optional
        When True, print most probable words for each topic while printing
        progress.

    print_interval : int, optional
        The number of iterations to wait between progress reports.

    associations : SFrame, optional
        An SFrame with two columns named "word" and "topic" containing words
        and the topic id that the word should be associated with. These words
        are not considered during learning.

    validation_set : SArray of type dict or SFrame with a single column
        A bag of words representation of a document corpus, similar to the
        format required for `dataset`. This will be used to monitor model
        performance during training. Each document in the provided validation
        set is randomly split: the first portion is used to estimate which
        topic each document belongs to, and the second portion is used to
        estimate the model's performance at predicting the unseen words in
        the test data.

    method : {'cgs', 'alias'}, optional
        The algorithm used for learning the model.

        - *cgs:* Collapsed Gibbs sampling
        - *alias:* AliasLDA method.

    Returns
    -------
    out : TopicModel
        A fitted topic model. This can be used with
        :py:func:`~TopicModel.get_topics()` and
        :py:func:`~TopicModel.predict()`. While fitting is in progress,
        several metrics are shown, including:

        +------------------+---------------------------------------------------+
        | Field            | Description                                       |
        +==================+===================================================+
        | Elapsed Time     | The number of elapsed seconds.                    |
        +------------------+---------------------------------------------------+
        | Tokens/second    | The number of unique words processed per second   |
        +------------------+---------------------------------------------------+
        | Est. Perplexity  | An estimate of the model's ability to model the   |
        |                  | training data. See the documentation on evaluate. |
        +------------------+---------------------------------------------------+

    See Also
    --------
    TopicModel, TopicModel.get_topics, TopicModel.predict,
    turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate

    References
    ----------
    - `Wikipedia - Latent Dirichlet allocation
      <http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_

    - Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
      Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
      KDD 2014.

    Examples
    --------
    The following example includes an SArray of documents, where each element
    represents a document in "bag of words" representation -- a dictionary
    with word keys and whose values are the number of times that word occurred
    in the document:

    >>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')

    Once in this form, it is straightforward to learn a topic model.

    >>> m = turicreate.topic_model.create(docs)

    It is also easy to create a new topic model from an old one -- whether
    it was created using Turi Create or another package.

    >>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])

    To manually fix several words to always be assigned to a topic, use the
    `associations` argument. The following will ensure that topic 0 has the
    most probability for each of the provided words:

    >>> from turicreate import SFrame
    >>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
                               'topic': [0, 0, 0]})
    >>> m = turicreate.topic_model.create(docs,
                                          associations=associations)

    More advanced usage allows you to control aspects of the model and the
    learning method.

    >>> import turicreate as tc
    >>> m = tc.topic_model.create(docs,
                                  num_topics=20,       # number of topics
                                  num_iterations=10,   # algorithm parameters
                                  alpha=.01, beta=.1)  # hyperparameters

    To evaluate the model's ability to generalize, we can create a train/test
    split where a portion of the words in each document are held out from
    training.

    >>> train, test = tc.text_analytics.random_split(docs, .8)
    >>> m = tc.topic_model.create(train)
    >>> results = m.evaluate(test)
    >>> print(results['perplexity'])
    """
    dataset = _check_input(dataset)

    _check_categorical_option_type("method", method, ['auto', 'cgs', 'alias'])
    if method == 'cgs' or method == 'auto':
        model_name = 'cgs_topic_model'
    else:
        model_name = 'alias_topic_model'

    # If associations are provided, check they are in the proper format
    if associations is None:
        associations = _turicreate.SFrame({'word': [], 'topic': []})
    if isinstance(associations, _turicreate.SFrame) and \
       associations.num_rows() > 0:
        assert set(associations.column_names()) == set(['word', 'topic']), \
            "Provided associations must be an SFrame containing a word" \
            " column and a topic column."
        assert associations['word'].dtype == str, \
            "Words must be strings."
        assert associations['topic'].dtype == int, \
            "Topic ids must be of int type."
    if alpha is None:
        alpha = float(50) / num_topics

    if validation_set is not None:
        _check_input(validation_set)  # Must be a single column
        if isinstance(validation_set, _turicreate.SFrame):
            column_name = validation_set.column_names()[0]
            validation_set = validation_set[column_name]
        (validation_train, validation_test) = _random_split(validation_set)
    else:
        validation_train = _SArray()
        validation_test = _SArray()

    opts = {'model_name': model_name,
            'data': dataset,
            'num_topics': num_topics,
            'num_iterations': num_iterations,
            'print_interval': print_interval,
            'alpha': alpha,
            'beta': beta,
            'num_burnin': num_burnin,
            'associations': associations}

    # Initialize the model with basic parameters
    response = _turicreate.extensions._text.topicmodel_init(opts)
    m = TopicModel(response['model'])

    # If initial_topics provided, load it into the model
    if isinstance(initial_topics, _turicreate.SFrame):
        assert set(['vocabulary', 'topic_probabilities']) == \
            set(initial_topics.column_names()), \
            "The provided initial_topics does not have the proper format," \
            " e.g. wrong column names."
        observed_topics = initial_topics['topic_probabilities'].apply(
            lambda x: len(x))
        assert all(observed_topics == num_topics), \
            "Provided num_topics value does not match the number of provided initial_topics."

        # Rough estimate of total number of words
        weight = len(dataset) * 1000

        opts = {'model': m.__proxy__,
                'topics': initial_topics['topic_probabilities'],
                'vocabulary': initial_topics['vocabulary'],
                'weight': weight}
        response = _turicreate.extensions._text.topicmodel_set_topics(opts)
        m = TopicModel(response['model'])

    # Train the model on the given data set and retrieve predictions
    opts = {'model': m.__proxy__,
            'data': dataset,
            'verbose': verbose,
            'validation_train': validation_train,
            'validation_test': validation_test}

    response = _turicreate.extensions._text.topicmodel_train(opts)
    m = TopicModel(response['model'])

    return m
python
def create(dataset,
           num_topics=10,
           initial_topics=None,
           alpha=None,
           beta=.1,
           num_iterations=10,
           num_burnin=5,
           associations=None,
           verbose=False,
           print_interval=10,
           validation_set=None,
           method='auto'):
    """
    Create a topic model from the given data set. A topic model assumes each
    document is a mixture of a set of topics, where for each topic some words
    are more likely than others. One statistical approach to do this is called
    a "topic model". This method learns a topic model for the given document
    collection.

    Parameters
    ----------
    dataset : SArray of type dict or SFrame with a single column of type dict
        A bag of words representation of a document corpus.
        Each element is a dictionary representing a single document, where
        the keys are words and the values are the number of times that word
        occurs in that document.

    num_topics : int, optional
        The number of topics to learn.

    initial_topics : SFrame, optional
        An SFrame with a column of unique words representing the vocabulary
        and a column of dense vectors representing the probability of that
        word given each topic. When provided, these values are used to
        initialize the algorithm.

    alpha : float, optional
        Hyperparameter that controls the diversity of topics in a document.
        Smaller values encourage fewer topics per document.
        Provided value must be positive. Default value is 50/num_topics.

    beta : float, optional
        Hyperparameter that controls the diversity of words in a topic.
        Smaller values encourage fewer words per topic. Provided value
        must be positive.

    num_iterations : int, optional
        The number of iterations to perform.

    num_burnin : int, optional
        The number of iterations to perform when inferring the topics for
        documents at prediction time.

    verbose : bool, optional
        When True, print most probable words for each topic while printing
        progress.

    print_interval : int, optional
        The number of iterations to wait between progress reports.

    associations : SFrame, optional
        An SFrame with two columns named "word" and "topic" containing words
        and the topic id that the word should be associated with. These words
        are not considered during learning.

    validation_set : SArray of type dict or SFrame with a single column
        A bag of words representation of a document corpus, similar to the
        format required for `dataset`. This will be used to monitor model
        performance during training. Each document in the provided validation
        set is randomly split: the first portion is used to estimate which
        topic each document belongs to, and the second portion is used to
        estimate the model's performance at predicting the unseen words in
        the test data.

    method : {'cgs', 'alias'}, optional
        The algorithm used for learning the model.

        - *cgs:* Collapsed Gibbs sampling
        - *alias:* AliasLDA method.

    Returns
    -------
    out : TopicModel
        A fitted topic model. This can be used with
        :py:func:`~TopicModel.get_topics()` and
        :py:func:`~TopicModel.predict()`. While fitting is in progress,
        several metrics are shown, including:

        +------------------+---------------------------------------------------+
        | Field            | Description                                       |
        +==================+===================================================+
        | Elapsed Time     | The number of elapsed seconds.                    |
        +------------------+---------------------------------------------------+
        | Tokens/second    | The number of unique words processed per second   |
        +------------------+---------------------------------------------------+
        | Est. Perplexity  | An estimate of the model's ability to model the   |
        |                  | training data. See the documentation on evaluate. |
        +------------------+---------------------------------------------------+

    See Also
    --------
    TopicModel, TopicModel.get_topics, TopicModel.predict,
    turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate

    References
    ----------
    - `Wikipedia - Latent Dirichlet allocation
      <http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_

    - Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
      Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
      KDD 2014.

    Examples
    --------
    The following example includes an SArray of documents, where each element
    represents a document in "bag of words" representation -- a dictionary
    with word keys and whose values are the number of times that word occurred
    in the document:

    >>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')

    Once in this form, it is straightforward to learn a topic model.

    >>> m = turicreate.topic_model.create(docs)

    It is also easy to create a new topic model from an old one -- whether
    it was created using Turi Create or another package.

    >>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])

    To manually fix several words to always be assigned to a topic, use the
    `associations` argument. The following will ensure that topic 0 has the
    most probability for each of the provided words:

    >>> from turicreate import SFrame
    >>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
                               'topic': [0, 0, 0]})
    >>> m = turicreate.topic_model.create(docs,
                                          associations=associations)

    More advanced usage allows you to control aspects of the model and the
    learning method.

    >>> import turicreate as tc
    >>> m = tc.topic_model.create(docs,
                                  num_topics=20,       # number of topics
                                  num_iterations=10,   # algorithm parameters
                                  alpha=.01, beta=.1)  # hyperparameters

    To evaluate the model's ability to generalize, we can create a train/test
    split where a portion of the words in each document are held out from
    training.

    >>> train, test = tc.text_analytics.random_split(docs, .8)
    >>> m = tc.topic_model.create(train)
    >>> results = m.evaluate(test)
    >>> print(results['perplexity'])
    """
    dataset = _check_input(dataset)

    _check_categorical_option_type("method", method, ['auto', 'cgs', 'alias'])
    if method == 'cgs' or method == 'auto':
        model_name = 'cgs_topic_model'
    else:
        model_name = 'alias_topic_model'

    # If associations are provided, check they are in the proper format
    if associations is None:
        associations = _turicreate.SFrame({'word': [], 'topic': []})
    if isinstance(associations, _turicreate.SFrame) and \
       associations.num_rows() > 0:
        assert set(associations.column_names()) == set(['word', 'topic']), \
            "Provided associations must be an SFrame containing a word" \
            " column and a topic column."
        assert associations['word'].dtype == str, \
            "Words must be strings."
        assert associations['topic'].dtype == int, \
            "Topic ids must be of int type."
    if alpha is None:
        alpha = float(50) / num_topics

    if validation_set is not None:
        _check_input(validation_set)  # Must be a single column
        if isinstance(validation_set, _turicreate.SFrame):
            column_name = validation_set.column_names()[0]
            validation_set = validation_set[column_name]
        (validation_train, validation_test) = _random_split(validation_set)
    else:
        validation_train = _SArray()
        validation_test = _SArray()

    opts = {'model_name': model_name,
            'data': dataset,
            'num_topics': num_topics,
            'num_iterations': num_iterations,
            'print_interval': print_interval,
            'alpha': alpha,
            'beta': beta,
            'num_burnin': num_burnin,
            'associations': associations}

    # Initialize the model with basic parameters
    response = _turicreate.extensions._text.topicmodel_init(opts)
    m = TopicModel(response['model'])

    # If initial_topics provided, load it into the model
    if isinstance(initial_topics, _turicreate.SFrame):
        assert set(['vocabulary', 'topic_probabilities']) == \
            set(initial_topics.column_names()), \
            "The provided initial_topics does not have the proper format," \
            " e.g. wrong column names."
        observed_topics = initial_topics['topic_probabilities'].apply(
            lambda x: len(x))
        assert all(observed_topics == num_topics), \
            "Provided num_topics value does not match the number of provided initial_topics."

        # Rough estimate of total number of words
        weight = len(dataset) * 1000

        opts = {'model': m.__proxy__,
                'topics': initial_topics['topic_probabilities'],
                'vocabulary': initial_topics['vocabulary'],
                'weight': weight}
        response = _turicreate.extensions._text.topicmodel_set_topics(opts)
        m = TopicModel(response['model'])

    # Train the model on the given data set and retrieve predictions
    opts = {'model': m.__proxy__,
            'data': dataset,
            'verbose': verbose,
            'validation_train': validation_train,
            'validation_test': validation_test}

    response = _turicreate.extensions._text.topicmodel_train(opts)
    m = TopicModel(response['model'])

    return m
[ "def", "create", "(", "dataset", ",", "num_topics", "=", "10", ",", "initial_topics", "=", "None", ",", "alpha", "=", "None", ",", "beta", "=", ".1", ",", "num_iterations", "=", "10", ",", "num_burnin", "=", "5", ",", "associations", "=", "None", ",", "verbose", "=", "False", ",", "print_interval", "=", "10", ",", "validation_set", "=", "None", ",", "method", "=", "'auto'", ")", ":", "dataset", "=", "_check_input", "(", "dataset", ")", "_check_categorical_option_type", "(", "\"method\"", ",", "method", ",", "[", "'auto'", ",", "'cgs'", ",", "'alias'", "]", ")", "if", "method", "==", "'cgs'", "or", "method", "==", "'auto'", ":", "model_name", "=", "'cgs_topic_model'", "else", ":", "model_name", "=", "'alias_topic_model'", "# If associations are provided, check they are in the proper format", "if", "associations", "is", "None", ":", "associations", "=", "_turicreate", ".", "SFrame", "(", "{", "'word'", ":", "[", "]", ",", "'topic'", ":", "[", "]", "}", ")", "if", "isinstance", "(", "associations", ",", "_turicreate", ".", "SFrame", ")", "and", "associations", ".", "num_rows", "(", ")", ">", "0", ":", "assert", "set", "(", "associations", ".", "column_names", "(", ")", ")", "==", "set", "(", "[", "'word'", ",", "'topic'", "]", ")", ",", "\"Provided associations must be an SFrame containing a word column\\\n and a topic column.\"", "assert", "associations", "[", "'word'", "]", ".", "dtype", "==", "str", ",", "\"Words must be strings.\"", "assert", "associations", "[", "'topic'", "]", ".", "dtype", "==", "int", ",", "\"Topic ids must be of int type.\"", "if", "alpha", "is", "None", ":", "alpha", "=", "float", "(", "50", ")", "/", "num_topics", "if", "validation_set", "is", "not", "None", ":", "_check_input", "(", "validation_set", ")", "# Must be a single column", "if", "isinstance", "(", "validation_set", ",", "_turicreate", ".", "SFrame", ")", ":", "column_name", "=", "validation_set", ".", "column_names", "(", ")", "[", "0", "]", "validation_set", "=", "validation_set", "[", "column_name", "]", "(", "validation_train", ",", "validation_test", ")", "=", "_random_split", "(", "validation_set", ")", "else", ":", "validation_train", "=", "_SArray", "(", ")", "validation_test", "=", "_SArray", "(", ")", "opts", "=", "{", "'model_name'", ":", "model_name", ",", "'data'", ":", "dataset", ",", "'num_topics'", ":", "num_topics", ",", "'num_iterations'", ":", "num_iterations", ",", "'print_interval'", ":", "print_interval", ",", "'alpha'", ":", "alpha", ",", "'beta'", ":", "beta", ",", "'num_burnin'", ":", "num_burnin", ",", "'associations'", ":", "associations", "}", "# Initialize the model with basic parameters", "response", "=", "_turicreate", ".", "extensions", ".", "_text", ".", "topicmodel_init", "(", "opts", ")", "m", "=", "TopicModel", "(", "response", "[", "'model'", "]", ")", "# If initial_topics provided, load it into the model", "if", "isinstance", "(", "initial_topics", ",", "_turicreate", ".", "SFrame", ")", ":", "assert", "set", "(", "[", "'vocabulary'", ",", "'topic_probabilities'", "]", ")", "==", "set", "(", "initial_topics", ".", "column_names", "(", ")", ")", ",", "\"The provided initial_topics does not have the proper format, \\\n e.g. 
wrong column names.\"", "observed_topics", "=", "initial_topics", "[", "'topic_probabilities'", "]", ".", "apply", "(", "lambda", "x", ":", "len", "(", "x", ")", ")", "assert", "all", "(", "observed_topics", "==", "num_topics", ")", ",", "\"Provided num_topics value does not match the number of provided initial_topics.\"", "# Rough estimate of total number of words", "weight", "=", "len", "(", "dataset", ")", "*", "1000", "opts", "=", "{", "'model'", ":", "m", ".", "__proxy__", ",", "'topics'", ":", "initial_topics", "[", "'topic_probabilities'", "]", ",", "'vocabulary'", ":", "initial_topics", "[", "'vocabulary'", "]", ",", "'weight'", ":", "weight", "}", "response", "=", "_turicreate", ".", "extensions", ".", "_text", ".", "topicmodel_set_topics", "(", "opts", ")", "m", "=", "TopicModel", "(", "response", "[", "'model'", "]", ")", "# Train the model on the given data set and retrieve predictions", "opts", "=", "{", "'model'", ":", "m", ".", "__proxy__", ",", "'data'", ":", "dataset", ",", "'verbose'", ":", "verbose", ",", "'validation_train'", ":", "validation_train", ",", "'validation_test'", ":", "validation_test", "}", "response", "=", "_turicreate", ".", "extensions", ".", "_text", ".", "topicmodel_train", "(", "opts", ")", "m", "=", "TopicModel", "(", "response", "[", "'model'", "]", ")", "return", "m" ]
Create a topic model from the given data set. A topic model assumes each
document is a mixture of a set of topics, where for each topic some words
are more likely than others. One statistical approach to do this is called
a "topic model". This method learns a topic model for the given document
collection.

Parameters
----------
dataset : SArray of type dict or SFrame with a single column of type dict
    A bag of words representation of a document corpus.
    Each element is a dictionary representing a single document, where
    the keys are words and the values are the number of times that word
    occurs in that document.

num_topics : int, optional
    The number of topics to learn.

initial_topics : SFrame, optional
    An SFrame with a column of unique words representing the vocabulary
    and a column of dense vectors representing the probability of that
    word given each topic. When provided, these values are used to
    initialize the algorithm.

alpha : float, optional
    Hyperparameter that controls the diversity of topics in a document.
    Smaller values encourage fewer topics per document.
    Provided value must be positive. Default value is 50/num_topics.

beta : float, optional
    Hyperparameter that controls the diversity of words in a topic.
    Smaller values encourage fewer words per topic. Provided value
    must be positive.

num_iterations : int, optional
    The number of iterations to perform.

num_burnin : int, optional
    The number of iterations to perform when inferring the topics for
    documents at prediction time.

verbose : bool, optional
    When True, print most probable words for each topic while printing
    progress.

print_interval : int, optional
    The number of iterations to wait between progress reports.

associations : SFrame, optional
    An SFrame with two columns named "word" and "topic" containing words
    and the topic id that the word should be associated with. These words
    are not considered during learning.

validation_set : SArray of type dict or SFrame with a single column
    A bag of words representation of a document corpus, similar to the
    format required for `dataset`. This will be used to monitor model
    performance during training. Each document in the provided validation
    set is randomly split: the first portion is used to estimate which
    topic each document belongs to, and the second portion is used to
    estimate the model's performance at predicting the unseen words in
    the test data.

method : {'cgs', 'alias'}, optional
    The algorithm used for learning the model.

    - *cgs:* Collapsed Gibbs sampling
    - *alias:* AliasLDA method.

Returns
-------
out : TopicModel
    A fitted topic model. This can be used with
    :py:func:`~TopicModel.get_topics()` and
    :py:func:`~TopicModel.predict()`. While fitting is in progress,
    several metrics are shown, including:

    +------------------+---------------------------------------------------+
    | Field            | Description                                       |
    +==================+===================================================+
    | Elapsed Time     | The number of elapsed seconds.                    |
    +------------------+---------------------------------------------------+
    | Tokens/second    | The number of unique words processed per second   |
    +------------------+---------------------------------------------------+
    | Est. Perplexity  | An estimate of the model's ability to model the   |
    |                  | training data. See the documentation on evaluate. |
    +------------------+---------------------------------------------------+

See Also
--------
TopicModel, TopicModel.get_topics, TopicModel.predict,
turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate

References
----------
- `Wikipedia - Latent Dirichlet allocation
  <http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_

- Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
  Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
  KDD 2014.

Examples
--------
The following example includes an SArray of documents, where each element
represents a document in "bag of words" representation -- a dictionary
with word keys and whose values are the number of times that word occurred
in the document:

>>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')

Once in this form, it is straightforward to learn a topic model.

>>> m = turicreate.topic_model.create(docs)

It is also easy to create a new topic model from an old one -- whether
it was created using Turi Create or another package.

>>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])

To manually fix several words to always be assigned to a topic, use the
`associations` argument. The following will ensure that topic 0 has the
most probability for each of the provided words:

>>> from turicreate import SFrame
>>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
                           'topic': [0, 0, 0]})
>>> m = turicreate.topic_model.create(docs,
                                      associations=associations)

More advanced usage allows you to control aspects of the model and the
learning method.

>>> import turicreate as tc
>>> m = tc.topic_model.create(docs,
                              num_topics=20,       # number of topics
                              num_iterations=10,   # algorithm parameters
                              alpha=.01, beta=.1)  # hyperparameters

To evaluate the model's ability to generalize, we can create a train/test
split where a portion of the words in each document are held out from
training.

>>> train, test = tc.text_analytics.random_split(docs, .8)
>>> m = tc.topic_model.create(train)
>>> results = m.evaluate(test)
>>> print(results['perplexity'])
[ "Create", "a", "topic", "model", "from", "the", "given", "data", "set", ".", "A", "topic", "model", "assumes", "each", "document", "is", "a", "mixture", "of", "a", "set", "of", "topics", "where", "for", "each", "topic", "some", "words", "are", "more", "likely", "than", "others", ".", "One", "statistical", "approach", "to", "do", "this", "is", "called", "a", "topic", "model", ".", "This", "method", "learns", "a", "topic", "model", "for", "the", "given", "document", "collection", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L35-L271
train
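The bag-of-words input format this record documents is easy to build by hand. A minimal sketch follows; the two-document corpus and all word counts are invented purely for illustration:

import turicreate as tc

# Each document is a dict mapping word -> occurrence count, exactly the
# shape `create` expects for `dataset`.
docs = tc.SArray([
    {'hurricane': 3, 'wind': 2, 'storm': 2},
    {'team': 4, 'score': 2, 'game': 3},
])

# A tiny run of the documented method; the parameters are kept small
# only so the example finishes quickly.
m = tc.topic_model.create(docs, num_topics=2, num_iterations=10)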
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
perplexity
def perplexity(test_data, predictions, topics, vocabulary): """ Compute the perplexity of a set of test documents given a set of predicted topics. Let theta be the matrix of document-topic probabilities, where theta_ik = p(topic k | document i). Let Phi be the matrix of term-topic probabilities, where phi_jk = p(word j | topic k). Then for a given word w in document d, we compute .. math:: p(word | \theta[doc_id,:], \phi[word_id,:]) = \sum_k \theta[doc_id, k] * \phi[word_id, k] We compute the log-likelihood to be: .. math:: l(D) = \sum_{i \in D} \sum_{j \in D_i} count_{i,j} * log Pr(word_{i,j} | \theta, \phi) and perplexity to be .. math:: \exp \{ - l(D) / \sum_i \sum_j count_{i,j} \} Parameters ---------- test_data : SArray of type dict or SFrame with a single column of type dict Documents in bag-of-words format. predictions : SArray An SArray of vector type, where each vector contains estimates of the probability that this document belongs to each of the topics. This must have the same size as test_data; otherwise an exception occurs. This can be the output of :py:func:`~turicreate.topic_model.TopicModel.predict`, for example. topics : SFrame An SFrame containing two columns: 'vocabulary' and 'topic_probabilities'. The value returned by m['topics'] is a valid input for this argument, where m is a trained :py:class:`~turicreate.topic_model.TopicModel`. vocabulary : SArray An SArray of words to use. All words in test_data that are not in this vocabulary will be ignored. Notes ----- For more details, see equations 13-16 of [PattersonTeh2013]. References ---------- .. [PERP] `Wikipedia - perplexity <http://en.wikipedia.org/wiki/Perplexity>`_ .. [PattersonTeh2013] Patterson, Teh. `"Stochastic Gradient Riemannian Langevin Dynamics on the Probability Simplex" <http://www.stats.ox.ac.uk/~teh/research/compstats/PatTeh2013a.pdf>`_ NIPS, 2013. Examples -------- >>> from turicreate import topic_model >>> train_data, test_data = turicreate.text_analytics.random_split(docs) >>> m = topic_model.create(train_data) >>> pred = m.predict(train_data) >>> topics = m['topics'] >>> p = topic_model.perplexity(test_data, pred, topics['topic_probabilities'], topics['vocabulary']) >>> p 1720.7 # lower values are better """ test_data = _check_input(test_data) assert isinstance(predictions, _SArray), \ "Predictions must be an SArray of vector type." assert predictions.dtype == _array.array, \ "Predictions must be probabilities. Try using m.predict() with " + \ "output_type='probability'." opts = {'test_data': test_data, 'predictions': predictions, 'topics': topics, 'vocabulary': vocabulary} response = _turicreate.extensions._text.topicmodel_get_perplexity(opts) return response['perplexity']
python
def perplexity(test_data, predictions, topics, vocabulary): """ Compute the perplexity of a set of test documents given a set of predicted topics. Let theta be the matrix of document-topic probabilities, where theta_ik = p(topic k | document i). Let Phi be the matrix of term-topic probabilities, where phi_jk = p(word j | topic k). Then for a given word w in document d, we compute .. math:: p(word | \theta[doc_id,:], \phi[word_id,:]) = \sum_k \theta[doc_id, k] * \phi[word_id, k] We compute the log-likelihood to be: .. math:: l(D) = \sum_{i \in D} \sum_{j \in D_i} count_{i,j} * log Pr(word_{i,j} | \theta, \phi) and perplexity to be .. math:: \exp \{ - l(D) / \sum_i \sum_j count_{i,j} \} Parameters ---------- test_data : SArray of type dict or SFrame with a single column of type dict Documents in bag-of-words format. predictions : SArray An SArray of vector type, where each vector contains estimates of the probability that this document belongs to each of the topics. This must have the same size as test_data; otherwise an exception occurs. This can be the output of :py:func:`~turicreate.topic_model.TopicModel.predict`, for example. topics : SFrame An SFrame containing two columns: 'vocabulary' and 'topic_probabilities'. The value returned by m['topics'] is a valid input for this argument, where m is a trained :py:class:`~turicreate.topic_model.TopicModel`. vocabulary : SArray An SArray of words to use. All words in test_data that are not in this vocabulary will be ignored. Notes ----- For more details, see equations 13-16 of [PattersonTeh2013]. References ---------- .. [PERP] `Wikipedia - perplexity <http://en.wikipedia.org/wiki/Perplexity>`_ .. [PattersonTeh2013] Patterson, Teh. `"Stochastic Gradient Riemannian Langevin Dynamics on the Probability Simplex" <http://www.stats.ox.ac.uk/~teh/research/compstats/PatTeh2013a.pdf>`_ NIPS, 2013. Examples -------- >>> from turicreate import topic_model >>> train_data, test_data = turicreate.text_analytics.random_split(docs) >>> m = topic_model.create(train_data) >>> pred = m.predict(train_data) >>> topics = m['topics'] >>> p = topic_model.perplexity(test_data, pred, topics['topic_probabilities'], topics['vocabulary']) >>> p 1720.7 # lower values are better """ test_data = _check_input(test_data) assert isinstance(predictions, _SArray), \ "Predictions must be an SArray of vector type." assert predictions.dtype == _array.array, \ "Predictions must be probabilities. Try using m.predict() with " + \ "output_type='probability'." opts = {'test_data': test_data, 'predictions': predictions, 'topics': topics, 'vocabulary': vocabulary} response = _turicreate.extensions._text.topicmodel_get_perplexity(opts) return response['perplexity']
[ "def", "perplexity", "(", "test_data", ",", "predictions", ",", "topics", ",", "vocabulary", ")", ":", "test_data", "=", "_check_input", "(", "test_data", ")", "assert", "isinstance", "(", "predictions", ",", "_SArray", ")", ",", "\"Predictions must be an SArray of vector type.\"", "assert", "predictions", ".", "dtype", "==", "_array", ".", "array", ",", "\"Predictions must be probabilities. Try using m.predict() with \"", "+", "\"output_type='probability'.\"", "opts", "=", "{", "'test_data'", ":", "test_data", ",", "'predictions'", ":", "predictions", ",", "'topics'", ":", "topics", ",", "'vocabulary'", ":", "vocabulary", "}", "response", "=", "_turicreate", ".", "extensions", ".", "_text", ".", "topicmodel_get_perplexity", "(", "opts", ")", "return", "response", "[", "'perplexity'", "]" ]
Compute the perplexity of a set of test documents given a set of predicted topics. Let theta be the matrix of document-topic probabilities, where theta_ik = p(topic k | document i). Let Phi be the matrix of term-topic probabilities, where phi_jk = p(word j | topic k). Then for a given word w in document d, we compute .. math:: p(word | \theta[doc_id,:], \phi[word_id,:]) = \sum_k \theta[doc_id, k] * \phi[word_id, k] We compute the log-likelihood to be: .. math:: l(D) = \sum_{i \in D} \sum_{j \in D_i} count_{i,j} * log Pr(word_{i,j} | \theta, \phi) and perplexity to be .. math:: \exp \{ - l(D) / \sum_i \sum_j count_{i,j} \} Parameters ---------- test_data : SArray of type dict or SFrame with a single column of type dict Documents in bag-of-words format. predictions : SArray An SArray of vector type, where each vector contains estimates of the probability that this document belongs to each of the topics. This must have the same size as test_data; otherwise an exception occurs. This can be the output of :py:func:`~turicreate.topic_model.TopicModel.predict`, for example. topics : SFrame An SFrame containing two columns: 'vocabulary' and 'topic_probabilities'. The value returned by m['topics'] is a valid input for this argument, where m is a trained :py:class:`~turicreate.topic_model.TopicModel`. vocabulary : SArray An SArray of words to use. All words in test_data that are not in this vocabulary will be ignored. Notes ----- For more details, see equations 13-16 of [PattersonTeh2013]. References ---------- .. [PERP] `Wikipedia - perplexity <http://en.wikipedia.org/wiki/Perplexity>`_ .. [PattersonTeh2013] Patterson, Teh. `"Stochastic Gradient Riemannian Langevin Dynamics on the Probability Simplex" <http://www.stats.ox.ac.uk/~teh/research/compstats/PatTeh2013a.pdf>`_ NIPS, 2013. Examples -------- >>> from turicreate import topic_model >>> train_data, test_data = turicreate.text_analytics.random_split(docs) >>> m = topic_model.create(train_data) >>> pred = m.predict(train_data) >>> topics = m['topics'] >>> p = topic_model.perplexity(test_data, pred, topics['topic_probabilities'], topics['vocabulary']) >>> p 1720.7 # lower values are better
[ "Compute", "the", "perplexity", "of", "a", "set", "of", "test", "documents", "given", "a", "set", "of", "predicted", "topics", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L740-L826
train
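The log-likelihood and perplexity formulas in the docstring above are easy to verify numerically. Below is a minimal NumPy sketch with made-up theta, phi, and count matrices; it is not the toolkit's implementation (which runs in the C++ extension), just a direct transcription of the math:

import numpy as np

theta = np.array([[0.8, 0.2],     # theta[i, k] = p(topic k | document i)
                  [0.3, 0.7]])
phi = np.array([[0.5, 0.1],       # phi[j, k] = p(word j | topic k)
                [0.3, 0.3],
                [0.2, 0.6]])
counts = np.array([[2, 1, 0],     # counts[i, j] = count of word j in doc i
                   [0, 1, 3]])

# p(word j | document i) = sum_k theta[i, k] * phi[j, k]
p = theta.dot(phi.T)
log_likelihood = (counts * np.log(p)).sum()
perplexity_value = np.exp(-log_likelihood / counts.sum())
print(perplexity_value)           # lower values indicate a better fit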
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
TopicModel._get_summary_struct
def _get_summary_struct(self): """ Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<label>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object. """ section_titles=['Schema','Settings'] vocab_length = len(self.vocabulary) verbose = self.verbose == 1 sections=[ [ ('Vocabulary Size',_precomputed_field(vocab_length)) ], [ ('Number of Topics', 'num_topics'), ('alpha','alpha'), ('beta','beta'), ('Iterations', 'num_iterations'), ('Training time', 'training_time'), ('Verbose', _precomputed_field(verbose)) ] ] return (sections, section_titles)
python
def _get_summary_struct(self): """ Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<label>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object. """ section_titles=['Schema','Settings'] vocab_length = len(self.vocabulary) verbose = self.verbose == 1 sections=[ [ ('Vocabulary Size',_precomputed_field(vocab_length)) ], [ ('Number of Topics', 'num_topics'), ('alpha','alpha'), ('beta','beta'), ('Iterations', 'num_iterations'), ('Training time', 'training_time'), ('Verbose', _precomputed_field(verbose)) ] ] return (sections, section_titles)
[ "def", "_get_summary_struct", "(", "self", ")", ":", "section_titles", "=", "[", "'Schema'", ",", "'Settings'", "]", "vocab_length", "=", "len", "(", "self", ".", "vocabulary", ")", "verbose", "=", "self", ".", "verbose", "==", "1", "sections", "=", "[", "[", "(", "'Vocabulary Size'", ",", "_precomputed_field", "(", "vocab_length", ")", ")", "]", ",", "[", "(", "'Number of Topics'", ",", "'num_topics'", ")", ",", "(", "'alpha'", ",", "'alpha'", ")", ",", "(", "'beta'", ",", "'beta'", ")", ",", "(", "'Iterations'", ",", "'num_iterations'", ")", ",", "(", "'Training time'", ",", "'training_time'", ")", ",", "(", "'Verbose'", ",", "_precomputed_field", "(", "verbose", ")", ")", "]", "]", "return", "(", "sections", ",", "section_titles", ")" ]
Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<label>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object.
[ "Returns", "a", "structured", "description", "of", "the", "model", "including", "(", "where", "relevant", ")", "the", "schema", "of", "the", "training", "data", "description", "of", "the", "training", "data", "training", "statistics", "and", "model", "hyperparameters", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L303-L338
train
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
TopicModel._get
def _get(self, field): """ Return the value of a given field. The list of all queryable fields is detailed below, and can be obtained with the :py:func:`~TopicModel._list_fields` method. +-----------------------+----------------------------------------------+ | Field | Description | +=======================+==============================================+ | topics | An SFrame containing a column with the unique| | | words observed during training, and a column | | | of arrays containing the probability values | | | for each word given each of the topics. | +-----------------------+----------------------------------------------+ | vocabulary | An SArray containing the words used. This is | | | the same as the vocabulary column in the | | | topics field above. | +-----------------------+----------------------------------------------+ Parameters ---------- field : string Name of the field to be retrieved. Returns ------- out Value of the requested field. """ opts = {'model': self.__proxy__, 'field': field} response = _turicreate.extensions._text.topicmodel_get_value(opts) return response['value']
python
def _get(self, field): """ Return the value of a given field. The list of all queryable fields is detailed below, and can be obtained with the :py:func:`~TopicModel._list_fields` method. +-----------------------+----------------------------------------------+ | Field | Description | +=======================+==============================================+ | topics | An SFrame containing a column with the unique| | | words observed during training, and a column | | | of arrays containing the probability values | | | for each word given each of the topics. | +-----------------------+----------------------------------------------+ | vocabulary | An SArray containing the words used. This is | | | the same as the vocabulary column in the | | | topics field above. | +-----------------------+----------------------------------------------+ Parameters ---------- field : string Name of the field to be retrieved. Returns ------- out Value of the requested field. """ opts = {'model': self.__proxy__, 'field': field} response = _turicreate.extensions._text.topicmodel_get_value(opts) return response['value']
[ "def", "_get", "(", "self", ",", "field", ")", ":", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'field'", ":", "field", "}", "response", "=", "_turicreate", ".", "extensions", ".", "_text", ".", "topicmodel_get_value", "(", "opts", ")", "return", "response", "[", "'value'", "]" ]
Return the value of a given field. The list of all queryable fields is detailed below, and can be obtained with the :py:func:`~TopicModel._list_fields` method. +-----------------------+----------------------------------------------+ | Field | Description | +=======================+==============================================+ | topics | An SFrame containing a column with the unique| | | words observed during training, and a column | | | of arrays containing the probability values | | | for each word given each of the topics. | +-----------------------+----------------------------------------------+ | vocabulary | An SArray containing the words used. This is | | | the same as the vocabulary column in the | | | topics field above. | +-----------------------+----------------------------------------------+ Parameters ---------- field : string Name of the field to be retrieved. Returns ------- out Value of the requested field.
[ "Return", "the", "value", "of", "a", "given", "field", ".", "The", "list", "of", "all", "queryable", "fields", "is", "detailed", "below", "and", "can", "be", "obtained", "with", "the", ":", "py", ":", "func", ":", "~TopicModel", ".", "_list_fields", "method", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L362-L394
train
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
TopicModel._training_stats
def _training_stats(self): """ Return a dictionary of statistics collected during creation of the model. These statistics are also available with the ``get`` method and are described in more detail in that method's documentation. Returns ------- out : dict Dictionary of statistics compiled during creation of the TopicModel. See Also -------- summary Examples -------- >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs) >>> m._training_stats() {'training_iterations': 20, 'training_time': 20.5034} """ fields = self._list_fields() stat_fields = ['training_time', 'training_iterations'] if 'validation_perplexity' in fields: stat_fields.append('validation_perplexity') ret = {k : self._get(k) for k in stat_fields} return ret
python
def _training_stats(self): """ Return a dictionary of statistics collected during creation of the model. These statistics are also available with the ``get`` method and are described in more detail in that method's documentation. Returns ------- out : dict Dictionary of statistics compiled during creation of the TopicModel. See Also -------- summary Examples -------- >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs) >>> m._training_stats() {'training_iterations': 20, 'training_time': 20.5034} """ fields = self._list_fields() stat_fields = ['training_time', 'training_iterations'] if 'validation_perplexity' in fields: stat_fields.append('validation_perplexity') ret = {k : self._get(k) for k in stat_fields} return ret
[ "def", "_training_stats", "(", "self", ")", ":", "fields", "=", "self", ".", "_list_fields", "(", ")", "stat_fields", "=", "[", "'training_time'", ",", "'training_iterations'", "]", "if", "'validation_perplexity'", "in", "fields", ":", "stat_fields", ".", "append", "(", "'validation_perplexity'", ")", "ret", "=", "{", "k", ":", "self", ".", "_get", "(", "k", ")", "for", "k", "in", "stat_fields", "}", "return", "ret" ]
Return a dictionary of statistics collected during creation of the model. These statistics are also available with the ``get`` method and are described in more detail in that method's documentation. Returns ------- out : dict Dictionary of statistics compiled during creation of the TopicModel. See Also -------- summary Examples -------- >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs) >>> m._training_stats() {'training_iterations': 20, 'training_time': 20.5034}
[ "Return", "a", "dictionary", "of", "statistics", "collected", "during", "creation", "of", "the", "model", ".", "These", "statistics", "are", "also", "available", "with", "the", "get", "method", "and", "are", "described", "in", "more", "detail", "in", "that", "method", "s", "documentation", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L396-L428
train
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
TopicModel.get_topics
def get_topics(self, topic_ids=None, num_words=5, cdf_cutoff=1.0, output_type='topic_probabilities'): """ Get the words associated with a given topic. The score column is the probability of choosing that word given that you have chosen a particular topic. Parameters ---------- topic_ids : list of int, optional The topics to retrieve words. Topic ids are zero-based. Throws an error if greater than or equal to m['num_topics'], or if the requested topic name is not present. num_words : int, optional The number of words to show. cdf_cutoff : float, optional Allows one to only show the most probable words whose cumulative probability is below this cutoff. For example if there exist three words where .. math:: p(word_1 | topic_k) = .1 p(word_2 | topic_k) = .2 p(word_3 | topic_k) = .05 then setting :math:`cdf_{cutoff}=.3` would return only :math:`word_1` and :math:`word_2` since :math:`p(word_1 | topic_k) + p(word_2 | topic_k) <= cdf_{cutoff}` output_type : {'topic_probabilities' | 'topic_words'}, optional Determine the type of desired output. See below. Returns ------- out : SFrame If output_type is 'topic_probabilities', then the returned value is an SFrame with a column of words ranked by a column of scores for each topic. Otherwise, the returned value is a SArray where each element is a list of the most probable words for each topic. Examples -------- Get the highest ranked words for all topics. >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs, num_iterations=50) >>> m.get_topics() +-------+----------+-----------------+ | topic | word | score | +-------+----------+-----------------+ | 0 | cell | 0.028974400831 | | 0 | input | 0.0259470208503 | | 0 | image | 0.0215721599763 | | 0 | visual | 0.0173635081992 | | 0 | object | 0.0172447874156 | | 1 | function | 0.0482834508265 | | 1 | input | 0.0456270024091 | | 1 | point | 0.0302662839454 | | 1 | result | 0.0239474934631 | | 1 | problem | 0.0231750116011 | | ... | ... | ... | +-------+----------+-----------------+ Get the highest ranked words for topics 0 and 1 and show 15 words per topic. >>> m.get_topics([0, 1], num_words=15) +-------+----------+------------------+ | topic | word | score | +-------+----------+------------------+ | 0 | cell | 0.028974400831 | | 0 | input | 0.0259470208503 | | 0 | image | 0.0215721599763 | | 0 | visual | 0.0173635081992 | | 0 | object | 0.0172447874156 | | 0 | response | 0.0139740298286 | | 0 | layer | 0.0122585145062 | | 0 | features | 0.0115343177265 | | 0 | feature | 0.0103530459301 | | 0 | spatial | 0.00823387994361 | | ... | ... | ... | +-------+----------+------------------+ If one wants to instead just get the top words per topic, one may change the format of the output as follows. >>> topics = m.get_topics(output_type='topic_words') dtype: list Rows: 10 [['cell', 'image', 'input', 'object', 'visual'], ['algorithm', 'data', 'learning', 'method', 'set'], ['function', 'input', 'point', 'problem', 'result'], ['model', 'output', 'pattern', 'set', 'unit'], ['action', 'learning', 'net', 'problem', 'system'], ['error', 'function', 'network', 'parameter', 'weight'], ['information', 'level', 'neural', 'threshold', 'weight'], ['control', 'field', 'model', 'network', 'neuron'], ['hidden', 'layer', 'system', 'training', 'vector'], ['component', 'distribution', 'local', 'model', 'optimal']] """ _check_categorical_option_type('output_type', output_type, ['topic_probabilities', 'topic_words']) if topic_ids is None: topic_ids = list(range(self._get('num_topics'))) assert isinstance(topic_ids, list), \ "The provided topic_ids is not a list." if any([type(x) == str for x in topic_ids]): raise ValueError("Only integer topic_ids can be used at this point in time.") if not all([x >= 0 and x < self.num_topics for x in topic_ids]): raise ValueError("Topic id values must be non-negative and less than the " + \ "number of topics used to fit the model.") opts = {'model': self.__proxy__, 'topic_ids': topic_ids, 'num_words': num_words, 'cdf_cutoff': cdf_cutoff} response = _turicreate.extensions._text.topicmodel_get_topic(opts) ret = response['top_words'] def sort_wordlist_by_prob(z): words = sorted(z.items(), key=_operator.itemgetter(1), reverse=True) return [word for (word, prob) in words] if output_type != 'topic_probabilities': ret = ret.groupby('topic', {'word': _turicreate.aggregate.CONCAT('word', 'score')}) words = ret.sort('topic')['word'].apply(sort_wordlist_by_prob) ret = _SFrame({'words': words}) return ret
python
def get_topics(self, topic_ids=None, num_words=5, cdf_cutoff=1.0, output_type='topic_probabilities'): """ Get the words associated with a given topic. The score column is the probability of choosing that word given that you have chosen a particular topic. Parameters ---------- topic_ids : list of int, optional The topics to retrieve words. Topic ids are zero-based. Throws an error if greater than or equal to m['num_topics'], or if the requested topic name is not present. num_words : int, optional The number of words to show. cdf_cutoff : float, optional Allows one to only show the most probable words whose cumulative probability is below this cutoff. For example if there exist three words where .. math:: p(word_1 | topic_k) = .1 p(word_2 | topic_k) = .2 p(word_3 | topic_k) = .05 then setting :math:`cdf_{cutoff}=.3` would return only :math:`word_1` and :math:`word_2` since :math:`p(word_1 | topic_k) + p(word_2 | topic_k) <= cdf_{cutoff}` output_type : {'topic_probabilities' | 'topic_words'}, optional Determine the type of desired output. See below. Returns ------- out : SFrame If output_type is 'topic_probabilities', then the returned value is an SFrame with a column of words ranked by a column of scores for each topic. Otherwise, the returned value is a SArray where each element is a list of the most probable words for each topic. Examples -------- Get the highest ranked words for all topics. >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs, num_iterations=50) >>> m.get_topics() +-------+----------+-----------------+ | topic | word | score | +-------+----------+-----------------+ | 0 | cell | 0.028974400831 | | 0 | input | 0.0259470208503 | | 0 | image | 0.0215721599763 | | 0 | visual | 0.0173635081992 | | 0 | object | 0.0172447874156 | | 1 | function | 0.0482834508265 | | 1 | input | 0.0456270024091 | | 1 | point | 0.0302662839454 | | 1 | result | 0.0239474934631 | | 1 | problem | 0.0231750116011 | | ... | ... | ... | +-------+----------+-----------------+ Get the highest ranked words for topics 0 and 1 and show 15 words per topic. >>> m.get_topics([0, 1], num_words=15) +-------+----------+------------------+ | topic | word | score | +-------+----------+------------------+ | 0 | cell | 0.028974400831 | | 0 | input | 0.0259470208503 | | 0 | image | 0.0215721599763 | | 0 | visual | 0.0173635081992 | | 0 | object | 0.0172447874156 | | 0 | response | 0.0139740298286 | | 0 | layer | 0.0122585145062 | | 0 | features | 0.0115343177265 | | 0 | feature | 0.0103530459301 | | 0 | spatial | 0.00823387994361 | | ... | ... | ... | +-------+----------+------------------+ If one wants to instead just get the top words per topic, one may change the format of the output as follows. >>> topics = m.get_topics(output_type='topic_words') dtype: list Rows: 10 [['cell', 'image', 'input', 'object', 'visual'], ['algorithm', 'data', 'learning', 'method', 'set'], ['function', 'input', 'point', 'problem', 'result'], ['model', 'output', 'pattern', 'set', 'unit'], ['action', 'learning', 'net', 'problem', 'system'], ['error', 'function', 'network', 'parameter', 'weight'], ['information', 'level', 'neural', 'threshold', 'weight'], ['control', 'field', 'model', 'network', 'neuron'], ['hidden', 'layer', 'system', 'training', 'vector'], ['component', 'distribution', 'local', 'model', 'optimal']] """ _check_categorical_option_type('output_type', output_type, ['topic_probabilities', 'topic_words']) if topic_ids is None: topic_ids = list(range(self._get('num_topics'))) assert isinstance(topic_ids, list), \ "The provided topic_ids is not a list." if any([type(x) == str for x in topic_ids]): raise ValueError("Only integer topic_ids can be used at this point in time.") if not all([x >= 0 and x < self.num_topics for x in topic_ids]): raise ValueError("Topic id values must be non-negative and less than the " + \ "number of topics used to fit the model.") opts = {'model': self.__proxy__, 'topic_ids': topic_ids, 'num_words': num_words, 'cdf_cutoff': cdf_cutoff} response = _turicreate.extensions._text.topicmodel_get_topic(opts) ret = response['top_words'] def sort_wordlist_by_prob(z): words = sorted(z.items(), key=_operator.itemgetter(1), reverse=True) return [word for (word, prob) in words] if output_type != 'topic_probabilities': ret = ret.groupby('topic', {'word': _turicreate.aggregate.CONCAT('word', 'score')}) words = ret.sort('topic')['word'].apply(sort_wordlist_by_prob) ret = _SFrame({'words': words}) return ret
[ "def", "get_topics", "(", "self", ",", "topic_ids", "=", "None", ",", "num_words", "=", "5", ",", "cdf_cutoff", "=", "1.0", ",", "output_type", "=", "'topic_probabilities'", ")", ":", "_check_categorical_option_type", "(", "'output_type'", ",", "output_type", ",", "[", "'topic_probabilities'", ",", "'topic_words'", "]", ")", "if", "topic_ids", "is", "None", ":", "topic_ids", "=", "list", "(", "range", "(", "self", ".", "_get", "(", "'num_topics'", ")", ")", ")", "assert", "isinstance", "(", "topic_ids", ",", "list", ")", ",", "\"The provided topic_ids is not a list.\"", "if", "any", "(", "[", "type", "(", "x", ")", "==", "str", "for", "x", "in", "topic_ids", "]", ")", ":", "raise", "ValueError", "(", "\"Only integer topic_ids can be used at this point in time.\"", ")", "if", "not", "all", "(", "[", "x", ">=", "0", "and", "x", "<", "self", ".", "num_topics", "for", "x", "in", "topic_ids", "]", ")", ":", "raise", "ValueError", "(", "\"Topic id values must be non-negative and less than the \"", "+", "\"number of topics used to fit the model.\"", ")", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'topic_ids'", ":", "topic_ids", ",", "'num_words'", ":", "num_words", ",", "'cdf_cutoff'", ":", "cdf_cutoff", "}", "response", "=", "_turicreate", ".", "extensions", ".", "_text", ".", "topicmodel_get_topic", "(", "opts", ")", "ret", "=", "response", "[", "'top_words'", "]", "def", "sort_wordlist_by_prob", "(", "z", ")", ":", "words", "=", "sorted", "(", "z", ".", "items", "(", ")", ",", "key", "=", "_operator", ".", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "return", "[", "word", "for", "(", "word", ",", "prob", ")", "in", "words", "]", "if", "output_type", "!=", "'topic_probabilities'", ":", "ret", "=", "ret", ".", "groupby", "(", "'topic'", ",", "{", "'word'", ":", "_turicreate", ".", "aggregate", ".", "CONCAT", "(", "'word'", ",", "'score'", ")", "}", ")", "words", "=", "ret", ".", "sort", "(", "'topic'", ")", "[", "'word'", "]", ".", "apply", "(", "sort_wordlist_by_prob", ")", "ret", "=", "_SFrame", "(", "{", "'words'", ":", "words", "}", ")", "return", "ret" ]
Get the words associated with a given topic. The score column is the probability of choosing that word given that you have chosen a particular topic. Parameters ---------- topic_ids : list of int, optional The topics to retrieve words. Topic ids are zero-based. Throws an error if greater than or equal to m['num_topics'], or if the requested topic name is not present. num_words : int, optional The number of words to show. cdf_cutoff : float, optional Allows one to only show the most probable words whose cumulative probability is below this cutoff. For example if there exist three words where .. math:: p(word_1 | topic_k) = .1 p(word_2 | topic_k) = .2 p(word_3 | topic_k) = .05 then setting :math:`cdf_{cutoff}=.3` would return only :math:`word_1` and :math:`word_2` since :math:`p(word_1 | topic_k) + p(word_2 | topic_k) <= cdf_{cutoff}` output_type : {'topic_probabilities' | 'topic_words'}, optional Determine the type of desired output. See below. Returns ------- out : SFrame If output_type is 'topic_probabilities', then the returned value is an SFrame with a column of words ranked by a column of scores for each topic. Otherwise, the returned value is a SArray where each element is a list of the most probable words for each topic. Examples -------- Get the highest ranked words for all topics. >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs, num_iterations=50) >>> m.get_topics() +-------+----------+-----------------+ | topic | word | score | +-------+----------+-----------------+ | 0 | cell | 0.028974400831 | | 0 | input | 0.0259470208503 | | 0 | image | 0.0215721599763 | | 0 | visual | 0.0173635081992 | | 0 | object | 0.0172447874156 | | 1 | function | 0.0482834508265 | | 1 | input | 0.0456270024091 | | 1 | point | 0.0302662839454 | | 1 | result | 0.0239474934631 | | 1 | problem | 0.0231750116011 | | ... | ... | ... | +-------+----------+-----------------+ Get the highest ranked words for topics 0 and 1 and show 15 words per topic. >>> m.get_topics([0, 1], num_words=15) +-------+----------+------------------+ | topic | word | score | +-------+----------+------------------+ | 0 | cell | 0.028974400831 | | 0 | input | 0.0259470208503 | | 0 | image | 0.0215721599763 | | 0 | visual | 0.0173635081992 | | 0 | object | 0.0172447874156 | | 0 | response | 0.0139740298286 | | 0 | layer | 0.0122585145062 | | 0 | features | 0.0115343177265 | | 0 | feature | 0.0103530459301 | | 0 | spatial | 0.00823387994361 | | ... | ... | ... | +-------+----------+------------------+ If one wants to instead just get the top words per topic, one may change the format of the output as follows. >>> topics = m.get_topics(output_type='topic_words') dtype: list Rows: 10 [['cell', 'image', 'input', 'object', 'visual'], ['algorithm', 'data', 'learning', 'method', 'set'], ['function', 'input', 'point', 'problem', 'result'], ['model', 'output', 'pattern', 'set', 'unit'], ['action', 'learning', 'net', 'problem', 'system'], ['error', 'function', 'network', 'parameter', 'weight'], ['information', 'level', 'neural', 'threshold', 'weight'], ['control', 'field', 'model', 'network', 'neuron'], ['hidden', 'layer', 'system', 'training', 'vector'], ['component', 'distribution', 'local', 'model', 'optimal']]
[ "Get", "the", "words", "associated", "with", "a", "given", "topic", ".", "The", "score", "column", "is", "the", "probability", "of", "choosing", "that", "word", "given", "that", "you", "have", "chosen", "a", "particular", "topic", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L430-L568
train
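The `cdf_cutoff` behavior described in the docstring above amounts to keeping the highest-probability words while their cumulative probability stays within the cutoff. A standalone sketch using the docstring's own numbers:

word_probs = {'word_1': 0.1, 'word_2': 0.2, 'word_3': 0.05}
cdf_cutoff = 0.3

# Rank words by probability, then keep them while the running total
# stays at or below the cutoff.
kept, cumulative = [], 0.0
for word, prob in sorted(word_probs.items(), key=lambda kv: kv[1], reverse=True):
    if cumulative + prob > cdf_cutoff:
        break
    cumulative += prob
    kept.append(word)

print(kept)   # ['word_2', 'word_1'], matching the docstring's example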
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
TopicModel.predict
def predict(self, dataset, output_type='assignment', num_burnin=None): """ Use the model to predict topics for each document. The provided `dataset` should be an SArray object where each element is a dict representing a single document in bag-of-words format, where keys are words and values are their corresponding counts. If `dataset` is an SFrame, then it must contain a single column of dict type. The current implementation will make inferences about each document given its estimates of the topics learned when creating the model. This is done via Gibbs sampling. Parameters ---------- dataset : SArray, SFrame of type dict A set of documents to use for making predictions. output_type : str, optional The type of output desired. This can either be - assignment: the returned values are integers in [0, num_topics) - probability: each returned prediction is a vector with length num_topics, where element k represents the probability that document belongs to topic k. num_burnin : int, optional The number of iterations of Gibbs sampling to perform when inferring the topics for documents at prediction time. If provided this will override the burnin value set during training. Returns ------- out : SArray See Also -------- evaluate Examples -------- Make predictions about which topic each document belongs to. >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs) >>> pred = m.predict(docs) If one is interested in the probability of each topic >>> pred = m.predict(docs, output_type='probability') Notes ----- For each unique word w in a document d, we sample an assignment to topic k with probability proportional to .. math:: p(z_{dw} = k) \propto (n_{d,k} + \\alpha) * \Phi_{w,k} where - :math:`W` is the size of the vocabulary, - :math:`n_{d,k}` is the number of other times we have assigned a word in document d to topic :math:`k`, - :math:`\Phi_{w,k}` is the probability under the model of choosing word :math:`w` given the word is of topic :math:`k`. This is the matrix returned by calling `m['topics']`. This represents a collapsed Gibbs sampler for the document assignments while we keep the topics learned during training fixed. This process is done in parallel across all documents, five times per document. """ dataset = _check_input(dataset) if num_burnin is None: num_burnin = self.num_burnin opts = {'model': self.__proxy__, 'data': dataset, 'num_burnin': num_burnin} response = _turicreate.extensions._text.topicmodel_predict(opts) preds = response['predictions'] # Get most likely topic if probabilities are not requested if output_type not in ['probability', 'probabilities', 'prob']: # equivalent to numpy.argmax(x) preds = preds.apply(lambda x: max(_izip(x, _xrange(len(x))))[1]) return preds
python
def predict(self, dataset, output_type='assignment', num_burnin=None): """ Use the model to predict topics for each document. The provided `dataset` should be an SArray object where each element is a dict representing a single document in bag-of-words format, where keys are words and values are their corresponding counts. If `dataset` is an SFrame, then it must contain a single column of dict type. The current implementation will make inferences about each document given its estimates of the topics learned when creating the model. This is done via Gibbs sampling. Parameters ---------- dataset : SArray, SFrame of type dict A set of documents to use for making predictions. output_type : str, optional The type of output desired. This can either be - assignment: the returned values are integers in [0, num_topics) - probability: each returned prediction is a vector with length num_topics, where element k represents the probability that document belongs to topic k. num_burnin : int, optional The number of iterations of Gibbs sampling to perform when inferring the topics for documents at prediction time. If provided this will override the burnin value set during training. Returns ------- out : SArray See Also -------- evaluate Examples -------- Make predictions about which topic each document belongs to. >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs) >>> pred = m.predict(docs) If one is interested in the probability of each topic >>> pred = m.predict(docs, output_type='probability') Notes ----- For each unique word w in a document d, we sample an assignment to topic k with probability proportional to .. math:: p(z_{dw} = k) \propto (n_{d,k} + \\alpha) * \Phi_{w,k} where - :math:`W` is the size of the vocabulary, - :math:`n_{d,k}` is the number of other times we have assigned a word in document d to topic :math:`k`, - :math:`\Phi_{w,k}` is the probability under the model of choosing word :math:`w` given the word is of topic :math:`k`. This is the matrix returned by calling `m['topics']`. This represents a collapsed Gibbs sampler for the document assignments while we keep the topics learned during training fixed. This process is done in parallel across all documents, five times per document. """ dataset = _check_input(dataset) if num_burnin is None: num_burnin = self.num_burnin opts = {'model': self.__proxy__, 'data': dataset, 'num_burnin': num_burnin} response = _turicreate.extensions._text.topicmodel_predict(opts) preds = response['predictions'] # Get most likely topic if probabilities are not requested if output_type not in ['probability', 'probabilities', 'prob']: # equivalent to numpy.argmax(x) preds = preds.apply(lambda x: max(_izip(x, _xrange(len(x))))[1]) return preds
[ "def", "predict", "(", "self", ",", "dataset", ",", "output_type", "=", "'assignment'", ",", "num_burnin", "=", "None", ")", ":", "dataset", "=", "_check_input", "(", "dataset", ")", "if", "num_burnin", "is", "None", ":", "num_burnin", "=", "self", ".", "num_burnin", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'data'", ":", "dataset", ",", "'num_burnin'", ":", "num_burnin", "}", "response", "=", "_turicreate", ".", "extensions", ".", "_text", ".", "topicmodel_predict", "(", "opts", ")", "preds", "=", "response", "[", "'predictions'", "]", "# Get most likely topic if probabilities are not requested", "if", "output_type", "not", "in", "[", "'probability'", ",", "'probabilities'", ",", "'prob'", "]", ":", "# equivalent to numpy.argmax(x)", "preds", "=", "preds", ".", "apply", "(", "lambda", "x", ":", "max", "(", "_izip", "(", "x", ",", "_xrange", "(", "len", "(", "x", ")", ")", ")", ")", "[", "1", "]", ")", "return", "preds" ]
Use the model to predict topics for each document. The provided `dataset` should be an SArray object where each element is a dict representing a single document in bag-of-words format, where keys are words and values are their corresponding counts. If `dataset` is an SFrame, then it must contain a single column of dict type. The current implementation will make inferences about each document given its estimates of the topics learned when creating the model. This is done via Gibbs sampling. Parameters ---------- dataset : SArray, SFrame of type dict A set of documents to use for making predictions. output_type : str, optional The type of output desired. This can either be - assignment: the returned values are integers in [0, num_topics) - probability: each returned prediction is a vector with length num_topics, where element k represents the probability that document belongs to topic k. num_burnin : int, optional The number of iterations of Gibbs sampling to perform when inferring the topics for documents at prediction time. If provided this will override the burnin value set during training. Returns ------- out : SArray See Also -------- evaluate Examples -------- Make predictions about which topic each document belongs to. >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> m = turicreate.topic_model.create(docs) >>> pred = m.predict(docs) If one is interested in the probability of each topic >>> pred = m.predict(docs, output_type='probability') Notes ----- For each unique word w in a document d, we sample an assignment to topic k with probability proportional to .. math:: p(z_{dw} = k) \propto (n_{d,k} + \\alpha) * \Phi_{w,k} where - :math:`W` is the size of the vocabulary, - :math:`n_{d,k}` is the number of other times we have assigned a word in document d to topic :math:`k`, - :math:`\Phi_{w,k}` is the probability under the model of choosing word :math:`w` given the word is of topic :math:`k`. This is the matrix returned by calling `m['topics']`. This represents a collapsed Gibbs sampler for the document assignments while we keep the topics learned during training fixed. This process is done in parallel across all documents, five times per document.
[ "Use", "the", "model", "to", "predict", "topics", "for", "each", "document", ".", "The", "provided", "dataset", "should", "be", "an", "SArray", "object", "where", "each", "element", "is", "a", "dict", "representing", "a", "single", "document", "in", "bag", "-", "of", "-", "words", "format", "where", "keys", "are", "words", "and", "values", "are", "their", "corresponding", "counts", ".", "If", "dataset", "is", "an", "SFrame", "then", "it", "must", "contain", "a", "single", "column", "of", "dict", "type", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L570-L660
train
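The assignment branch at the end of `predict` reduces each probability vector to its most likely topic with a pure-Python argmax; the idiom is worth unpacking on its own with a hypothetical probability vector:

probs = [0.1, 0.6, 0.3]   # hypothetical per-topic probabilities for one document

# Pairs compare value-first, so the max pair carries the index of the
# largest probability -- equivalent to numpy.argmax, except that ties
# resolve to the highest index rather than the lowest.
assignment = max(zip(probs, range(len(probs))))[1]
print(assignment)   # 1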
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
TopicModel.evaluate
def evaluate(self, train_data, test_data=None, metric='perplexity'): """ Estimate the model's ability to predict new data. Imagine you have a corpus of books. One common approach to evaluating topic models is to train on the first half of all of the books and see how well the model predicts the second half of each book. This method returns a metric called perplexity, which is related to the likelihood of observing these words under the given model. See :py:func:`~turicreate.topic_model.perplexity` for more details. The provided `train_data` and `test_data` must have the same length, i.e., both data sets must have the same number of documents; the model will use train_data to estimate which topic the document belongs to, and this is used to estimate the model's performance at predicting the unseen words in the test data. See :py:func:`~turicreate.topic_model.TopicModel.predict` for details on how these predictions are made, and see :py:func:`~turicreate.text_analytics.random_split` for a helper function that can be used for making train/test splits. Parameters ---------- train_data : SArray or SFrame A set of documents to predict topics for. test_data : SArray or SFrame, optional A set of documents to evaluate performance on. By default this will be set to the same as train_data. metric : str The chosen metric to use for evaluating the topic model. Currently only 'perplexity' is supported. Returns ------- out : dict The set of estimated evaluation metrics. See Also -------- predict, turicreate.toolkits.text_analytics.random_split Examples -------- >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> train_data, test_data = turicreate.text_analytics.random_split(docs) >>> m = turicreate.topic_model.create(train_data) >>> m.evaluate(train_data, test_data) {'perplexity': 2467.530370396021} """ train_data = _check_input(train_data) if test_data is None: test_data = train_data else: test_data = _check_input(test_data) predictions = self.predict(train_data, output_type='probability') topics = self.topics ret = {} ret['perplexity'] = perplexity(test_data, predictions, topics['topic_probabilities'], topics['vocabulary']) return ret
python
def evaluate(self, train_data, test_data=None, metric='perplexity'): """ Estimate the model's ability to predict new data. Imagine you have a corpus of books. One common approach to evaluating topic models is to train on the first half of all of the books and see how well the model predicts the second half of each book. This method returns a metric called perplexity, which is related to the likelihood of observing these words under the given model. See :py:func:`~turicreate.topic_model.perplexity` for more details. The provided `train_data` and `test_data` must have the same length, i.e., both data sets must have the same number of documents; the model will use train_data to estimate which topic the document belongs to, and this is used to estimate the model's performance at predicting the unseen words in the test data. See :py:func:`~turicreate.topic_model.TopicModel.predict` for details on how these predictions are made, and see :py:func:`~turicreate.text_analytics.random_split` for a helper function that can be used for making train/test splits. Parameters ---------- train_data : SArray or SFrame A set of documents to predict topics for. test_data : SArray or SFrame, optional A set of documents to evaluate performance on. By default this will be set to the same as train_data. metric : str The chosen metric to use for evaluating the topic model. Currently only 'perplexity' is supported. Returns ------- out : dict The set of estimated evaluation metrics. See Also -------- predict, turicreate.toolkits.text_analytics.random_split Examples -------- >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> train_data, test_data = turicreate.text_analytics.random_split(docs) >>> m = turicreate.topic_model.create(train_data) >>> m.evaluate(train_data, test_data) {'perplexity': 2467.530370396021} """ train_data = _check_input(train_data) if test_data is None: test_data = train_data else: test_data = _check_input(test_data) predictions = self.predict(train_data, output_type='probability') topics = self.topics ret = {} ret['perplexity'] = perplexity(test_data, predictions, topics['topic_probabilities'], topics['vocabulary']) return ret
[ "def", "evaluate", "(", "self", ",", "train_data", ",", "test_data", "=", "None", ",", "metric", "=", "'perplexity'", ")", ":", "train_data", "=", "_check_input", "(", "train_data", ")", "if", "test_data", "is", "None", ":", "test_data", "=", "train_data", "else", ":", "test_data", "=", "_check_input", "(", "test_data", ")", "predictions", "=", "self", ".", "predict", "(", "train_data", ",", "output_type", "=", "'probability'", ")", "topics", "=", "self", ".", "topics", "ret", "=", "{", "}", "ret", "[", "'perplexity'", "]", "=", "perplexity", "(", "test_data", ",", "predictions", ",", "topics", "[", "'topic_probabilities'", "]", ",", "topics", "[", "'vocabulary'", "]", ")", "return", "ret" ]
Estimate the model's ability to predict new data. Imagine you have a corpus of books. One common approach to evaluating topic models is to train on the first half of all of the books and see how well the model predicts the second half of each book. This method returns a metric called perplexity, which is related to the likelihood of observing these words under the given model. See :py:func:`~turicreate.topic_model.perplexity` for more details. The provided `train_data` and `test_data` must have the same length, i.e., both data sets must have the same number of documents; the model will use train_data to estimate which topic the document belongs to, and this is used to estimate the model's performance at predicting the unseen words in the test data. See :py:func:`~turicreate.topic_model.TopicModel.predict` for details on how these predictions are made, and see :py:func:`~turicreate.text_analytics.random_split` for a helper function that can be used for making train/test splits. Parameters ---------- train_data : SArray or SFrame A set of documents to predict topics for. test_data : SArray or SFrame, optional A set of documents to evaluate performance on. By default this will be set to the same as train_data. metric : str The chosen metric to use for evaluating the topic model. Currently only 'perplexity' is supported. Returns ------- out : dict The set of estimated evaluation metrics. See Also -------- predict, turicreate.toolkits.text_analytics.random_split Examples -------- >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> train_data, test_data = turicreate.text_analytics.random_split(docs) >>> m = turicreate.topic_model.create(train_data) >>> m.evaluate(train_data, test_data) {'perplexity': 2467.530370396021}
[ "Estimate", "the", "model", "s", "ability", "to", "predict", "new", "data", ".", "Imagine", "you", "have", "a", "corpus", "of", "books", ".", "One", "common", "approach", "to", "evaluating", "topic", "models", "is", "to", "train", "on", "the", "first", "half", "of", "all", "of", "the", "books", "and", "see", "how", "well", "the", "model", "predicts", "the", "second", "half", "of", "each", "book", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L663-L731
train
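Since `evaluate` requires train_data and test_data with the same number of documents, its natural companion is a word-level split. A sketch of the full flow; the dataset URL comes from the docstring above, while the split fraction is an invented example value:

import turicreate as tc

docs = tc.SArray('https://static.turi.com/datasets/nips-text')

# random_split keeps one row per document and divides each document's
# words between the two halves, so the row counts line up.
train, test = tc.text_analytics.random_split(docs, 0.8)

m = tc.topic_model.create(train)
print(m.evaluate(train, test)['perplexity'])   # lower is better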
apple/turicreate
src/unity/python/turicreate/toolkits/object_detector/_detection.py
bbox_to_ybox
def bbox_to_ybox(bbox): """Convert from corner bounding box to center/shape""" return [ (bbox[1] + bbox[3]) / 2, (bbox[0] + bbox[2]) / 2, (bbox[3] - bbox[1]), (bbox[2] - bbox[0]), ]
python
def bbox_to_ybox(bbox): """Convert from corner bounding box to center/shape""" return [ (bbox[1] + bbox[3]) / 2, (bbox[0] + bbox[2]) / 2, (bbox[3] - bbox[1]), (bbox[2] - bbox[0]), ]
[ "def", "bbox_to_ybox", "(", "bbox", ")", ":", "return", "[", "(", "bbox", "[", "1", "]", "+", "bbox", "[", "3", "]", ")", "/", "2", ",", "(", "bbox", "[", "0", "]", "+", "bbox", "[", "2", "]", ")", "/", "2", ",", "(", "bbox", "[", "3", "]", "-", "bbox", "[", "1", "]", ")", ",", "(", "bbox", "[", "2", "]", "-", "bbox", "[", "0", "]", ")", ",", "]" ]
Convert from corner bounding box to center/shape
[ "Convert", "from", "corner", "bounding", "box", "to", "center", "/", "shape" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/_detection.py#L17-L24
train
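A quick worked example of the conversion above, assuming the corner box is ordered [y_min, x_min, y_max, x_max] -- an ordering inferred from the arithmetic, not stated in the source:

def bbox_to_ybox(bbox):
    """Convert from corner bounding box to center/shape"""
    return [
        (bbox[1] + bbox[3]) / 2,
        (bbox[0] + bbox[2]) / 2,
        (bbox[3] - bbox[1]),
        (bbox[2] - bbox[0]),
    ]

# [y_min, x_min, y_max, x_max] -> [x_center, y_center, width, height]
print(bbox_to_ybox([10.0, 20.0, 30.0, 60.0]))   # [40.0, 20.0, 40.0, 20.0]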
apple/turicreate
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
_raise_error_if_not_drawing_classifier_input_sframe
def _raise_error_if_not_drawing_classifier_input_sframe( dataset, feature, target): """ Performs some sanity checks on the SFrame provided as input to `turicreate.drawing_classifier.create` and raises a ToolkitError if something in the dataset is missing or wrong. """ from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe _raise_error_if_not_sframe(dataset) if feature not in dataset.column_names(): raise _ToolkitError("Feature column '%s' does not exist" % feature) if target not in dataset.column_names(): raise _ToolkitError("Target column '%s' does not exist" % target) if (dataset[feature].dtype != _tc.Image and dataset[feature].dtype != list): raise _ToolkitError("Feature column must contain images" + " or stroke-based drawings encoded as lists of strokes" + " where each stroke is a list of points and" + " each point is stored as a dictionary") if dataset[target].dtype != int and dataset[target].dtype != str: raise _ToolkitError("Target column contains " + str(dataset[target].dtype) + " but it must contain strings or integers to represent" + " labels for drawings.") if len(dataset) == 0: raise _ToolkitError("Input Dataset is empty!")
python
def _raise_error_if_not_drawing_classifier_input_sframe( dataset, feature, target): """ Performs some sanity checks on the SFrame provided as input to `turicreate.drawing_classifier.create` and raises a ToolkitError if something in the dataset is missing or wrong. """ from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe _raise_error_if_not_sframe(dataset) if feature not in dataset.column_names(): raise _ToolkitError("Feature column '%s' does not exist" % feature) if target not in dataset.column_names(): raise _ToolkitError("Target column '%s' does not exist" % target) if (dataset[feature].dtype != _tc.Image and dataset[feature].dtype != list): raise _ToolkitError("Feature column must contain images" + " or stroke-based drawings encoded as lists of strokes" + " where each stroke is a list of points and" + " each point is stored as a dictionary") if dataset[target].dtype != int and dataset[target].dtype != str: raise _ToolkitError("Target column contains " + str(dataset[target].dtype) + " but it must contain strings or integers to represent" + " labels for drawings.") if len(dataset) == 0: raise _ToolkitError("Input Dataset is empty!")
[ "def", "_raise_error_if_not_drawing_classifier_input_sframe", "(", "dataset", ",", "feature", ",", "target", ")", ":", "from", "turicreate", ".", "toolkits", ".", "_internal_utils", "import", "_raise_error_if_not_sframe", "_raise_error_if_not_sframe", "(", "dataset", ")", "if", "feature", "not", "in", "dataset", ".", "column_names", "(", ")", ":", "raise", "_ToolkitError", "(", "\"Feature column '%s' does not exist\"", "%", "feature", ")", "if", "target", "not", "in", "dataset", ".", "column_names", "(", ")", ":", "raise", "_ToolkitError", "(", "\"Target column '%s' does not exist\"", "%", "target", ")", "if", "(", "dataset", "[", "feature", "]", ".", "dtype", "!=", "_tc", ".", "Image", "and", "dataset", "[", "feature", "]", ".", "dtype", "!=", "list", ")", ":", "raise", "_ToolkitError", "(", "\"Feature column must contain images\"", "+", "\" or stroke-based drawings encoded as lists of strokes\"", "+", "\" where each stroke is a list of points and\"", "+", "\" each point is stored as a dictionary\"", ")", "if", "dataset", "[", "target", "]", ".", "dtype", "!=", "int", "and", "dataset", "[", "target", "]", ".", "dtype", "!=", "str", ":", "raise", "_ToolkitError", "(", "\"Target column contains \"", "+", "str", "(", "dataset", "[", "target", "]", ".", "dtype", ")", "+", "\" but it must contain strings or integers to represent\"", "+", "\" labels for drawings.\"", ")", "if", "len", "(", "dataset", ")", "==", "0", ":", "raise", "_ToolkitError", "(", "\"Input Dataset is empty!\"", ")" ]
Performs some sanity checks on the SFrame provided as input to `turicreate.drawing_classifier.create` and raises a ToolkitError if something in the dataset is missing or wrong.
[ "Performs", "some", "sanity", "checks", "on", "the", "SFrame", "provided", "as", "input", "to", "turicreate", ".", "drawing_classifier", ".", "create", "and", "raises", "a", "ToolkitError", "if", "something", "in", "the", "dataset", "is", "missing", "or", "wrong", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L22-L45
train
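For illustration, here is a minimal sketch of how this validation helper behaves. The column names and data are hypothetical, and the import path is an assumption based on this record's `path` field (the helper is module-private):

```python
import turicreate as tc
# Assumed import path, inferred from this record's source file location.
from turicreate.toolkits.drawing_classifier.drawing_classifier import (
    _raise_error_if_not_drawing_classifier_input_sframe)

# One stroke-based drawing: a list of strokes, each stroke a list of
# {"x", "y"} point dictionaries, matching the format the check expects.
drawing = [[{"x": 0.0, "y": 0.0}, {"x": 0.5, "y": 0.7}]]
sf = tc.SFrame({"drawing": [drawing, drawing], "label": ["cat", "dog"]})

# A well-formed SFrame passes silently.
_raise_error_if_not_drawing_classifier_input_sframe(sf, "drawing", "label")

# A missing feature column raises a ToolkitError.
try:
    _raise_error_if_not_drawing_classifier_input_sframe(sf, "sketch", "label")
except Exception as err:
    print(err)  # Feature column 'sketch' does not exist
```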
apple/turicreate
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
create
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Create a :class:`DrawingClassifier` model.

    Parameters
    ----------
    input_dataset : SFrame
        Input data. The columns named by the ``feature`` and ``target``
        parameters will be extracted for training the drawing classifier.

    target : string
        Name of the column containing the target variable. The values in this
        column must be of string or integer type.

    feature : string, optional
        Name of the column containing the input drawings. 'None' (the default)
        indicates the column in `input_dataset` named "drawing" should be used
        as the feature.
        The feature column can contain both bitmap-based drawings as well as
        stroke-based drawings. Bitmap-based drawing input can be a grayscale
        tc.Image of any size.
        Stroke-based drawing input must be in the following format:
        Every drawing must be represented by a list of strokes, where each
        stroke must be a list of points in the order in which they were drawn
        on the canvas.
        Each point must be a dictionary with two keys, "x" and "y", and their
        respective values must be numerical, i.e. either integer or float.

    validation_set : SFrame, optional
        A dataset for monitoring the model's generalization performance.
        The format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.

    warm_start : string, optional
        A string to denote which pretrained model to use. Set to "auto"
        by default which uses a model trained on 245 of the 345 classes in the
        Quick, Draw! dataset. To disable warm start, pass in None to this
        argument. Here is a list of all the pretrained models that
        can be passed in as this argument:
        "auto": Uses quickdraw_245_v0
        "quickdraw_245_v0": Uses a model trained on 245 of the 345 classes in
        the Quick, Draw! dataset.
        None: No Warm Start

    batch_size : int, optional
        The number of drawings per training step. If not set, a default
        value of 256 will be used. If you are getting memory errors,
        try decreasing this value. If you have a powerful computer, increasing
        this value may improve performance.

    max_iterations : int, optional
        The maximum number of allowed passes through the data. More passes
        over the data can result in a more accurately trained model.

    verbose : bool, optional
        If True, print progress updates and model details.

    Returns
    -------
    out : DrawingClassifier
        A trained :class:`DrawingClassifier` model.

    See Also
    --------
    DrawingClassifier

    Examples
    --------
    .. sourcecode:: python

        # Train a drawing classifier model
        >>> model = turicreate.drawing_classifier.create(data)

        # Make predictions on the training set and add them as a column to the SFrame
        >>> data['predictions'] = model.predict(data)
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    from .._mxnet import _mxnet_utils

    start_time = _time.time()
    accepted_values_for_warm_start = ["auto", "quickdraw_245_v0", None]

    # @TODO: Should be able to automatically choose number of iterations
    # based on data size: Tracked in Github Issue #1576

    # automatically infer feature column
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)

    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)

    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")

    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset

    iteration = 0

    classes = dataset[target].unique()
    classes = sorted(classes)
    class_to_index = {name: index for index, name in enumerate(classes)}

    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            if dataset.num_rows() >= 100:
                if verbose:
                    print(
                        "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                        "          You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(
                    TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)

    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)

    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy',
                         'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)

    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes=len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)

    if warm_start is not None:
        if type(warm_start) is not str:
            raise TypeError("'warm_start' must be a string or None. "
                + "'warm_start' can take in the following values: "
                + str(accepted_values_for_warm_start))
        if warm_start not in accepted_values_for_warm_start:
            raise _ToolkitError("Unrecognized value for 'warm_start': "
                + warm_start + ". 'warm_start' can take in the following "
                + "values: " + str(accepted_values_for_warm_start))
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        model.load_params(pretrained_model_params_path, ctx=ctx,
                          allow_missing=True)

    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')

    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()

    def get_data_and_label_from_batch(batch):
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data = _mx.nd.slice_axis(batch.data[0], axis=0,
                                            begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0,
                                             begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data,
                ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label,
                ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0],
                ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0],
                ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label

    def compute_accuracy(accuracy_metric, batch_loader):
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None:
                    continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)

    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            # Inside training scope
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()
        # Make one step of parameter update. Trainer needs to know the
        # batch size of data to normalize the gradient by 1/batch_size.
        trainer.step(train_batch.data[0].shape[0])
        # calculate training metrics
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time

        if train_batch.iteration > iteration:
            # Compute training accuracy
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            # Compute validation accuracy
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = {"iteration": iteration,
                          "train_loss": float(train_loss),
                          "train_accuracy": train_accuracy.get()[1],
                          "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)

    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],  # nan if validation_set=None
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
python
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Create a :class:`DrawingClassifier` model.

    Parameters
    ----------
    input_dataset : SFrame
        Input data. The columns named by the ``feature`` and ``target``
        parameters will be extracted for training the drawing classifier.

    target : string
        Name of the column containing the target variable. The values in this
        column must be of string or integer type.

    feature : string, optional
        Name of the column containing the input drawings. 'None' (the default)
        indicates the column in `input_dataset` named "drawing" should be used
        as the feature.
        The feature column can contain both bitmap-based drawings as well as
        stroke-based drawings. Bitmap-based drawing input can be a grayscale
        tc.Image of any size.
        Stroke-based drawing input must be in the following format:
        Every drawing must be represented by a list of strokes, where each
        stroke must be a list of points in the order in which they were drawn
        on the canvas.
        Each point must be a dictionary with two keys, "x" and "y", and their
        respective values must be numerical, i.e. either integer or float.

    validation_set : SFrame, optional
        A dataset for monitoring the model's generalization performance.
        The format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.

    warm_start : string, optional
        A string to denote which pretrained model to use. Set to "auto"
        by default which uses a model trained on 245 of the 345 classes in the
        Quick, Draw! dataset. To disable warm start, pass in None to this
        argument. Here is a list of all the pretrained models that
        can be passed in as this argument:
        "auto": Uses quickdraw_245_v0
        "quickdraw_245_v0": Uses a model trained on 245 of the 345 classes in
        the Quick, Draw! dataset.
        None: No Warm Start

    batch_size : int, optional
        The number of drawings per training step. If not set, a default
        value of 256 will be used. If you are getting memory errors,
        try decreasing this value. If you have a powerful computer, increasing
        this value may improve performance.

    max_iterations : int, optional
        The maximum number of allowed passes through the data. More passes
        over the data can result in a more accurately trained model.

    verbose : bool, optional
        If True, print progress updates and model details.

    Returns
    -------
    out : DrawingClassifier
        A trained :class:`DrawingClassifier` model.

    See Also
    --------
    DrawingClassifier

    Examples
    --------
    .. sourcecode:: python

        # Train a drawing classifier model
        >>> model = turicreate.drawing_classifier.create(data)

        # Make predictions on the training set and add them as a column to the SFrame
        >>> data['predictions'] = model.predict(data)
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    from .._mxnet import _mxnet_utils

    start_time = _time.time()
    accepted_values_for_warm_start = ["auto", "quickdraw_245_v0", None]

    # @TODO: Should be able to automatically choose number of iterations
    # based on data size: Tracked in Github Issue #1576

    # automatically infer feature column
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)

    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)

    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")

    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset

    iteration = 0

    classes = dataset[target].unique()
    classes = sorted(classes)
    class_to_index = {name: index for index, name in enumerate(classes)}

    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            if dataset.num_rows() >= 100:
                if verbose:
                    print(
                        "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                        "          You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(
                    TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)

    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=1)

    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy',
                         'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)

    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes=len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)

    if warm_start is not None:
        if type(warm_start) is not str:
            raise TypeError("'warm_start' must be a string or None. "
                + "'warm_start' can take in the following values: "
                + str(accepted_values_for_warm_start))
        if warm_start not in accepted_values_for_warm_start:
            raise _ToolkitError("Unrecognized value for 'warm_start': "
                + warm_start + ". 'warm_start' can take in the following "
                + "values: " + str(accepted_values_for_warm_start))
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        model.load_params(pretrained_model_params_path, ctx=ctx,
                          allow_missing=True)

    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')

    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()

    def get_data_and_label_from_batch(batch):
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data = _mx.nd.slice_axis(batch.data[0], axis=0,
                                            begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0,
                                             begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data,
                ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label,
                ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0],
                ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0],
                ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label

    def compute_accuracy(accuracy_metric, batch_loader):
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None:
                    continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)

    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            # Inside training scope
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()
        # Make one step of parameter update. Trainer needs to know the
        # batch size of data to normalize the gradient by 1/batch_size.
        trainer.step(train_batch.data[0].shape[0])
        # calculate training metrics
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time

        if train_batch.iteration > iteration:
            # Compute training accuracy
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            # Compute validation accuracy
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = {"iteration": iteration,
                          "train_loss": float(train_loss),
                          "train_accuracy": train_accuracy.get()[1],
                          "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)

    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],  # nan if validation_set=None
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
[ "def", "create", "(", "input_dataset", ",", "target", ",", "feature", "=", "None", ",", "validation_set", "=", "'auto'", ",", "warm_start", "=", "'auto'", ",", "batch_size", "=", "256", ",", "max_iterations", "=", "100", ",", "verbose", "=", "True", ")", ":", "import", "mxnet", "as", "_mx", "from", "mxnet", "import", "autograd", "as", "_autograd", "from", ".", "_model_architecture", "import", "Model", "as", "_Model", "from", ".", "_sframe_loader", "import", "SFrameClassifierIter", "as", "_SFrameClassifierIter", "from", ".", ".", "_mxnet", "import", "_mxnet_utils", "start_time", "=", "_time", ".", "time", "(", ")", "accepted_values_for_warm_start", "=", "[", "\"auto\"", ",", "\"quickdraw_245_v0\"", ",", "None", "]", "# @TODO: Should be able to automatically choose number of iterations", "# based on data size: Tracked in Github Issue #1576", "# automatically infer feature column", "if", "feature", "is", "None", ":", "feature", "=", "_tkutl", ".", "_find_only_drawing_column", "(", "input_dataset", ")", "_raise_error_if_not_drawing_classifier_input_sframe", "(", "input_dataset", ",", "feature", ",", "target", ")", "if", "batch_size", "is", "not", "None", "and", "not", "isinstance", "(", "batch_size", ",", "int", ")", ":", "raise", "TypeError", "(", "\"'batch_size' must be an integer >= 1\"", ")", "if", "batch_size", "is", "not", "None", "and", "batch_size", "<", "1", ":", "raise", "ValueError", "(", "\"'batch_size' must be >= 1\"", ")", "if", "max_iterations", "is", "not", "None", "and", "not", "isinstance", "(", "max_iterations", ",", "int", ")", ":", "raise", "TypeError", "(", "\"'max_iterations' must be an integer >= 1\"", ")", "if", "max_iterations", "is", "not", "None", "and", "max_iterations", "<", "1", ":", "raise", "ValueError", "(", "\"'max_iterations' must be >= 1\"", ")", "is_stroke_input", "=", "(", "input_dataset", "[", "feature", "]", ".", "dtype", "!=", "_tc", ".", "Image", ")", "dataset", "=", "_extensions", ".", "_drawing_classifier_prepare_data", "(", "input_dataset", ",", "feature", ")", "if", "is_stroke_input", "else", "input_dataset", "iteration", "=", "0", "classes", "=", "dataset", "[", "target", "]", ".", "unique", "(", ")", "classes", "=", "sorted", "(", "classes", ")", "class_to_index", "=", "{", "name", ":", "index", "for", "index", ",", "name", "in", "enumerate", "(", "classes", ")", "}", "validation_set_corrective_string", "=", "(", "\"'validation_set' parameter must be \"", "+", "\"an SFrame, or None, or must be set to 'auto' for the toolkit to \"", "+", "\"automatically create a validation set.\"", ")", "if", "isinstance", "(", "validation_set", ",", "_tc", ".", "SFrame", ")", ":", "_raise_error_if_not_drawing_classifier_input_sframe", "(", "validation_set", ",", "feature", ",", "target", ")", "is_validation_stroke_input", "=", "(", "validation_set", "[", "feature", "]", ".", "dtype", "!=", "_tc", ".", "Image", ")", "validation_dataset", "=", "_extensions", ".", "_drawing_classifier_prepare_data", "(", "validation_set", ",", "feature", ")", "if", "is_validation_stroke_input", "else", "validation_set", "elif", "isinstance", "(", "validation_set", ",", "str", ")", ":", "if", "validation_set", "==", "'auto'", ":", "if", "dataset", ".", "num_rows", "(", ")", ">=", "100", ":", "if", "verbose", ":", "print", "(", "\"PROGRESS: Creating a validation set from 5 percent of training data. 
This may take a while.\\n\"", "\" You can set ``validation_set=None`` to disable validation tracking.\\n\"", ")", "dataset", ",", "validation_dataset", "=", "dataset", ".", "random_split", "(", "TRAIN_VALIDATION_SPLIT", ",", "exact", "=", "True", ")", "else", ":", "validation_set", "=", "None", "validation_dataset", "=", "_tc", ".", "SFrame", "(", ")", "else", ":", "raise", "_ToolkitError", "(", "\"Unrecognized value for 'validation_set'. \"", "+", "validation_set_corrective_string", ")", "elif", "validation_set", "is", "None", ":", "validation_dataset", "=", "_tc", ".", "SFrame", "(", ")", "else", ":", "raise", "TypeError", "(", "\"Unrecognized type for 'validation_set'.\"", "+", "validation_set_corrective_string", ")", "train_loader", "=", "_SFrameClassifierIter", "(", "dataset", ",", "batch_size", ",", "feature_column", "=", "feature", ",", "target_column", "=", "target", ",", "class_to_index", "=", "class_to_index", ",", "load_labels", "=", "True", ",", "shuffle", "=", "True", ",", "iterations", "=", "max_iterations", ")", "train_loader_to_compute_accuracy", "=", "_SFrameClassifierIter", "(", "dataset", ",", "batch_size", ",", "feature_column", "=", "feature", ",", "target_column", "=", "target", ",", "class_to_index", "=", "class_to_index", ",", "load_labels", "=", "True", ",", "shuffle", "=", "True", ",", "iterations", "=", "1", ")", "validation_loader", "=", "_SFrameClassifierIter", "(", "validation_dataset", ",", "batch_size", ",", "feature_column", "=", "feature", ",", "target_column", "=", "target", ",", "class_to_index", "=", "class_to_index", ",", "load_labels", "=", "True", ",", "shuffle", "=", "True", ",", "iterations", "=", "1", ")", "if", "verbose", "and", "iteration", "==", "0", ":", "column_names", "=", "[", "'iteration'", ",", "'train_loss'", ",", "'train_accuracy'", ",", "'time'", "]", "column_titles", "=", "[", "'Iteration'", ",", "'Training Loss'", ",", "'Training Accuracy'", ",", "'Elapsed Time (seconds)'", "]", "if", "validation_set", "is", "not", "None", ":", "column_names", ".", "insert", "(", "3", ",", "'validation_accuracy'", ")", "column_titles", ".", "insert", "(", "3", ",", "'Validation Accuracy'", ")", "table_printer", "=", "_tc", ".", "util", ".", "_ProgressTablePrinter", "(", "column_names", ",", "column_titles", ")", "ctx", "=", "_mxnet_utils", ".", "get_mxnet_context", "(", "max_devices", "=", "batch_size", ")", "model", "=", "_Model", "(", "num_classes", "=", "len", "(", "classes", ")", ",", "prefix", "=", "\"drawing_\"", ")", "model_params", "=", "model", ".", "collect_params", "(", ")", "model_params", ".", "initialize", "(", "_mx", ".", "init", ".", "Xavier", "(", ")", ",", "ctx", "=", "ctx", ")", "if", "warm_start", "is", "not", "None", ":", "if", "type", "(", "warm_start", ")", "is", "not", "str", ":", "raise", "TypeError", "(", "\"'warm_start' must be a string or None. \"", "+", "\"'warm_start' can take in the following values: \"", "+", "str", "(", "accepted_values_for_warm_start", ")", ")", "if", "warm_start", "not", "in", "accepted_values_for_warm_start", ":", "raise", "_ToolkitError", "(", "\"Unrecognized value for 'warm_start': \"", "+", "warm_start", "+", "\". 
'warm_start' can take in the following \"", "+", "\"values: \"", "+", "str", "(", "accepted_values_for_warm_start", ")", ")", "pretrained_model", "=", "_pre_trained_models", ".", "DrawingClassifierPreTrainedModel", "(", "warm_start", ")", "pretrained_model_params_path", "=", "pretrained_model", ".", "get_model_path", "(", ")", "model", ".", "load_params", "(", "pretrained_model_params_path", ",", "ctx", "=", "ctx", ",", "allow_missing", "=", "True", ")", "softmax_cross_entropy", "=", "_mx", ".", "gluon", ".", "loss", ".", "SoftmaxCrossEntropyLoss", "(", ")", "model", ".", "hybridize", "(", ")", "trainer", "=", "_mx", ".", "gluon", ".", "Trainer", "(", "model", ".", "collect_params", "(", ")", ",", "'adam'", ")", "train_accuracy", "=", "_mx", ".", "metric", ".", "Accuracy", "(", ")", "validation_accuracy", "=", "_mx", ".", "metric", ".", "Accuracy", "(", ")", "def", "get_data_and_label_from_batch", "(", "batch", ")", ":", "if", "batch", ".", "pad", "is", "not", "None", ":", "size", "=", "batch_size", "-", "batch", ".", "pad", "sliced_data", "=", "_mx", ".", "nd", ".", "slice_axis", "(", "batch", ".", "data", "[", "0", "]", ",", "axis", "=", "0", ",", "begin", "=", "0", ",", "end", "=", "size", ")", "sliced_label", "=", "_mx", ".", "nd", ".", "slice_axis", "(", "batch", ".", "label", "[", "0", "]", ",", "axis", "=", "0", ",", "begin", "=", "0", ",", "end", "=", "size", ")", "num_devices", "=", "min", "(", "sliced_data", ".", "shape", "[", "0", "]", ",", "len", "(", "ctx", ")", ")", "batch_data", "=", "_mx", ".", "gluon", ".", "utils", ".", "split_and_load", "(", "sliced_data", ",", "ctx_list", "=", "ctx", "[", ":", "num_devices", "]", ",", "even_split", "=", "False", ")", "batch_label", "=", "_mx", ".", "gluon", ".", "utils", ".", "split_and_load", "(", "sliced_label", ",", "ctx_list", "=", "ctx", "[", ":", "num_devices", "]", ",", "even_split", "=", "False", ")", "else", ":", "batch_data", "=", "_mx", ".", "gluon", ".", "utils", ".", "split_and_load", "(", "batch", ".", "data", "[", "0", "]", ",", "ctx_list", "=", "ctx", ",", "batch_axis", "=", "0", ")", "batch_label", "=", "_mx", ".", "gluon", ".", "utils", ".", "split_and_load", "(", "batch", ".", "label", "[", "0", "]", ",", "ctx_list", "=", "ctx", ",", "batch_axis", "=", "0", ")", "return", "batch_data", ",", "batch_label", "def", "compute_accuracy", "(", "accuracy_metric", ",", "batch_loader", ")", ":", "batch_loader", ".", "reset", "(", ")", "accuracy_metric", ".", "reset", "(", ")", "for", "batch", "in", "batch_loader", ":", "batch_data", ",", "batch_label", "=", "get_data_and_label_from_batch", "(", "batch", ")", "outputs", "=", "[", "]", "for", "x", ",", "y", "in", "zip", "(", "batch_data", ",", "batch_label", ")", ":", "if", "x", "is", "None", "or", "y", "is", "None", ":", "continue", "z", "=", "model", "(", "x", ")", "outputs", ".", "append", "(", "z", ")", "accuracy_metric", ".", "update", "(", "batch_label", ",", "outputs", ")", "for", "train_batch", "in", "train_loader", ":", "train_batch_data", ",", "train_batch_label", "=", "get_data_and_label_from_batch", "(", "train_batch", ")", "with", "_autograd", ".", "record", "(", ")", ":", "# Inside training scope", "for", "x", ",", "y", "in", "zip", "(", "train_batch_data", ",", "train_batch_label", ")", ":", "z", "=", "model", "(", "x", ")", "# Computes softmax cross entropy loss.", "loss", "=", "softmax_cross_entropy", "(", "z", ",", "y", ")", "# Backpropagate the error for one iteration.", "loss", ".", "backward", "(", ")", "# Make one step of parameter update. 
Trainer needs to know the", "# batch size of data to normalize the gradient by 1/batch_size.", "trainer", ".", "step", "(", "train_batch", ".", "data", "[", "0", "]", ".", "shape", "[", "0", "]", ")", "# calculate training metrics", "train_loss", "=", "loss", ".", "mean", "(", ")", ".", "asscalar", "(", ")", "train_time", "=", "_time", ".", "time", "(", ")", "-", "start_time", "if", "train_batch", ".", "iteration", ">", "iteration", ":", "# Compute training accuracy", "compute_accuracy", "(", "train_accuracy", ",", "train_loader_to_compute_accuracy", ")", "# Compute validation accuracy", "if", "validation_set", "is", "not", "None", ":", "compute_accuracy", "(", "validation_accuracy", ",", "validation_loader", ")", "iteration", "=", "train_batch", ".", "iteration", "if", "verbose", ":", "kwargs", "=", "{", "\"iteration\"", ":", "iteration", ",", "\"train_loss\"", ":", "float", "(", "train_loss", ")", ",", "\"train_accuracy\"", ":", "train_accuracy", ".", "get", "(", ")", "[", "1", "]", ",", "\"time\"", ":", "train_time", "}", "if", "validation_set", "is", "not", "None", ":", "kwargs", "[", "\"validation_accuracy\"", "]", "=", "validation_accuracy", ".", "get", "(", ")", "[", "1", "]", "table_printer", ".", "print_row", "(", "*", "*", "kwargs", ")", "state", "=", "{", "'_model'", ":", "model", ",", "'_class_to_index'", ":", "class_to_index", ",", "'num_classes'", ":", "len", "(", "classes", ")", ",", "'classes'", ":", "classes", ",", "'input_image_shape'", ":", "(", "1", ",", "BITMAP_WIDTH", ",", "BITMAP_HEIGHT", ")", ",", "'batch_size'", ":", "batch_size", ",", "'training_loss'", ":", "train_loss", ",", "'training_accuracy'", ":", "train_accuracy", ".", "get", "(", ")", "[", "1", "]", ",", "'training_time'", ":", "train_time", ",", "'validation_accuracy'", ":", "validation_accuracy", ".", "get", "(", ")", "[", "1", "]", ",", "# nan if validation_set=None", "'max_iterations'", ":", "max_iterations", ",", "'target'", ":", "target", ",", "'feature'", ":", "feature", ",", "'num_examples'", ":", "len", "(", "input_dataset", ")", "}", "return", "DrawingClassifier", "(", "state", ")" ]
Create a :class:`DrawingClassifier` model. Parameters ---------- input_dataset : SFrame Input data. The columns named by the ``feature`` and ``target`` parameters will be extracted for training the drawing classifier. target : string Name of the column containing the target variable. The values in this column must be of string or integer type. feature : string, optional Name of the column containing the input drawings. 'None' (the default) indicates the column in `input_dataset` named "drawing" should be used as the feature. The feature column can contain both bitmap-based drawings as well as stroke-based drawings. Bitmap-based drawing input can be a grayscale tc.Image of any size. Stroke-based drawing input must be in the following format: Every drawing must be represented by a list of strokes, where each stroke must be a list of points in the order in which they were drawn on the canvas. Each point must be a dictionary with two keys, "x" and "y", and their respective values must be numerical, i.e. either integer or float. validation_set : SFrame, optional A dataset for monitoring the model's generalization performance. The format of this SFrame must be the same as the training set. By default this argument is set to 'auto' and a validation set is automatically sampled and used for progress printing. If validation_set is set to None, then no additional metrics are computed. The default value is 'auto'. warm_start : string, optional A string to denote which pretrained model to use. Set to "auto" by default which uses a model trained on 245 of the 345 classes in the Quick, Draw! dataset. To disable warm start, pass in None to this argument. Here is a list of all the pretrained models that can be passed in as this argument: "auto": Uses quickdraw_245_v0 "quickdraw_245_v0": Uses a model trained on 245 of the 345 classes in the Quick, Draw! dataset. None: No Warm Start batch_size : int, optional The number of drawings per training step. If not set, a default value of 256 will be used. If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. max_iterations : int, optional The maximum number of allowed passes through the data. More passes over the data can result in a more accurately trained model. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : DrawingClassifier A trained :class:`DrawingClassifier` model. See Also -------- DrawingClassifier Examples -------- .. sourcecode:: python # Train a drawing classifier model >>> model = turicreate.drawing_classifier.create(data) # Make predictions on the training set and add them as a column to the SFrame >>> data['predictions'] = model.predict(data)
[ "Create", "a", ":", "class", ":", "DrawingClassifier", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L47-L329
train
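Since the stroke-based input format is easy to get wrong, a short end-to-end sketch may help. The data here is hypothetical random noise, far too small and unstructured to train anything useful, but it exercises the documented input contract:

```python
import random
import turicreate as tc

def _random_stroke(n_points=8):
    # One stroke: an ordered list of {"x", "y"} point dictionaries.
    return [{"x": random.random(), "y": random.random()}
            for _ in range(n_points)]

# Hypothetical dataset: 200 two-stroke drawings over two classes.
drawings = [[_random_stroke(), _random_stroke()] for _ in range(200)]
labels = ["circle" if i % 2 == 0 else "square" for i in range(200)]
data = tc.SFrame({"drawing": drawings, "label": labels})

# warm_start=None avoids downloading the pretrained Quick, Draw! weights.
model = tc.drawing_classifier.create(data, target="label", feature="drawing",
                                     warm_start=None, max_iterations=2,
                                     verbose=False)
print(model.classes)  # ['circle', 'square']
```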
apple/turicreate
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
DrawingClassifier.export_coreml
def export_coreml(self, filename, verbose=False): """ Save the model in Core ML format. The Core ML model takes a grayscale drawing of fixed size as input and produces two outputs: `classLabel` and `labelProbabilities`. The first one, `classLabel` is an integer or string (depending on the classes the model was trained on) to store the label of the top prediction by the model. The second one, `labelProbabilities`, is a dictionary with all the class labels in the dataset as the keys, and their respective probabilities as the values. See Also -------- save Parameters ---------- filename : string The path of the file where we want to save the Core ML model. verbose : bool optional If True, prints export progress. Examples -------- >>> model.export_coreml('drawing_classifier.mlmodel') """ import mxnet as _mx from .._mxnet._mxnet_to_coreml import _mxnet_converter import coremltools as _coremltools batch_size = 1 image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT) s_image = _mx.sym.Variable(self.feature, shape=image_shape, dtype=_np.float32) from copy import copy as _copy net = _copy(self._model) s_ymap = net(s_image) mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature]) mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)]) mod.init_params() arg_params, aux_params = mod.get_params() net_params = net.collect_params() new_arg_params = {} for k, param in arg_params.items(): new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0]) new_aux_params = {} for k, param in aux_params.items(): new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0]) mod.set_params(new_arg_params, new_aux_params) coreml_model = _mxnet_converter.convert(mod, mode='classifier', class_labels=self.classes, input_shape=[(self.feature, image_shape)], builder=None, verbose=verbose, preprocessor_args={ 'image_input_names': [self.feature], 'image_scale': 1.0/255 }) DESIRED_OUTPUT_NAME = self.target + "Probabilities" spec = coreml_model._spec class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1 probabilities_output_index = 1-class_label_output_index spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME from turicreate.toolkits import _coreml_utils model_type = "drawing classifier" spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type) spec.description.input[0].shortDescription = self.feature spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities' spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction' from coremltools.models.utils import save_spec as _save_spec _save_spec(spec, filename)
python
def export_coreml(self, filename, verbose=False): """ Save the model in Core ML format. The Core ML model takes a grayscale drawing of fixed size as input and produces two outputs: `classLabel` and `labelProbabilities`. The first one, `classLabel` is an integer or string (depending on the classes the model was trained on) to store the label of the top prediction by the model. The second one, `labelProbabilities`, is a dictionary with all the class labels in the dataset as the keys, and their respective probabilities as the values. See Also -------- save Parameters ---------- filename : string The path of the file where we want to save the Core ML model. verbose : bool optional If True, prints export progress. Examples -------- >>> model.export_coreml('drawing_classifier.mlmodel') """ import mxnet as _mx from .._mxnet._mxnet_to_coreml import _mxnet_converter import coremltools as _coremltools batch_size = 1 image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT) s_image = _mx.sym.Variable(self.feature, shape=image_shape, dtype=_np.float32) from copy import copy as _copy net = _copy(self._model) s_ymap = net(s_image) mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature]) mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)]) mod.init_params() arg_params, aux_params = mod.get_params() net_params = net.collect_params() new_arg_params = {} for k, param in arg_params.items(): new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0]) new_aux_params = {} for k, param in aux_params.items(): new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0]) mod.set_params(new_arg_params, new_aux_params) coreml_model = _mxnet_converter.convert(mod, mode='classifier', class_labels=self.classes, input_shape=[(self.feature, image_shape)], builder=None, verbose=verbose, preprocessor_args={ 'image_input_names': [self.feature], 'image_scale': 1.0/255 }) DESIRED_OUTPUT_NAME = self.target + "Probabilities" spec = coreml_model._spec class_label_output_index = 0 if spec.description.output[0].name == "classLabel" else 1 probabilities_output_index = 1-class_label_output_index spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME from turicreate.toolkits import _coreml_utils model_type = "drawing classifier" spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type) spec.description.input[0].shortDescription = self.feature spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities' spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction' from coremltools.models.utils import save_spec as _save_spec _save_spec(spec, filename)
[ "def", "export_coreml", "(", "self", ",", "filename", ",", "verbose", "=", "False", ")", ":", "import", "mxnet", "as", "_mx", "from", ".", ".", "_mxnet", ".", "_mxnet_to_coreml", "import", "_mxnet_converter", "import", "coremltools", "as", "_coremltools", "batch_size", "=", "1", "image_shape", "=", "(", "batch_size", ",", ")", "+", "(", "1", ",", "BITMAP_WIDTH", ",", "BITMAP_HEIGHT", ")", "s_image", "=", "_mx", ".", "sym", ".", "Variable", "(", "self", ".", "feature", ",", "shape", "=", "image_shape", ",", "dtype", "=", "_np", ".", "float32", ")", "from", "copy", "import", "copy", "as", "_copy", "net", "=", "_copy", "(", "self", ".", "_model", ")", "s_ymap", "=", "net", "(", "s_image", ")", "mod", "=", "_mx", ".", "mod", ".", "Module", "(", "symbol", "=", "s_ymap", ",", "label_names", "=", "None", ",", "data_names", "=", "[", "self", ".", "feature", "]", ")", "mod", ".", "bind", "(", "for_training", "=", "False", ",", "data_shapes", "=", "[", "(", "self", ".", "feature", ",", "image_shape", ")", "]", ")", "mod", ".", "init_params", "(", ")", "arg_params", ",", "aux_params", "=", "mod", ".", "get_params", "(", ")", "net_params", "=", "net", ".", "collect_params", "(", ")", "new_arg_params", "=", "{", "}", "for", "k", ",", "param", "in", "arg_params", ".", "items", "(", ")", ":", "new_arg_params", "[", "k", "]", "=", "net_params", "[", "k", "]", ".", "data", "(", "net_params", "[", "k", "]", ".", "list_ctx", "(", ")", "[", "0", "]", ")", "new_aux_params", "=", "{", "}", "for", "k", ",", "param", "in", "aux_params", ".", "items", "(", ")", ":", "new_aux_params", "[", "k", "]", "=", "net_params", "[", "k", "]", ".", "data", "(", "net_params", "[", "k", "]", ".", "list_ctx", "(", ")", "[", "0", "]", ")", "mod", ".", "set_params", "(", "new_arg_params", ",", "new_aux_params", ")", "coreml_model", "=", "_mxnet_converter", ".", "convert", "(", "mod", ",", "mode", "=", "'classifier'", ",", "class_labels", "=", "self", ".", "classes", ",", "input_shape", "=", "[", "(", "self", ".", "feature", ",", "image_shape", ")", "]", ",", "builder", "=", "None", ",", "verbose", "=", "verbose", ",", "preprocessor_args", "=", "{", "'image_input_names'", ":", "[", "self", ".", "feature", "]", ",", "'image_scale'", ":", "1.0", "/", "255", "}", ")", "DESIRED_OUTPUT_NAME", "=", "self", ".", "target", "+", "\"Probabilities\"", "spec", "=", "coreml_model", ".", "_spec", "class_label_output_index", "=", "0", "if", "spec", ".", "description", ".", "output", "[", "0", "]", ".", "name", "==", "\"classLabel\"", "else", "1", "probabilities_output_index", "=", "1", "-", "class_label_output_index", "spec", ".", "neuralNetworkClassifier", ".", "labelProbabilityLayerName", "=", "DESIRED_OUTPUT_NAME", "spec", ".", "neuralNetworkClassifier", ".", "layers", "[", "-", "1", "]", ".", "name", "=", "DESIRED_OUTPUT_NAME", "spec", ".", "neuralNetworkClassifier", ".", "layers", "[", "-", "1", "]", ".", "output", "[", "0", "]", "=", "DESIRED_OUTPUT_NAME", "spec", ".", "description", ".", "predictedProbabilitiesName", "=", "DESIRED_OUTPUT_NAME", "spec", ".", "description", ".", "output", "[", "probabilities_output_index", "]", ".", "name", "=", "DESIRED_OUTPUT_NAME", "from", "turicreate", ".", "toolkits", "import", "_coreml_utils", "model_type", "=", "\"drawing classifier\"", "spec", ".", "description", ".", "metadata", ".", "shortDescription", "=", "_coreml_utils", ".", "_mlmodel_short_description", "(", "model_type", ")", "spec", ".", "description", ".", "input", "[", "0", "]", ".", "shortDescription", "=", "self", ".", "feature", 
"spec", ".", "description", ".", "output", "[", "probabilities_output_index", "]", ".", "shortDescription", "=", "'Prediction probabilities'", "spec", ".", "description", ".", "output", "[", "class_label_output_index", "]", ".", "shortDescription", "=", "'Class Label of Top Prediction'", "from", "coremltools", ".", "models", ".", "utils", "import", "save_spec", "as", "_save_spec", "_save_spec", "(", "spec", ",", "filename", ")" ]
Save the model in Core ML format. The Core ML model takes a grayscale drawing of fixed size as input and produces two outputs: `classLabel` and `labelProbabilities`. The first one, `classLabel`, is an integer or string (depending on the classes the model was trained on) that stores the label of the top prediction made by the model. The second one, `labelProbabilities`, is a dictionary with all the class labels in the dataset as the keys, and their respective probabilities as the values. See Also -------- save Parameters ---------- filename : string The path of the file where we want to save the Core ML model. verbose : bool, optional If True, prints export progress. Examples -------- >>> model.export_coreml('drawing_classifier.mlmodel')
[ "Save", "the", "model", "in", "Core", "ML", "format", ".", "The", "Core", "ML", "model", "takes", "a", "grayscale", "drawing", "of", "fixed", "size", "as", "input", "and", "produces", "two", "outputs", ":", "classLabel", "and", "labelProbabilities", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L435-L519
train
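A usage sketch, assuming a model trained as in the hypothetical example above. Per the renaming logic in the method, the probabilities output is named `<target>Probabilities`, so it becomes `labelProbabilities` when the target column is called "label":

```python
# save_spec/load_spec are the same coremltools helpers the method itself uses.
from coremltools.models.utils import load_spec

model.export_coreml("DrawingClassifier.mlmodel")

# Inspect the exported spec to confirm the renamed outputs.
spec = load_spec("DrawingClassifier.mlmodel")
print([out.name for out in spec.description.output])
# Expected something like: ['classLabel', 'labelProbabilities']
```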
apple/turicreate
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
DrawingClassifier._predict_with_probabilities
def _predict_with_probabilities(self, input_dataset, batch_size=None, verbose=True): """ Predict with probabilities. The core prediction part that both `evaluate` and `predict` share. Returns an SFrame with two columns, self.target, and "probabilities". The column with column name, self.target, contains the predictions made by the model for the provided dataset. The "probabilities" column contains the probabilities for each class that the model predicted for the data provided to the function. """ from .._mxnet import _mxnet_utils import mxnet as _mx from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image) dataset = _extensions._drawing_classifier_prepare_data( input_dataset, self.feature) if is_stroke_input else input_dataset batch_size = self.batch_size if batch_size is None else batch_size loader = _SFrameClassifierIter(dataset, batch_size, class_to_index=self._class_to_index, feature_column=self.feature, target_column=self.target, load_labels=False, shuffle=False, iterations=1) dataset_size = len(dataset) ctx = _mxnet_utils.get_mxnet_context() index = 0 last_time = 0 done = False from turicreate import SArrayBuilder from array import array classes = self.classes all_predicted_builder = SArrayBuilder(dtype=type(classes[0])) all_probabilities_builder = SArrayBuilder(dtype=array) for batch in loader: if batch.pad is not None: size = batch_size - batch.pad batch_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size) else: batch_data = batch.data[0] size = batch_size num_devices = min(batch_data.shape[0], len(ctx)) split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False) for data in split_data: z = self._model(data).asnumpy() predicted = list(map(lambda x: classes[x], z.argmax(axis=1))) split_length = z.shape[0] all_predicted_builder.append_multiple(predicted) all_probabilities_builder.append_multiple(z.tolist()) index += split_length if index == dataset_size - 1: done = True cur_time = _time.time() # Do not print progress if only a few samples are predicted if verbose and (dataset_size >= 5 and cur_time > last_time + 10 or done): print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format( cur_n = index + 1, max_n = dataset_size, width = len(str(dataset_size)))) last_time = cur_time return (_tc.SFrame({self.target: all_predicted_builder.close(), 'probability': all_probabilities_builder.close()}))
python
def _predict_with_probabilities(self, input_dataset, batch_size=None, verbose=True): """ Predict with probabilities. The core prediction part that both `evaluate` and `predict` share. Returns an SFrame with two columns, self.target, and "probabilities". The column with column name, self.target, contains the predictions made by the model for the provided dataset. The "probabilities" column contains the probabilities for each class that the model predicted for the data provided to the function. """ from .._mxnet import _mxnet_utils import mxnet as _mx from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image) dataset = _extensions._drawing_classifier_prepare_data( input_dataset, self.feature) if is_stroke_input else input_dataset batch_size = self.batch_size if batch_size is None else batch_size loader = _SFrameClassifierIter(dataset, batch_size, class_to_index=self._class_to_index, feature_column=self.feature, target_column=self.target, load_labels=False, shuffle=False, iterations=1) dataset_size = len(dataset) ctx = _mxnet_utils.get_mxnet_context() index = 0 last_time = 0 done = False from turicreate import SArrayBuilder from array import array classes = self.classes all_predicted_builder = SArrayBuilder(dtype=type(classes[0])) all_probabilities_builder = SArrayBuilder(dtype=array) for batch in loader: if batch.pad is not None: size = batch_size - batch.pad batch_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size) else: batch_data = batch.data[0] size = batch_size num_devices = min(batch_data.shape[0], len(ctx)) split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False) for data in split_data: z = self._model(data).asnumpy() predicted = list(map(lambda x: classes[x], z.argmax(axis=1))) split_length = z.shape[0] all_predicted_builder.append_multiple(predicted) all_probabilities_builder.append_multiple(z.tolist()) index += split_length if index == dataset_size - 1: done = True cur_time = _time.time() # Do not print progress if only a few samples are predicted if verbose and (dataset_size >= 5 and cur_time > last_time + 10 or done): print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format( cur_n = index + 1, max_n = dataset_size, width = len(str(dataset_size)))) last_time = cur_time return (_tc.SFrame({self.target: all_predicted_builder.close(), 'probability': all_probabilities_builder.close()}))
[ "def", "_predict_with_probabilities", "(", "self", ",", "input_dataset", ",", "batch_size", "=", "None", ",", "verbose", "=", "True", ")", ":", "from", ".", ".", "_mxnet", "import", "_mxnet_utils", "import", "mxnet", "as", "_mx", "from", ".", "_sframe_loader", "import", "SFrameClassifierIter", "as", "_SFrameClassifierIter", "is_stroke_input", "=", "(", "input_dataset", "[", "self", ".", "feature", "]", ".", "dtype", "!=", "_tc", ".", "Image", ")", "dataset", "=", "_extensions", ".", "_drawing_classifier_prepare_data", "(", "input_dataset", ",", "self", ".", "feature", ")", "if", "is_stroke_input", "else", "input_dataset", "batch_size", "=", "self", ".", "batch_size", "if", "batch_size", "is", "None", "else", "batch_size", "loader", "=", "_SFrameClassifierIter", "(", "dataset", ",", "batch_size", ",", "class_to_index", "=", "self", ".", "_class_to_index", ",", "feature_column", "=", "self", ".", "feature", ",", "target_column", "=", "self", ".", "target", ",", "load_labels", "=", "False", ",", "shuffle", "=", "False", ",", "iterations", "=", "1", ")", "dataset_size", "=", "len", "(", "dataset", ")", "ctx", "=", "_mxnet_utils", ".", "get_mxnet_context", "(", ")", "index", "=", "0", "last_time", "=", "0", "done", "=", "False", "from", "turicreate", "import", "SArrayBuilder", "from", "array", "import", "array", "classes", "=", "self", ".", "classes", "all_predicted_builder", "=", "SArrayBuilder", "(", "dtype", "=", "type", "(", "classes", "[", "0", "]", ")", ")", "all_probabilities_builder", "=", "SArrayBuilder", "(", "dtype", "=", "array", ")", "for", "batch", "in", "loader", ":", "if", "batch", ".", "pad", "is", "not", "None", ":", "size", "=", "batch_size", "-", "batch", ".", "pad", "batch_data", "=", "_mx", ".", "nd", ".", "slice_axis", "(", "batch", ".", "data", "[", "0", "]", ",", "axis", "=", "0", ",", "begin", "=", "0", ",", "end", "=", "size", ")", "else", ":", "batch_data", "=", "batch", ".", "data", "[", "0", "]", "size", "=", "batch_size", "num_devices", "=", "min", "(", "batch_data", ".", "shape", "[", "0", "]", ",", "len", "(", "ctx", ")", ")", "split_data", "=", "_mx", ".", "gluon", ".", "utils", ".", "split_and_load", "(", "batch_data", ",", "ctx_list", "=", "ctx", "[", ":", "num_devices", "]", ",", "even_split", "=", "False", ")", "for", "data", "in", "split_data", ":", "z", "=", "self", ".", "_model", "(", "data", ")", ".", "asnumpy", "(", ")", "predicted", "=", "list", "(", "map", "(", "lambda", "x", ":", "classes", "[", "x", "]", ",", "z", ".", "argmax", "(", "axis", "=", "1", ")", ")", ")", "split_length", "=", "z", ".", "shape", "[", "0", "]", "all_predicted_builder", ".", "append_multiple", "(", "predicted", ")", "all_probabilities_builder", ".", "append_multiple", "(", "z", ".", "tolist", "(", ")", ")", "index", "+=", "split_length", "if", "index", "==", "dataset_size", "-", "1", ":", "done", "=", "True", "cur_time", "=", "_time", ".", "time", "(", ")", "# Do not print progress if only a few samples are predicted", "if", "verbose", "and", "(", "dataset_size", ">=", "5", "and", "cur_time", ">", "last_time", "+", "10", "or", "done", ")", ":", "print", "(", "'Predicting {cur_n:{width}d}/{max_n:{width}d}'", ".", "format", "(", "cur_n", "=", "index", "+", "1", ",", "max_n", "=", "dataset_size", ",", "width", "=", "len", "(", "str", "(", "dataset_size", ")", ")", ")", ")", "last_time", "=", "cur_time", "return", "(", "_tc", ".", "SFrame", "(", "{", "self", ".", "target", ":", "all_predicted_builder", ".", "close", "(", ")", ",", "'probability'", ":", 
"all_probabilities_builder", ".", "close", "(", ")", "}", ")", ")" ]
Predict with probabilities. The core prediction part that both `evaluate` and `predict` share. Returns an SFrame with two columns: self.target and "probability". The self.target column contains the predictions made by the model for the provided dataset, and the "probability" column contains, for each row, the probabilities that the model assigned to each class.
[ "Predict", "with", "probabilities", ".", "The", "core", "prediction", "part", "that", "both", "evaluate", "and", "predict", "share", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L522-L601
train
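A minimal usage sketch for the record above, assuming a trained DrawingClassifier bound to `model` and an SFrame `data` holding drawings in the model's feature column. `_predict_with_probabilities` is internal (note the leading underscore), so the shape of its output is what matters here; the column names follow the record itself.

# Hypothetical: consume the SFrame returned by _predict_with_probabilities.
predicted = model._predict_with_probabilities(data, batch_size=32, verbose=False)
labels = predicted[model.target]       # one predicted class per input row
vectors = predicted['probability']     # one probability vector per input row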
apple/turicreate
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
DrawingClassifier.evaluate
def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True): """ Evaluate the model by making predictions of target values and comparing these to actual values. Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the feature and target columns used for model training. Additional columns are ignored. metric : str, optional Name of the evaluation metric. Possible values are: - 'auto' : Returns all available metrics. - 'accuracy' : Classification accuracy (micro average). - 'auc' : Area under the ROC curve (macro average) - 'precision' : Precision score (macro average) - 'recall' : Recall score (macro average) - 'f1_score' : F1 score (macro average) - 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations. - 'roc_curve' : An SFrame containing information needed for an ROC curve verbose : bool, optional If True, prints prediction progress. Returns ------- out : dict Dictionary of evaluation results where the key is the name of the evaluation metric (e.g. `accuracy`) and the value is the evaluation score. See Also ---------- create, predict Examples ---------- .. sourcecode:: python >>> results = model.evaluate(data) >>> print(results['accuracy']) """ if self.target not in dataset.column_names(): raise _ToolkitError("Must provide ground truth column, '" + self.target + "' in the evaluation dataset.") predicted = self._predict_with_probabilities(dataset, batch_size, verbose) avail_metrics = ['accuracy', 'auc', 'precision', 'recall', 'f1_score', 'confusion_matrix', 'roc_curve'] _tkutl._check_categorical_option_type( 'metric', metric, avail_metrics + ['auto']) metrics = avail_metrics if metric == 'auto' else [metric] ret = {} if 'accuracy' in metrics: ret['accuracy'] = _evaluation.accuracy( dataset[self.target], predicted[self.target]) if 'auc' in metrics: ret['auc'] = _evaluation.auc( dataset[self.target], predicted['probability'], index_map=self._class_to_index) if 'precision' in metrics: ret['precision'] = _evaluation.precision( dataset[self.target], predicted[self.target]) if 'recall' in metrics: ret['recall'] = _evaluation.recall( dataset[self.target], predicted[self.target]) if 'f1_score' in metrics: ret['f1_score'] = _evaluation.f1_score( dataset[self.target], predicted[self.target]) if 'confusion_matrix' in metrics: ret['confusion_matrix'] = _evaluation.confusion_matrix( dataset[self.target], predicted[self.target]) if 'roc_curve' in metrics: ret['roc_curve'] = _evaluation.roc_curve( dataset[self.target], predicted['probability'], index_map=self._class_to_index) return ret
python
def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True): """ Evaluate the model by making predictions of target values and comparing these to actual values. Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the feature and target columns used for model training. Additional columns are ignored. metric : str, optional Name of the evaluation metric. Possible values are: - 'auto' : Returns all available metrics. - 'accuracy' : Classification accuracy (micro average). - 'auc' : Area under the ROC curve (macro average) - 'precision' : Precision score (macro average) - 'recall' : Recall score (macro average) - 'f1_score' : F1 score (macro average) - 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations. - 'roc_curve' : An SFrame containing information needed for an ROC curve verbose : bool, optional If True, prints prediction progress. Returns ------- out : dict Dictionary of evaluation results where the key is the name of the evaluation metric (e.g. `accuracy`) and the value is the evaluation score. See Also ---------- create, predict Examples ---------- .. sourcecode:: python >>> results = model.evaluate(data) >>> print(results['accuracy']) """ if self.target not in dataset.column_names(): raise _ToolkitError("Must provide ground truth column, '" + self.target + "' in the evaluation dataset.") predicted = self._predict_with_probabilities(dataset, batch_size, verbose) avail_metrics = ['accuracy', 'auc', 'precision', 'recall', 'f1_score', 'confusion_matrix', 'roc_curve'] _tkutl._check_categorical_option_type( 'metric', metric, avail_metrics + ['auto']) metrics = avail_metrics if metric == 'auto' else [metric] ret = {} if 'accuracy' in metrics: ret['accuracy'] = _evaluation.accuracy( dataset[self.target], predicted[self.target]) if 'auc' in metrics: ret['auc'] = _evaluation.auc( dataset[self.target], predicted['probability'], index_map=self._class_to_index) if 'precision' in metrics: ret['precision'] = _evaluation.precision( dataset[self.target], predicted[self.target]) if 'recall' in metrics: ret['recall'] = _evaluation.recall( dataset[self.target], predicted[self.target]) if 'f1_score' in metrics: ret['f1_score'] = _evaluation.f1_score( dataset[self.target], predicted[self.target]) if 'confusion_matrix' in metrics: ret['confusion_matrix'] = _evaluation.confusion_matrix( dataset[self.target], predicted[self.target]) if 'roc_curve' in metrics: ret['roc_curve'] = _evaluation.roc_curve( dataset[self.target], predicted['probability'], index_map=self._class_to_index) return ret
[ "def", "evaluate", "(", "self", ",", "dataset", ",", "metric", "=", "'auto'", ",", "batch_size", "=", "None", ",", "verbose", "=", "True", ")", ":", "if", "self", ".", "target", "not", "in", "dataset", ".", "column_names", "(", ")", ":", "raise", "_ToolkitError", "(", "\"Must provide ground truth column, '\"", "+", "self", ".", "target", "+", "\"' in the evaluation dataset.\"", ")", "predicted", "=", "self", ".", "_predict_with_probabilities", "(", "dataset", ",", "batch_size", ",", "verbose", ")", "avail_metrics", "=", "[", "'accuracy'", ",", "'auc'", ",", "'precision'", ",", "'recall'", ",", "'f1_score'", ",", "'confusion_matrix'", ",", "'roc_curve'", "]", "_tkutl", ".", "_check_categorical_option_type", "(", "'metric'", ",", "metric", ",", "avail_metrics", "+", "[", "'auto'", "]", ")", "metrics", "=", "avail_metrics", "if", "metric", "==", "'auto'", "else", "[", "metric", "]", "ret", "=", "{", "}", "if", "'accuracy'", "in", "metrics", ":", "ret", "[", "'accuracy'", "]", "=", "_evaluation", ".", "accuracy", "(", "dataset", "[", "self", ".", "target", "]", ",", "predicted", "[", "self", ".", "target", "]", ")", "if", "'auc'", "in", "metrics", ":", "ret", "[", "'auc'", "]", "=", "_evaluation", ".", "auc", "(", "dataset", "[", "self", ".", "target", "]", ",", "predicted", "[", "'probability'", "]", ",", "index_map", "=", "self", ".", "_class_to_index", ")", "if", "'precision'", "in", "metrics", ":", "ret", "[", "'precision'", "]", "=", "_evaluation", ".", "precision", "(", "dataset", "[", "self", ".", "target", "]", ",", "predicted", "[", "self", ".", "target", "]", ")", "if", "'recall'", "in", "metrics", ":", "ret", "[", "'recall'", "]", "=", "_evaluation", ".", "recall", "(", "dataset", "[", "self", ".", "target", "]", ",", "predicted", "[", "self", ".", "target", "]", ")", "if", "'f1_score'", "in", "metrics", ":", "ret", "[", "'f1_score'", "]", "=", "_evaluation", ".", "f1_score", "(", "dataset", "[", "self", ".", "target", "]", ",", "predicted", "[", "self", ".", "target", "]", ")", "if", "'confusion_matrix'", "in", "metrics", ":", "ret", "[", "'confusion_matrix'", "]", "=", "_evaluation", ".", "confusion_matrix", "(", "dataset", "[", "self", ".", "target", "]", ",", "predicted", "[", "self", ".", "target", "]", ")", "if", "'roc_curve'", "in", "metrics", ":", "ret", "[", "'roc_curve'", "]", "=", "_evaluation", ".", "roc_curve", "(", "dataset", "[", "self", ".", "target", "]", ",", "predicted", "[", "'probability'", "]", ",", "index_map", "=", "self", ".", "_class_to_index", ")", "return", "ret" ]
Evaluate the model by making predictions of target values and comparing these to actual values. Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the feature and target columns used for model training. Additional columns are ignored. metric : str, optional Name of the evaluation metric. Possible values are: - 'auto' : Returns all available metrics. - 'accuracy' : Classification accuracy (micro average). - 'auc' : Area under the ROC curve (macro average) - 'precision' : Precision score (macro average) - 'recall' : Recall score (macro average) - 'f1_score' : F1 score (macro average) - 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations. - 'roc_curve' : An SFrame containing information needed for an ROC curve verbose : bool, optional If True, prints prediction progress. Returns ------- out : dict Dictionary of evaluation results where the key is the name of the evaluation metric (e.g. `accuracy`) and the value is the evaluation score. See Also ---------- create, predict Examples ---------- .. sourcecode:: python >>> results = model.evaluate(data) >>> print(results['accuracy'])
[ "Evaluate", "the", "model", "by", "making", "predictions", "of", "target", "values", "and", "comparing", "these", "to", "actual", "values", ".", "Parameters", "----------", "dataset", ":", "SFrame", "Dataset", "of", "new", "observations", ".", "Must", "include", "columns", "with", "the", "same", "names", "as", "the", "feature", "and", "target", "columns", "used", "for", "model", "training", ".", "Additional", "columns", "are", "ignored", ".", "metric", ":", "str", "optional", "Name", "of", "the", "evaluation", "metric", ".", "Possible", "values", "are", ":", "-", "auto", ":", "Returns", "all", "available", "metrics", ".", "-", "accuracy", ":", "Classification", "accuracy", "(", "micro", "average", ")", ".", "-", "auc", ":", "Area", "under", "the", "ROC", "curve", "(", "macro", "average", ")", "-", "precision", ":", "Precision", "score", "(", "macro", "average", ")", "-", "recall", ":", "Recall", "score", "(", "macro", "average", ")", "-", "f1_score", ":", "F1", "score", "(", "macro", "average", ")", "-", "confusion_matrix", ":", "An", "SFrame", "with", "counts", "of", "possible", "prediction", "/", "true", "label", "combinations", ".", "-", "roc_curve", ":", "An", "SFrame", "containing", "information", "needed", "for", "an", "ROC", "curve", "verbose", ":", "bool", "optional", "If", "True", "prints", "prediction", "progress", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L603-L690
train
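The docstring above only demonstrates `metric='auto'`; a short sketch of requesting a single metric instead, assuming a trained `model` and an SFrame `test_data` that contains the target column:

# Compute only the confusion matrix; the other six metrics are skipped.
results = model.evaluate(test_data, metric='confusion_matrix')
print(results['confusion_matrix'])  # SFrame of counts per (true, predicted) label pair

# metric='auto' returns all seven metrics in one dict.
all_metrics = model.evaluate(test_data)
print(all_metrics['accuracy'], all_metrics['f1_score'])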
apple/turicreate
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
DrawingClassifier.predict_topk
def predict_topk(self, dataset, output_type="probability", k=3, batch_size=None): """ Return top-k predictions for the ``dataset``, using the trained model. Predictions are returned as an SFrame with three columns: `id`, `class`, and `probability` or `rank`, depending on the ``output_type`` parameter. Parameters ---------- dataset : SFrame | SArray | turicreate.Image | list Drawings to be classified. If dataset is an SFrame, it must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. output_type : {'probability', 'rank'}, optional Choose the return type of the prediction: - `probability`: Probability associated with each label in the prediction. - `rank` : Rank associated with each label in the prediction. k : int, optional Number of classes to return for each input example. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : SFrame An SFrame with model predictions. See Also -------- predict, evaluate Examples -------- >>> pred = m.predict_topk(validation_data, k=3) >>> pred +----+-------+-------------------+ | id | class | probability | +----+-------+-------------------+ | 0 | 4 | 0.995623886585 | | 0 | 9 | 0.0038311756216 | | 0 | 7 | 0.000301006948575 | | 1 | 1 | 0.928708016872 | | 1 | 3 | 0.0440889261663 | | 1 | 2 | 0.0176190119237 | | 2 | 3 | 0.996967732906 | | 2 | 2 | 0.00151345680933 | | 2 | 7 | 0.000637513934635 | | 3 | 1 | 0.998070061207 | | .. | ... | ... | +----+-------+-------------------+ [35688 rows x 3 columns] """ _tkutl._check_categorical_option_type("output_type", output_type, ["probability", "rank"]) if not isinstance(k, int): raise TypeError("'k' must be an integer >= 1") if k <= 0: raise ValueError("'k' must be >= 1") if batch_size is not None and not isinstance(batch_size, int): raise TypeError("'batch_size' must be an integer >= 1") if batch_size is not None and batch_size < 1: raise ValueError("'batch_size' must be >= 1") prob_vector = self.predict( dataset, output_type='probability_vector', batch_size=batch_size) classes = self.classes if output_type == 'probability': results = prob_vector.apply(lambda p: [ {'class': classes[i], 'probability': p[i]} for i in reversed(_np.argsort(p)[-k:])] ) else: assert(output_type == 'rank') results = prob_vector.apply(lambda p: [ {'class': classes[index], 'rank': rank} for rank, index in enumerate(reversed(_np.argsort(p)[-k:]))] ) results = _tc.SFrame({'X': results}) results = results.add_row_number() results = results.stack('X', new_column_name='X') results = results.unpack('X', column_name_prefix='') return results
python
def predict_topk(self, dataset, output_type="probability", k=3, batch_size=None): """ Return top-k predictions for the ``dataset``, using the trained model. Predictions are returned as an SFrame with three columns: `id`, `class`, and `probability` or `rank`, depending on the ``output_type`` parameter. Parameters ---------- dataset : SFrame | SArray | turicreate.Image | list Drawings to be classified. If dataset is an SFrame, it must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. output_type : {'probability', 'rank'}, optional Choose the return type of the prediction: - `probability`: Probability associated with each label in the prediction. - `rank` : Rank associated with each label in the prediction. k : int, optional Number of classes to return for each input example. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : SFrame An SFrame with model predictions. See Also -------- predict, evaluate Examples -------- >>> pred = m.predict_topk(validation_data, k=3) >>> pred +----+-------+-------------------+ | id | class | probability | +----+-------+-------------------+ | 0 | 4 | 0.995623886585 | | 0 | 9 | 0.0038311756216 | | 0 | 7 | 0.000301006948575 | | 1 | 1 | 0.928708016872 | | 1 | 3 | 0.0440889261663 | | 1 | 2 | 0.0176190119237 | | 2 | 3 | 0.996967732906 | | 2 | 2 | 0.00151345680933 | | 2 | 7 | 0.000637513934635 | | 3 | 1 | 0.998070061207 | | .. | ... | ... | +----+-------+-------------------+ [35688 rows x 3 columns] """ _tkutl._check_categorical_option_type("output_type", output_type, ["probability", "rank"]) if not isinstance(k, int): raise TypeError("'k' must be an integer >= 1") if k <= 0: raise ValueError("'k' must be >= 1") if batch_size is not None and not isinstance(batch_size, int): raise TypeError("'batch_size' must be an integer >= 1") if batch_size is not None and batch_size < 1: raise ValueError("'batch_size' must be >= 1") prob_vector = self.predict( dataset, output_type='probability_vector', batch_size=batch_size) classes = self.classes if output_type == 'probability': results = prob_vector.apply(lambda p: [ {'class': classes[i], 'probability': p[i]} for i in reversed(_np.argsort(p)[-k:])] ) else: assert(output_type == 'rank') results = prob_vector.apply(lambda p: [ {'class': classes[index], 'rank': rank} for rank, index in enumerate(reversed(_np.argsort(p)[-k:]))] ) results = _tc.SFrame({'X': results}) results = results.add_row_number() results = results.stack('X', new_column_name='X') results = results.unpack('X', column_name_prefix='') return results
[ "def", "predict_topk", "(", "self", ",", "dataset", ",", "output_type", "=", "\"probability\"", ",", "k", "=", "3", ",", "batch_size", "=", "None", ")", ":", "_tkutl", ".", "_check_categorical_option_type", "(", "\"output_type\"", ",", "output_type", ",", "[", "\"probability\"", ",", "\"rank\"", "]", ")", "if", "not", "isinstance", "(", "k", ",", "int", ")", ":", "raise", "TypeError", "(", "\"'k' must be an integer >= 1\"", ")", "if", "k", "<=", "0", ":", "raise", "ValueError", "(", "\"'k' must be >= 1\"", ")", "if", "batch_size", "is", "not", "None", "and", "not", "isinstance", "(", "batch_size", ",", "int", ")", ":", "raise", "TypeError", "(", "\"'batch_size' must be an integer >= 1\"", ")", "if", "batch_size", "is", "not", "None", "and", "batch_size", "<", "1", ":", "raise", "ValueError", "(", "\"'batch_size' must be >= 1\"", ")", "prob_vector", "=", "self", ".", "predict", "(", "dataset", ",", "output_type", "=", "'probability_vector'", ",", "batch_size", "=", "batch_size", ")", "classes", "=", "self", ".", "classes", "if", "output_type", "==", "'probability'", ":", "results", "=", "prob_vector", ".", "apply", "(", "lambda", "p", ":", "[", "{", "'class'", ":", "classes", "[", "i", "]", ",", "'probability'", ":", "p", "[", "i", "]", "}", "for", "i", "in", "reversed", "(", "_np", ".", "argsort", "(", "p", ")", "[", "-", "k", ":", "]", ")", "]", ")", "else", ":", "assert", "(", "output_type", "==", "'rank'", ")", "results", "=", "prob_vector", ".", "apply", "(", "lambda", "p", ":", "[", "{", "'class'", ":", "classes", "[", "index", "]", ",", "'rank'", ":", "rank", "}", "for", "rank", ",", "index", "in", "enumerate", "(", "reversed", "(", "_np", ".", "argsort", "(", "p", ")", "[", "-", "k", ":", "]", ")", ")", "]", ")", "results", "=", "_tc", ".", "SFrame", "(", "{", "'X'", ":", "results", "}", ")", "results", "=", "results", ".", "add_row_number", "(", ")", "results", "=", "results", ".", "stack", "(", "'X'", ",", "new_column_name", "=", "'X'", ")", "results", "=", "results", ".", "unpack", "(", "'X'", ",", "column_name_prefix", "=", "''", ")", "return", "results" ]
Return top-k predictions for the ``dataset``, using the trained model. Predictions are returned as an SFrame with three columns: `id`, `class`, and `probability` or `rank`, depending on the ``output_type`` parameter. Parameters ---------- dataset : SFrame | SArray | turicreate.Image | list Drawings to be classified. If dataset is an SFrame, it must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. output_type : {'probability', 'rank'}, optional Choose the return type of the prediction: - `probability`: Probability associated with each label in the prediction. - `rank` : Rank associated with each label in the prediction. k : int, optional Number of classes to return for each input example. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : SFrame An SFrame with model predictions. See Also -------- predict, evaluate Examples -------- >>> pred = m.predict_topk(validation_data, k=3) >>> pred +----+-------+-------------------+ | id | class | probability | +----+-------+-------------------+ | 0 | 4 | 0.995623886585 | | 0 | 9 | 0.0038311756216 | | 0 | 7 | 0.000301006948575 | | 1 | 1 | 0.928708016872 | | 1 | 3 | 0.0440889261663 | | 1 | 2 | 0.0176190119237 | | 2 | 3 | 0.996967732906 | | 2 | 2 | 0.00151345680933 | | 2 | 7 | 0.000637513934635 | | 3 | 1 | 0.998070061207 | | .. | ... | ... | +----+-------+-------------------+ [35688 rows x 3 columns]
[ "Return", "top", "-", "k", "predictions", "for", "the", "dataset", "using", "the", "trained", "model", ".", "Predictions", "are", "returned", "as", "an", "SFrame", "with", "three", "columns", ":", "id", "class", "and", "probability", "or", "rank", "depending", "on", "the", "output_type", "parameter", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L692-L785
train
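The example in the record uses the default `output_type='probability'`; a complementary sketch with ranks, assuming a trained `model` and input `data` as above:

# Top-2 classes per drawing, ranked 0 (most likely) to 1.
topk = model.predict_topk(data, output_type='rank', k=2, batch_size=64)
# Each input row yields k output rows sharing an 'id'; columns: id, class, rank.
print(topk)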
apple/turicreate
src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py
DrawingClassifier.predict
def predict(self, data, output_type='class', batch_size=None, verbose=True): """ Predict on an SFrame or SArray of drawings, or on a single drawing. Parameters ---------- data : SFrame | SArray | tc.Image | list The drawing(s) on which to perform drawing classification. If dataset is an SFrame, it must have a column with the same name as the feature column during training. Additional columns are ignored. If the data is a single drawing, it can be either of type tc.Image, in which case it is a bitmap-based drawing input, or of type list, in which case it is a stroke-based drawing input. output_type : {'probability', 'class', 'probability_vector'}, optional Form of the predictions which are one of: - 'class': Class prediction. For multi-class classification, this returns the class with maximum probability. - 'probability': Prediction probability associated with the True class (not applicable for multi-class classification) - 'probability_vector': Prediction probability associated with each class as a vector. Label ordering is dictated by the ``classes`` member variable. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. verbose : bool, optional If True, prints prediction progress. Returns ------- out : SArray An SArray with model predictions. Each element corresponds to a drawing and contains a single value corresponding to the predicted label. Each prediction will have type integer or string depending on the type of the classes the model was trained on. If `data` is a single drawing, the return value will be a single prediction. See Also -------- evaluate Examples -------- .. sourcecode:: python # Make predictions >>> pred = model.predict(data) # Print predictions, for a better overview >>> print(pred) dtype: int Rows: 10 [3, 4, 3, 3, 4, 5, 8, 8, 8, 4] """ _tkutl._check_categorical_option_type("output_type", output_type, ["probability", "class", "probability_vector"]) if isinstance(data, _tc.SArray): predicted = self._predict_with_probabilities( _tc.SFrame({ self.feature: data }), batch_size, verbose ) elif isinstance(data, _tc.SFrame): predicted = self._predict_with_probabilities(data, batch_size, verbose) else: # single input predicted = self._predict_with_probabilities( _tc.SFrame({ self.feature: [data] }), batch_size, verbose ) if output_type == "class": return predicted[self.target] elif output_type == "probability": _class_to_index = self._class_to_index target = self.target return predicted.apply( lambda row: row["probability"][_class_to_index[row[target]]]) else: assert (output_type == "probability_vector") return predicted["probability"]
python
def predict(self, data, output_type='class', batch_size=None, verbose=True): """ Predict on an SFrame or SArray of drawings, or on a single drawing. Parameters ---------- data : SFrame | SArray | tc.Image | list The drawing(s) on which to perform drawing classification. If dataset is an SFrame, it must have a column with the same name as the feature column during training. Additional columns are ignored. If the data is a single drawing, it can be either of type tc.Image, in which case it is a bitmap-based drawing input, or of type list, in which case it is a stroke-based drawing input. output_type : {'probability', 'class', 'probability_vector'}, optional Form of the predictions which are one of: - 'class': Class prediction. For multi-class classification, this returns the class with maximum probability. - 'probability': Prediction probability associated with the True class (not applicable for multi-class classification) - 'probability_vector': Prediction probability associated with each class as a vector. Label ordering is dictated by the ``classes`` member variable. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. verbose : bool, optional If True, prints prediction progress. Returns ------- out : SArray An SArray with model predictions. Each element corresponds to a drawing and contains a single value corresponding to the predicted label. Each prediction will have type integer or string depending on the type of the classes the model was trained on. If `data` is a single drawing, the return value will be a single prediction. See Also -------- evaluate Examples -------- .. sourcecode:: python # Make predictions >>> pred = model.predict(data) # Print predictions, for a better overview >>> print(pred) dtype: int Rows: 10 [3, 4, 3, 3, 4, 5, 8, 8, 8, 4] """ _tkutl._check_categorical_option_type("output_type", output_type, ["probability", "class", "probability_vector"]) if isinstance(data, _tc.SArray): predicted = self._predict_with_probabilities( _tc.SFrame({ self.feature: data }), batch_size, verbose ) elif isinstance(data, _tc.SFrame): predicted = self._predict_with_probabilities(data, batch_size, verbose) else: # single input predicted = self._predict_with_probabilities( _tc.SFrame({ self.feature: [data] }), batch_size, verbose ) if output_type == "class": return predicted[self.target] elif output_type == "probability": _class_to_index = self._class_to_index target = self.target return predicted.apply( lambda row: row["probability"][_class_to_index[row[target]]]) else: assert (output_type == "probability_vector") return predicted["probability"]
[ "def", "predict", "(", "self", ",", "data", ",", "output_type", "=", "'class'", ",", "batch_size", "=", "None", ",", "verbose", "=", "True", ")", ":", "_tkutl", ".", "_check_categorical_option_type", "(", "\"output_type\"", ",", "output_type", ",", "[", "\"probability\"", ",", "\"class\"", ",", "\"probability_vector\"", "]", ")", "if", "isinstance", "(", "data", ",", "_tc", ".", "SArray", ")", ":", "predicted", "=", "self", ".", "_predict_with_probabilities", "(", "_tc", ".", "SFrame", "(", "{", "self", ".", "feature", ":", "data", "}", ")", ",", "batch_size", ",", "verbose", ")", "elif", "isinstance", "(", "data", ",", "_tc", ".", "SFrame", ")", ":", "predicted", "=", "self", ".", "_predict_with_probabilities", "(", "data", ",", "batch_size", ",", "verbose", ")", "else", ":", "# single input", "predicted", "=", "self", ".", "_predict_with_probabilities", "(", "_tc", ".", "SFrame", "(", "{", "self", ".", "feature", ":", "[", "data", "]", "}", ")", ",", "batch_size", ",", "verbose", ")", "if", "output_type", "==", "\"class\"", ":", "return", "predicted", "[", "self", ".", "target", "]", "elif", "output_type", "==", "\"probability\"", ":", "_class_to_index", "=", "self", ".", "_class_to_index", "target", "=", "self", ".", "target", "return", "predicted", ".", "apply", "(", "lambda", "row", ":", "row", "[", "\"probability\"", "]", "[", "_class_to_index", "[", "row", "[", "target", "]", "]", "]", ")", "else", ":", "assert", "(", "output_type", "==", "\"probability_vector\"", ")", "return", "predicted", "[", "\"probability\"", "]" ]
Predict on an SFrame or SArray of drawings, or on a single drawing. Parameters ---------- data : SFrame | SArray | tc.Image | list The drawing(s) on which to perform drawing classification. If dataset is an SFrame, it must have a column with the same name as the feature column during training. Additional columns are ignored. If the data is a single drawing, it can be either of type tc.Image, in which case it is a bitmap-based drawing input, or of type list, in which case it is a stroke-based drawing input. output_type : {'probability', 'class', 'probability_vector'}, optional Form of the predictions which are one of: - 'class': Class prediction. For multi-class classification, this returns the class with maximum probability. - 'probability': Prediction probability associated with the True class (not applicable for multi-class classification) - 'probability_vector': Prediction probability associated with each class as a vector. Label ordering is dictated by the ``classes`` member variable. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. verbose : bool, optional If True, prints prediction progress. Returns ------- out : SArray An SArray with model predictions. Each element corresponds to a drawing and contains a single value corresponding to the predicted label. Each prediction will have type integer or string depending on the type of the classes the model was trained on. If `data` is a single drawing, the return value will be a single prediction. See Also -------- evaluate Examples -------- .. sourcecode:: python # Make predictions >>> pred = model.predict(data) # Print predictions, for a better overview >>> print(pred) dtype: int Rows: 10 [3, 4, 3, 3, 4, 5, 8, 8, 8, 4]
[ "Predict", "on", "an", "SFrame", "or", "SArray", "of", "drawings", "or", "on", "a", "single", "drawing", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L788-L879
train
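A sketch of the three `output_type` values side by side, assuming a trained `model`, an SFrame `data`, and a hypothetical single bitmap drawing `img` of type tc.Image:

classes = model.predict(data)                                    # SArray of class labels
vectors = model.predict(data, output_type='probability_vector')  # one vector per row,
                                                                 # ordered like model.classes
single = model.predict(img)                                      # single drawing in,
                                                                 # single prediction out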
apple/turicreate
src/unity/python/turicreate/toolkits/_image_feature_extractor.py
MXFeatureExtractor.extract_features
def extract_features(self, dataset, feature, batch_size=64, verbose=False): """ Parameters ---------- dataset: SFrame SFrame of images """ from ._mxnet._mx_sframe_iter import SFrameImageIter as _SFrameImageIter from six.moves.queue import Queue as _Queue from threading import Thread as _Thread import turicreate as _tc import array if len(dataset) == 0: return _tc.SArray([], array.array) batch_size = min(len(dataset), batch_size) # Make a data iterator dataIter = _SFrameImageIter(sframe=dataset, data_field=[feature], batch_size=batch_size, image_shape=self.image_shape) # Setup the MXNet model model = MXFeatureExtractor._get_mx_module(self.ptModel.mxmodel, self.data_layer, self.feature_layer, self.context, self.image_shape, batch_size) out = _tc.SArrayBuilder(dtype = array.array) progress = { 'num_processed' : 0, 'total' : len(dataset) } if verbose: print("Performing feature extraction on resized images...") # Encapsulates the work done by the MXNet model for a single batch def handle_request(batch): model.forward(batch) mx_out = [array.array('d',m) for m in model.get_outputs()[0].asnumpy()] if batch.pad != 0: # If batch size is not evenly divisible by the length, it will loop back around. # We don't want that. mx_out = mx_out[:-batch.pad] return mx_out # Copies the output from MXNet into the SArrayBuilder and emits progress def consume_response(mx_out): out.append_multiple(mx_out) progress['num_processed'] += len(mx_out) if verbose: print('Completed {num_processed:{width}d}/{total:{width}d}'.format( width = len(str(progress['total'])), **progress)) # Create a dedicated thread for performing MXNet work, using two FIFO # queues for communication back and forth with this thread, with the # goal of keeping MXNet busy throughout. request_queue = _Queue() response_queue = _Queue() def mx_worker(): while True: batch = request_queue.get() # Consume request if batch is None: # No more work remains. Allow the thread to finish. return response_queue.put(handle_request(batch)) # Produce response mx_worker_thread = _Thread(target=mx_worker) mx_worker_thread.start() try: # Attempt to have two requests in progress at any one time (double # buffering), so that the iterator is creating one batch while MXNet # performs inference on the other. if dataIter.has_next: request_queue.put(next(dataIter)) # Produce request while dataIter.has_next: request_queue.put(next(dataIter)) # Produce request consume_response(response_queue.get()) consume_response(response_queue.get()) finally: # Tell the worker thread to shut down. request_queue.put(None) return out.close()
python
def extract_features(self, dataset, feature, batch_size=64, verbose=False): """ Parameters ---------- dataset: SFrame SFrame of images """ from ._mxnet._mx_sframe_iter import SFrameImageIter as _SFrameImageIter from six.moves.queue import Queue as _Queue from threading import Thread as _Thread import turicreate as _tc import array if len(dataset) == 0: return _tc.SArray([], array.array) batch_size = min(len(dataset), batch_size) # Make a data iterator dataIter = _SFrameImageIter(sframe=dataset, data_field=[feature], batch_size=batch_size, image_shape=self.image_shape) # Setup the MXNet model model = MXFeatureExtractor._get_mx_module(self.ptModel.mxmodel, self.data_layer, self.feature_layer, self.context, self.image_shape, batch_size) out = _tc.SArrayBuilder(dtype = array.array) progress = { 'num_processed' : 0, 'total' : len(dataset) } if verbose: print("Performing feature extraction on resized images...") # Encapsulates the work done by the MXNet model for a single batch def handle_request(batch): model.forward(batch) mx_out = [array.array('d',m) for m in model.get_outputs()[0].asnumpy()] if batch.pad != 0: # If batch size is not evenly divisible by the length, it will loop back around. # We don't want that. mx_out = mx_out[:-batch.pad] return mx_out # Copies the output from MXNet into the SArrayBuilder and emits progress def consume_response(mx_out): out.append_multiple(mx_out) progress['num_processed'] += len(mx_out) if verbose: print('Completed {num_processed:{width}d}/{total:{width}d}'.format( width = len(str(progress['total'])), **progress)) # Create a dedicated thread for performing MXNet work, using two FIFO # queues for communication back and forth with this thread, with the # goal of keeping MXNet busy throughout. request_queue = _Queue() response_queue = _Queue() def mx_worker(): while True: batch = request_queue.get() # Consume request if batch is None: # No more work remains. Allow the thread to finish. return response_queue.put(handle_request(batch)) # Produce response mx_worker_thread = _Thread(target=mx_worker) mx_worker_thread.start() try: # Attempt to have two requests in progress at any one time (double # buffering), so that the iterator is creating one batch while MXNet # performs inference on the other. if dataIter.has_next: request_queue.put(next(dataIter)) # Produce request while dataIter.has_next: request_queue.put(next(dataIter)) # Produce request consume_response(response_queue.get()) consume_response(response_queue.get()) finally: # Tell the worker thread to shut down. request_queue.put(None) return out.close()
[ "def", "extract_features", "(", "self", ",", "dataset", ",", "feature", ",", "batch_size", "=", "64", ",", "verbose", "=", "False", ")", ":", "from", ".", "_mxnet", ".", "_mx_sframe_iter", "import", "SFrameImageIter", "as", "_SFrameImageIter", "from", "six", ".", "moves", ".", "queue", "import", "Queue", "as", "_Queue", "from", "threading", "import", "Thread", "as", "_Thread", "import", "turicreate", "as", "_tc", "import", "array", "if", "len", "(", "dataset", ")", "==", "0", ":", "return", "_tc", ".", "SArray", "(", "[", "]", ",", "array", ".", "array", ")", "batch_size", "=", "min", "(", "len", "(", "dataset", ")", ",", "batch_size", ")", "# Make a data iterator", "dataIter", "=", "_SFrameImageIter", "(", "sframe", "=", "dataset", ",", "data_field", "=", "[", "feature", "]", ",", "batch_size", "=", "batch_size", ",", "image_shape", "=", "self", ".", "image_shape", ")", "# Setup the MXNet model", "model", "=", "MXFeatureExtractor", ".", "_get_mx_module", "(", "self", ".", "ptModel", ".", "mxmodel", ",", "self", ".", "data_layer", ",", "self", ".", "feature_layer", ",", "self", ".", "context", ",", "self", ".", "image_shape", ",", "batch_size", ")", "out", "=", "_tc", ".", "SArrayBuilder", "(", "dtype", "=", "array", ".", "array", ")", "progress", "=", "{", "'num_processed'", ":", "0", ",", "'total'", ":", "len", "(", "dataset", ")", "}", "if", "verbose", ":", "print", "(", "\"Performing feature extraction on resized images...\"", ")", "# Encapsulates the work done by the MXNet model for a single batch", "def", "handle_request", "(", "batch", ")", ":", "model", ".", "forward", "(", "batch", ")", "mx_out", "=", "[", "array", ".", "array", "(", "'d'", ",", "m", ")", "for", "m", "in", "model", ".", "get_outputs", "(", ")", "[", "0", "]", ".", "asnumpy", "(", ")", "]", "if", "batch", ".", "pad", "!=", "0", ":", "# If batch size is not evenly divisible by the length, it will loop back around.", "# We don't want that.", "mx_out", "=", "mx_out", "[", ":", "-", "batch", ".", "pad", "]", "return", "mx_out", "# Copies the output from MXNet into the SArrayBuilder and emits progress", "def", "consume_response", "(", "mx_out", ")", ":", "out", ".", "append_multiple", "(", "mx_out", ")", "progress", "[", "'num_processed'", "]", "+=", "len", "(", "mx_out", ")", "if", "verbose", ":", "print", "(", "'Completed {num_processed:{width}d}/{total:{width}d}'", ".", "format", "(", "width", "=", "len", "(", "str", "(", "progress", "[", "'total'", "]", ")", ")", ",", "*", "*", "progress", ")", ")", "# Create a dedicated thread for performing MXNet work, using two FIFO", "# queues for communication back and forth with this thread, with the", "# goal of keeping MXNet busy throughout.", "request_queue", "=", "_Queue", "(", ")", "response_queue", "=", "_Queue", "(", ")", "def", "mx_worker", "(", ")", ":", "while", "True", ":", "batch", "=", "request_queue", ".", "get", "(", ")", "# Consume request", "if", "batch", "is", "None", ":", "# No more work remains. 
Allow the thread to finish.", "return", "response_queue", ".", "put", "(", "handle_request", "(", "batch", ")", ")", "# Produce response", "mx_worker_thread", "=", "_Thread", "(", "target", "=", "mx_worker", ")", "mx_worker_thread", ".", "start", "(", ")", "try", ":", "# Attempt to have two requests in progress at any one time (double", "# buffering), so that the iterator is creating one batch while MXNet", "# performs inference on the other.", "if", "dataIter", ".", "has_next", ":", "request_queue", ".", "put", "(", "next", "(", "dataIter", ")", ")", "# Produce request", "while", "dataIter", ".", "has_next", ":", "request_queue", ".", "put", "(", "next", "(", "dataIter", ")", ")", "# Produce request", "consume_response", "(", "response_queue", ".", "get", "(", ")", ")", "consume_response", "(", "response_queue", ".", "get", "(", ")", ")", "finally", ":", "# Tell the worker thread to shut down.", "request_queue", ".", "put", "(", "None", ")", "return", "out", ".", "close", "(", ")" ]
Parameters ---------- dataset: SFrame SFrame of images
[ "Parameters", "----------", "dataset", ":", "SFrame", "SFrame", "of", "images" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_image_feature_extractor.py#L106-L183
train
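The worker-thread pattern in `extract_features` is worth isolating. Below is a self-contained sketch of the same double-buffered pipeline, with `batches` and `run_inference` as hypothetical stand-ins for the SFrame iterator and the MXNet forward pass; error handling and the progress printing are omitted for brevity.

from queue import Queue
from threading import Thread

def double_buffered(batches, run_inference):
    # One worker keeps the model busy while the caller prepares the next
    # batch; requests and responses travel through FIFO queues, so results
    # come back in submission order.
    requests, responses = Queue(), Queue()

    def worker():
        while True:
            batch = requests.get()               # consume request
            if batch is None:                    # sentinel: no more work
                return
            responses.put(run_inference(batch))  # produce response

    Thread(target=worker).start()
    results = []
    it = iter(batches)
    first = next(it, None)
    try:
        if first is not None:
            requests.put(first)                  # prime the pipeline
        for batch in it:
            requests.put(batch)                  # keep two requests in flight
            results.append(responses.get())
        if first is not None:
            results.append(responses.get())      # drain the final response
    finally:
        requests.put(None)                       # tell the worker to shut down
    return results

For example, double_buffered(range(3), lambda b: b * 2) returns [0, 2, 4] while always keeping one batch queued ahead of the consumer, which is the point of the two initial puts in the record above.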
apple/turicreate
src/unity/python/turicreate/toolkits/_image_feature_extractor.py
MXFeatureExtractor.get_coreml_model
def get_coreml_model(self, mode = 'classifier'): """ Parameters ---------- mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. Returns ------- model: MLModel Return the underlying model. """ import mxnet as _mx from ._mxnet import _mxnet_utils from ._mxnet._mxnet_to_coreml import _mxnet_converter (sym, arg_params, aux_params) = self.ptModel.mxmodel fe_mxmodel = self.ptModel.mxmodel if self.ptModel.is_feature_layer_final: feature_layer_size = self.ptModel.feature_layer_size num_dummy_classes = 10 feature_layer_sym = sym.get_children()[0] fc_symbol = _mx.symbol.FullyConnected(feature_layer_sym, num_hidden=num_dummy_classes) prob = _mx.symbol.SoftmaxOutput(fc_symbol, name = sym.name, attr=sym.attr_dict()[sym.name]) arg_params['%s_weight' % fc_symbol.name] = _mx.ndarray.zeros((num_dummy_classes, feature_layer_size)) arg_params['%s_bias' % fc_symbol.name] = _mx.ndarray.zeros((num_dummy_classes)) fe_mxmodel = (prob, arg_params, aux_params) model = MXFeatureExtractor._get_mx_module(fe_mxmodel, self.data_layer, self.ptModel.output_layer, _mxnet_utils.get_mxnet_context(max_devices=1), self.image_shape, label_layer = self.ptModel.label_layer) preprocessor_args = {'image_input_names': [self.data_layer]} return _mxnet_converter.convert(model, mode = 'classifier', input_shape=[(self.data_layer, (1, ) + self.image_shape)], class_labels = list(map(str, range(self.ptModel.num_classes))), preprocessor_args = preprocessor_args, verbose = False)
python
def get_coreml_model(self, mode = 'classifier'): """ Parameters ---------- mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. Returns ------- model: MLModel Return the underlying model. """ import mxnet as _mx from ._mxnet import _mxnet_utils from ._mxnet._mxnet_to_coreml import _mxnet_converter (sym, arg_params, aux_params) = self.ptModel.mxmodel fe_mxmodel = self.ptModel.mxmodel if self.ptModel.is_feature_layer_final: feature_layer_size = self.ptModel.feature_layer_size num_dummy_classes = 10 feature_layer_sym = sym.get_children()[0] fc_symbol = _mx.symbol.FullyConnected(feature_layer_sym, num_hidden=num_dummy_classes) prob = _mx.symbol.SoftmaxOutput(fc_symbol, name = sym.name, attr=sym.attr_dict()[sym.name]) arg_params['%s_weight' % fc_symbol.name] = _mx.ndarray.zeros((num_dummy_classes, feature_layer_size)) arg_params['%s_bias' % fc_symbol.name] = _mx.ndarray.zeros((num_dummy_classes)) fe_mxmodel = (prob, arg_params, aux_params) model = MXFeatureExtractor._get_mx_module(fe_mxmodel, self.data_layer, self.ptModel.output_layer, _mxnet_utils.get_mxnet_context(max_devices=1), self.image_shape, label_layer = self.ptModel.label_layer) preprocessor_args = {'image_input_names': [self.data_layer]} return _mxnet_converter.convert(model, mode = 'classifier', input_shape=[(self.data_layer, (1, ) + self.image_shape)], class_labels = list(map(str, range(self.ptModel.num_classes))), preprocessor_args = preprocessor_args, verbose = False)
[ "def", "get_coreml_model", "(", "self", ",", "mode", "=", "'classifier'", ")", ":", "import", "mxnet", "as", "_mx", "from", ".", "_mxnet", "import", "_mxnet_utils", "from", ".", "_mxnet", ".", "_mxnet_to_coreml", "import", "_mxnet_converter", "(", "sym", ",", "arg_params", ",", "aux_params", ")", "=", "self", ".", "ptModel", ".", "mxmodel", "fe_mxmodel", "=", "self", ".", "ptModel", ".", "mxmodel", "if", "self", ".", "ptModel", ".", "is_feature_layer_final", ":", "feature_layer_size", "=", "self", ".", "ptModel", ".", "feature_layer_size", "num_dummy_classes", "=", "10", "feature_layer_sym", "=", "sym", ".", "get_children", "(", ")", "[", "0", "]", "fc_symbol", "=", "_mx", ".", "symbol", ".", "FullyConnected", "(", "feature_layer_sym", ",", "num_hidden", "=", "num_dummy_classes", ")", "prob", "=", "_mx", ".", "symbol", ".", "SoftmaxOutput", "(", "fc_symbol", ",", "name", "=", "sym", ".", "name", ",", "attr", "=", "sym", ".", "attr_dict", "(", ")", "[", "sym", ".", "name", "]", ")", "arg_params", "[", "'%s_weight'", "%", "fc_symbol", ".", "name", "]", "=", "_mx", ".", "ndarray", ".", "zeros", "(", "(", "num_dummy_classes", ",", "feature_layer_size", ")", ")", "arg_params", "[", "'%s_bias'", "%", "fc_symbol", ".", "name", "]", "=", "_mx", ".", "ndarray", ".", "zeros", "(", "(", "num_dummy_classes", ")", ")", "fe_mxmodel", "=", "(", "prob", ",", "arg_params", ",", "aux_params", ")", "model", "=", "MXFeatureExtractor", ".", "_get_mx_module", "(", "fe_mxmodel", ",", "self", ".", "data_layer", ",", "self", ".", "ptModel", ".", "output_layer", ",", "_mxnet_utils", ".", "get_mxnet_context", "(", "max_devices", "=", "1", ")", ",", "self", ".", "image_shape", ",", "label_layer", "=", "self", ".", "ptModel", ".", "label_layer", ")", "preprocessor_args", "=", "{", "'image_input_names'", ":", "[", "self", ".", "data_layer", "]", "}", "return", "_mxnet_converter", ".", "convert", "(", "model", ",", "mode", "=", "'classifier'", ",", "input_shape", "=", "[", "(", "self", ".", "data_layer", ",", "(", "1", ",", ")", "+", "self", ".", "image_shape", ")", "]", ",", "class_labels", "=", "list", "(", "map", "(", "str", ",", "range", "(", "self", ".", "ptModel", ".", "num_classes", ")", ")", ")", ",", "preprocessor_args", "=", "preprocessor_args", ",", "verbose", "=", "False", ")" ]
Parameters ---------- mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. Returns ------- model: MLModel Return the underlying model.
[ "Parameters", "----------", "mode", ":", "str", "(", "classifier", "regressor", "or", "None", ")", "Mode", "of", "the", "converted", "coreml", "model", ".", "When", "mode", "=", "classifier", "a", "NeuralNetworkClassifier", "spec", "will", "be", "constructed", ".", "When", "mode", "=", "regressor", "a", "NeuralNetworkRegressor", "spec", "will", "be", "constructed", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_image_feature_extractor.py#L185-L224
train
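A usage sketch, assuming `extractor` is an MXFeatureExtractor instance; `save` is the standard coremltools MLModel method rather than anything specific to this record:

# Export the wrapped network as a Core ML classifier spec and save it.
mlmodel = extractor.get_coreml_model(mode='classifier')
mlmodel.save('FeatureExtractor.mlmodel')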
apple/turicreate
src/unity/python/turicreate/toolkits/_pre_trained_models.py
DarkNetObjectDetectorBase.available_parameters_subset
def available_parameters_subset(self, mx_params): """ Takes an mxnet parameter collection (from Block.collect_params()) and subsets it with the parameters available in this base network. """ from copy import copy from collections import OrderedDict subset_params = copy(mx_params) subset_params._params = OrderedDict([ (k, v) for k, v in mx_params.items() if k in self.weight_names ]) return subset_params
python
def available_parameters_subset(self, mx_params): """ Takes an mxnet parameter collection (from Block.collect_params()) and subsets it with the parameters available in this base network. """ from copy import copy from collections import OrderedDict subset_params = copy(mx_params) subset_params._params = OrderedDict([ (k, v) for k, v in mx_params.items() if k in self.weight_names ]) return subset_params
[ "def", "available_parameters_subset", "(", "self", ",", "mx_params", ")", ":", "from", "copy", "import", "copy", "from", "collections", "import", "OrderedDict", "subset_params", "=", "copy", "(", "mx_params", ")", "subset_params", ".", "_params", "=", "OrderedDict", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "mx_params", ".", "items", "(", ")", "if", "k", "in", "self", ".", "weight_names", "]", ")", "return", "subset_params" ]
Takes an mxnet parameter collection (from Block.collect_params()) and subsets it with the parameters available in this base network.
[ "Takes", "an", "mxnet", "parameter", "collect", "(", "from", "Block", ".", "collect_params", "()", ")", "and", "subsets", "it", "with", "the", "parameters", "available", "in", "this", "base", "network", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_pre_trained_models.py#L166-L177
train
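The copy-then-filter idiom above, reduced to a plain mapping; `keep` stands in for `self.weight_names` and the names are made up for illustration:

from collections import OrderedDict

def subset_ordered(params, keep):
    # Preserve insertion order while dropping keys absent from `keep`.
    return OrderedDict((k, v) for k, v in params.items() if k in keep)

weights = OrderedDict([('conv0_weight', 1), ('dense0_weight', 2)])
print(subset_ordered(weights, {'conv0_weight'}))  # OrderedDict([('conv0_weight', 1)])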
apple/turicreate
src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py
_BOW_FEATURE_EXTRACTOR
def _BOW_FEATURE_EXTRACTOR(sf, target=None): """ Return an SFrame containing a bag of words representation of each column. """ if isinstance(sf, dict): out = _tc.SArray([sf]).unpack('') elif isinstance(sf, _tc.SFrame): out = sf.__copy__() else: raise ValueError("Unrecognized input to feature extractor.") for f in _get_str_columns(out): if target != f: out[f] = _tc.text_analytics.count_words(out[f]) return out
python
def _BOW_FEATURE_EXTRACTOR(sf, target=None): """ Return an SFrame containing a bag of words representation of each column. """ if isinstance(sf, dict): out = _tc.SArray([sf]).unpack('') elif isinstance(sf, _tc.SFrame): out = sf.__copy__() else: raise ValueError("Unrecognized input to feature extractor.") for f in _get_str_columns(out): if target != f: out[f] = _tc.text_analytics.count_words(out[f]) return out
[ "def", "_BOW_FEATURE_EXTRACTOR", "(", "sf", ",", "target", "=", "None", ")", ":", "if", "isinstance", "(", "sf", ",", "dict", ")", ":", "out", "=", "_tc", ".", "SArray", "(", "[", "sf", "]", ")", ".", "unpack", "(", "''", ")", "elif", "isinstance", "(", "sf", ",", "_tc", ".", "SFrame", ")", ":", "out", "=", "sf", ".", "__copy__", "(", ")", "else", ":", "raise", "ValueError", "(", "\"Unrecognized input to feature extractor.\"", ")", "for", "f", "in", "_get_str_columns", "(", "out", ")", ":", "if", "target", "!=", "f", ":", "out", "[", "f", "]", "=", "_tc", ".", "text_analytics", ".", "count_words", "(", "out", "[", "f", "]", ")", "return", "out" ]
Return an SFrame containing a bag of words representation of each column.
[ "Return", "an", "SFrame", "containing", "a", "bag", "of", "words", "representation", "of", "each", "column", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L18-L31
train
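A quick sketch of the extractor on a two-row SFrame; per the record, every str column except the target is replaced by a word-count dictionary:

import turicreate as tc

sf = tc.SFrame({'rating': ['pos', 'neg'], 'text': ['love it', 'hate it']})
bow = _BOW_FEATURE_EXTRACTOR(sf, target='rating')
print(bow['text'])  # e.g. [{'love': 1, 'it': 1}, {'hate': 1, 'it': 1}]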
apple/turicreate
src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py
create
def create(dataset, target, features = None, drop_stop_words = True, word_count_threshold = 2, method = 'auto', validation_set = 'auto', max_iterations = 10): """ Create a model that trains a classifier to classify text from a collection of documents. The model is a :class:`~turicreate.logistic_classifier.LogisticClassifier` model trained using a bag-of-words representation of the text dataset. Parameters ---------- dataset : SFrame Contains one or more columns of text data. This can be an unstructured text dataset, such as that appearing in forums, user-generated reviews, etc. target : str The column name containing class labels for each document. features : list[str], optional The column names of interest containing text data. Each provided column must be of str type. Defaults to using all columns of type str. drop_stop_words : bool, optional Ignore very common words, e.g., "the", "a", "is". For the complete list of stop words, see: `text_classifier.drop_words()`. word_count_threshold : int, optional Words which occur less often than this in the entire dataset will be ignored. method: str, optional Method to use for feature engineering and modeling. Currently only bag-of-words with a logistic classifier ('bow-logistic') is available. validation_set : SFrame, optional A dataset for monitoring the model's generalization performance. For each row of the progress table, the chosen metrics are computed for both the provided training dataset and the validation_set. The format of this SFrame must be the same as the training set. By default this argument is set to 'auto' and a validation set is automatically sampled and used for progress printing. If validation_set is set to None, then no additional metrics are computed. The default value is 'auto'. max_iterations : int, optional The maximum number of allowed passes through the data. More passes over the data can result in a more accurately trained model. Consider increasing this (the default value is 10) if the training accuracy is low and the *Grad-Norm* in the display is large. Returns ------- out : :class:`~TextClassifier` Examples -------- >>> import turicreate as tc >>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']}) >>> m = tc.text_classifier.create(dataset, 'rating', features=['text']) >>> m.predict(dataset) You may also evaluate predictions against known text scores. >>> metrics = m.evaluate(dataset) See Also -------- text_classifier.stop_words, text_classifier.drop_words """ _raise_error_if_not_sframe(dataset, "dataset") # Validate method. if method == 'auto': method = 'bow-logistic' if method not in ['bow-logistic']: raise ValueError("Unsupported method provided.") # Validate dataset if features is None: features = dataset.column_names() # Remove target column from list of feature columns. features = [f for f in features if f != target] # Process training set using the default feature extractor. feature_extractor = _BOW_FEATURE_EXTRACTOR train = feature_extractor(dataset, target) stop_words = None if drop_stop_words: stop_words = _text_analytics.stop_words() for cur_feature in features: train[cur_feature] = _text_analytics.drop_words(train[cur_feature], threshold = word_count_threshold, stop_words = stop_words) # Check for a validation set. if isinstance(validation_set, _tc.SFrame): validation_set = feature_extractor(validation_set, target) m = _tc.logistic_classifier.create(train, target=target, features=features, l2_penalty=.2, max_iterations=max_iterations, validation_set=validation_set) num_examples = len(dataset) model = TextClassifier() model.__proxy__.update( {'target': target, 'features': features, 'method': method, 'num_examples': num_examples, 'num_features': len(features), 'classifier': m}) return model
python
def create(dataset, target, features = None, drop_stop_words = True, word_count_threshold = 2, method = 'auto', validation_set = 'auto', max_iterations = 10): """ Create a model that trains a classifier to classify text from a collection of documents. The model is a :class:`~turicreate.logistic_classifier.LogisticClassifier` model trained using a bag-of-words representation of the text dataset. Parameters ---------- dataset : SFrame Contains one or more columns of text data. This can be an unstructured text dataset, such as that appearing in forums, user-generated reviews, etc. target : str The column name containing class labels for each document. features : list[str], optional The column names of interest containing text data. Each provided column must be of str type. Defaults to using all columns of type str. drop_stop_words : bool, optional Ignore very common words, e.g., "the", "a", "is". For the complete list of stop words, see: `text_classifier.drop_words()`. word_count_threshold : int, optional Words which occur less often than this in the entire dataset will be ignored. method: str, optional Method to use for feature engineering and modeling. Currently only bag-of-words with a logistic classifier ('bow-logistic') is available. validation_set : SFrame, optional A dataset for monitoring the model's generalization performance. For each row of the progress table, the chosen metrics are computed for both the provided training dataset and the validation_set. The format of this SFrame must be the same as the training set. By default this argument is set to 'auto' and a validation set is automatically sampled and used for progress printing. If validation_set is set to None, then no additional metrics are computed. The default value is 'auto'. max_iterations : int, optional The maximum number of allowed passes through the data. More passes over the data can result in a more accurately trained model. Consider increasing this (the default value is 10) if the training accuracy is low and the *Grad-Norm* in the display is large. Returns ------- out : :class:`~TextClassifier` Examples -------- >>> import turicreate as tc >>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']}) >>> m = tc.text_classifier.create(dataset, 'rating', features=['text']) >>> m.predict(dataset) You may also evaluate predictions against known text scores. >>> metrics = m.evaluate(dataset) See Also -------- text_classifier.stop_words, text_classifier.drop_words """ _raise_error_if_not_sframe(dataset, "dataset") # Validate method. if method == 'auto': method = 'bow-logistic' if method not in ['bow-logistic']: raise ValueError("Unsupported method provided.") # Validate dataset if features is None: features = dataset.column_names() # Remove target column from list of feature columns. features = [f for f in features if f != target] # Process training set using the default feature extractor. feature_extractor = _BOW_FEATURE_EXTRACTOR train = feature_extractor(dataset, target) stop_words = None if drop_stop_words: stop_words = _text_analytics.stop_words() for cur_feature in features: train[cur_feature] = _text_analytics.drop_words(train[cur_feature], threshold = word_count_threshold, stop_words = stop_words) # Check for a validation set. if isinstance(validation_set, _tc.SFrame): validation_set = feature_extractor(validation_set, target) m = _tc.logistic_classifier.create(train, target=target, features=features, l2_penalty=.2, max_iterations=max_iterations, validation_set=validation_set) num_examples = len(dataset) model = TextClassifier() model.__proxy__.update( {'target': target, 'features': features, 'method': method, 'num_examples': num_examples, 'num_features': len(features), 'classifier': m}) return model
[ "def", "create", "(", "dataset", ",", "target", ",", "features", "=", "None", ",", "drop_stop_words", "=", "True", ",", "word_count_threshold", "=", "2", ",", "method", "=", "'auto'", ",", "validation_set", "=", "'auto'", ",", "max_iterations", "=", "10", ")", ":", "_raise_error_if_not_sframe", "(", "dataset", ",", "\"dataset\"", ")", "# Validate method.", "if", "method", "==", "'auto'", ":", "method", "=", "'bow-logistic'", "if", "method", "not", "in", "[", "'bow-logistic'", "]", ":", "raise", "ValueError", "(", "\"Unsupported method provided.\"", ")", "# Validate dataset", "if", "features", "is", "None", ":", "features", "=", "dataset", ".", "column_names", "(", ")", "# Remove target column from list of feature columns.", "features", "=", "[", "f", "for", "f", "in", "features", "if", "f", "!=", "target", "]", "# Process training set using the default feature extractor.", "feature_extractor", "=", "_BOW_FEATURE_EXTRACTOR", "train", "=", "feature_extractor", "(", "dataset", ",", "target", ")", "stop_words", "=", "None", "if", "drop_stop_words", ":", "stop_words", "=", "_text_analytics", ".", "stop_words", "(", ")", "for", "cur_feature", "in", "features", ":", "train", "[", "cur_feature", "]", "=", "_text_analytics", ".", "drop_words", "(", "train", "[", "cur_feature", "]", ",", "threshold", "=", "word_count_threshold", ",", "stop_words", "=", "stop_words", ")", "# Check for a validation set.", "if", "isinstance", "(", "validation_set", ",", "_tc", ".", "SFrame", ")", ":", "validation_set", "=", "feature_extractor", "(", "validation_set", ",", "target", ")", "m", "=", "_tc", ".", "logistic_classifier", ".", "create", "(", "train", ",", "target", "=", "target", ",", "features", "=", "features", ",", "l2_penalty", "=", ".2", ",", "max_iterations", "=", "max_iterations", ",", "validation_set", "=", "validation_set", ")", "num_examples", "=", "len", "(", "dataset", ")", "model", "=", "TextClassifier", "(", ")", "model", ".", "__proxy__", ".", "update", "(", "{", "'target'", ":", "target", ",", "'features'", ":", "features", ",", "'method'", ":", "method", ",", "'num_examples'", ":", "num_examples", ",", "'num_features'", ":", "len", "(", "features", ")", ",", "'classifier'", ":", "m", "}", ")", "return", "model" ]
Create a model that trains a classifier to classify text from a
collection of documents. The model is a
:class:`~turicreate.logistic_classifier.LogisticClassifier` model trained
using a bag-of-words representation of the text dataset.

Parameters
----------
dataset : SFrame
  Contains one or more columns of text data. This can be an unstructured
  text dataset, such as that appearing in forums, user-generated reviews,
  etc.

target : str
  The column name containing class labels for each document.

features : list[str], optional
  The column names of interest containing text data. Each provided column
  must be str type. Defaults to using all columns of type str.

drop_stop_words : bool, optional
  Ignore very common words, e.g., "the", "a", "is". For the complete
  list of stop words, see: `text_classifier.drop_words()`.

word_count_threshold : int, optional
  Words that occur fewer than this many times in the entire dataset
  are ignored.

method : str, optional
  Method to use for feature engineering and modeling. Currently only a
  bag-of-words representation with a logistic classifier ('bow-logistic')
  is available.

validation_set : SFrame, optional
  A dataset for monitoring the model's generalization performance.
  For each row of the progress table, the chosen metrics are computed
  for both the provided training dataset and the validation_set. The
  format of this SFrame must be the same as the training set. By default
  this argument is set to 'auto' and a validation set is automatically
  sampled and used for progress printing. If validation_set is set to
  None, then no additional metrics are computed.

max_iterations : int, optional
  The maximum number of allowed passes through the data. More passes over
  the data can result in a more accurately trained model. Consider
  increasing this (the default value is 10) if the training accuracy is
  low and the *Grad-Norm* in the display is large.

Returns
-------
out : :class:`~TextClassifier`

Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})

>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> m.predict(dataset)

You may also evaluate predictions against known text scores.

>>> metrics = m.evaluate(dataset)

See Also
--------
text_classifier.stop_words, text_classifier.drop_words
[ "Create", "a", "model", "that", "trains", "a", "classifier", "to", "classify", "text", "from", "a", "collection", "of", "documents", ".", "The", "model", "is", "a", ":", "class", ":", "~turicreate", ".", "logistic_classifier", ".", "LogisticClassifier", "model", "trained", "using", "a", "bag", "-", "of", "-", "words", "representation", "of", "the", "text", "dataset", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L33-L151
train
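A minimal end-to-end usage sketch for the `create` pipeline above. The toy SFrame and parameter choices are invented for illustration, and a working turicreate install is assumed.

# Hedged sketch: train and apply a bag-of-words text classifier.
import turicreate as tc

docs = tc.SFrame({'rating': [1, 5, 1, 5],
                  'text': ['hate it', 'love it', 'awful stuff', 'great stuff']})
# word_count_threshold=1 keeps every token in this tiny corpus.
model = tc.text_classifier.create(docs, target='rating', features=['text'],
                                  word_count_threshold=1, max_iterations=5)
print(model.predict(docs))   # one class prediction per row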
apple/turicreate
src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py
_get_str_columns
def _get_str_columns(sf): """ Returns a list of names of columns that are string type. """ return [name for name in sf.column_names() if sf[name].dtype == str]
python
def _get_str_columns(sf): """ Returns a list of names of columns that are string type. """ return [name for name in sf.column_names() if sf[name].dtype == str]
[ "def", "_get_str_columns", "(", "sf", ")", ":", "return", "[", "name", "for", "name", "in", "sf", ".", "column_names", "(", ")", "if", "sf", "[", "name", "]", ".", "dtype", "==", "str", "]" ]
Returns a list of names of columns that are string type.
[ "Returns", "a", "list", "of", "names", "of", "columns", "that", "are", "string", "type", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L372-L376
train
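A small sketch of the selection logic in `_get_str_columns`, run against a toy SFrame; it mirrors the helper's list comprehension rather than calling the private function itself.

# Hedged sketch: pick out the str-typed columns of an SFrame.
import turicreate as tc

sf = tc.SFrame({'text': ['a b', 'c d'], 'rating': [1, 2]})
str_columns = [name for name in sf.column_names() if sf[name].dtype == str]
print(str_columns)   # expected: ['text']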
apple/turicreate
src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py
TextClassifier.predict
def predict(self, dataset, output_type='class'):
    """
    Return predictions for ``dataset``, using the trained model.

    Parameters
    ----------
    dataset : SFrame
      Dataset of new observations. Must include columns with the same
      names as the features used for model training, but does not require
      a target column. Additional columns are ignored.

    output_type : {'class', 'probability_vector'}, optional
      Form of the predictions, which is one of:

      - 'probability_vector': Prediction probability associated with each
        class as a vector. The probability of the first class (sorted
        alphanumerically by name of the class in the training set) is in
        position 0 of the vector, the second in position 1 and so on.
      - 'class': Class prediction. For multi-class classification, this
        returns the class with maximum probability.

    Returns
    -------
    out : SArray
      An SArray with model predictions.

    See Also
    --------
    create, evaluate, classify

    Examples
    --------
    >>> import turicreate as tc
    >>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
    >>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
    >>> m.predict(dataset)
    """
    m = self.__proxy__['classifier']
    target = self.__proxy__['target']
    f = _BOW_FEATURE_EXTRACTOR
    return m.predict(f(dataset, target), output_type=output_type)
python
def predict(self, dataset, output_type='class'):
    """
    Return predictions for ``dataset``, using the trained model.

    Parameters
    ----------
    dataset : SFrame
      Dataset of new observations. Must include columns with the same
      names as the features used for model training, but does not require
      a target column. Additional columns are ignored.

    output_type : {'class', 'probability_vector'}, optional
      Form of the predictions, which is one of:

      - 'probability_vector': Prediction probability associated with each
        class as a vector. The probability of the first class (sorted
        alphanumerically by name of the class in the training set) is in
        position 0 of the vector, the second in position 1 and so on.
      - 'class': Class prediction. For multi-class classification, this
        returns the class with maximum probability.

    Returns
    -------
    out : SArray
      An SArray with model predictions.

    See Also
    --------
    create, evaluate, classify

    Examples
    --------
    >>> import turicreate as tc
    >>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
    >>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
    >>> m.predict(dataset)
    """
    m = self.__proxy__['classifier']
    target = self.__proxy__['target']
    f = _BOW_FEATURE_EXTRACTOR
    return m.predict(f(dataset, target), output_type=output_type)
[ "def", "predict", "(", "self", ",", "dataset", ",", "output_type", "=", "'class'", ")", ":", "m", "=", "self", ".", "__proxy__", "[", "'classifier'", "]", "target", "=", "self", ".", "__proxy__", "[", "'target'", "]", "f", "=", "_BOW_FEATURE_EXTRACTOR", "return", "m", ".", "predict", "(", "f", "(", "dataset", ",", "target", ")", ",", "output_type", "=", "output_type", ")" ]
Return predictions for ``dataset``, using the trained model.

Parameters
----------
dataset : SFrame
  Dataset of new observations. Must include columns with the same
  names as the features used for model training, but does not require
  a target column. Additional columns are ignored.

output_type : {'class', 'probability_vector'}, optional
  Form of the predictions, which is one of:

  - 'probability_vector': Prediction probability associated with each
    class as a vector. The probability of the first class (sorted
    alphanumerically by name of the class in the training set) is in
    position 0 of the vector, the second in position 1 and so on.
  - 'class': Class prediction. For multi-class classification, this
    returns the class with maximum probability.

Returns
-------
out : SArray
  An SArray with model predictions.

See Also
--------
create, evaluate, classify

Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> m.predict(dataset)
[ "Return", "predictions", "for", "dataset", "using", "the", "trained", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L182-L224
train
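A follow-on to the docstring example above, requesting the probability-vector output instead of class labels; the toy data is illustrative.

# Hedged sketch: per-class probabilities instead of hard labels.
import turicreate as tc

dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
m = tc.text_classifier.create(dataset, 'rating', features=['text'])
probs = m.predict(dataset, output_type='probability_vector')
print(probs)   # one vector per row, classes sorted alphanumerically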
apple/turicreate
src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py
TextClassifier.classify
def classify(self, dataset):
    """
    Return a classification for each example in the ``dataset``, using the
    trained model. The output SFrame contains predictions both as class
    labels and as probabilities that the predicted value is the associated
    label.

    Parameters
    ----------
    dataset : SFrame
      Dataset of new observations. Must include columns with the same
      names as the features used for model training, but does not require
      a target column. Additional columns are ignored.

    Returns
    -------
    out : SFrame
      An SFrame with model predictions, i.e., class labels and probabilities.

    See Also
    --------
    create, evaluate, predict

    Examples
    --------
    >>> import turicreate as tc
    >>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
    >>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
    >>> output = m.classify(dataset)
    """
    m = self.__proxy__['classifier']
    target = self.__proxy__['target']
    f = _BOW_FEATURE_EXTRACTOR
    return m.classify(f(dataset, target))
python
def classify(self, dataset):
    """
    Return a classification for each example in the ``dataset``, using the
    trained model. The output SFrame contains predictions both as class
    labels and as probabilities that the predicted value is the associated
    label.

    Parameters
    ----------
    dataset : SFrame
      Dataset of new observations. Must include columns with the same
      names as the features used for model training, but does not require
      a target column. Additional columns are ignored.

    Returns
    -------
    out : SFrame
      An SFrame with model predictions, i.e., class labels and probabilities.

    See Also
    --------
    create, evaluate, predict

    Examples
    --------
    >>> import turicreate as tc
    >>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
    >>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
    >>> output = m.classify(dataset)
    """
    m = self.__proxy__['classifier']
    target = self.__proxy__['target']
    f = _BOW_FEATURE_EXTRACTOR
    return m.classify(f(dataset, target))
[ "def", "classify", "(", "self", ",", "dataset", ")", ":", "m", "=", "self", ".", "__proxy__", "[", "'classifier'", "]", "target", "=", "self", ".", "__proxy__", "[", "'target'", "]", "f", "=", "_BOW_FEATURE_EXTRACTOR", "return", "m", ".", "classify", "(", "f", "(", "dataset", ",", "target", ")", ")" ]
Return a classification for each example in the ``dataset``, using the
trained model. The output SFrame contains predictions both as class
labels and as probabilities that the predicted value is the associated
label.

Parameters
----------
dataset : SFrame
  Dataset of new observations. Must include columns with the same
  names as the features used for model training, but does not require
  a target column. Additional columns are ignored.

Returns
-------
out : SFrame
  An SFrame with model predictions, i.e., class labels and probabilities.

See Also
--------
create, evaluate, predict

Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> output = m.classify(dataset)
[ "Return", "a", "classification", "for", "each", "example", "in", "the", "dataset", "using", "the", "trained", "model", ".", "The", "output", "SFrame", "contains", "predictions", "as", "both", "class", "labels", "as", "well", "as", "probabilities", "that", "the", "predicted", "value", "is", "the", "associated", "label", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L226-L260
train
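A short sketch of `classify`, which bundles labels and probabilities into a single SFrame; the data is the same toy example used in the docstring.

# Hedged sketch: labels and probabilities in one call.
import turicreate as tc

dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
m = tc.text_classifier.create(dataset, 'rating', features=['text'])
output = m.classify(dataset)
print(output.column_names())   # per the docstring: class labels plus probabilities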
apple/turicreate
src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py
TextClassifier.evaluate
def evaluate(self, dataset, metric='auto', **kwargs): """ Evaluate the model by making predictions of target values and comparing these to actual values. Parameters ---------- dataset : SFrame An SFrame having the same feature columns as provided when creating the model. metric : str, optional Name of the evaluation metric. Possible values are: - 'auto' : Returns all available metrics. - 'accuracy' : Classification accuracy (micro average). - 'auc' : Area under the ROC curve (macro average) - 'precision' : Precision score (macro average) - 'recall' : Recall score (macro average) - 'f1_score' : F1 score (macro average) - 'log_loss' : Log loss - 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations. - 'roc_curve' : An SFrame containing information needed for an ROC curve For more flexibility in calculating evaluation metrics, use the :class:`~turicreate.evaluation` module. Returns ------- out : dict Dictionary of evaluation results where the key is the name of the evaluation metric (e.g. `accuracy`) and the value is the evaluation score. See Also ---------- create, predict, classify """ m = self.__proxy__['classifier'] target = self.__proxy__['target'] f = _BOW_FEATURE_EXTRACTOR test = f(dataset, target) return m.evaluate(test, metric, **kwargs)
python
def evaluate(self, dataset, metric='auto', **kwargs): """ Evaluate the model by making predictions of target values and comparing these to actual values. Parameters ---------- dataset : SFrame An SFrame having the same feature columns as provided when creating the model. metric : str, optional Name of the evaluation metric. Possible values are: - 'auto' : Returns all available metrics. - 'accuracy' : Classification accuracy (micro average). - 'auc' : Area under the ROC curve (macro average) - 'precision' : Precision score (macro average) - 'recall' : Recall score (macro average) - 'f1_score' : F1 score (macro average) - 'log_loss' : Log loss - 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations. - 'roc_curve' : An SFrame containing information needed for an ROC curve For more flexibility in calculating evaluation metrics, use the :class:`~turicreate.evaluation` module. Returns ------- out : dict Dictionary of evaluation results where the key is the name of the evaluation metric (e.g. `accuracy`) and the value is the evaluation score. See Also ---------- create, predict, classify """ m = self.__proxy__['classifier'] target = self.__proxy__['target'] f = _BOW_FEATURE_EXTRACTOR test = f(dataset, target) return m.evaluate(test, metric, **kwargs)
[ "def", "evaluate", "(", "self", ",", "dataset", ",", "metric", "=", "'auto'", ",", "*", "*", "kwargs", ")", ":", "m", "=", "self", ".", "__proxy__", "[", "'classifier'", "]", "target", "=", "self", ".", "__proxy__", "[", "'target'", "]", "f", "=", "_BOW_FEATURE_EXTRACTOR", "test", "=", "f", "(", "dataset", ",", "target", ")", "return", "m", ".", "evaluate", "(", "test", ",", "metric", ",", "*", "*", "kwargs", ")" ]
Evaluate the model by making predictions of target values and comparing these to actual values. Parameters ---------- dataset : SFrame An SFrame having the same feature columns as provided when creating the model. metric : str, optional Name of the evaluation metric. Possible values are: - 'auto' : Returns all available metrics. - 'accuracy' : Classification accuracy (micro average). - 'auc' : Area under the ROC curve (macro average) - 'precision' : Precision score (macro average) - 'recall' : Recall score (macro average) - 'f1_score' : F1 score (macro average) - 'log_loss' : Log loss - 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations. - 'roc_curve' : An SFrame containing information needed for an ROC curve For more flexibility in calculating evaluation metrics, use the :class:`~turicreate.evaluation` module. Returns ------- out : dict Dictionary of evaluation results where the key is the name of the evaluation metric (e.g. `accuracy`) and the value is the evaluation score. See Also ---------- create, predict, classify
[ "Evaluate", "the", "model", "by", "making", "predictions", "of", "target", "values", "and", "comparing", "these", "to", "actual", "values", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L289-L332
train
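A sketch of pulling a single metric out of `evaluate`'s result dictionary; per the docstring, the returned dict is keyed by metric name.

# Hedged sketch: request one metric and read it back by name.
import turicreate as tc

dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
m = tc.text_classifier.create(dataset, 'rating', features=['text'])
metrics = m.evaluate(dataset, metric='accuracy')
print(metrics['accuracy'])   # key matches the requested metric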
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_SVR.py
_generate_base_svm_regression_spec
def _generate_base_svm_regression_spec(model):
    """
    Takes an SVM regression model and produces a starting spec using the
    parts that are shared between all SVMs.
    """
    if not(_HAS_SKLEARN):
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')

    spec = _Model_pb2.Model()
    spec.specificationVersion = SPECIFICATION_VERSION
    svm = spec.supportVectorRegressor

    _set_kernel(model, svm)

    svm.rho = -model.intercept_[0]
    for i in range(len(model._dual_coef_)):
        for cur_alpha in model._dual_coef_[i]:
            svm.coefficients.alpha.append(cur_alpha)

    for cur_src_vector in model.support_vectors_:
        cur_dest_vector = svm.denseSupportVectors.vectors.add()
        for i in cur_src_vector:
            cur_dest_vector.values.append(i)
    return spec
python
def _generate_base_svm_regression_spec(model):
    """
    Takes an SVM regression model and produces a starting spec using the
    parts that are shared between all SVMs.
    """
    if not(_HAS_SKLEARN):
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')

    spec = _Model_pb2.Model()
    spec.specificationVersion = SPECIFICATION_VERSION
    svm = spec.supportVectorRegressor

    _set_kernel(model, svm)

    svm.rho = -model.intercept_[0]
    for i in range(len(model._dual_coef_)):
        for cur_alpha in model._dual_coef_[i]:
            svm.coefficients.alpha.append(cur_alpha)

    for cur_src_vector in model.support_vectors_:
        cur_dest_vector = svm.denseSupportVectors.vectors.add()
        for i in cur_src_vector:
            cur_dest_vector.values.append(i)
    return spec
[ "def", "_generate_base_svm_regression_spec", "(", "model", ")", ":", "if", "not", "(", "_HAS_SKLEARN", ")", ":", "raise", "RuntimeError", "(", "'scikit-learn not found. scikit-learn conversion API is disabled.'", ")", "spec", "=", "_Model_pb2", ".", "Model", "(", ")", "spec", ".", "specificationVersion", "=", "SPECIFICATION_VERSION", "svm", "=", "spec", ".", "supportVectorRegressor", "_set_kernel", "(", "model", ",", "svm", ")", "svm", ".", "rho", "=", "-", "model", ".", "intercept_", "[", "0", "]", "for", "i", "in", "range", "(", "len", "(", "model", ".", "_dual_coef_", ")", ")", ":", "for", "cur_alpha", "in", "model", ".", "_dual_coef_", "[", "i", "]", ":", "svm", ".", "coefficients", ".", "alpha", ".", "append", "(", "cur_alpha", ")", "for", "cur_src_vector", "in", "model", ".", "support_vectors_", ":", "cur_dest_vector", "=", "svm", ".", "denseSupportVectors", ".", "vectors", ".", "add", "(", ")", "for", "i", "in", "cur_src_vector", ":", "cur_dest_vector", ".", "values", ".", "append", "(", "i", ")", "return", "spec" ]
Takes an SVM regression model and produces a starting spec using the
parts that are shared between all SVMs.
[ "Takes", "an", "SVM", "regression", "model", "produces", "a", "starting", "spec", "using", "the", "parts", ".", "that", "are", "shared", "between", "all", "SVMs", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_SVR.py#L23-L46
train
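A sketch of the fitted scikit-learn attributes the converter above reads; the random data is purely illustrative, and only scikit-learn is assumed.

# Hedged sketch: the sklearn SVR attributes mapped into the protobuf spec.
import numpy as np
from sklearn.svm import SVR

X, y = np.random.rand(20, 3), np.random.rand(20)
model = SVR(kernel='rbf').fit(X, y)

rho = -model.intercept_[0]               # written to svm.rho
alphas = list(model._dual_coef_[0])      # appended to svm.coefficients.alpha
n_support = len(model.support_vectors_)  # each row becomes a dense support vector
print(rho, len(alphas), n_support)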
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_SVR.py
convert
def convert(model, features, target):
    """Convert a Support Vector Regressor (SVR) model to the protobuf spec.

    Parameters
    ----------
    model: SVR
        A trained SVR model.

    features: [str]
        Name of the input columns.

    target: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model.
    """
    spec = _generate_base_svm_regression_spec(model)
    spec = set_regressor_interface_params(spec, features, target)
    return _MLModel(spec)
python
def convert(model, features, target):
    """Convert a Support Vector Regressor (SVR) model to the protobuf spec.

    Parameters
    ----------
    model: SVR
        A trained SVR model.

    features: [str]
        Name of the input columns.

    target: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model.
    """
    spec = _generate_base_svm_regression_spec(model)
    spec = set_regressor_interface_params(spec, features, target)
    return _MLModel(spec)
[ "def", "convert", "(", "model", ",", "features", ",", "target", ")", ":", "spec", "=", "_generate_base_svm_regression_spec", "(", "model", ")", "spec", "=", "set_regressor_interface_params", "(", "spec", ",", "features", ",", "target", ")", "return", "_MLModel", "(", "spec", ")" ]
Convert a Support Vector Regressor (SVR) model to the protobuf spec.

Parameters
----------
model: SVR
    A trained SVR model.

features: [str]
    Name of the input columns.

target: str
    Name of the output column.

Returns
-------
model_spec: An object of type Model_pb.
    Protobuf representation of the model.
[ "Convert", "a", "Support", "Vector", "Regressor", "(", "SVR", ")", "model", "to", "the", "protobuf", "spec", ".", "Parameters", "----------", "model", ":", "SVR", "A", "trained", "SVR", "encoder", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_SVR.py#L48-L68
train
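A sketch of invoking this converter through coremltools' public sklearn entry point, which is assumed to dispatch to the internal `convert` shown above; the random data is illustrative.

# Hedged sketch: sklearn SVR -> Core ML MLModel (assumes coremltools + sklearn).
import numpy as np
from sklearn.svm import SVR
import coremltools

X, y = np.random.rand(20, 3), np.random.rand(20)
mlmodel = coremltools.converters.sklearn.convert(SVR().fit(X, y))
print(mlmodel)   # MLModel wrapping the generated protobuf spec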
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_VerifyExtensionHandle
def _VerifyExtensionHandle(message, extension_handle): """Verify that the given extension handle is valid.""" if not isinstance(extension_handle, _FieldDescriptor): raise KeyError('HasExtension() expects an extension handle, got: %s' % extension_handle) if not extension_handle.is_extension: raise KeyError('"%s" is not an extension.' % extension_handle.full_name) if not extension_handle.containing_type: raise KeyError('"%s" is missing a containing_type.' % extension_handle.full_name) if extension_handle.containing_type is not message.DESCRIPTOR: raise KeyError('Extension "%s" extends message type "%s", but this ' 'message is of type "%s".' % (extension_handle.full_name, extension_handle.containing_type.full_name, message.DESCRIPTOR.full_name))
python
def _VerifyExtensionHandle(message, extension_handle): """Verify that the given extension handle is valid.""" if not isinstance(extension_handle, _FieldDescriptor): raise KeyError('HasExtension() expects an extension handle, got: %s' % extension_handle) if not extension_handle.is_extension: raise KeyError('"%s" is not an extension.' % extension_handle.full_name) if not extension_handle.containing_type: raise KeyError('"%s" is missing a containing_type.' % extension_handle.full_name) if extension_handle.containing_type is not message.DESCRIPTOR: raise KeyError('Extension "%s" extends message type "%s", but this ' 'message is of type "%s".' % (extension_handle.full_name, extension_handle.containing_type.full_name, message.DESCRIPTOR.full_name))
[ "def", "_VerifyExtensionHandle", "(", "message", ",", "extension_handle", ")", ":", "if", "not", "isinstance", "(", "extension_handle", ",", "_FieldDescriptor", ")", ":", "raise", "KeyError", "(", "'HasExtension() expects an extension handle, got: %s'", "%", "extension_handle", ")", "if", "not", "extension_handle", ".", "is_extension", ":", "raise", "KeyError", "(", "'\"%s\" is not an extension.'", "%", "extension_handle", ".", "full_name", ")", "if", "not", "extension_handle", ".", "containing_type", ":", "raise", "KeyError", "(", "'\"%s\" is missing a containing_type.'", "%", "extension_handle", ".", "full_name", ")", "if", "extension_handle", ".", "containing_type", "is", "not", "message", ".", "DESCRIPTOR", ":", "raise", "KeyError", "(", "'Extension \"%s\" extends message type \"%s\", but this '", "'message is of type \"%s\".'", "%", "(", "extension_handle", ".", "full_name", ",", "extension_handle", ".", "containing_type", ".", "full_name", ",", "message", ".", "DESCRIPTOR", ".", "full_name", ")", ")" ]
Verify that the given extension handle is valid.
[ "Verify", "that", "the", "given", "extension", "handle", "is", "valid", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L213-L232
train
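A standalone mimic of the handle checks above, using stand-in objects so no compiled protos are required; the class and names here are invented for illustration.

# Hedged sketch: rejecting a non-extension handle, as _VerifyExtensionHandle does.
class FakeHandle(object):
    is_extension = False
    full_name = 'example.fake_ext'

def verify(handle):
    if not handle.is_extension:
        raise KeyError('"%s" is not an extension.' % handle.full_name)

try:
    verify(FakeHandle())
except KeyError as e:
    print(e)   # KeyError carrying the explanatory message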
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddEnumValues
def _AddEnumValues(descriptor, cls):
  """Sets class-level attributes for all enum fields defined in this message.

  It also exports a class-level object that can name enum values.

  Args:
    descriptor: Descriptor object for this message type.
    cls: Class we're constructing for this message type.
  """
  for enum_type in descriptor.enum_types:
    setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
    for enum_value in enum_type.values:
      setattr(cls, enum_value.name, enum_value.number)
python
def _AddEnumValues(descriptor, cls):
  """Sets class-level attributes for all enum fields defined in this message.

  It also exports a class-level object that can name enum values.

  Args:
    descriptor: Descriptor object for this message type.
    cls: Class we're constructing for this message type.
  """
  for enum_type in descriptor.enum_types:
    setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
    for enum_value in enum_type.values:
      setattr(cls, enum_value.name, enum_value.number)
[ "def", "_AddEnumValues", "(", "descriptor", ",", "cls", ")", ":", "for", "enum_type", "in", "descriptor", ".", "enum_types", ":", "setattr", "(", "cls", ",", "enum_type", ".", "name", ",", "enum_type_wrapper", ".", "EnumTypeWrapper", "(", "enum_type", ")", ")", "for", "enum_value", "in", "enum_type", ".", "values", ":", "setattr", "(", "cls", ",", "enum_value", ".", "name", ",", "enum_value", ".", "number", ")" ]
Sets class-level attributes for all enum fields defined in this message.

It also exports a class-level object that can name enum values.

Args:
  descriptor: Descriptor object for this message type.
  cls: Class we're constructing for this message type.
[ "Sets", "class", "-", "level", "attributes", "for", "all", "enum", "fields", "defined", "in", "this", "message", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L347-L359
train
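A standalone mimic of `_AddEnumValues`' effect: enum labels become class-level integer attributes. The values below are toys; in the real code they come from enum descriptors.

# Hedged sketch: enum names attached directly to the message class.
class Msg(object):
    pass

for name, number in [('RED', 0), ('GREEN', 1), ('BLUE', 2)]:
    setattr(Msg, name, number)

assert Msg.GREEN == 1
print(Msg.RED, Msg.BLUE)   # 0 2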
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_DefaultValueConstructorForField
def _DefaultValueConstructorForField(field): """Returns a function which returns a default value for a field. Args: field: FieldDescriptor object for this field. The returned function has one argument: message: Message instance containing this field, or a weakref proxy of same. That function in turn returns a default value for this field. The default value may refer back to |message| via a weak reference. """ if _IsMapField(field): return _GetInitializeDefaultForMap(field) if field.label == _FieldDescriptor.LABEL_REPEATED: if field.has_default_value and field.default_value != []: raise ValueError('Repeated field default value not empty list: %s' % ( field.default_value)) if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # We can't look at _concrete_class yet since it might not have # been set. (Depends on order in which we initialize the classes). message_type = field.message_type def MakeRepeatedMessageDefault(message): return containers.RepeatedCompositeFieldContainer( message._listener_for_children, field.message_type) return MakeRepeatedMessageDefault else: type_checker = type_checkers.GetTypeChecker(field) def MakeRepeatedScalarDefault(message): return containers.RepeatedScalarFieldContainer( message._listener_for_children, type_checker) return MakeRepeatedScalarDefault if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # _concrete_class may not yet be initialized. message_type = field.message_type def MakeSubMessageDefault(message): result = message_type._concrete_class() result._SetListener( _OneofListener(message, field) if field.containing_oneof is not None else message._listener_for_children) return result return MakeSubMessageDefault def MakeScalarDefault(message): # TODO(protobuf-team): This may be broken since there may not be # default_value. Combine with has_default_value somehow. return field.default_value return MakeScalarDefault
python
def _DefaultValueConstructorForField(field): """Returns a function which returns a default value for a field. Args: field: FieldDescriptor object for this field. The returned function has one argument: message: Message instance containing this field, or a weakref proxy of same. That function in turn returns a default value for this field. The default value may refer back to |message| via a weak reference. """ if _IsMapField(field): return _GetInitializeDefaultForMap(field) if field.label == _FieldDescriptor.LABEL_REPEATED: if field.has_default_value and field.default_value != []: raise ValueError('Repeated field default value not empty list: %s' % ( field.default_value)) if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # We can't look at _concrete_class yet since it might not have # been set. (Depends on order in which we initialize the classes). message_type = field.message_type def MakeRepeatedMessageDefault(message): return containers.RepeatedCompositeFieldContainer( message._listener_for_children, field.message_type) return MakeRepeatedMessageDefault else: type_checker = type_checkers.GetTypeChecker(field) def MakeRepeatedScalarDefault(message): return containers.RepeatedScalarFieldContainer( message._listener_for_children, type_checker) return MakeRepeatedScalarDefault if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # _concrete_class may not yet be initialized. message_type = field.message_type def MakeSubMessageDefault(message): result = message_type._concrete_class() result._SetListener( _OneofListener(message, field) if field.containing_oneof is not None else message._listener_for_children) return result return MakeSubMessageDefault def MakeScalarDefault(message): # TODO(protobuf-team): This may be broken since there may not be # default_value. Combine with has_default_value somehow. return field.default_value return MakeScalarDefault
[ "def", "_DefaultValueConstructorForField", "(", "field", ")", ":", "if", "_IsMapField", "(", "field", ")", ":", "return", "_GetInitializeDefaultForMap", "(", "field", ")", "if", "field", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "if", "field", ".", "has_default_value", "and", "field", ".", "default_value", "!=", "[", "]", ":", "raise", "ValueError", "(", "'Repeated field default value not empty list: %s'", "%", "(", "field", ".", "default_value", ")", ")", "if", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "# We can't look at _concrete_class yet since it might not have", "# been set. (Depends on order in which we initialize the classes).", "message_type", "=", "field", ".", "message_type", "def", "MakeRepeatedMessageDefault", "(", "message", ")", ":", "return", "containers", ".", "RepeatedCompositeFieldContainer", "(", "message", ".", "_listener_for_children", ",", "field", ".", "message_type", ")", "return", "MakeRepeatedMessageDefault", "else", ":", "type_checker", "=", "type_checkers", ".", "GetTypeChecker", "(", "field", ")", "def", "MakeRepeatedScalarDefault", "(", "message", ")", ":", "return", "containers", ".", "RepeatedScalarFieldContainer", "(", "message", ".", "_listener_for_children", ",", "type_checker", ")", "return", "MakeRepeatedScalarDefault", "if", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "# _concrete_class may not yet be initialized.", "message_type", "=", "field", ".", "message_type", "def", "MakeSubMessageDefault", "(", "message", ")", ":", "result", "=", "message_type", ".", "_concrete_class", "(", ")", "result", ".", "_SetListener", "(", "_OneofListener", "(", "message", ",", "field", ")", "if", "field", ".", "containing_oneof", "is", "not", "None", "else", "message", ".", "_listener_for_children", ")", "return", "result", "return", "MakeSubMessageDefault", "def", "MakeScalarDefault", "(", "message", ")", ":", "# TODO(protobuf-team): This may be broken since there may not be", "# default_value. Combine with has_default_value somehow.", "return", "field", ".", "default_value", "return", "MakeScalarDefault" ]
Returns a function which returns a default value for a field. Args: field: FieldDescriptor object for this field. The returned function has one argument: message: Message instance containing this field, or a weakref proxy of same. That function in turn returns a default value for this field. The default value may refer back to |message| via a weak reference.
[ "Returns", "a", "function", "which", "returns", "a", "default", "value", "for", "a", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L384-L436
train
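A sketch of the closure pattern above: a factory is built once per field descriptor, then invoked once per message instance to get a fresh default container. The field table is a stand-in, not a real descriptor.

# Hedged sketch: per-field default constructors as closures.
def default_constructor_for(default_value):
    def make_default(message):       # |message| kept for parity with the real API
        return list(default_value)   # fresh container per message instance
    return make_default

ctor = default_constructor_for([])
a, b = ctor(object()), ctor(object())
assert a is not b                    # each message gets its own container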
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_ReraiseTypeErrorWithFieldName
def _ReraiseTypeErrorWithFieldName(message_name, field_name): """Re-raise the currently-handled TypeError with the field name added.""" exc = sys.exc_info()[1] if len(exc.args) == 1 and type(exc) is TypeError: # simple TypeError; add field name to exception message exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) # re-raise possibly-amended exception with original traceback: six.reraise(type(exc), exc, sys.exc_info()[2])
python
def _ReraiseTypeErrorWithFieldName(message_name, field_name): """Re-raise the currently-handled TypeError with the field name added.""" exc = sys.exc_info()[1] if len(exc.args) == 1 and type(exc) is TypeError: # simple TypeError; add field name to exception message exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) # re-raise possibly-amended exception with original traceback: six.reraise(type(exc), exc, sys.exc_info()[2])
[ "def", "_ReraiseTypeErrorWithFieldName", "(", "message_name", ",", "field_name", ")", ":", "exc", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "len", "(", "exc", ".", "args", ")", "==", "1", "and", "type", "(", "exc", ")", "is", "TypeError", ":", "# simple TypeError; add field name to exception message", "exc", "=", "TypeError", "(", "'%s for field %s.%s'", "%", "(", "str", "(", "exc", ")", ",", "message_name", ",", "field_name", ")", ")", "# re-raise possibly-amended exception with original traceback:", "six", ".", "reraise", "(", "type", "(", "exc", ")", ",", "exc", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")" ]
Re-raise the currently-handled TypeError with the field name added.
[ "Re", "-", "raise", "the", "currently", "-", "handled", "TypeError", "with", "the", "field", "name", "added", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L439-L447
train
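A runnable sketch of the re-raise-with-context pattern above; it uses six, as the original does, and the message/field names are invented.

# Hedged sketch: amend a TypeError's message, then re-raise with its traceback.
import sys
import six

def reraise_with_field(message_name, field_name):
    exc = sys.exc_info()[1]
    if len(exc.args) == 1 and type(exc) is TypeError:
        exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))
    six.reraise(type(exc), exc, sys.exc_info()[2])

try:
    try:
        len(3)                         # raises a one-argument TypeError
    except TypeError:
        reraise_with_field('Msg', 'payload')
except TypeError as e:
    print(e)                           # original message, suffixed with Msg.payload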
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddInitMethod
def _AddInitMethod(message_descriptor, cls): """Adds an __init__ method to cls.""" def _GetIntegerEnumValue(enum_type, value): """Convert a string or integer enum value to an integer. If the value is a string, it is converted to the enum value in enum_type with the same name. If the value is not a string, it's returned as-is. (No conversion or bounds-checking is done.) """ if isinstance(value, six.string_types): try: return enum_type.values_by_name[value].number except KeyError: raise ValueError('Enum type %s: unknown label "%s"' % ( enum_type.full_name, value)) return value def init(self, **kwargs): self._cached_byte_size = 0 self._cached_byte_size_dirty = len(kwargs) > 0 self._fields = {} # Contains a mapping from oneof field descriptors to the descriptor # of the currently set field in that oneof field. self._oneofs = {} # _unknown_fields is () when empty for efficiency, and will be turned into # a list if fields are added. self._unknown_fields = () self._is_present_in_parent = False self._listener = message_listener_mod.NullMessageListener() self._listener_for_children = _Listener(self) for field_name, field_value in kwargs.items(): field = _GetFieldByName(message_descriptor, field_name) if field is None: raise TypeError("%s() got an unexpected keyword argument '%s'" % (message_descriptor.name, field_name)) if field_value is None: # field=None is the same as no field at all. continue if field.label == _FieldDescriptor.LABEL_REPEATED: copy = field._default_constructor(self) if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite if _IsMapField(field): if _IsMessageMapField(field): for key in field_value: copy[key].MergeFrom(field_value[key]) else: copy.update(field_value) else: for val in field_value: if isinstance(val, dict): copy.add(**val) else: copy.add().MergeFrom(val) else: # Scalar if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: field_value = [_GetIntegerEnumValue(field.enum_type, val) for val in field_value] copy.extend(field_value) self._fields[field] = copy elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: copy = field._default_constructor(self) new_val = field_value if isinstance(field_value, dict): new_val = field.message_type._concrete_class(**field_value) try: copy.MergeFrom(new_val) except TypeError: _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) self._fields[field] = copy else: if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: field_value = _GetIntegerEnumValue(field.enum_type, field_value) try: setattr(self, field_name, field_value) except TypeError: _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) init.__module__ = None init.__doc__ = None cls.__init__ = init
python
def _AddInitMethod(message_descriptor, cls): """Adds an __init__ method to cls.""" def _GetIntegerEnumValue(enum_type, value): """Convert a string or integer enum value to an integer. If the value is a string, it is converted to the enum value in enum_type with the same name. If the value is not a string, it's returned as-is. (No conversion or bounds-checking is done.) """ if isinstance(value, six.string_types): try: return enum_type.values_by_name[value].number except KeyError: raise ValueError('Enum type %s: unknown label "%s"' % ( enum_type.full_name, value)) return value def init(self, **kwargs): self._cached_byte_size = 0 self._cached_byte_size_dirty = len(kwargs) > 0 self._fields = {} # Contains a mapping from oneof field descriptors to the descriptor # of the currently set field in that oneof field. self._oneofs = {} # _unknown_fields is () when empty for efficiency, and will be turned into # a list if fields are added. self._unknown_fields = () self._is_present_in_parent = False self._listener = message_listener_mod.NullMessageListener() self._listener_for_children = _Listener(self) for field_name, field_value in kwargs.items(): field = _GetFieldByName(message_descriptor, field_name) if field is None: raise TypeError("%s() got an unexpected keyword argument '%s'" % (message_descriptor.name, field_name)) if field_value is None: # field=None is the same as no field at all. continue if field.label == _FieldDescriptor.LABEL_REPEATED: copy = field._default_constructor(self) if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: # Composite if _IsMapField(field): if _IsMessageMapField(field): for key in field_value: copy[key].MergeFrom(field_value[key]) else: copy.update(field_value) else: for val in field_value: if isinstance(val, dict): copy.add(**val) else: copy.add().MergeFrom(val) else: # Scalar if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: field_value = [_GetIntegerEnumValue(field.enum_type, val) for val in field_value] copy.extend(field_value) self._fields[field] = copy elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: copy = field._default_constructor(self) new_val = field_value if isinstance(field_value, dict): new_val = field.message_type._concrete_class(**field_value) try: copy.MergeFrom(new_val) except TypeError: _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) self._fields[field] = copy else: if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM: field_value = _GetIntegerEnumValue(field.enum_type, field_value) try: setattr(self, field_name, field_value) except TypeError: _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name) init.__module__ = None init.__doc__ = None cls.__init__ = init
[ "def", "_AddInitMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "_GetIntegerEnumValue", "(", "enum_type", ",", "value", ")", ":", "\"\"\"Convert a string or integer enum value to an integer.\n\n If the value is a string, it is converted to the enum value in\n enum_type with the same name. If the value is not a string, it's\n returned as-is. (No conversion or bounds-checking is done.)\n \"\"\"", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "try", ":", "return", "enum_type", ".", "values_by_name", "[", "value", "]", ".", "number", "except", "KeyError", ":", "raise", "ValueError", "(", "'Enum type %s: unknown label \"%s\"'", "%", "(", "enum_type", ".", "full_name", ",", "value", ")", ")", "return", "value", "def", "init", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_cached_byte_size", "=", "0", "self", ".", "_cached_byte_size_dirty", "=", "len", "(", "kwargs", ")", ">", "0", "self", ".", "_fields", "=", "{", "}", "# Contains a mapping from oneof field descriptors to the descriptor", "# of the currently set field in that oneof field.", "self", ".", "_oneofs", "=", "{", "}", "# _unknown_fields is () when empty for efficiency, and will be turned into", "# a list if fields are added.", "self", ".", "_unknown_fields", "=", "(", ")", "self", ".", "_is_present_in_parent", "=", "False", "self", ".", "_listener", "=", "message_listener_mod", ".", "NullMessageListener", "(", ")", "self", ".", "_listener_for_children", "=", "_Listener", "(", "self", ")", "for", "field_name", ",", "field_value", "in", "kwargs", ".", "items", "(", ")", ":", "field", "=", "_GetFieldByName", "(", "message_descriptor", ",", "field_name", ")", "if", "field", "is", "None", ":", "raise", "TypeError", "(", "\"%s() got an unexpected keyword argument '%s'\"", "%", "(", "message_descriptor", ".", "name", ",", "field_name", ")", ")", "if", "field_value", "is", "None", ":", "# field=None is the same as no field at all.", "continue", "if", "field", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "copy", "=", "field", ".", "_default_constructor", "(", "self", ")", "if", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "# Composite", "if", "_IsMapField", "(", "field", ")", ":", "if", "_IsMessageMapField", "(", "field", ")", ":", "for", "key", "in", "field_value", ":", "copy", "[", "key", "]", ".", "MergeFrom", "(", "field_value", "[", "key", "]", ")", "else", ":", "copy", ".", "update", "(", "field_value", ")", "else", ":", "for", "val", "in", "field_value", ":", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "copy", ".", "add", "(", "*", "*", "val", ")", "else", ":", "copy", ".", "add", "(", ")", ".", "MergeFrom", "(", "val", ")", "else", ":", "# Scalar", "if", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_ENUM", ":", "field_value", "=", "[", "_GetIntegerEnumValue", "(", "field", ".", "enum_type", ",", "val", ")", "for", "val", "in", "field_value", "]", "copy", ".", "extend", "(", "field_value", ")", "self", ".", "_fields", "[", "field", "]", "=", "copy", "elif", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "copy", "=", "field", ".", "_default_constructor", "(", "self", ")", "new_val", "=", "field_value", "if", "isinstance", "(", "field_value", ",", "dict", ")", ":", "new_val", "=", "field", ".", "message_type", ".", "_concrete_class", "(", "*", "*", "field_value", ")", "try", ":", "copy", ".", "MergeFrom", "(", "new_val", ")", "except", "TypeError", 
":", "_ReraiseTypeErrorWithFieldName", "(", "message_descriptor", ".", "name", ",", "field_name", ")", "self", ".", "_fields", "[", "field", "]", "=", "copy", "else", ":", "if", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_ENUM", ":", "field_value", "=", "_GetIntegerEnumValue", "(", "field", ".", "enum_type", ",", "field_value", ")", "try", ":", "setattr", "(", "self", ",", "field_name", ",", "field_value", ")", "except", "TypeError", ":", "_ReraiseTypeErrorWithFieldName", "(", "message_descriptor", ".", "name", ",", "field_name", ")", "init", ".", "__module__", "=", "None", "init", ".", "__doc__", "=", "None", "cls", ".", "__init__", "=", "init" ]
Adds an __init__ method to cls.
[ "Adds", "an", "__init__", "method", "to", "cls", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L450-L531
train
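A standalone sketch of the kwargs validation inside the generated `__init__`: unknown keyword arguments are rejected by a descriptor lookup. The field table here is a stand-in dict, not a real Descriptor.

# Hedged sketch: reject unexpected keyword arguments, as the generated init does.
KNOWN_FIELDS = {'name', 'id'}

def init(self, **kwargs):
    for field_name, field_value in kwargs.items():
        if field_name not in KNOWN_FIELDS:
            raise TypeError("%s() got an unexpected keyword argument '%s'"
                            % ('Msg', field_name))
        setattr(self, field_name, field_value)

Msg = type('Msg', (object,), {'__init__': init})
Msg(name='x', id=1)        # accepted
try:
    Msg(nmae='typo')       # rejected, mirroring the generated __init__
except TypeError as e:
    print(e)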
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_GetFieldByName
def _GetFieldByName(message_descriptor, field_name): """Returns a field descriptor by field name. Args: message_descriptor: A Descriptor describing all fields in message. field_name: The name of the field to retrieve. Returns: The field descriptor associated with the field name. """ try: return message_descriptor.fields_by_name[field_name] except KeyError: raise ValueError('Protocol message %s has no "%s" field.' % (message_descriptor.name, field_name))
python
def _GetFieldByName(message_descriptor, field_name): """Returns a field descriptor by field name. Args: message_descriptor: A Descriptor describing all fields in message. field_name: The name of the field to retrieve. Returns: The field descriptor associated with the field name. """ try: return message_descriptor.fields_by_name[field_name] except KeyError: raise ValueError('Protocol message %s has no "%s" field.' % (message_descriptor.name, field_name))
[ "def", "_GetFieldByName", "(", "message_descriptor", ",", "field_name", ")", ":", "try", ":", "return", "message_descriptor", ".", "fields_by_name", "[", "field_name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'Protocol message %s has no \"%s\" field.'", "%", "(", "message_descriptor", ".", "name", ",", "field_name", ")", ")" ]
Returns a field descriptor by field name. Args: message_descriptor: A Descriptor describing all fields in message. field_name: The name of the field to retrieve. Returns: The field descriptor associated with the field name.
[ "Returns", "a", "field", "descriptor", "by", "field", "name", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L534-L547
train
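A minimal sketch of the lookup-and-translate pattern above: a KeyError from the name table is surfaced as a friendlier ValueError. The table is a stand-in.

# Hedged sketch: translate KeyError into a descriptive ValueError.
fields_by_name = {'id': 'field-descriptor-for-id'}   # stand-in name table

def get_field(field_name, message_name='Msg'):
    try:
        return fields_by_name[field_name]
    except KeyError:
        raise ValueError('Protocol message %s has no "%s" field.'
                         % (message_name, field_name))

print(get_field('id'))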
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddPropertiesForFields
def _AddPropertiesForFields(descriptor, cls): """Adds properties for all fields in this protocol message type.""" for field in descriptor.fields: _AddPropertiesForField(field, cls) if descriptor.is_extendable: # _ExtensionDict is just an adaptor with no state so we allocate a new one # every time it is accessed. cls.Extensions = property(lambda self: _ExtensionDict(self))
python
def _AddPropertiesForFields(descriptor, cls): """Adds properties for all fields in this protocol message type.""" for field in descriptor.fields: _AddPropertiesForField(field, cls) if descriptor.is_extendable: # _ExtensionDict is just an adaptor with no state so we allocate a new one # every time it is accessed. cls.Extensions = property(lambda self: _ExtensionDict(self))
[ "def", "_AddPropertiesForFields", "(", "descriptor", ",", "cls", ")", ":", "for", "field", "in", "descriptor", ".", "fields", ":", "_AddPropertiesForField", "(", "field", ",", "cls", ")", "if", "descriptor", ".", "is_extendable", ":", "# _ExtensionDict is just an adaptor with no state so we allocate a new one", "# every time it is accessed.", "cls", ".", "Extensions", "=", "property", "(", "lambda", "self", ":", "_ExtensionDict", "(", "self", ")", ")" ]
Adds properties for all fields in this protocol message type.
[ "Adds", "properties", "for", "all", "fields", "in", "this", "protocol", "message", "type", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L550-L558
train
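A sketch of the stateless-adaptor trick noted in the comment above: because the adaptor carries no state, the property allocates a fresh wrapper on every access instead of caching one per instance.

# Hedged sketch: a property that builds a new adaptor each time it is read.
class ExtensionView(object):
    def __init__(self, owner):
        self.owner = owner

class Msg(object):
    Extensions = property(lambda self: ExtensionView(self))

m = Msg()
assert m.Extensions is not m.Extensions   # new adaptor per access, by design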
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddPropertiesForField
def _AddPropertiesForField(field, cls): """Adds a public property for a protocol message field. Clients can use this property to get and (in the case of non-repeated scalar fields) directly set the value of a protocol message field. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ # Catch it if we add other types that we should # handle specially here. assert _FieldDescriptor.MAX_CPPTYPE == 10 constant_name = field.name.upper() + "_FIELD_NUMBER" setattr(cls, constant_name, field.number) if field.label == _FieldDescriptor.LABEL_REPEATED: _AddPropertiesForRepeatedField(field, cls) elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: _AddPropertiesForNonRepeatedCompositeField(field, cls) else: _AddPropertiesForNonRepeatedScalarField(field, cls)
python
def _AddPropertiesForField(field, cls): """Adds a public property for a protocol message field. Clients can use this property to get and (in the case of non-repeated scalar fields) directly set the value of a protocol message field. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ # Catch it if we add other types that we should # handle specially here. assert _FieldDescriptor.MAX_CPPTYPE == 10 constant_name = field.name.upper() + "_FIELD_NUMBER" setattr(cls, constant_name, field.number) if field.label == _FieldDescriptor.LABEL_REPEATED: _AddPropertiesForRepeatedField(field, cls) elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: _AddPropertiesForNonRepeatedCompositeField(field, cls) else: _AddPropertiesForNonRepeatedScalarField(field, cls)
[ "def", "_AddPropertiesForField", "(", "field", ",", "cls", ")", ":", "# Catch it if we add other types that we should", "# handle specially here.", "assert", "_FieldDescriptor", ".", "MAX_CPPTYPE", "==", "10", "constant_name", "=", "field", ".", "name", ".", "upper", "(", ")", "+", "\"_FIELD_NUMBER\"", "setattr", "(", "cls", ",", "constant_name", ",", "field", ".", "number", ")", "if", "field", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "_AddPropertiesForRepeatedField", "(", "field", ",", "cls", ")", "elif", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "_AddPropertiesForNonRepeatedCompositeField", "(", "field", ",", "cls", ")", "else", ":", "_AddPropertiesForNonRepeatedScalarField", "(", "field", ",", "cls", ")" ]
Adds a public property for a protocol message field. Clients can use this property to get and (in the case of non-repeated scalar fields) directly set the value of a protocol message field. Args: field: A FieldDescriptor for this field. cls: The class we're constructing.
[ "Adds", "a", "public", "property", "for", "a", "protocol", "message", "field", ".", "Clients", "can", "use", "this", "property", "to", "get", "and", "(", "in", "the", "case", "of", "non", "-", "repeated", "scalar", "fields", ")", "directly", "set", "the", "value", "of", "a", "protocol", "message", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L561-L583
train
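A sketch of the generated FIELD_NUMBER constants described above: each field's tag number is exposed as an upper-cased class attribute. The names and numbers are toys.

# Hedged sketch: exposing field tag numbers as class constants.
class Msg(object):
    pass

for field_name, number in [('user_id', 1), ('email', 2)]:
    setattr(Msg, field_name.upper() + '_FIELD_NUMBER', number)

print(Msg.USER_ID_FIELD_NUMBER, Msg.EMAIL_FIELD_NUMBER)   # 1 2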
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddPropertiesForRepeatedField
def _AddPropertiesForRepeatedField(field, cls): """Adds a public property for a "repeated" protocol message field. Clients can use this property to get the value of the field, which will be either a _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see below). Note that when clients add values to these containers, we perform type-checking in the case of repeated scalar fields, and we also set any necessary "has" bits as a side-effect. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ proto_field_name = field.name property_name = _PropertyName(proto_field_name) def getter(self): field_value = self._fields.get(field) if field_value is None: # Construct a new object to represent this field. field_value = field._default_constructor(self) # Atomically check if another thread has preempted us and, if not, swap # in the new object we just created. If someone has preempted us, we # take that object and discard ours. # WARNING: We are relying on setdefault() being atomic. This is true # in CPython but we haven't investigated others. This warning appears # in several other locations in this file. field_value = self._fields.setdefault(field, field_value) return field_value getter.__module__ = None getter.__doc__ = 'Getter for %s.' % proto_field_name # We define a setter just so we can throw an exception with a more # helpful error message. def setter(self, new_value): raise AttributeError('Assignment not allowed to repeated field ' '"%s" in protocol message object.' % proto_field_name) doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name setattr(cls, property_name, property(getter, setter, doc=doc))
python
def _AddPropertiesForRepeatedField(field, cls): """Adds a public property for a "repeated" protocol message field. Clients can use this property to get the value of the field, which will be either a _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see below). Note that when clients add values to these containers, we perform type-checking in the case of repeated scalar fields, and we also set any necessary "has" bits as a side-effect. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ proto_field_name = field.name property_name = _PropertyName(proto_field_name) def getter(self): field_value = self._fields.get(field) if field_value is None: # Construct a new object to represent this field. field_value = field._default_constructor(self) # Atomically check if another thread has preempted us and, if not, swap # in the new object we just created. If someone has preempted us, we # take that object and discard ours. # WARNING: We are relying on setdefault() being atomic. This is true # in CPython but we haven't investigated others. This warning appears # in several other locations in this file. field_value = self._fields.setdefault(field, field_value) return field_value getter.__module__ = None getter.__doc__ = 'Getter for %s.' % proto_field_name # We define a setter just so we can throw an exception with a more # helpful error message. def setter(self, new_value): raise AttributeError('Assignment not allowed to repeated field ' '"%s" in protocol message object.' % proto_field_name) doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name setattr(cls, property_name, property(getter, setter, doc=doc))
[ "def", "_AddPropertiesForRepeatedField", "(", "field", ",", "cls", ")", ":", "proto_field_name", "=", "field", ".", "name", "property_name", "=", "_PropertyName", "(", "proto_field_name", ")", "def", "getter", "(", "self", ")", ":", "field_value", "=", "self", ".", "_fields", ".", "get", "(", "field", ")", "if", "field_value", "is", "None", ":", "# Construct a new object to represent this field.", "field_value", "=", "field", ".", "_default_constructor", "(", "self", ")", "# Atomically check if another thread has preempted us and, if not, swap", "# in the new object we just created. If someone has preempted us, we", "# take that object and discard ours.", "# WARNING: We are relying on setdefault() being atomic. This is true", "# in CPython but we haven't investigated others. This warning appears", "# in several other locations in this file.", "field_value", "=", "self", ".", "_fields", ".", "setdefault", "(", "field", ",", "field_value", ")", "return", "field_value", "getter", ".", "__module__", "=", "None", "getter", ".", "__doc__", "=", "'Getter for %s.'", "%", "proto_field_name", "# We define a setter just so we can throw an exception with a more", "# helpful error message.", "def", "setter", "(", "self", ",", "new_value", ")", ":", "raise", "AttributeError", "(", "'Assignment not allowed to repeated field '", "'\"%s\" in protocol message object.'", "%", "proto_field_name", ")", "doc", "=", "'Magic attribute generated for \"%s\" proto field.'", "%", "proto_field_name", "setattr", "(", "cls", ",", "property_name", ",", "property", "(", "getter", ",", "setter", ",", "doc", "=", "doc", ")", ")" ]
Adds a public property for a "repeated" protocol message field. Clients can use this property to get the value of the field, which will be either a _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see below). Note that when clients add values to these containers, we perform type-checking in the case of repeated scalar fields, and we also set any necessary "has" bits as a side-effect. Args: field: A FieldDescriptor for this field. cls: The class we're constructing.
[ "Adds", "a", "public", "property", "for", "a", "repeated", "protocol", "message", "field", ".", "Clients", "can", "use", "this", "property", "to", "get", "the", "value", "of", "the", "field", "which", "will", "be", "either", "a", "_RepeatedScalarFieldContainer", "or", "_RepeatedCompositeFieldContainer", "(", "see", "below", ")", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L586-L627
train
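A short sketch of the behavior `_AddPropertiesForRepeatedField` produces: the getter lazily builds the container (via the atomic `setdefault` pattern in the record above), while the generated setter always raises. This assumes the google.protobuf runtime is installed; `struct_pb2.ListValue` is only an illustrative generated message with a repeated field:

from google.protobuf import struct_pb2

lv = struct_pb2.ListValue()   # 'values' is a repeated message field
lv.values.add()               # the getter lazily creates the container
assert len(lv.values) == 1

try:
    lv.values = []            # the generated setter always raises
except AttributeError:
    pass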
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddPropertiesForNonRepeatedScalarField
def _AddPropertiesForNonRepeatedScalarField(field, cls): """Adds a public property for a nonrepeated, scalar protocol message field. Clients can use this property to get and directly set the value of the field. Note that when the client sets the value of a field by using this property, all necessary "has" bits are set as a side-effect, and we also perform type-checking. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ proto_field_name = field.name property_name = _PropertyName(proto_field_name) type_checker = type_checkers.GetTypeChecker(field) default_value = field.default_value valid_values = set() is_proto3 = field.containing_type.syntax == "proto3" def getter(self): # TODO(protobuf-team): This may be broken since there may not be # default_value. Combine with has_default_value somehow. return self._fields.get(field, default_value) getter.__module__ = None getter.__doc__ = 'Getter for %s.' % proto_field_name clear_when_set_to_default = is_proto3 and not field.containing_oneof def field_setter(self, new_value): # pylint: disable=protected-access # Testing the value for truthiness captures all of the proto3 defaults # (0, 0.0, enum 0, and False). new_value = type_checker.CheckValue(new_value) if clear_when_set_to_default and not new_value: self._fields.pop(field, None) else: self._fields[field] = new_value # Check _cached_byte_size_dirty inline to improve performance, since scalar # setters are called frequently. if not self._cached_byte_size_dirty: self._Modified() if field.containing_oneof: def setter(self, new_value): field_setter(self, new_value) self._UpdateOneofState(field) else: setter = field_setter setter.__module__ = None setter.__doc__ = 'Setter for %s.' % proto_field_name # Add a property to encapsulate the getter/setter. doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name setattr(cls, property_name, property(getter, setter, doc=doc))
python
def _AddPropertiesForNonRepeatedScalarField(field, cls): """Adds a public property for a nonrepeated, scalar protocol message field. Clients can use this property to get and directly set the value of the field. Note that when the client sets the value of a field by using this property, all necessary "has" bits are set as a side-effect, and we also perform type-checking. Args: field: A FieldDescriptor for this field. cls: The class we're constructing. """ proto_field_name = field.name property_name = _PropertyName(proto_field_name) type_checker = type_checkers.GetTypeChecker(field) default_value = field.default_value valid_values = set() is_proto3 = field.containing_type.syntax == "proto3" def getter(self): # TODO(protobuf-team): This may be broken since there may not be # default_value. Combine with has_default_value somehow. return self._fields.get(field, default_value) getter.__module__ = None getter.__doc__ = 'Getter for %s.' % proto_field_name clear_when_set_to_default = is_proto3 and not field.containing_oneof def field_setter(self, new_value): # pylint: disable=protected-access # Testing the value for truthiness captures all of the proto3 defaults # (0, 0.0, enum 0, and False). new_value = type_checker.CheckValue(new_value) if clear_when_set_to_default and not new_value: self._fields.pop(field, None) else: self._fields[field] = new_value # Check _cached_byte_size_dirty inline to improve performance, since scalar # setters are called frequently. if not self._cached_byte_size_dirty: self._Modified() if field.containing_oneof: def setter(self, new_value): field_setter(self, new_value) self._UpdateOneofState(field) else: setter = field_setter setter.__module__ = None setter.__doc__ = 'Setter for %s.' % proto_field_name # Add a property to encapsulate the getter/setter. doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name setattr(cls, property_name, property(getter, setter, doc=doc))
[ "def", "_AddPropertiesForNonRepeatedScalarField", "(", "field", ",", "cls", ")", ":", "proto_field_name", "=", "field", ".", "name", "property_name", "=", "_PropertyName", "(", "proto_field_name", ")", "type_checker", "=", "type_checkers", ".", "GetTypeChecker", "(", "field", ")", "default_value", "=", "field", ".", "default_value", "valid_values", "=", "set", "(", ")", "is_proto3", "=", "field", ".", "containing_type", ".", "syntax", "==", "\"proto3\"", "def", "getter", "(", "self", ")", ":", "# TODO(protobuf-team): This may be broken since there may not be", "# default_value. Combine with has_default_value somehow.", "return", "self", ".", "_fields", ".", "get", "(", "field", ",", "default_value", ")", "getter", ".", "__module__", "=", "None", "getter", ".", "__doc__", "=", "'Getter for %s.'", "%", "proto_field_name", "clear_when_set_to_default", "=", "is_proto3", "and", "not", "field", ".", "containing_oneof", "def", "field_setter", "(", "self", ",", "new_value", ")", ":", "# pylint: disable=protected-access", "# Testing the value for truthiness captures all of the proto3 defaults", "# (0, 0.0, enum 0, and False).", "new_value", "=", "type_checker", ".", "CheckValue", "(", "new_value", ")", "if", "clear_when_set_to_default", "and", "not", "new_value", ":", "self", ".", "_fields", ".", "pop", "(", "field", ",", "None", ")", "else", ":", "self", ".", "_fields", "[", "field", "]", "=", "new_value", "# Check _cached_byte_size_dirty inline to improve performance, since scalar", "# setters are called frequently.", "if", "not", "self", ".", "_cached_byte_size_dirty", ":", "self", ".", "_Modified", "(", ")", "if", "field", ".", "containing_oneof", ":", "def", "setter", "(", "self", ",", "new_value", ")", ":", "field_setter", "(", "self", ",", "new_value", ")", "self", ".", "_UpdateOneofState", "(", "field", ")", "else", ":", "setter", "=", "field_setter", "setter", ".", "__module__", "=", "None", "setter", ".", "__doc__", "=", "'Setter for %s.'", "%", "proto_field_name", "# Add a property to encapsulate the getter/setter.", "doc", "=", "'Magic attribute generated for \"%s\" proto field.'", "%", "proto_field_name", "setattr", "(", "cls", ",", "property_name", ",", "property", "(", "getter", ",", "setter", ",", "doc", "=", "doc", ")", ")" ]
Adds a public property for a nonrepeated, scalar protocol message field. Clients can use this property to get and directly set the value of the field. Note that when the client sets the value of a field by using this property, all necessary "has" bits are set as a side-effect, and we also perform type-checking. Args: field: A FieldDescriptor for this field. cls: The class we're constructing.
[ "Adds", "a", "public", "property", "for", "a", "nonrepeated", "scalar", "protocol", "message", "field", ".", "Clients", "can", "use", "this", "property", "to", "get", "and", "directly", "set", "the", "value", "of", "the", "field", ".", "Note", "that", "when", "the", "client", "sets", "the", "value", "of", "a", "field", "by", "using", "this", "property", "all", "necessary", "has", "bits", "are", "set", "as", "a", "side", "-", "effect", "and", "we", "also", "perform", "type", "-", "checking", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L630-L683
train
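The `clear_when_set_to_default` branch in the record above gives proto3 its "default values are absent" semantics. A small illustration, assuming the google.protobuf runtime is installed (`Timestamp` is again just a convenient proto3 message):

from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp()
ts.seconds = 5
assert ts.SerializeToString() == b'\x08\x05'  # field 1, varint 5
ts.seconds = 0                                # setting the proto3 default...
assert ts.SerializeToString() == b''          # ...pops the field from _fields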
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddPropertiesForExtensions
def _AddPropertiesForExtensions(descriptor, cls): """Adds properties for all fields in this protocol message type.""" extension_dict = descriptor.extensions_by_name for extension_name, extension_field in extension_dict.items(): constant_name = extension_name.upper() + "_FIELD_NUMBER" setattr(cls, constant_name, extension_field.number) # TODO(amauryfa): Migrate all users of these attributes to functions like # pool.FindExtensionByNumber(descriptor). if descriptor.file is not None: # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. pool = descriptor.file.pool cls._extensions_by_number = pool._extensions_by_number[descriptor] cls._extensions_by_name = pool._extensions_by_name[descriptor]
python
def _AddPropertiesForExtensions(descriptor, cls): """Adds properties for all fields in this protocol message type.""" extension_dict = descriptor.extensions_by_name for extension_name, extension_field in extension_dict.items(): constant_name = extension_name.upper() + "_FIELD_NUMBER" setattr(cls, constant_name, extension_field.number) # TODO(amauryfa): Migrate all users of these attributes to functions like # pool.FindExtensionByNumber(descriptor). if descriptor.file is not None: # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available. pool = descriptor.file.pool cls._extensions_by_number = pool._extensions_by_number[descriptor] cls._extensions_by_name = pool._extensions_by_name[descriptor]
[ "def", "_AddPropertiesForExtensions", "(", "descriptor", ",", "cls", ")", ":", "extension_dict", "=", "descriptor", ".", "extensions_by_name", "for", "extension_name", ",", "extension_field", "in", "extension_dict", ".", "items", "(", ")", ":", "constant_name", "=", "extension_name", ".", "upper", "(", ")", "+", "\"_FIELD_NUMBER\"", "setattr", "(", "cls", ",", "constant_name", ",", "extension_field", ".", "number", ")", "# TODO(amauryfa): Migrate all users of these attributes to functions like", "# pool.FindExtensionByNumber(descriptor).", "if", "descriptor", ".", "file", "is", "not", "None", ":", "# TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available.", "pool", "=", "descriptor", ".", "file", ".", "pool", "cls", ".", "_extensions_by_number", "=", "pool", ".", "_extensions_by_number", "[", "descriptor", "]", "cls", ".", "_extensions_by_name", "=", "pool", ".", "_extensions_by_name", "[", "descriptor", "]" ]
Adds properties for all fields in this protocol message type.
[ "Adds", "properties", "for", "all", "fields", "in", "this", "protocol", "message", "type", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L730-L743
train
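Demonstrating real extensions would require a compiled proto2 file, so here is only a pure-Python sketch of the constant-attachment pattern `_AddPropertiesForExtensions` applies; `_Ext` and `MyMessage` are hypothetical stand-ins for a FieldDescriptor and a generated class:

class _Ext:  # hypothetical stand-in for an extension FieldDescriptor
    def __init__(self, name, number):
        self.name, self.number = name, number

class MyMessage:  # hypothetical generated message class
    pass

for ext in [_Ext('my_ext', 1000)]:
    setattr(MyMessage, ext.name.upper() + '_FIELD_NUMBER', ext.number)

assert MyMessage.MY_EXT_FIELD_NUMBER == 1000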
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_IsPresent
def _IsPresent(item): """Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().""" if item[0].label == _FieldDescriptor.LABEL_REPEATED: return bool(item[1]) elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: return item[1]._is_present_in_parent else: return True
python
def _IsPresent(item): """Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().""" if item[0].label == _FieldDescriptor.LABEL_REPEATED: return bool(item[1]) elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: return item[1]._is_present_in_parent else: return True
[ "def", "_IsPresent", "(", "item", ")", ":", "if", "item", "[", "0", "]", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "return", "bool", "(", "item", "[", "1", "]", ")", "elif", "item", "[", "0", "]", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "return", "item", "[", "1", "]", ".", "_is_present_in_parent", "else", ":", "return", "True" ]
Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().
[ "Given", "a", "(", "FieldDescriptor", "value", ")", "tuple", "from", "_fields", "return", "true", "if", "the", "value", "should", "be", "included", "in", "the", "list", "returned", "by", "ListFields", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L761-L770
train
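`_IsPresent` is what filters `ListFields()` output: proto3 scalars at their default never enter `_fields`, so they are never present. A quick check, assuming the google.protobuf runtime is installed:

from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=0, nanos=7)
# seconds stayed at its default and was never stored, so only nanos is listed.
assert [f.name for f, _ in ts.ListFields()] == ['nanos']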
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddListFieldsMethod
def _AddListFieldsMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def ListFields(self): all_fields = [item for item in self._fields.items() if _IsPresent(item)] all_fields.sort(key = lambda item: item[0].number) return all_fields cls.ListFields = ListFields
python
def _AddListFieldsMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def ListFields(self): all_fields = [item for item in self._fields.items() if _IsPresent(item)] all_fields.sort(key = lambda item: item[0].number) return all_fields cls.ListFields = ListFields
[ "def", "_AddListFieldsMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "ListFields", "(", "self", ")", ":", "all_fields", "=", "[", "item", "for", "item", "in", "self", ".", "_fields", ".", "items", "(", ")", "if", "_IsPresent", "(", "item", ")", "]", "all_fields", ".", "sort", "(", "key", "=", "lambda", "item", ":", "item", "[", "0", "]", ".", "number", ")", "return", "all_fields", "cls", ".", "ListFields", "=", "ListFields" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L773-L781
train
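The sort key in `ListFields` orders results by field number, not by the order fields were set. Assuming the google.protobuf runtime is installed:

from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp(nanos=2, seconds=1)
# seconds is field 1 and nanos is field 2, so seconds comes first.
assert [f.name for f, _ in ts.ListFields()] == ['seconds', 'nanos']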
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddHasFieldMethod
def _AddHasFieldMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" is_proto3 = (message_descriptor.syntax == "proto3") error_msg = _Proto3HasError if is_proto3 else _Proto2HasError hassable_fields = {} for field in message_descriptor.fields: if field.label == _FieldDescriptor.LABEL_REPEATED: continue # For proto3, only submessages and fields inside a oneof have presence. if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and not field.containing_oneof): continue hassable_fields[field.name] = field if not is_proto3: # Fields inside oneofs are never repeated (enforced by the compiler). for oneof in message_descriptor.oneofs: hassable_fields[oneof.name] = oneof def HasField(self, field_name): try: field = hassable_fields[field_name] except KeyError: raise ValueError(error_msg % field_name) if isinstance(field, descriptor_mod.OneofDescriptor): try: return HasField(self, self._oneofs[field].name) except KeyError: return False else: if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: value = self._fields.get(field) return value is not None and value._is_present_in_parent else: return field in self._fields cls.HasField = HasField
python
def _AddHasFieldMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" is_proto3 = (message_descriptor.syntax == "proto3") error_msg = _Proto3HasError if is_proto3 else _Proto2HasError hassable_fields = {} for field in message_descriptor.fields: if field.label == _FieldDescriptor.LABEL_REPEATED: continue # For proto3, only submessages and fields inside a oneof have presence. if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and not field.containing_oneof): continue hassable_fields[field.name] = field if not is_proto3: # Fields inside oneofs are never repeated (enforced by the compiler). for oneof in message_descriptor.oneofs: hassable_fields[oneof.name] = oneof def HasField(self, field_name): try: field = hassable_fields[field_name] except KeyError: raise ValueError(error_msg % field_name) if isinstance(field, descriptor_mod.OneofDescriptor): try: return HasField(self, self._oneofs[field].name) except KeyError: return False else: if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: value = self._fields.get(field) return value is not None and value._is_present_in_parent else: return field in self._fields cls.HasField = HasField
[ "def", "_AddHasFieldMethod", "(", "message_descriptor", ",", "cls", ")", ":", "is_proto3", "=", "(", "message_descriptor", ".", "syntax", "==", "\"proto3\"", ")", "error_msg", "=", "_Proto3HasError", "if", "is_proto3", "else", "_Proto2HasError", "hassable_fields", "=", "{", "}", "for", "field", "in", "message_descriptor", ".", "fields", ":", "if", "field", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "continue", "# For proto3, only submessages and fields inside a oneof have presence.", "if", "(", "is_proto3", "and", "field", ".", "cpp_type", "!=", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", "and", "not", "field", ".", "containing_oneof", ")", ":", "continue", "hassable_fields", "[", "field", ".", "name", "]", "=", "field", "if", "not", "is_proto3", ":", "# Fields inside oneofs are never repeated (enforced by the compiler).", "for", "oneof", "in", "message_descriptor", ".", "oneofs", ":", "hassable_fields", "[", "oneof", ".", "name", "]", "=", "oneof", "def", "HasField", "(", "self", ",", "field_name", ")", ":", "try", ":", "field", "=", "hassable_fields", "[", "field_name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "error_msg", "%", "field_name", ")", "if", "isinstance", "(", "field", ",", "descriptor_mod", ".", "OneofDescriptor", ")", ":", "try", ":", "return", "HasField", "(", "self", ",", "self", ".", "_oneofs", "[", "field", "]", ".", "name", ")", "except", "KeyError", ":", "return", "False", "else", ":", "if", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "value", "=", "self", ".", "_fields", ".", "get", "(", "field", ")", "return", "value", "is", "not", "None", "and", "value", ".", "_is_present_in_parent", "else", ":", "return", "field", "in", "self", ".", "_fields", "cls", ".", "HasField", "=", "HasField" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L786-L825
train
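The `hassable_fields` table above explains why proto3 `HasField` accepts oneofs and submessages but rejects plain scalars. A demonstration, assuming the google.protobuf runtime is installed; `struct_pb2.Value` is a proto3 message whose fields live in a oneof named `kind`:

from google.protobuf import struct_pb2, timestamp_pb2

v = struct_pb2.Value()
assert not v.HasField('kind')
v.number_value = 0.0          # even the default value marks the oneof case
assert v.HasField('kind') and v.HasField('number_value')

try:                          # plain proto3 scalars are not hassable
    timestamp_pb2.Timestamp().HasField('seconds')
except ValueError:
    pass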
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddClearFieldMethod
def _AddClearFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def ClearField(self, field_name):
    try:
      field = message_descriptor.fields_by_name[field_name]
    except KeyError:
      try:
        field = message_descriptor.oneofs_by_name[field_name]
        if field in self._oneofs:
          field = self._oneofs[field]
        else:
          return
      except KeyError:
        raise ValueError('Protocol message %s() has no "%s" field.' %
                         (message_descriptor.name, field_name))

    if field in self._fields:
      # To match the C++ implementation, we need to invalidate iterators
      # for map fields when ClearField() happens.
      if hasattr(self._fields[field], 'InvalidateIterators'):
        self._fields[field].InvalidateIterators()

      # Note: If the field is a sub-message, its listener will still point
      # at us. That's fine, because the worst that can happen is that it
      # will call _Modified() and invalidate our byte size. Big deal.
      del self._fields[field]

      if self._oneofs.get(field.containing_oneof, None) is field:
        del self._oneofs[field.containing_oneof]

    # Always call _Modified() -- even if nothing was changed, this is
    # a mutating method, and thus calling it should cause the field to become
    # present in the parent message.
    self._Modified()

  cls.ClearField = ClearField
python
def _AddClearFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def ClearField(self, field_name):
    try:
      field = message_descriptor.fields_by_name[field_name]
    except KeyError:
      try:
        field = message_descriptor.oneofs_by_name[field_name]
        if field in self._oneofs:
          field = self._oneofs[field]
        else:
          return
      except KeyError:
        raise ValueError('Protocol message %s() has no "%s" field.' %
                         (message_descriptor.name, field_name))

    if field in self._fields:
      # To match the C++ implementation, we need to invalidate iterators
      # for map fields when ClearField() happens.
      if hasattr(self._fields[field], 'InvalidateIterators'):
        self._fields[field].InvalidateIterators()

      # Note: If the field is a sub-message, its listener will still point
      # at us. That's fine, because the worst that can happen is that it
      # will call _Modified() and invalidate our byte size. Big deal.
      del self._fields[field]

      if self._oneofs.get(field.containing_oneof, None) is field:
        del self._oneofs[field.containing_oneof]

    # Always call _Modified() -- even if nothing was changed, this is
    # a mutating method, and thus calling it should cause the field to become
    # present in the parent message.
    self._Modified()

  cls.ClearField = ClearField
[ "def", "_AddClearFieldMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "ClearField", "(", "self", ",", "field_name", ")", ":", "try", ":", "field", "=", "message_descriptor", ".", "fields_by_name", "[", "field_name", "]", "except", "KeyError", ":", "try", ":", "field", "=", "message_descriptor", ".", "oneofs_by_name", "[", "field_name", "]", "if", "field", "in", "self", ".", "_oneofs", ":", "field", "=", "self", ".", "_oneofs", "[", "field", "]", "else", ":", "return", "except", "KeyError", ":", "raise", "ValueError", "(", "'Protocol message %s() has no \"%s\" field.'", "%", "(", "message_descriptor", ".", "name", ",", "field_name", ")", ")", "if", "field", "in", "self", ".", "_fields", ":", "# To match the C++ implementation, we need to invalidate iterators", "# for map fields when ClearField() happens.", "if", "hasattr", "(", "self", ".", "_fields", "[", "field", "]", ",", "'InvalidateIterators'", ")", ":", "self", ".", "_fields", "[", "field", "]", ".", "InvalidateIterators", "(", ")", "# Note: If the field is a sub-message, its listener will still point", "# at us. That's fine, because the worst than can happen is that it", "# will call _Modified() and invalidate our byte size. Big deal.", "del", "self", ".", "_fields", "[", "field", "]", "if", "self", ".", "_oneofs", ".", "get", "(", "field", ".", "containing_oneof", ",", "None", ")", "is", "field", ":", "del", "self", ".", "_oneofs", "[", "field", ".", "containing_oneof", "]", "# Always call _Modified() -- even if nothing was changed, this is", "# a mutating method, and thus calling it should cause the field to become", "# present in the parent message.", "self", ".", "_Modified", "(", ")", "cls", ".", "ClearField", "=", "ClearField" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L828-L863
train
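`ClearField` accepts both field names and oneof names, and raises on unknown names via the `KeyError` fallthrough above. A quick check, assuming the google.protobuf runtime is installed:

from google.protobuf import struct_pb2

v = struct_pb2.Value()
v.string_value = 'x'
v.ClearField('kind')          # clearing by the oneof's name is allowed
assert not v.HasField('kind')

try:
    v.ClearField('no_such_field')
except ValueError:
    pass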
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddClearExtensionMethod
def _AddClearExtensionMethod(cls): """Helper for _AddMessageMethods().""" def ClearExtension(self, extension_handle): _VerifyExtensionHandle(self, extension_handle) # Similar to ClearField(), above. if extension_handle in self._fields: del self._fields[extension_handle] self._Modified() cls.ClearExtension = ClearExtension
python
def _AddClearExtensionMethod(cls): """Helper for _AddMessageMethods().""" def ClearExtension(self, extension_handle): _VerifyExtensionHandle(self, extension_handle) # Similar to ClearField(), above. if extension_handle in self._fields: del self._fields[extension_handle] self._Modified() cls.ClearExtension = ClearExtension
[ "def", "_AddClearExtensionMethod", "(", "cls", ")", ":", "def", "ClearExtension", "(", "self", ",", "extension_handle", ")", ":", "_VerifyExtensionHandle", "(", "self", ",", "extension_handle", ")", "# Similar to ClearField(), above.", "if", "extension_handle", "in", "self", ".", "_fields", ":", "del", "self", ".", "_fields", "[", "extension_handle", "]", "self", ".", "_Modified", "(", ")", "cls", ".", "ClearExtension", "=", "ClearExtension" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L866-L875
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddHasExtensionMethod
def _AddHasExtensionMethod(cls): """Helper for _AddMessageMethods().""" def HasExtension(self, extension_handle): _VerifyExtensionHandle(self, extension_handle) if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: raise KeyError('"%s" is repeated.' % extension_handle.full_name) if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: value = self._fields.get(extension_handle) return value is not None and value._is_present_in_parent else: return extension_handle in self._fields cls.HasExtension = HasExtension
python
def _AddHasExtensionMethod(cls): """Helper for _AddMessageMethods().""" def HasExtension(self, extension_handle): _VerifyExtensionHandle(self, extension_handle) if extension_handle.label == _FieldDescriptor.LABEL_REPEATED: raise KeyError('"%s" is repeated.' % extension_handle.full_name) if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: value = self._fields.get(extension_handle) return value is not None and value._is_present_in_parent else: return extension_handle in self._fields cls.HasExtension = HasExtension
[ "def", "_AddHasExtensionMethod", "(", "cls", ")", ":", "def", "HasExtension", "(", "self", ",", "extension_handle", ")", ":", "_VerifyExtensionHandle", "(", "self", ",", "extension_handle", ")", "if", "extension_handle", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "raise", "KeyError", "(", "'\"%s\" is repeated.'", "%", "extension_handle", ".", "full_name", ")", "if", "extension_handle", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "value", "=", "self", ".", "_fields", ".", "get", "(", "extension_handle", ")", "return", "value", "is", "not", "None", "and", "value", ".", "_is_present_in_parent", "else", ":", "return", "extension_handle", "in", "self", ".", "_fields", "cls", ".", "HasExtension", "=", "HasExtension" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L878-L890
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_InternalUnpackAny
def _InternalUnpackAny(msg):
  """Unpacks Any message and returns the unpacked message.

  This internal method differs from the public Any Unpack method, which
  takes the target message as an argument. The _InternalUnpackAny method
  does not have the target message type and needs to find the message type
  in the descriptor pool.

  Args:
    msg: An Any message to be unpacked.

  Returns:
    The unpacked message.
  """
  # TODO(amauryfa): Don't use the factory of generated messages.
  # To make Any work with custom factories, use the message factory of the
  # parent message.
  # pylint: disable=g-import-not-at-top
  from google.protobuf import symbol_database
  factory = symbol_database.Default()

  type_url = msg.type_url

  if not type_url:
    return None

  # TODO(haberman): For now we just strip the hostname. Better logic will be
  # required.
  type_name = type_url.split('/')[-1]
  descriptor = factory.pool.FindMessageTypeByName(type_name)

  if descriptor is None:
    return None

  message_class = factory.GetPrototype(descriptor)
  message = message_class()

  message.ParseFromString(msg.value)
  return message
python
def _InternalUnpackAny(msg):
  """Unpacks Any message and returns the unpacked message.

  This internal method differs from the public Any Unpack method, which
  takes the target message as an argument. The _InternalUnpackAny method
  does not have the target message type and needs to find the message type
  in the descriptor pool.

  Args:
    msg: An Any message to be unpacked.

  Returns:
    The unpacked message.
  """
  # TODO(amauryfa): Don't use the factory of generated messages.
  # To make Any work with custom factories, use the message factory of the
  # parent message.
  # pylint: disable=g-import-not-at-top
  from google.protobuf import symbol_database
  factory = symbol_database.Default()

  type_url = msg.type_url

  if not type_url:
    return None

  # TODO(haberman): For now we just strip the hostname. Better logic will be
  # required.
  type_name = type_url.split('/')[-1]
  descriptor = factory.pool.FindMessageTypeByName(type_name)

  if descriptor is None:
    return None

  message_class = factory.GetPrototype(descriptor)
  message = message_class()

  message.ParseFromString(msg.value)
  return message
[ "def", "_InternalUnpackAny", "(", "msg", ")", ":", "# TODO(amauryfa): Don't use the factory of generated messages.", "# To make Any work with custom factories, use the message factory of the", "# parent message.", "# pylint: disable=g-import-not-at-top", "from", "google", ".", "protobuf", "import", "symbol_database", "factory", "=", "symbol_database", ".", "Default", "(", ")", "type_url", "=", "msg", ".", "type_url", "if", "not", "type_url", ":", "return", "None", "# TODO(haberman): For now we just strip the hostname. Better logic will be", "# required.", "type_name", "=", "type_url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "descriptor", "=", "factory", ".", "pool", ".", "FindMessageTypeByName", "(", "type_name", ")", "if", "descriptor", "is", "None", ":", "return", "None", "message_class", "=", "factory", ".", "GetPrototype", "(", "descriptor", ")", "message", "=", "message_class", "(", ")", "message", ".", "ParseFromString", "(", "msg", ".", "value", ")", "return", "message" ]
Unpacks Any message and returns the unpacked message.

This internal method differs from the public Any Unpack method, which
takes the target message as an argument. The _InternalUnpackAny method
does not have the target message type and needs to find the message type
in the descriptor pool.

Args:
  msg: An Any message to be unpacked.

Returns:
  The unpacked message.
[ "Unpacks", "Any", "message", "and", "returns", "the", "unpacked", "message", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L892-L929
train
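The public Pack/Unpack counterparts of the internal helper above are easy to exercise; note how the type name is recovered by stripping everything before the last '/', exactly as in the record. Assuming the google.protobuf runtime is installed:

from google.protobuf import any_pb2, timestamp_pb2

any_msg = any_pb2.Any()
any_msg.Pack(timestamp_pb2.Timestamp(seconds=1))
assert any_msg.type_url.split('/')[-1] == 'google.protobuf.Timestamp'

ts = timestamp_pb2.Timestamp()
assert any_msg.Unpack(ts) and ts.seconds == 1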
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddEqualsMethod
def _AddEqualsMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __eq__(self, other): if (not isinstance(other, message_mod.Message) or other.DESCRIPTOR != self.DESCRIPTOR): return False if self is other: return True if self.DESCRIPTOR.full_name == _AnyFullTypeName: any_a = _InternalUnpackAny(self) any_b = _InternalUnpackAny(other) if any_a and any_b: return any_a == any_b if not self.ListFields() == other.ListFields(): return False # Sort unknown fields because their order shouldn't affect equality test. unknown_fields = list(self._unknown_fields) unknown_fields.sort() other_unknown_fields = list(other._unknown_fields) other_unknown_fields.sort() return unknown_fields == other_unknown_fields cls.__eq__ = __eq__
python
def _AddEqualsMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __eq__(self, other): if (not isinstance(other, message_mod.Message) or other.DESCRIPTOR != self.DESCRIPTOR): return False if self is other: return True if self.DESCRIPTOR.full_name == _AnyFullTypeName: any_a = _InternalUnpackAny(self) any_b = _InternalUnpackAny(other) if any_a and any_b: return any_a == any_b if not self.ListFields() == other.ListFields(): return False # Sort unknown fields because their order shouldn't affect equality test. unknown_fields = list(self._unknown_fields) unknown_fields.sort() other_unknown_fields = list(other._unknown_fields) other_unknown_fields.sort() return unknown_fields == other_unknown_fields cls.__eq__ = __eq__
[ "def", "_AddEqualsMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "__eq__", "(", "self", ",", "other", ")", ":", "if", "(", "not", "isinstance", "(", "other", ",", "message_mod", ".", "Message", ")", "or", "other", ".", "DESCRIPTOR", "!=", "self", ".", "DESCRIPTOR", ")", ":", "return", "False", "if", "self", "is", "other", ":", "return", "True", "if", "self", ".", "DESCRIPTOR", ".", "full_name", "==", "_AnyFullTypeName", ":", "any_a", "=", "_InternalUnpackAny", "(", "self", ")", "any_b", "=", "_InternalUnpackAny", "(", "other", ")", "if", "any_a", "and", "any_b", ":", "return", "any_a", "==", "any_b", "if", "not", "self", ".", "ListFields", "(", ")", "==", "other", ".", "ListFields", "(", ")", ":", "return", "False", "# Sort unknown fields because their order shouldn't affect equality test.", "unknown_fields", "=", "list", "(", "self", ".", "_unknown_fields", ")", "unknown_fields", ".", "sort", "(", ")", "other_unknown_fields", "=", "list", "(", "other", ".", "_unknown_fields", ")", "other_unknown_fields", ".", "sort", "(", ")", "return", "unknown_fields", "==", "other_unknown_fields", "cls", ".", "__eq__", "=", "__eq__" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L932-L959
train
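The generated `__eq__` compares descriptors first, then set fields, then sorted unknown fields. A minimal check, assuming the google.protobuf runtime is installed (`Duration` serves only as a second message type):

from google.protobuf import timestamp_pb2, duration_pb2

a = timestamp_pb2.Timestamp(seconds=1)
b = timestamp_pb2.Timestamp(seconds=1)
assert a == b and a is not b                  # field-by-field comparison
assert a != duration_pb2.Duration(seconds=1)  # different DESCRIPTOR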
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddStrMethod
def _AddStrMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __str__(self): return text_format.MessageToString(self) cls.__str__ = __str__
python
def _AddStrMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __str__(self): return text_format.MessageToString(self) cls.__str__ = __str__
[ "def", "_AddStrMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "__str__", "(", "self", ")", ":", "return", "text_format", ".", "MessageToString", "(", "self", ")", "cls", ".", "__str__", "=", "__str__" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L962-L966
train
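`__str__` simply delegates to text_format, so the string form is the familiar text-proto rendering. Assuming the google.protobuf runtime is installed:

from google.protobuf import timestamp_pb2

assert str(timestamp_pb2.Timestamp(seconds=5)) == 'seconds: 5\n'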
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddReprMethod
def _AddReprMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __repr__(self): return text_format.MessageToString(self) cls.__repr__ = __repr__
python
def _AddReprMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def __repr__(self): return text_format.MessageToString(self) cls.__repr__ = __repr__
[ "def", "_AddReprMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "__repr__", "(", "self", ")", ":", "return", "text_format", ".", "MessageToString", "(", "self", ")", "cls", ".", "__repr__", "=", "__repr__" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L969-L973
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddUnicodeMethod
def _AddUnicodeMethod(unused_message_descriptor, cls): """Helper for _AddMessageMethods().""" def __unicode__(self): return text_format.MessageToString(self, as_utf8=True).decode('utf-8') cls.__unicode__ = __unicode__
python
def _AddUnicodeMethod(unused_message_descriptor, cls): """Helper for _AddMessageMethods().""" def __unicode__(self): return text_format.MessageToString(self, as_utf8=True).decode('utf-8') cls.__unicode__ = __unicode__
[ "def", "_AddUnicodeMethod", "(", "unused_message_descriptor", ",", "cls", ")", ":", "def", "__unicode__", "(", "self", ")", ":", "return", "text_format", ".", "MessageToString", "(", "self", ",", "as_utf8", "=", "True", ")", ".", "decode", "(", "'utf-8'", ")", "cls", ".", "__unicode__", "=", "__unicode__" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L976-L981
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_BytesForNonRepeatedElement
def _BytesForNonRepeatedElement(value, field_number, field_type): """Returns the number of bytes needed to serialize a non-repeated element. The returned byte count includes space for tag information and any other additional space associated with serializing value. Args: value: Value we're serializing. field_number: Field number of this value. (Since the field number is stored as part of a varint-encoded tag, this has an impact on the total bytes required to serialize the value). field_type: The type of the field. One of the TYPE_* constants within FieldDescriptor. """ try: fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] return fn(field_number, value) except KeyError: raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
python
def _BytesForNonRepeatedElement(value, field_number, field_type): """Returns the number of bytes needed to serialize a non-repeated element. The returned byte count includes space for tag information and any other additional space associated with serializing value. Args: value: Value we're serializing. field_number: Field number of this value. (Since the field number is stored as part of a varint-encoded tag, this has an impact on the total bytes required to serialize the value). field_type: The type of the field. One of the TYPE_* constants within FieldDescriptor. """ try: fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type] return fn(field_number, value) except KeyError: raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
[ "def", "_BytesForNonRepeatedElement", "(", "value", ",", "field_number", ",", "field_type", ")", ":", "try", ":", "fn", "=", "type_checkers", ".", "TYPE_TO_BYTE_SIZE_FN", "[", "field_type", "]", "return", "fn", "(", "field_number", ",", "value", ")", "except", "KeyError", ":", "raise", "message_mod", ".", "EncodeError", "(", "'Unrecognized field type: %d'", "%", "field_type", ")" ]
Returns the number of bytes needed to serialize a non-repeated element. The returned byte count includes space for tag information and any other additional space associated with serializing value. Args: value: Value we're serializing. field_number: Field number of this value. (Since the field number is stored as part of a varint-encoded tag, this has an impact on the total bytes required to serialize the value). field_type: The type of the field. One of the TYPE_* constants within FieldDescriptor.
[ "Returns", "the", "number", "of", "bytes", "needed", "to", "serialize", "a", "non", "-", "repeated", "element", ".", "The", "returned", "byte", "count", "includes", "space", "for", "tag", "information", "and", "any", "other", "additional", "space", "associated", "with", "serializing", "value", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L984-L1001
train
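The per-type size functions looked up in TYPE_TO_BYTE_SIZE_FN ultimately rest on varint arithmetic. `varint_size` below is an illustrative re-derivation of that arithmetic, not the library's own helper:

def varint_size(n):
    # Each varint byte carries 7 payload bits.
    size = 1
    while n > 0x7F:
        n >>= 7
        size += 1
    return size

assert varint_size(1) == 1
assert varint_size(300) == 2  # 300 needs 9 bits -> two 7-bit groups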
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddByteSizeMethod
def _AddByteSizeMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def ByteSize(self): if not self._cached_byte_size_dirty: return self._cached_byte_size size = 0 for field_descriptor, field_value in self.ListFields(): size += field_descriptor._sizer(field_value) for tag_bytes, value_bytes in self._unknown_fields: size += len(tag_bytes) + len(value_bytes) self._cached_byte_size = size self._cached_byte_size_dirty = False self._listener_for_children.dirty = False return size cls.ByteSize = ByteSize
python
def _AddByteSizeMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def ByteSize(self): if not self._cached_byte_size_dirty: return self._cached_byte_size size = 0 for field_descriptor, field_value in self.ListFields(): size += field_descriptor._sizer(field_value) for tag_bytes, value_bytes in self._unknown_fields: size += len(tag_bytes) + len(value_bytes) self._cached_byte_size = size self._cached_byte_size_dirty = False self._listener_for_children.dirty = False return size cls.ByteSize = ByteSize
[ "def", "_AddByteSizeMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "ByteSize", "(", "self", ")", ":", "if", "not", "self", ".", "_cached_byte_size_dirty", ":", "return", "self", ".", "_cached_byte_size", "size", "=", "0", "for", "field_descriptor", ",", "field_value", "in", "self", ".", "ListFields", "(", ")", ":", "size", "+=", "field_descriptor", ".", "_sizer", "(", "field_value", ")", "for", "tag_bytes", ",", "value_bytes", "in", "self", ".", "_unknown_fields", ":", "size", "+=", "len", "(", "tag_bytes", ")", "+", "len", "(", "value_bytes", ")", "self", ".", "_cached_byte_size", "=", "size", "self", ".", "_cached_byte_size_dirty", "=", "False", "self", ".", "_listener_for_children", ".", "dirty", "=", "False", "return", "size", "cls", ".", "ByteSize", "=", "ByteSize" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1004-L1023
train
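`ByteSize` agrees with the serialized length, and mutations flip `_cached_byte_size_dirty` so the next call recomputes. Assuming the google.protobuf runtime is installed:

from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=300)
# 1 tag byte + 2 varint bytes; repeated calls hit the cached value.
assert ts.ByteSize() == 3 == len(ts.SerializeToString())
ts.seconds = 1                # mutation marks the cache dirty
assert ts.ByteSize() == 2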
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddSerializeToStringMethod
def _AddSerializeToStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def SerializeToString(self): # Check if the message has all of its required fields set. errors = [] if not self.IsInitialized(): raise message_mod.EncodeError( 'Message %s is missing required fields: %s' % ( self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors()))) return self.SerializePartialToString() cls.SerializeToString = SerializeToString
python
def _AddSerializeToStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def SerializeToString(self): # Check if the message has all of its required fields set. errors = [] if not self.IsInitialized(): raise message_mod.EncodeError( 'Message %s is missing required fields: %s' % ( self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors()))) return self.SerializePartialToString() cls.SerializeToString = SerializeToString
[ "def", "_AddSerializeToStringMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "SerializeToString", "(", "self", ")", ":", "# Check if the message has all of its required fields set.", "errors", "=", "[", "]", "if", "not", "self", ".", "IsInitialized", "(", ")", ":", "raise", "message_mod", ".", "EncodeError", "(", "'Message %s is missing required fields: %s'", "%", "(", "self", ".", "DESCRIPTOR", ".", "full_name", ",", "','", ".", "join", "(", "self", ".", "FindInitializationErrors", "(", ")", ")", ")", ")", "return", "self", ".", "SerializePartialToString", "(", ")", "cls", ".", "SerializeToString", "=", "SerializeToString" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1026-L1037
train
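Since proto3 messages have no required fields, the initialization check in `SerializeToString` always passes for them, and the encoding of a single small varint field is easy to verify by hand. Assuming the google.protobuf runtime is installed:

from google.protobuf import timestamp_pb2

data = timestamp_pb2.Timestamp(seconds=1).SerializeToString()
assert data == b'\x08\x01'  # tag (field 1, wire type 0) + varint 1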
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddSerializePartialToStringMethod
def _AddSerializePartialToStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def SerializePartialToString(self): out = BytesIO() self._InternalSerialize(out.write) return out.getvalue() cls.SerializePartialToString = SerializePartialToString def InternalSerialize(self, write_bytes): for field_descriptor, field_value in self.ListFields(): field_descriptor._encoder(write_bytes, field_value) for tag_bytes, value_bytes in self._unknown_fields: write_bytes(tag_bytes) write_bytes(value_bytes) cls._InternalSerialize = InternalSerialize
python
def _AddSerializePartialToStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def SerializePartialToString(self): out = BytesIO() self._InternalSerialize(out.write) return out.getvalue() cls.SerializePartialToString = SerializePartialToString def InternalSerialize(self, write_bytes): for field_descriptor, field_value in self.ListFields(): field_descriptor._encoder(write_bytes, field_value) for tag_bytes, value_bytes in self._unknown_fields: write_bytes(tag_bytes) write_bytes(value_bytes) cls._InternalSerialize = InternalSerialize
[ "def", "_AddSerializePartialToStringMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "SerializePartialToString", "(", "self", ")", ":", "out", "=", "BytesIO", "(", ")", "self", ".", "_InternalSerialize", "(", "out", ".", "write", ")", "return", "out", ".", "getvalue", "(", ")", "cls", ".", "SerializePartialToString", "=", "SerializePartialToString", "def", "InternalSerialize", "(", "self", ",", "write_bytes", ")", ":", "for", "field_descriptor", ",", "field_value", "in", "self", ".", "ListFields", "(", ")", ":", "field_descriptor", ".", "_encoder", "(", "write_bytes", ",", "field_value", ")", "for", "tag_bytes", ",", "value_bytes", "in", "self", ".", "_unknown_fields", ":", "write_bytes", "(", "tag_bytes", ")", "write_bytes", "(", "value_bytes", ")", "cls", ".", "_InternalSerialize", "=", "InternalSerialize" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1040-L1055
train
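`SerializePartialToString` skips only the required-fields check, so for a proto3 message the two serialization paths produce identical bytes. Assuming the google.protobuf runtime is installed:

from google.protobuf import timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=1)
assert ts.SerializePartialToString() == ts.SerializeToString()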
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddMergeFromStringMethod
def _AddMergeFromStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def MergeFromString(self, serialized): length = len(serialized) try: if self._InternalParse(serialized, 0, length) != length: # The only reason _InternalParse would return early is if it # encountered an end-group tag. raise message_mod.DecodeError('Unexpected end-group tag.') except (IndexError, TypeError): # Now ord(buf[p:p+1]) == ord('') gets TypeError. raise message_mod.DecodeError('Truncated message.') except struct.error as e: raise message_mod.DecodeError(e) return length # Return this for legacy reasons. cls.MergeFromString = MergeFromString local_ReadTag = decoder.ReadTag local_SkipField = decoder.SkipField decoders_by_tag = cls._decoders_by_tag is_proto3 = message_descriptor.syntax == "proto3" def InternalParse(self, buffer, pos, end): self._Modified() field_dict = self._fields unknown_field_list = self._unknown_fields while pos != end: (tag_bytes, new_pos) = local_ReadTag(buffer, pos) field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None)) if field_decoder is None: value_start_pos = new_pos new_pos = local_SkipField(buffer, new_pos, end, tag_bytes) if new_pos == -1: return pos if not is_proto3: if not unknown_field_list: unknown_field_list = self._unknown_fields = [] unknown_field_list.append( (tag_bytes, buffer[value_start_pos:new_pos])) pos = new_pos else: pos = field_decoder(buffer, new_pos, end, self, field_dict) if field_desc: self._UpdateOneofState(field_desc) return pos cls._InternalParse = InternalParse
python
def _AddMergeFromStringMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def MergeFromString(self, serialized): length = len(serialized) try: if self._InternalParse(serialized, 0, length) != length: # The only reason _InternalParse would return early is if it # encountered an end-group tag. raise message_mod.DecodeError('Unexpected end-group tag.') except (IndexError, TypeError): # Now ord(buf[p:p+1]) == ord('') gets TypeError. raise message_mod.DecodeError('Truncated message.') except struct.error as e: raise message_mod.DecodeError(e) return length # Return this for legacy reasons. cls.MergeFromString = MergeFromString local_ReadTag = decoder.ReadTag local_SkipField = decoder.SkipField decoders_by_tag = cls._decoders_by_tag is_proto3 = message_descriptor.syntax == "proto3" def InternalParse(self, buffer, pos, end): self._Modified() field_dict = self._fields unknown_field_list = self._unknown_fields while pos != end: (tag_bytes, new_pos) = local_ReadTag(buffer, pos) field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None)) if field_decoder is None: value_start_pos = new_pos new_pos = local_SkipField(buffer, new_pos, end, tag_bytes) if new_pos == -1: return pos if not is_proto3: if not unknown_field_list: unknown_field_list = self._unknown_fields = [] unknown_field_list.append( (tag_bytes, buffer[value_start_pos:new_pos])) pos = new_pos else: pos = field_decoder(buffer, new_pos, end, self, field_dict) if field_desc: self._UpdateOneofState(field_desc) return pos cls._InternalParse = InternalParse
[ "def", "_AddMergeFromStringMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "MergeFromString", "(", "self", ",", "serialized", ")", ":", "length", "=", "len", "(", "serialized", ")", "try", ":", "if", "self", ".", "_InternalParse", "(", "serialized", ",", "0", ",", "length", ")", "!=", "length", ":", "# The only reason _InternalParse would return early is if it", "# encountered an end-group tag.", "raise", "message_mod", ".", "DecodeError", "(", "'Unexpected end-group tag.'", ")", "except", "(", "IndexError", ",", "TypeError", ")", ":", "# Now ord(buf[p:p+1]) == ord('') gets TypeError.", "raise", "message_mod", ".", "DecodeError", "(", "'Truncated message.'", ")", "except", "struct", ".", "error", "as", "e", ":", "raise", "message_mod", ".", "DecodeError", "(", "e", ")", "return", "length", "# Return this for legacy reasons.", "cls", ".", "MergeFromString", "=", "MergeFromString", "local_ReadTag", "=", "decoder", ".", "ReadTag", "local_SkipField", "=", "decoder", ".", "SkipField", "decoders_by_tag", "=", "cls", ".", "_decoders_by_tag", "is_proto3", "=", "message_descriptor", ".", "syntax", "==", "\"proto3\"", "def", "InternalParse", "(", "self", ",", "buffer", ",", "pos", ",", "end", ")", ":", "self", ".", "_Modified", "(", ")", "field_dict", "=", "self", ".", "_fields", "unknown_field_list", "=", "self", ".", "_unknown_fields", "while", "pos", "!=", "end", ":", "(", "tag_bytes", ",", "new_pos", ")", "=", "local_ReadTag", "(", "buffer", ",", "pos", ")", "field_decoder", ",", "field_desc", "=", "decoders_by_tag", ".", "get", "(", "tag_bytes", ",", "(", "None", ",", "None", ")", ")", "if", "field_decoder", "is", "None", ":", "value_start_pos", "=", "new_pos", "new_pos", "=", "local_SkipField", "(", "buffer", ",", "new_pos", ",", "end", ",", "tag_bytes", ")", "if", "new_pos", "==", "-", "1", ":", "return", "pos", "if", "not", "is_proto3", ":", "if", "not", "unknown_field_list", ":", "unknown_field_list", "=", "self", ".", "_unknown_fields", "=", "[", "]", "unknown_field_list", ".", "append", "(", "(", "tag_bytes", ",", "buffer", "[", "value_start_pos", ":", "new_pos", "]", ")", ")", "pos", "=", "new_pos", "else", ":", "pos", "=", "field_decoder", "(", "buffer", ",", "new_pos", ",", "end", ",", "self", ",", "field_dict", ")", "if", "field_desc", ":", "self", ".", "_UpdateOneofState", "(", "field_desc", ")", "return", "pos", "cls", ".", "_InternalParse", "=", "InternalParse" ]
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1058-L1103
train
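The unknown-field branch in the parse loop above is easiest to see at the wire level: tag bytes key the decoder table, and for proto2 any unrecognized tag's raw bytes are stashed in _unknown_fields. A minimal sketch of the encoding that loop assumes (standard protobuf wire format, independent of this module):

# Field 1 with wire type 0 (varint) encodes as tag byte (1 << 3) | 0 == 0x08,
# followed by the varint payload. The tag byte is what decoders_by_tag is keyed on.
buf = b'\x08\x2a'            # field number 1, varint, value 42
tag_bytes = buf[0:1]         # the lookup key into decoders_by_tag
assert tag_bytes == b'\x08'
assert buf[1] == 42          # single-byte varint payload (Python 3 indexing)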
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddIsInitializedMethod
def _AddIsInitializedMethod(message_descriptor, cls):
  """Adds the IsInitialized and FindInitializationError methods to the
  protocol message class."""

  required_fields = [field for field in message_descriptor.fields
                     if field.label == _FieldDescriptor.LABEL_REQUIRED]

  def IsInitialized(self, errors=None):
    """Checks if all required fields of a message are set.

    Args:
      errors: A list which, if provided, will be populated with the field
        paths of all missing required fields.

    Returns:
      True iff the specified message has all required fields set.
    """
    # Performance is critical so we avoid HasField() and ListFields().

    for field in required_fields:
      if (field not in self._fields or
          (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
           not self._fields[field]._is_present_in_parent)):
        if errors is not None:
          errors.extend(self.FindInitializationErrors())
        return False

    for field, value in list(self._fields.items()):  # dict can change size!
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.label == _FieldDescriptor.LABEL_REPEATED:
          if (field.message_type.has_options and
              field.message_type.GetOptions().map_entry):
            continue
          for element in value:
            if not element.IsInitialized():
              if errors is not None:
                errors.extend(self.FindInitializationErrors())
              return False
        elif value._is_present_in_parent and not value.IsInitialized():
          if errors is not None:
            errors.extend(self.FindInitializationErrors())
          return False

    return True

  cls.IsInitialized = IsInitialized

  def FindInitializationErrors(self):
    """Finds required fields which are not initialized.

    Returns:
      A list of strings.  Each string is a path to an uninitialized field from
      the top-level message, e.g. "foo.bar[5].baz".
    """
    errors = []  # simplify things

    for field in required_fields:
      if not self.HasField(field.name):
        errors.append(field.name)

    for field, value in self.ListFields():
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.is_extension:
          name = "(%s)" % field.full_name
        else:
          name = field.name

        if _IsMapField(field):
          if _IsMessageMapField(field):
            for key in value:
              element = value[key]
              prefix = "%s[%s]." % (name, key)
              sub_errors = element.FindInitializationErrors()
              errors += [prefix + error for error in sub_errors]
          else:
            # ScalarMaps can't have any initialization errors.
            pass
        elif field.label == _FieldDescriptor.LABEL_REPEATED:
          for i in range(len(value)):
            element = value[i]
            prefix = "%s[%d]." % (name, i)
            sub_errors = element.FindInitializationErrors()
            errors += [prefix + error for error in sub_errors]
        else:
          prefix = name + "."
          sub_errors = value.FindInitializationErrors()
          errors += [prefix + error for error in sub_errors]

    return errors

  cls.FindInitializationErrors = FindInitializationErrors
python
def _AddIsInitializedMethod(message_descriptor, cls):
  """Adds the IsInitialized and FindInitializationError methods to the
  protocol message class."""

  required_fields = [field for field in message_descriptor.fields
                     if field.label == _FieldDescriptor.LABEL_REQUIRED]

  def IsInitialized(self, errors=None):
    """Checks if all required fields of a message are set.

    Args:
      errors: A list which, if provided, will be populated with the field
        paths of all missing required fields.

    Returns:
      True iff the specified message has all required fields set.
    """
    # Performance is critical so we avoid HasField() and ListFields().

    for field in required_fields:
      if (field not in self._fields or
          (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
           not self._fields[field]._is_present_in_parent)):
        if errors is not None:
          errors.extend(self.FindInitializationErrors())
        return False

    for field, value in list(self._fields.items()):  # dict can change size!
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.label == _FieldDescriptor.LABEL_REPEATED:
          if (field.message_type.has_options and
              field.message_type.GetOptions().map_entry):
            continue
          for element in value:
            if not element.IsInitialized():
              if errors is not None:
                errors.extend(self.FindInitializationErrors())
              return False
        elif value._is_present_in_parent and not value.IsInitialized():
          if errors is not None:
            errors.extend(self.FindInitializationErrors())
          return False

    return True

  cls.IsInitialized = IsInitialized

  def FindInitializationErrors(self):
    """Finds required fields which are not initialized.

    Returns:
      A list of strings.  Each string is a path to an uninitialized field from
      the top-level message, e.g. "foo.bar[5].baz".
    """
    errors = []  # simplify things

    for field in required_fields:
      if not self.HasField(field.name):
        errors.append(field.name)

    for field, value in self.ListFields():
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.is_extension:
          name = "(%s)" % field.full_name
        else:
          name = field.name

        if _IsMapField(field):
          if _IsMessageMapField(field):
            for key in value:
              element = value[key]
              prefix = "%s[%s]." % (name, key)
              sub_errors = element.FindInitializationErrors()
              errors += [prefix + error for error in sub_errors]
          else:
            # ScalarMaps can't have any initialization errors.
            pass
        elif field.label == _FieldDescriptor.LABEL_REPEATED:
          for i in range(len(value)):
            element = value[i]
            prefix = "%s[%d]." % (name, i)
            sub_errors = element.FindInitializationErrors()
            errors += [prefix + error for error in sub_errors]
        else:
          prefix = name + "."
          sub_errors = value.FindInitializationErrors()
          errors += [prefix + error for error in sub_errors]

    return errors

  cls.FindInitializationErrors = FindInitializationErrors
[ "def", "_AddIsInitializedMethod", "(", "message_descriptor", ",", "cls", ")", ":", "required_fields", "=", "[", "field", "for", "field", "in", "message_descriptor", ".", "fields", "if", "field", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REQUIRED", "]", "def", "IsInitialized", "(", "self", ",", "errors", "=", "None", ")", ":", "\"\"\"Checks if all required fields of a message are set.\n\n Args:\n errors: A list which, if provided, will be populated with the field\n paths of all missing required fields.\n\n Returns:\n True iff the specified message has all required fields set.\n \"\"\"", "# Performance is critical so we avoid HasField() and ListFields().", "for", "field", "in", "required_fields", ":", "if", "(", "field", "not", "in", "self", ".", "_fields", "or", "(", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", "and", "not", "self", ".", "_fields", "[", "field", "]", ".", "_is_present_in_parent", ")", ")", ":", "if", "errors", "is", "not", "None", ":", "errors", ".", "extend", "(", "self", ".", "FindInitializationErrors", "(", ")", ")", "return", "False", "for", "field", ",", "value", "in", "list", "(", "self", ".", "_fields", ".", "items", "(", ")", ")", ":", "# dict can change size!", "if", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "if", "field", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "if", "(", "field", ".", "message_type", ".", "has_options", "and", "field", ".", "message_type", ".", "GetOptions", "(", ")", ".", "map_entry", ")", ":", "continue", "for", "element", "in", "value", ":", "if", "not", "element", ".", "IsInitialized", "(", ")", ":", "if", "errors", "is", "not", "None", ":", "errors", ".", "extend", "(", "self", ".", "FindInitializationErrors", "(", ")", ")", "return", "False", "elif", "value", ".", "_is_present_in_parent", "and", "not", "value", ".", "IsInitialized", "(", ")", ":", "if", "errors", "is", "not", "None", ":", "errors", ".", "extend", "(", "self", ".", "FindInitializationErrors", "(", ")", ")", "return", "False", "return", "True", "cls", ".", "IsInitialized", "=", "IsInitialized", "def", "FindInitializationErrors", "(", "self", ")", ":", "\"\"\"Finds required fields which are not initialized.\n\n Returns:\n A list of strings. Each string is a path to an uninitialized field from\n the top-level message, e.g. 
\"foo.bar[5].baz\".\n \"\"\"", "errors", "=", "[", "]", "# simplify things", "for", "field", "in", "required_fields", ":", "if", "not", "self", ".", "HasField", "(", "field", ".", "name", ")", ":", "errors", ".", "append", "(", "field", ".", "name", ")", "for", "field", ",", "value", "in", "self", ".", "ListFields", "(", ")", ":", "if", "field", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "if", "field", ".", "is_extension", ":", "name", "=", "\"(%s)\"", "%", "field", ".", "full_name", "else", ":", "name", "=", "field", ".", "name", "if", "_IsMapField", "(", "field", ")", ":", "if", "_IsMessageMapField", "(", "field", ")", ":", "for", "key", "in", "value", ":", "element", "=", "value", "[", "key", "]", "prefix", "=", "\"%s[%s].\"", "%", "(", "name", ",", "key", ")", "sub_errors", "=", "element", ".", "FindInitializationErrors", "(", ")", "errors", "+=", "[", "prefix", "+", "error", "for", "error", "in", "sub_errors", "]", "else", ":", "# ScalarMaps can't have any initialization errors.", "pass", "elif", "field", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "for", "i", "in", "range", "(", "len", "(", "value", ")", ")", ":", "element", "=", "value", "[", "i", "]", "prefix", "=", "\"%s[%d].\"", "%", "(", "name", ",", "i", ")", "sub_errors", "=", "element", ".", "FindInitializationErrors", "(", ")", "errors", "+=", "[", "prefix", "+", "error", "for", "error", "in", "sub_errors", "]", "else", ":", "prefix", "=", "name", "+", "\".\"", "sub_errors", "=", "value", ".", "FindInitializationErrors", "(", ")", "errors", "+=", "[", "prefix", "+", "error", "for", "error", "in", "sub_errors", "]", "return", "errors", "cls", ".", "FindInitializationErrors", "=", "FindInitializationErrors" ]
Adds the IsInitialized and FindInitializationError methods to the protocol message class.
[ "Adds", "the", "IsInitialized", "and", "FindInitializationError", "methods", "to", "the", "protocol", "message", "class", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1106-L1198
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddMessageMethods
def _AddMessageMethods(message_descriptor, cls):
  """Adds implementations of all Message methods to cls."""
  _AddListFieldsMethod(message_descriptor, cls)
  _AddHasFieldMethod(message_descriptor, cls)
  _AddClearFieldMethod(message_descriptor, cls)
  if message_descriptor.is_extendable:
    _AddClearExtensionMethod(cls)
    _AddHasExtensionMethod(cls)
  _AddEqualsMethod(message_descriptor, cls)
  _AddStrMethod(message_descriptor, cls)
  _AddReprMethod(message_descriptor, cls)
  _AddUnicodeMethod(message_descriptor, cls)
  _AddByteSizeMethod(message_descriptor, cls)
  _AddSerializeToStringMethod(message_descriptor, cls)
  _AddSerializePartialToStringMethod(message_descriptor, cls)
  _AddMergeFromStringMethod(message_descriptor, cls)
  _AddIsInitializedMethod(message_descriptor, cls)
  _AddMergeFromMethod(cls)
  _AddWhichOneofMethod(message_descriptor, cls)
  _AddReduceMethod(cls)
  # Adds methods which do not depend on cls.
  cls.Clear = _Clear
  cls.DiscardUnknownFields = _DiscardUnknownFields
  cls._SetListener = _SetListener
python
def _AddMessageMethods(message_descriptor, cls):
  """Adds implementations of all Message methods to cls."""
  _AddListFieldsMethod(message_descriptor, cls)
  _AddHasFieldMethod(message_descriptor, cls)
  _AddClearFieldMethod(message_descriptor, cls)
  if message_descriptor.is_extendable:
    _AddClearExtensionMethod(cls)
    _AddHasExtensionMethod(cls)
  _AddEqualsMethod(message_descriptor, cls)
  _AddStrMethod(message_descriptor, cls)
  _AddReprMethod(message_descriptor, cls)
  _AddUnicodeMethod(message_descriptor, cls)
  _AddByteSizeMethod(message_descriptor, cls)
  _AddSerializeToStringMethod(message_descriptor, cls)
  _AddSerializePartialToStringMethod(message_descriptor, cls)
  _AddMergeFromStringMethod(message_descriptor, cls)
  _AddIsInitializedMethod(message_descriptor, cls)
  _AddMergeFromMethod(cls)
  _AddWhichOneofMethod(message_descriptor, cls)
  _AddReduceMethod(cls)
  # Adds methods which do not depend on cls.
  cls.Clear = _Clear
  cls.DiscardUnknownFields = _DiscardUnknownFields
  cls._SetListener = _SetListener
[ "def", "_AddMessageMethods", "(", "message_descriptor", ",", "cls", ")", ":", "_AddListFieldsMethod", "(", "message_descriptor", ",", "cls", ")", "_AddHasFieldMethod", "(", "message_descriptor", ",", "cls", ")", "_AddClearFieldMethod", "(", "message_descriptor", ",", "cls", ")", "if", "message_descriptor", ".", "is_extendable", ":", "_AddClearExtensionMethod", "(", "cls", ")", "_AddHasExtensionMethod", "(", "cls", ")", "_AddEqualsMethod", "(", "message_descriptor", ",", "cls", ")", "_AddStrMethod", "(", "message_descriptor", ",", "cls", ")", "_AddReprMethod", "(", "message_descriptor", ",", "cls", ")", "_AddUnicodeMethod", "(", "message_descriptor", ",", "cls", ")", "_AddByteSizeMethod", "(", "message_descriptor", ",", "cls", ")", "_AddSerializeToStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddSerializePartialToStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddMergeFromStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddIsInitializedMethod", "(", "message_descriptor", ",", "cls", ")", "_AddMergeFromMethod", "(", "cls", ")", "_AddWhichOneofMethod", "(", "message_descriptor", ",", "cls", ")", "_AddReduceMethod", "(", "cls", ")", "# Adds methods which do not depend on cls.", "cls", ".", "Clear", "=", "_Clear", "cls", ".", "DiscardUnknownFields", "=", "_DiscardUnknownFields", "cls", ".", "_SetListener", "=", "_SetListener" ]
Adds implementations of all Message methods to cls.
[ "Adds", "implementations", "of", "all", "Message", "methods", "to", "cls", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1295-L1318
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_AddPrivateHelperMethods
def _AddPrivateHelperMethods(message_descriptor, cls):
  """Adds implementation of private helper methods to cls."""

  def Modified(self):
    """Sets the _cached_byte_size_dirty bit to true,
    and propagates this to our listener iff this was a state change.
    """
    # Note: Some callers check _cached_byte_size_dirty before calling
    # _Modified() as an extra optimization. So, if this method is ever
    # changed such that it does stuff even when _cached_byte_size_dirty is
    # already true, the callers need to be updated.
    if not self._cached_byte_size_dirty:
      self._cached_byte_size_dirty = True
      self._listener_for_children.dirty = True
      self._is_present_in_parent = True
      self._listener.Modified()

  def _UpdateOneofState(self, field):
    """Sets field as the active field in its containing oneof.

    Will also delete currently active field in the oneof, if it is different
    from the argument. Does not mark the message as modified.
    """
    other_field = self._oneofs.setdefault(field.containing_oneof, field)
    if other_field is not field:
      del self._fields[other_field]
      self._oneofs[field.containing_oneof] = field

  cls._Modified = Modified
  cls.SetInParent = Modified
  cls._UpdateOneofState = _UpdateOneofState
python
def _AddPrivateHelperMethods(message_descriptor, cls):
  """Adds implementation of private helper methods to cls."""

  def Modified(self):
    """Sets the _cached_byte_size_dirty bit to true,
    and propagates this to our listener iff this was a state change.
    """
    # Note: Some callers check _cached_byte_size_dirty before calling
    # _Modified() as an extra optimization. So, if this method is ever
    # changed such that it does stuff even when _cached_byte_size_dirty is
    # already true, the callers need to be updated.
    if not self._cached_byte_size_dirty:
      self._cached_byte_size_dirty = True
      self._listener_for_children.dirty = True
      self._is_present_in_parent = True
      self._listener.Modified()

  def _UpdateOneofState(self, field):
    """Sets field as the active field in its containing oneof.

    Will also delete currently active field in the oneof, if it is different
    from the argument. Does not mark the message as modified.
    """
    other_field = self._oneofs.setdefault(field.containing_oneof, field)
    if other_field is not field:
      del self._fields[other_field]
      self._oneofs[field.containing_oneof] = field

  cls._Modified = Modified
  cls.SetInParent = Modified
  cls._UpdateOneofState = _UpdateOneofState
[ "def", "_AddPrivateHelperMethods", "(", "message_descriptor", ",", "cls", ")", ":", "def", "Modified", "(", "self", ")", ":", "\"\"\"Sets the _cached_byte_size_dirty bit to true,\n and propagates this to our listener iff this was a state change.\n \"\"\"", "# Note: Some callers check _cached_byte_size_dirty before calling", "# _Modified() as an extra optimization. So, if this method is ever", "# changed such that it does stuff even when _cached_byte_size_dirty is", "# already true, the callers need to be updated.", "if", "not", "self", ".", "_cached_byte_size_dirty", ":", "self", ".", "_cached_byte_size_dirty", "=", "True", "self", ".", "_listener_for_children", ".", "dirty", "=", "True", "self", ".", "_is_present_in_parent", "=", "True", "self", ".", "_listener", ".", "Modified", "(", ")", "def", "_UpdateOneofState", "(", "self", ",", "field", ")", ":", "\"\"\"Sets field as the active field in its containing oneof.\n\n Will also delete currently active field in the oneof, if it is different\n from the argument. Does not mark the message as modified.\n \"\"\"", "other_field", "=", "self", ".", "_oneofs", ".", "setdefault", "(", "field", ".", "containing_oneof", ",", "field", ")", "if", "other_field", "is", "not", "field", ":", "del", "self", ".", "_fields", "[", "other_field", "]", "self", ".", "_oneofs", "[", "field", ".", "containing_oneof", "]", "=", "field", "cls", ".", "_Modified", "=", "Modified", "cls", ".", "SetInParent", "=", "Modified", "cls", ".", "_UpdateOneofState", "=", "_UpdateOneofState" ]
Adds implementation of private helper methods to cls.
[ "Adds", "implementation", "of", "private", "helper", "methods", "to", "cls", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1321-L1352
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
_OneofListener.Modified
def Modified(self):
  """Also updates the state of the containing oneof in the parent message."""
  try:
    self._parent_message_weakref._UpdateOneofState(self._field)
    super(_OneofListener, self).Modified()
  except ReferenceError:
    pass
python
def Modified(self):
  """Also updates the state of the containing oneof in the parent message."""
  try:
    self._parent_message_weakref._UpdateOneofState(self._field)
    super(_OneofListener, self).Modified()
  except ReferenceError:
    pass
[ "def", "Modified", "(", "self", ")", ":", "try", ":", "self", ".", "_parent_message_weakref", ".", "_UpdateOneofState", "(", "self", ".", "_field", ")", "super", "(", "_OneofListener", ",", "self", ")", ".", "Modified", "(", ")", "except", "ReferenceError", ":", "pass" ]
Also updates the state of the containing oneof in the parent message.
[ "Also", "updates", "the", "state", "of", "the", "containing", "oneof", "in", "the", "parent", "message", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1413-L1419
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py
EnumTypeWrapper.Name
def Name(self, number):
  """Returns a string containing the name of an enum value."""
  if number in self._enum_type.values_by_number:
    return self._enum_type.values_by_number[number].name
  raise ValueError('Enum %s has no name defined for value %d' % (
      self._enum_type.name, number))
python
def Name(self, number):
  """Returns a string containing the name of an enum value."""
  if number in self._enum_type.values_by_number:
    return self._enum_type.values_by_number[number].name
  raise ValueError('Enum %s has no name defined for value %d' % (
      self._enum_type.name, number))
[ "def", "Name", "(", "self", ",", "number", ")", ":", "if", "number", "in", "self", ".", "_enum_type", ".", "values_by_number", ":", "return", "self", ".", "_enum_type", ".", "values_by_number", "[", "number", "]", ".", "name", "raise", "ValueError", "(", "'Enum %s has no name defined for value %d'", "%", "(", "self", ".", "_enum_type", ".", "name", ",", "number", ")", ")" ]
Returns a string containing the name of an enum value.
[ "Returns", "a", "string", "containing", "the", "name", "of", "an", "enum", "value", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py#L51-L56
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py
EnumTypeWrapper.Value
def Value(self, name):
  """Returns the value corresponding to the given enum name."""
  if name in self._enum_type.values_by_name:
    return self._enum_type.values_by_name[name].number
  raise ValueError('Enum %s has no value defined for name %s' % (
      self._enum_type.name, name))
python
def Value(self, name):
  """Returns the value corresponding to the given enum name."""
  if name in self._enum_type.values_by_name:
    return self._enum_type.values_by_name[name].number
  raise ValueError('Enum %s has no value defined for name %s' % (
      self._enum_type.name, name))
[ "def", "Value", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_enum_type", ".", "values_by_name", ":", "return", "self", ".", "_enum_type", ".", "values_by_name", "[", "name", "]", ".", "number", "raise", "ValueError", "(", "'Enum %s has no value defined for name %s'", "%", "(", "self", ".", "_enum_type", ".", "name", ",", "name", ")", ")" ]
Returns the value corresponding to the given enum name.
[ "Returns", "the", "value", "coresponding", "to", "the", "given", "enum", "name", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py#L58-L63
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py
EnumTypeWrapper.items
def items(self):
  """Return a list of the (name, value) pairs of the enum.

  These are returned in the order they were defined in the .proto file.
  """
  return [(value_descriptor.name, value_descriptor.number)
          for value_descriptor in self._enum_type.values]
python
def items(self):
  """Return a list of the (name, value) pairs of the enum.

  These are returned in the order they were defined in the .proto file.
  """
  return [(value_descriptor.name, value_descriptor.number)
          for value_descriptor in self._enum_type.values]
[ "def", "items", "(", "self", ")", ":", "return", "[", "(", "value_descriptor", ".", "name", ",", "value_descriptor", ".", "number", ")", "for", "value_descriptor", "in", "self", ".", "_enum_type", ".", "values", "]" ]
Return a list of the (name, value) pairs of the enum. These are returned in the order they were defined in the .proto file.
[ "Return", "a", "list", "of", "the", "(", "name", "value", ")", "pairs", "of", "the", "enum", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py#L83-L89
train
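Taken together, the three wrapper methods above give generated enums a dict-like API. A minimal usage sketch, assuming a hypothetical compiled proto module my_pb2 defining enum Color { RED = 0; GREEN = 1; } (any generated enum exposes the same EnumTypeWrapper interface):

from my_pb2 import Color    # hypothetical generated module

print(Color.Name(0))         # -> 'RED'
print(Color.Value('GREEN'))  # -> 1
print(Color.items())         # -> [('RED', 0), ('GREEN', 1)], in .proto order

try:
    Color.Name(42)           # no value 42 defined
except ValueError as e:
    print(e)                 # 'Enum Color has no name defined for value 42'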
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
_load_tcmps_lib
def _load_tcmps_lib():
    """
    Load global singleton of tcmps lib handler.

    This function is not used at the top level, so that the shared library is
    loaded lazily only when needed.
    """
    global _g_TCMPS_LIB
    if _g_TCMPS_LIB is None:
        # This library requires macOS 10.14 or above
        if _mac_ver() < (10, 14):
            return None

        # The symbols defined in libtcmps are now exposed directly by
        # libunity_shared. Eventually the object_detector and
        # activity_classifier toolkits will use the same Python/C++ bridge as
        # the other toolkits, and this usage of ctypes will go away.
        file_dir = _os.path.dirname(__file__)
        lib_path = _os.path.abspath(_os.path.join(file_dir, _os.pardir,
                                                  'libunity_shared.dylib'))
        try:
            _g_TCMPS_LIB = _ctypes.CDLL(lib_path, _ctypes.RTLD_LOCAL)
        except OSError:
            pass
    return _g_TCMPS_LIB
python
def _load_tcmps_lib():
    """
    Load global singleton of tcmps lib handler.

    This function is not used at the top level, so that the shared library is
    loaded lazily only when needed.
    """
    global _g_TCMPS_LIB
    if _g_TCMPS_LIB is None:
        # This library requires macOS 10.14 or above
        if _mac_ver() < (10, 14):
            return None

        # The symbols defined in libtcmps are now exposed directly by
        # libunity_shared. Eventually the object_detector and
        # activity_classifier toolkits will use the same Python/C++ bridge as
        # the other toolkits, and this usage of ctypes will go away.
        file_dir = _os.path.dirname(__file__)
        lib_path = _os.path.abspath(_os.path.join(file_dir, _os.pardir,
                                                  'libunity_shared.dylib'))
        try:
            _g_TCMPS_LIB = _ctypes.CDLL(lib_path, _ctypes.RTLD_LOCAL)
        except OSError:
            pass
    return _g_TCMPS_LIB
[ "def", "_load_tcmps_lib", "(", ")", ":", "global", "_g_TCMPS_LIB", "if", "_g_TCMPS_LIB", "is", "None", ":", "# This library requires macOS 10.14 or above", "if", "_mac_ver", "(", ")", "<", "(", "10", ",", "14", ")", ":", "return", "None", "# The symbols defined in libtcmps are now exposed directly by", "# libunity_shared. Eventually the object_detector and", "# activity_classifier toolkits will use the same Python/C++ bridge as", "# the other toolkits, and this usage of ctypes will go away.", "file_dir", "=", "_os", ".", "path", ".", "dirname", "(", "__file__", ")", "lib_path", "=", "_os", ".", "path", ".", "abspath", "(", "_os", ".", "path", ".", "join", "(", "file_dir", ",", "_os", ".", "pardir", ",", "'libunity_shared.dylib'", ")", ")", "try", ":", "_g_TCMPS_LIB", "=", "_ctypes", ".", "CDLL", "(", "lib_path", ",", "_ctypes", ".", "RTLD_LOCAL", ")", "except", "OSError", ":", "pass", "return", "_g_TCMPS_LIB" ]
Load global singleton of tcmps lib handler. This function is not used at the top level, so that the shared library is loaded lazily only when needed.
[ "Load", "global", "singleton", "of", "tcmps", "lib", "handler", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L141-L164
train
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
has_fast_mps_support
def has_fast_mps_support():
    """
    Returns True if the environment has MPS backend support
    and a high-power (fast) device is available.
    """
    lib = _load_tcmps_lib()
    if lib is None:
        return False

    c_bool = _ctypes.c_bool()
    ret = lib.TCMPSHasHighPowerMetalDevice(_ctypes.byref(c_bool))
    return ret == 0 and c_bool.value
python
def has_fast_mps_support():
    """
    Returns True if the environment has MPS backend support
    and a high-power (fast) device is available.
    """
    lib = _load_tcmps_lib()
    if lib is None:
        return False

    c_bool = _ctypes.c_bool()
    ret = lib.TCMPSHasHighPowerMetalDevice(_ctypes.byref(c_bool))
    return ret == 0 and c_bool.value
[ "def", "has_fast_mps_support", "(", ")", ":", "lib", "=", "_load_tcmps_lib", "(", ")", "if", "lib", "is", "None", ":", "return", "False", "c_bool", "=", "_ctypes", ".", "c_bool", "(", ")", "ret", "=", "lib", ".", "TCMPSHasHighPowerMetalDevice", "(", "_ctypes", ".", "byref", "(", "c_bool", ")", ")", "return", "ret", "==", "0", "and", "c_bool", ".", "value" ]
Returns True if the environment has MPS backend support and a high-power (fast) device is available.
[ "Returns", "True", "if", "the", "environment", "has", "MPS", "backend", "support", "and", "a", "high", "-", "power", "(", "fast", ")", "device", "is", "available", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L167-L178
train
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
mps_device_name
def mps_device_name():
    """
    Returns name of MPS device that will be used, else None.
    """
    lib = _load_tcmps_lib()
    if lib is None:
        return None

    n = 256
    c_name = (_ctypes.c_char * n)()
    ret = lib.TCMPSMetalDeviceName(_ctypes.byref(c_name), _ctypes.c_int32(n))
    if ret == 0:
        return _decode_bytes_to_native_string(c_name.value)
    else:
        return None
python
def mps_device_name():
    """
    Returns name of MPS device that will be used, else None.
    """
    lib = _load_tcmps_lib()
    if lib is None:
        return None

    n = 256
    c_name = (_ctypes.c_char * n)()
    ret = lib.TCMPSMetalDeviceName(_ctypes.byref(c_name), _ctypes.c_int32(n))
    if ret == 0:
        return _decode_bytes_to_native_string(c_name.value)
    else:
        return None
[ "def", "mps_device_name", "(", ")", ":", "lib", "=", "_load_tcmps_lib", "(", ")", "if", "lib", "is", "None", ":", "return", "None", "n", "=", "256", "c_name", "=", "(", "_ctypes", ".", "c_char", "*", "n", ")", "(", ")", "ret", "=", "lib", ".", "TCMPSMetalDeviceName", "(", "_ctypes", ".", "byref", "(", "c_name", ")", ",", "_ctypes", ".", "c_int32", "(", "n", ")", ")", "if", "ret", "==", "0", ":", "return", "_decode_bytes_to_native_string", "(", "c_name", ".", "value", ")", "else", ":", "return", "None" ]
Returns name of MPS device that will be used, else None.
[ "Returns", "name", "of", "MPS", "device", "that", "will", "be", "used", "else", "None", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L188-L202
train
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
mps_device_memory_limit
def mps_device_memory_limit():
    """
    Returns the memory size in bytes that can be effectively allocated on the
    MPS device that will be used, or None if no suitable device is available.
    """
    lib = _load_tcmps_lib()
    if lib is None:
        return None

    c_size = _ctypes.c_uint64()
    ret = lib.TCMPSMetalDeviceMemoryLimit(_ctypes.byref(c_size))
    return c_size.value if ret == 0 else None
python
def mps_device_memory_limit():
    """
    Returns the memory size in bytes that can be effectively allocated on the
    MPS device that will be used, or None if no suitable device is available.
    """
    lib = _load_tcmps_lib()
    if lib is None:
        return None

    c_size = _ctypes.c_uint64()
    ret = lib.TCMPSMetalDeviceMemoryLimit(_ctypes.byref(c_size))
    return c_size.value if ret == 0 else None
[ "def", "mps_device_memory_limit", "(", ")", ":", "lib", "=", "_load_tcmps_lib", "(", ")", "if", "lib", "is", "None", ":", "return", "None", "c_size", "=", "_ctypes", ".", "c_uint64", "(", ")", "ret", "=", "lib", ".", "TCMPSMetalDeviceMemoryLimit", "(", "_ctypes", ".", "byref", "(", "c_size", ")", ")", "return", "c_size", ".", "value", "if", "ret", "==", "0", "else", "None" ]
Returns the memory size in bytes that can be effectively allocated on the MPS device that will be used, or None if no suitable device is available.
[ "Returns", "the", "memory", "size", "in", "bytes", "that", "can", "be", "effectively", "allocated", "on", "the", "MPS", "device", "that", "will", "be", "used", "or", "None", "if", "no", "suitable", "device", "is", "available", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L205-L216
train
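The three helpers above form the availability probe a caller runs before picking a compute backend. A minimal sketch, under the assumption that turicreate is importable and running on macOS 10.14 or later (on any other system the helpers return False/None, so the fallback branch runs):

from turicreate.toolkits import _mps_utils

if _mps_utils.has_fast_mps_support():
    name = _mps_utils.mps_device_name()
    limit = _mps_utils.mps_device_memory_limit()
    print('Using MPS device %s (~%.1f GiB usable)' % (name, limit / 2.0 ** 30))
else:
    print('No high-power Metal device; use a different backend.')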
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
MpsFloatArray.shape
def shape(self):
    """Copy the shape from TCMPS as a new numpy ndarray."""
    # Create C variables that will serve as out parameters for TCMPS.
    shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)()  # size_t* shape_ptr
    dim = _ctypes.c_size_t()                         # size_t dim

    # Obtain pointer into memory owned by the C++ object self.handle.
    status_code = self._LIB.TCMPSGetFloatArrayShape(
        self.handle, _ctypes.byref(shape_ptr), _ctypes.byref(dim))
    assert status_code == 0, "Error calling TCMPSGetFloatArrayShape"

    return _shape_tuple_from_ctypes(shape_ptr, dim)
python
def shape(self):
    """Copy the shape from TCMPS as a new numpy ndarray."""
    # Create C variables that will serve as out parameters for TCMPS.
    shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)()  # size_t* shape_ptr
    dim = _ctypes.c_size_t()                         # size_t dim

    # Obtain pointer into memory owned by the C++ object self.handle.
    status_code = self._LIB.TCMPSGetFloatArrayShape(
        self.handle, _ctypes.byref(shape_ptr), _ctypes.byref(dim))
    assert status_code == 0, "Error calling TCMPSGetFloatArrayShape"

    return _shape_tuple_from_ctypes(shape_ptr, dim)
[ "def", "shape", "(", "self", ")", ":", "# Create C variables that will serve as out parameters for TCMPS.", "shape_ptr", "=", "_ctypes", ".", "POINTER", "(", "_ctypes", ".", "c_size_t", ")", "(", ")", "# size_t* shape_ptr", "dim", "=", "_ctypes", ".", "c_size_t", "(", ")", "# size_t dim", "# Obtain pointer into memory owned by the C++ object self.handle.", "status_code", "=", "self", ".", "_LIB", ".", "TCMPSGetFloatArrayShape", "(", "self", ".", "handle", ",", "_ctypes", ".", "byref", "(", "shape_ptr", ")", ",", "_ctypes", ".", "byref", "(", "dim", ")", ")", "assert", "status_code", "==", "0", ",", "\"Error calling TCMPSGetFloatArrayShape\"", "return", "_shape_tuple_from_ctypes", "(", "shape_ptr", ",", "dim", ")" ]
Copy the shape from TCMPS as a new numpy ndarray.
[ "Copy", "the", "shape", "from", "TCMPS", "as", "a", "new", "numpy", "ndarray", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L314-L326
train
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
MpsFloatArray.asnumpy
def asnumpy(self):
    """Copy the data from TCMPS into a new numpy ndarray"""
    # Create C variables that will serve as out parameters for TCMPS.
    data_ptr = _ctypes.POINTER(_ctypes.c_float)()    # float* data_ptr
    shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)()  # size_t* shape_ptr
    dim = _ctypes.c_size_t()                         # size_t dim

    # Obtain pointers into memory owned by the C++ object self.handle.
    # Note that this may trigger synchronization with another thread
    # producing the data.
    status_code = self._LIB.TCMPSReadFloatArray(
        self.handle, _ctypes.byref(data_ptr), _ctypes.byref(shape_ptr),
        _ctypes.byref(dim))
    assert status_code == 0, "Error calling TCMPSReadFloatArray"

    return _numpy_array_from_ctypes(data_ptr, shape_ptr, dim)
python
def asnumpy(self):
    """Copy the data from TCMPS into a new numpy ndarray"""
    # Create C variables that will serve as out parameters for TCMPS.
    data_ptr = _ctypes.POINTER(_ctypes.c_float)()    # float* data_ptr
    shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)()  # size_t* shape_ptr
    dim = _ctypes.c_size_t()                         # size_t dim

    # Obtain pointers into memory owned by the C++ object self.handle.
    # Note that this may trigger synchronization with another thread
    # producing the data.
    status_code = self._LIB.TCMPSReadFloatArray(
        self.handle, _ctypes.byref(data_ptr), _ctypes.byref(shape_ptr),
        _ctypes.byref(dim))
    assert status_code == 0, "Error calling TCMPSReadFloatArray"

    return _numpy_array_from_ctypes(data_ptr, shape_ptr, dim)
[ "def", "asnumpy", "(", "self", ")", ":", "# Create C variables that will serve as out parameters for TCMPS.", "data_ptr", "=", "_ctypes", ".", "POINTER", "(", "_ctypes", ".", "c_float", ")", "(", ")", "# float* data_ptr", "shape_ptr", "=", "_ctypes", ".", "POINTER", "(", "_ctypes", ".", "c_size_t", ")", "(", ")", "# size_t* shape_ptr", "dim", "=", "_ctypes", ".", "c_size_t", "(", ")", "# size_t dim", "# Obtain pointers into memory owned by the C++ object self.handle.", "# Note that this may trigger synchronization with another thread", "# producing the data.", "status_code", "=", "self", ".", "_LIB", ".", "TCMPSReadFloatArray", "(", "self", ".", "handle", ",", "_ctypes", ".", "byref", "(", "data_ptr", ")", ",", "_ctypes", ".", "byref", "(", "shape_ptr", ")", ",", "_ctypes", ".", "byref", "(", "dim", ")", ")", "assert", "status_code", "==", "0", ",", "\"Error calling TCMPSReadFloatArray\"", "return", "_numpy_array_from_ctypes", "(", "data_ptr", ",", "shape_ptr", ",", "dim", ")" ]
Copy the data from TCMPS into a new numpy ndarray
[ "Copy", "the", "data", "from", "TCMPS", "into", "a", "new", "numpy", "ndarray" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L328-L344
train
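shape() and asnumpy() both use the same ctypes out-parameter idiom: declare a C variable, pass it by reference, and check the returned status code. A self-contained sketch of that idiom, using a CFUNCTYPE callback as a stand-in so no Metal library is needed:

import ctypes

# Stand-in for a C function with signature: int f(size_t* out)
PROTO = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.POINTER(ctypes.c_size_t))

def _impl(out_ptr):
    out_ptr[0] = 42          # write through the pointer, as TCMPS does
    return 0                 # status code 0 means success

fake_c_func = PROTO(_impl)

out = ctypes.c_size_t()                   # size_t out;
status = fake_c_func(ctypes.byref(out))   # f(&out);
assert status == 0, "Error calling fake_c_func"
print(out.value)                          # -> 42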
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
MpsGraphAPI.train
def train(self, input, label):
    """
    Submits an input batch to the model. Returns a MpsFloatArray
    representing the batch loss. Calling asnumpy() on this value will wait
    for the batch to finish and yield the loss as a numpy array.
    """
    assert self._mode == MpsGraphMode.Train
    assert input.shape == self._ishape
    assert label.shape == self._oshape

    input_array = MpsFloatArray(input)
    label_array = MpsFloatArray(label)
    result_handle = _ctypes.c_void_p()
    status_code = self._LIB.TCMPSTrainGraph(
        self.handle, input_array.handle, label_array.handle,
        _ctypes.byref(result_handle))

    assert status_code == 0, "Error calling TCMPSTrainGraph"
    assert result_handle, "TCMPSTrainGraph unexpectedly returned NULL pointer"

    result = MpsFloatArray(result_handle)

    # Output from training should be a one-dimensional array of loss values,
    # one per example in the batch.
    assert result.shape() == (self._oshape[0],)

    return result
python
def train(self, input, label):
    """
    Submits an input batch to the model. Returns a MpsFloatArray
    representing the batch loss. Calling asnumpy() on this value will wait
    for the batch to finish and yield the loss as a numpy array.
    """
    assert self._mode == MpsGraphMode.Train
    assert input.shape == self._ishape
    assert label.shape == self._oshape

    input_array = MpsFloatArray(input)
    label_array = MpsFloatArray(label)
    result_handle = _ctypes.c_void_p()
    status_code = self._LIB.TCMPSTrainGraph(
        self.handle, input_array.handle, label_array.handle,
        _ctypes.byref(result_handle))

    assert status_code == 0, "Error calling TCMPSTrainGraph"
    assert result_handle, "TCMPSTrainGraph unexpectedly returned NULL pointer"

    result = MpsFloatArray(result_handle)

    # Output from training should be a one-dimensional array of loss values,
    # one per example in the batch.
    assert result.shape() == (self._oshape[0],)

    return result
[ "def", "train", "(", "self", ",", "input", ",", "label", ")", ":", "assert", "self", ".", "_mode", "==", "MpsGraphMode", ".", "Train", "assert", "input", ".", "shape", "==", "self", ".", "_ishape", "assert", "label", ".", "shape", "==", "self", ".", "_oshape", "input_array", "=", "MpsFloatArray", "(", "input", ")", "label_array", "=", "MpsFloatArray", "(", "label", ")", "result_handle", "=", "_ctypes", ".", "c_void_p", "(", ")", "status_code", "=", "self", ".", "_LIB", ".", "TCMPSTrainGraph", "(", "self", ".", "handle", ",", "input_array", ".", "handle", ",", "label_array", ".", "handle", ",", "_ctypes", ".", "byref", "(", "result_handle", ")", ")", "assert", "status_code", "==", "0", ",", "\"Error calling TCMPSTrainGraph\"", "assert", "result_handle", ",", "\"TCMPSTrainGraph unexpectedly returned NULL pointer\"", "result", "=", "MpsFloatArray", "(", "result_handle", ")", "# Output from training should be a one-dimensional array of loss values,", "# one per example in the batch.", "assert", "result", ".", "shape", "(", ")", "==", "(", "self", ".", "_oshape", "[", "0", "]", ",", ")", "return", "result" ]
Submits an input batch to the model. Returns a MpsFloatArray representing the batch loss. Calling asnumpy() on this value will wait for the batch to finish and yield the loss as a numpy array.
[ "Submits", "an", "input", "batch", "to", "the", "model", ".", "Returns", "a", "MpsFloatArray", "representing", "the", "batch", "loss", ".", "Calling", "asnumpy", "()", "on", "this", "value", "will", "wait", "for", "the", "batch", "to", "finish", "and", "yield", "the", "loss", "as", "a", "numpy", "array", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L463-L490
train
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
MpsGraphAPI.predict
def predict(self, input):
    """
    Submits an input batch to the model. Returns a MpsFloatArray
    representing the model predictions. Calling asnumpy() on this value will
    wait for the batch to finish and yield the predictions as a numpy array.
    """
    assert self._mode == MpsGraphMode.Inference
    assert input.shape == self._ishape

    input_array = MpsFloatArray(input)
    result_handle = _ctypes.c_void_p()
    status_code = self._LIB.TCMPSPredictGraph(
        self.handle, input_array.handle, _ctypes.byref(result_handle))

    assert status_code == 0, "Error calling TCMPSPredictGraph"
    assert result_handle, "TCMPSPredictGraph unexpectedly returned NULL pointer"

    result = MpsFloatArray(result_handle)
    assert result.shape() == self._oshape

    return result
python
def predict(self, input):
    """
    Submits an input batch to the model. Returns a MpsFloatArray
    representing the model predictions. Calling asnumpy() on this value will
    wait for the batch to finish and yield the predictions as a numpy array.
    """
    assert self._mode == MpsGraphMode.Inference
    assert input.shape == self._ishape

    input_array = MpsFloatArray(input)
    result_handle = _ctypes.c_void_p()
    status_code = self._LIB.TCMPSPredictGraph(
        self.handle, input_array.handle, _ctypes.byref(result_handle))

    assert status_code == 0, "Error calling TCMPSPredictGraph"
    assert result_handle, "TCMPSPredictGraph unexpectedly returned NULL pointer"

    result = MpsFloatArray(result_handle)
    assert result.shape() == self._oshape

    return result
[ "def", "predict", "(", "self", ",", "input", ")", ":", "assert", "self", ".", "_mode", "==", "MpsGraphMode", ".", "Inference", "assert", "input", ".", "shape", "==", "self", ".", "_ishape", "input_array", "=", "MpsFloatArray", "(", "input", ")", "result_handle", "=", "_ctypes", ".", "c_void_p", "(", ")", "status_code", "=", "self", ".", "_LIB", ".", "TCMPSPredictGraph", "(", "self", ".", "handle", ",", "input_array", ".", "handle", ",", "_ctypes", ".", "byref", "(", "result_handle", ")", ")", "assert", "status_code", "==", "0", ",", "\"Error calling TCMPSPredictGraph\"", "assert", "result_handle", ",", "\"TCMPSPredictGraph unexpectedly returned NULL pointer\"", "result", "=", "MpsFloatArray", "(", "result_handle", ")", "assert", "result", ".", "shape", "(", ")", "==", "self", ".", "_oshape", "return", "result" ]
Submits an input batch to the model. Returns a MpsFloatArray representing the model predictions. Calling asnumpy() on this value will wait for the batch to finish and yield the predictions as a numpy array.
[ "Submits", "an", "input", "batch", "to", "the", "model", ".", "Returns", "a", "MpsFloatArray", "representing", "the", "model", "predictions", ".", "Calling", "asnumpy", "()", "on", "this", "value", "will", "wait", "for", "the", "batch", "to", "finish", "and", "yield", "the", "predictions", "as", "a", "numpy", "array", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L492-L513
train
apple/turicreate
src/unity/python/turicreate/toolkits/_mps_utils.py
MpsGraphAPI.train_return_grad
def train_return_grad(self, input, grad):
    """
    Performs a forward pass from the input batch, followed by a backward
    pass using the provided gradient (in place of a loss function). Returns
    a MpsFloatArray representing the output (final gradient) of the backward
    pass. Calling asnumpy() on this value will wait for the batch to finish
    and yield the output as a numpy array.
    """
    assert self._mode == MpsGraphMode.TrainReturnGrad
    assert input.shape == self._ishape
    assert grad.shape == self._oshape

    input_array = MpsFloatArray(input)
    grad_array = MpsFloatArray(grad)
    result_handle = _ctypes.c_void_p()
    status_code = self._LIB.TCMPSTrainGraph(
        self.handle, input_array.handle, grad_array.handle,
        _ctypes.byref(result_handle))

    assert status_code == 0, "Error calling TCMPSTrainReturnGradGraph"
    assert result_handle, "TCMPSTrainReturnGradGraph unexpectedly returned NULL pointer"

    result = MpsFloatArray(result_handle)
    assert result.shape() == self._ishape

    return result
python
def train_return_grad(self, input, grad):
    """
    Performs a forward pass from the input batch, followed by a backward
    pass using the provided gradient (in place of a loss function). Returns
    a MpsFloatArray representing the output (final gradient) of the backward
    pass. Calling asnumpy() on this value will wait for the batch to finish
    and yield the output as a numpy array.
    """
    assert self._mode == MpsGraphMode.TrainReturnGrad
    assert input.shape == self._ishape
    assert grad.shape == self._oshape

    input_array = MpsFloatArray(input)
    grad_array = MpsFloatArray(grad)
    result_handle = _ctypes.c_void_p()
    status_code = self._LIB.TCMPSTrainGraph(
        self.handle, input_array.handle, grad_array.handle,
        _ctypes.byref(result_handle))

    assert status_code == 0, "Error calling TCMPSTrainReturnGradGraph"
    assert result_handle, "TCMPSTrainReturnGradGraph unexpectedly returned NULL pointer"

    result = MpsFloatArray(result_handle)
    assert result.shape() == self._ishape

    return result
[ "def", "train_return_grad", "(", "self", ",", "input", ",", "grad", ")", ":", "assert", "self", ".", "_mode", "==", "MpsGraphMode", ".", "TrainReturnGrad", "assert", "input", ".", "shape", "==", "self", ".", "_ishape", "assert", "grad", ".", "shape", "==", "self", ".", "_oshape", "input_array", "=", "MpsFloatArray", "(", "input", ")", "grad_array", "=", "MpsFloatArray", "(", "grad", ")", "result_handle", "=", "_ctypes", ".", "c_void_p", "(", ")", "status_code", "=", "self", ".", "_LIB", ".", "TCMPSTrainGraph", "(", "self", ".", "handle", ",", "input_array", ".", "handle", ",", "grad_array", ".", "handle", ",", "_ctypes", ".", "byref", "(", "result_handle", ")", ")", "assert", "status_code", "==", "0", ",", "\"Error calling TCMPSTrainReturnGradGraph\"", "assert", "result_handle", ",", "\"TCMPSTrainReturnGradGraph unexpectedly returned NULL pointer\"", "result", "=", "MpsFloatArray", "(", "result_handle", ")", "assert", "result", ".", "shape", "(", ")", "==", "self", ".", "_ishape", "return", "result" ]
Performs a forward pass from the input batch, followed by a backward pass using the provided gradient (in place of a loss function). Returns a MpsFloatArray representing the output (final gradient) of the backward pass. Calling asnumpy() on this value will wait for the batch to finish and yield the output as a numpy array.
[ "Performs", "a", "forward", "pass", "from", "the", "input", "batch", "followed", "by", "a", "backward", "pass", "using", "the", "provided", "gradient", "(", "in", "place", "of", "a", "loss", "function", ")", ".", "Returns", "a", "MpsFloatArray", "representing", "the", "output", "(", "final", "gradient", ")", "of", "the", "backward", "pass", ".", "Calling", "asnumpy", "()", "on", "this", "value", "will", "wait", "for", "the", "batch", "to", "finish", "and", "yield", "the", "output", "as", "a", "numpy", "array", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L515-L541
train
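The three MpsGraphAPI methods above share one calling convention: submit numpy batches matching _ishape/_oshape, get back an MpsFloatArray handle, and block only when asnumpy() is called. A sketch of that pattern, assuming a graph instance already configured for training with hypothetical shapes _ishape == (32, 224, 224, 3) and _oshape == (32, 1) (the constructor and configuration are not shown in these records):

import numpy as np

x = np.random.rand(32, 224, 224, 3).astype(np.float32)
y = np.random.rand(32, 1).astype(np.float32)

loss_handle = graph.train(x, y)  # returns promptly with an MpsFloatArray
loss = loss_handle.asnumpy()     # blocks until the batch has finished
print(loss.shape)                # -> (32,): one loss value per example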
Miserlou/Zappa
zappa/asynchronous.py
route_sns_task
def route_sns_task(event, context):
    """
    Gets SNS Message, deserialises the message,
    imports the function, calls the function with args
    """
    record = event['Records'][0]
    message = json.loads(
            record['Sns']['Message']
        )
    return run_message(message)
python
def route_sns_task(event, context):
    """
    Gets SNS Message, deserialises the message,
    imports the function, calls the function with args
    """
    record = event['Records'][0]
    message = json.loads(
            record['Sns']['Message']
        )
    return run_message(message)
[ "def", "route_sns_task", "(", "event", ",", "context", ")", ":", "record", "=", "event", "[", "'Records'", "]", "[", "0", "]", "message", "=", "json", ".", "loads", "(", "record", "[", "'Sns'", "]", "[", "'Message'", "]", ")", "return", "run_message", "(", "message", ")" ]
Gets SNS Message, deserialises the message, imports the function, calls the function with args
[ "Gets", "SNS", "Message", "deserialises", "the", "message", "imports", "the", "function", "calls", "the", "function", "with", "args" ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L275-L284
train
Miserlou/Zappa
zappa/asynchronous.py
run_message
def run_message(message):
    """
    Runs a function defined by a message object with keys:
    'task_path', 'args', and 'kwargs' used by lambda routing
    and a 'command' in handler.py
    """
    if message.get('capture_response', False):
        DYNAMODB_CLIENT.put_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Item={
                'id': {'S': str(message['response_id'])},
                'ttl': {'N': str(int(time.time()+600))},
                'async_status': {'S': 'in progress'},
                'async_response': {'S': str(json.dumps('N/A'))},
            }
        )

    func = import_and_get_task(message['task_path'])

    if hasattr(func, 'sync'):
        response = func.sync(
            *message['args'],
            **message['kwargs']
        )
    else:
        response = func(
            *message['args'],
            **message['kwargs']
        )

    if message.get('capture_response', False):
        DYNAMODB_CLIENT.update_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Key={'id': {'S': str(message['response_id'])}},
            UpdateExpression="SET async_response = :r, async_status = :s",
            ExpressionAttributeValues={
                ':r': {'S': str(json.dumps(response))},
                ':s': {'S': 'complete'},
            },
        )

    return response
python
def run_message(message):
    """
    Runs a function defined by a message object with keys:
    'task_path', 'args', and 'kwargs' used by lambda routing
    and a 'command' in handler.py
    """
    if message.get('capture_response', False):
        DYNAMODB_CLIENT.put_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Item={
                'id': {'S': str(message['response_id'])},
                'ttl': {'N': str(int(time.time()+600))},
                'async_status': {'S': 'in progress'},
                'async_response': {'S': str(json.dumps('N/A'))},
            }
        )

    func = import_and_get_task(message['task_path'])

    if hasattr(func, 'sync'):
        response = func.sync(
            *message['args'],
            **message['kwargs']
        )
    else:
        response = func(
            *message['args'],
            **message['kwargs']
        )

    if message.get('capture_response', False):
        DYNAMODB_CLIENT.update_item(
            TableName=ASYNC_RESPONSE_TABLE,
            Key={'id': {'S': str(message['response_id'])}},
            UpdateExpression="SET async_response = :r, async_status = :s",
            ExpressionAttributeValues={
                ':r': {'S': str(json.dumps(response))},
                ':s': {'S': 'complete'},
            },
        )

    return response
[ "def", "run_message", "(", "message", ")", ":", "if", "message", ".", "get", "(", "'capture_response'", ",", "False", ")", ":", "DYNAMODB_CLIENT", ".", "put_item", "(", "TableName", "=", "ASYNC_RESPONSE_TABLE", ",", "Item", "=", "{", "'id'", ":", "{", "'S'", ":", "str", "(", "message", "[", "'response_id'", "]", ")", "}", ",", "'ttl'", ":", "{", "'N'", ":", "str", "(", "int", "(", "time", ".", "time", "(", ")", "+", "600", ")", ")", "}", ",", "'async_status'", ":", "{", "'S'", ":", "'in progress'", "}", ",", "'async_response'", ":", "{", "'S'", ":", "str", "(", "json", ".", "dumps", "(", "'N/A'", ")", ")", "}", ",", "}", ")", "func", "=", "import_and_get_task", "(", "message", "[", "'task_path'", "]", ")", "if", "hasattr", "(", "func", ",", "'sync'", ")", ":", "response", "=", "func", ".", "sync", "(", "*", "message", "[", "'args'", "]", ",", "*", "*", "message", "[", "'kwargs'", "]", ")", "else", ":", "response", "=", "func", "(", "*", "message", "[", "'args'", "]", ",", "*", "*", "message", "[", "'kwargs'", "]", ")", "if", "message", ".", "get", "(", "'capture_response'", ",", "False", ")", ":", "DYNAMODB_CLIENT", ".", "update_item", "(", "TableName", "=", "ASYNC_RESPONSE_TABLE", ",", "Key", "=", "{", "'id'", ":", "{", "'S'", ":", "str", "(", "message", "[", "'response_id'", "]", ")", "}", "}", ",", "UpdateExpression", "=", "\"SET async_response = :r, async_status = :s\"", ",", "ExpressionAttributeValues", "=", "{", "':r'", ":", "{", "'S'", ":", "str", "(", "json", ".", "dumps", "(", "response", ")", ")", "}", ",", "':s'", ":", "{", "'S'", ":", "'complete'", "}", ",", "}", ",", ")", "return", "response" ]
Runs a function defined by a message object with keys: 'task_path', 'args', and 'kwargs' used by lambda routing and a 'command' in handler.py
[ "Runs", "a", "function", "defined", "by", "a", "message", "object", "with", "keys", ":", "task_path", "args", "and", "kwargs", "used", "by", "lambda", "routing", "and", "a", "command", "in", "handler", ".", "py" ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L287-L327
train
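For reference, the message shape run_message() consumes, read off the routing code above; the values below are illustrative, and 'response_id' is only consulted when capture_response is true:

message = {
    'task_path': 'mymodule.mytask',  # dotted path fed to import_and_get_task
    'args': [1, 2],
    'kwargs': {'verbose': True},
    'capture_response': False,       # True also records status to DynamoDB
}
# run_message(message) imports mymodule.mytask and calls it as
# mytask(1, 2, verbose=True), returning its result.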
Miserlou/Zappa
zappa/asynchronous.py
run
def run(func, args=[], kwargs={}, service='lambda', capture_response=False,
        remote_aws_lambda_function_name=None, remote_aws_region=None, **task_kwargs):
    """
    Instead of decorating a function with @task, you can just run it directly.
    If you were going to do func(*args, **kwargs), then you will call this:

    import zappa.asynchronous.run
    zappa.asynchronous.run(func, args, kwargs)

    If you want to use SNS, then do:

    zappa.asynchronous.run(func, args, kwargs, service='sns')

    and other arguments are similar to @task
    """
    lambda_function_name = remote_aws_lambda_function_name or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')
    aws_region = remote_aws_region or os.environ.get('AWS_REGION')

    task_path = get_func_task_path(func)
    return ASYNC_CLASSES[service](lambda_function_name=lambda_function_name,
                                  aws_region=aws_region,
                                  capture_response=capture_response,
                                  **task_kwargs).send(task_path, args, kwargs)
python
def run(func, args=[], kwargs={}, service='lambda', capture_response=False,
        remote_aws_lambda_function_name=None, remote_aws_region=None, **task_kwargs):
    """
    Instead of decorating a function with @task, you can just run it directly.
    If you were going to do func(*args, **kwargs), then you will call this:

    import zappa.asynchronous.run
    zappa.asynchronous.run(func, args, kwargs)

    If you want to use SNS, then do:

    zappa.asynchronous.run(func, args, kwargs, service='sns')

    and other arguments are similar to @task
    """
    lambda_function_name = remote_aws_lambda_function_name or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')
    aws_region = remote_aws_region or os.environ.get('AWS_REGION')

    task_path = get_func_task_path(func)
    return ASYNC_CLASSES[service](lambda_function_name=lambda_function_name,
                                  aws_region=aws_region,
                                  capture_response=capture_response,
                                  **task_kwargs).send(task_path, args, kwargs)
[ "def", "run", "(", "func", ",", "args", "=", "[", "]", ",", "kwargs", "=", "{", "}", ",", "service", "=", "'lambda'", ",", "capture_response", "=", "False", ",", "remote_aws_lambda_function_name", "=", "None", ",", "remote_aws_region", "=", "None", ",", "*", "*", "task_kwargs", ")", ":", "lambda_function_name", "=", "remote_aws_lambda_function_name", "or", "os", ".", "environ", ".", "get", "(", "'AWS_LAMBDA_FUNCTION_NAME'", ")", "aws_region", "=", "remote_aws_region", "or", "os", ".", "environ", ".", "get", "(", "'AWS_REGION'", ")", "task_path", "=", "get_func_task_path", "(", "func", ")", "return", "ASYNC_CLASSES", "[", "service", "]", "(", "lambda_function_name", "=", "lambda_function_name", ",", "aws_region", "=", "aws_region", ",", "capture_response", "=", "capture_response", ",", "*", "*", "task_kwargs", ")", ".", "send", "(", "task_path", ",", "args", ",", "kwargs", ")" ]
Instead of decorating a function with @task, you can just run it directly.
If you were going to do func(*args, **kwargs), then you will call this:

import zappa.asynchronous.run
zappa.asynchronous.run(func, args, kwargs)

If you want to use SNS, then do:

zappa.asynchronous.run(func, args, kwargs, service='sns')

and other arguments are similar to @task
[ "Instead", "of", "decorating", "a", "function", "with", "@task", "you", "can", "just", "run", "it", "directly", ".", "If", "you", "were", "going", "to", "do", "func", "(", "*", "args", "**", "kwargs", ")", "then", "you", "will", "call", "this", ":" ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L334-L356
train
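A hedged usage sketch for the run() helper above. The function and argument values are hypothetical, and a real dispatch assumes AWS credentials plus a deployed Zappa function (resolved here from AWS_LAMBDA_FUNCTION_NAME or passed explicitly); outside that environment the call fails at invoke time rather than running locally.

```python
# Hypothetical usage sketch for zappa.asynchronous.run.
# send_welcome_email and its arguments are made up for illustration.
from zappa.asynchronous import run

def send_welcome_email(user_id, template="welcome"):
    # Must be an importable top-level function so the remote worker can
    # re-import it from its dotted task path (see get_func_task_path below).
    print("emailing user", user_id, "with template", template)

if __name__ == "__main__":
    # Fire-and-forget through Lambda (the default service)...
    run(send_welcome_email, args=[42], kwargs={"template": "welcome"})
    # ...or through SNS, mirroring the docstring above.
    run(send_welcome_email, args=[42], service="sns")
```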
Miserlou/Zappa
zappa/asynchronous.py
task
def task(*args, **kwargs):
    """Async task decorator so that running the wrapped function dispatches it asynchronously.

    Args:
        func (function): the function to be wrapped
            Further requirements:
            func must be an independent top-level function.
                 i.e. not a class method or an anonymous function
        service (str): either 'lambda' or 'sns'
        remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
        remote_aws_region (str): the name of a remote region to make lambda/sns calls against

    Returns:
        A replacement function that dispatches func() to
        run asynchronously through the service in question
    """
    func = None
    if len(args) == 1 and callable(args[0]):
        func = args[0]

    if not kwargs:  # Default Values
        service = 'lambda'
        lambda_function_name_arg = None
        aws_region_arg = None

    else:  # Arguments were passed
        service = kwargs.get('service', 'lambda')
        lambda_function_name_arg = kwargs.get('remote_aws_lambda_function_name')
        aws_region_arg = kwargs.get('remote_aws_region')

    capture_response = kwargs.get('capture_response', False)

    def func_wrapper(func):

        task_path = get_func_task_path(func)

        @wraps(func)
        def _run_async(*args, **kwargs):
            """
            This is the wrapping async function that replaces the function
            that is decorated with @task.
            Args:
                These are just passed through to @task's func

            Assuming a valid service is passed to task() and it is run
            inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
            it dispatches the function to be run through the service variable.
            Otherwise, it runs the task synchronously.

            Returns:
                In async mode, the object returned includes state of the dispatch.
                For instance

                When outside of Lambda, the func passed to @task is run and we
                return the actual value.
            """
            lambda_function_name = lambda_function_name_arg or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')
            aws_region = aws_region_arg or os.environ.get('AWS_REGION')

            if (service in ASYNC_CLASSES) and (lambda_function_name):
                send_result = ASYNC_CLASSES[service](lambda_function_name=lambda_function_name,
                                                     aws_region=aws_region,
                                                     capture_response=capture_response).send(task_path, args, kwargs)
                return send_result
            else:
                return func(*args, **kwargs)

        update_wrapper(_run_async, func)

        _run_async.service = service
        _run_async.sync = func

        return _run_async

    return func_wrapper(func) if func else func_wrapper
python
def task(*args, **kwargs):
    """Async task decorator so that running the wrapped function dispatches it asynchronously.

    Args:
        func (function): the function to be wrapped
            Further requirements:
            func must be an independent top-level function.
                 i.e. not a class method or an anonymous function
        service (str): either 'lambda' or 'sns'
        remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
        remote_aws_region (str): the name of a remote region to make lambda/sns calls against

    Returns:
        A replacement function that dispatches func() to
        run asynchronously through the service in question
    """
    func = None
    if len(args) == 1 and callable(args[0]):
        func = args[0]

    if not kwargs:  # Default Values
        service = 'lambda'
        lambda_function_name_arg = None
        aws_region_arg = None

    else:  # Arguments were passed
        service = kwargs.get('service', 'lambda')
        lambda_function_name_arg = kwargs.get('remote_aws_lambda_function_name')
        aws_region_arg = kwargs.get('remote_aws_region')

    capture_response = kwargs.get('capture_response', False)

    def func_wrapper(func):

        task_path = get_func_task_path(func)

        @wraps(func)
        def _run_async(*args, **kwargs):
            """
            This is the wrapping async function that replaces the function
            that is decorated with @task.
            Args:
                These are just passed through to @task's func

            Assuming a valid service is passed to task() and it is run
            inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
            it dispatches the function to be run through the service variable.
            Otherwise, it runs the task synchronously.

            Returns:
                In async mode, the object returned includes state of the dispatch.
                For instance

                When outside of Lambda, the func passed to @task is run and we
                return the actual value.
            """
            lambda_function_name = lambda_function_name_arg or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')
            aws_region = aws_region_arg or os.environ.get('AWS_REGION')

            if (service in ASYNC_CLASSES) and (lambda_function_name):
                send_result = ASYNC_CLASSES[service](lambda_function_name=lambda_function_name,
                                                     aws_region=aws_region,
                                                     capture_response=capture_response).send(task_path, args, kwargs)
                return send_result
            else:
                return func(*args, **kwargs)

        update_wrapper(_run_async, func)

        _run_async.service = service
        _run_async.sync = func

        return _run_async

    return func_wrapper(func) if func else func_wrapper
[ "def", "task", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "func", "=", "None", "if", "len", "(", "args", ")", "==", "1", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "func", "=", "args", "[", "0", "]", "if", "not", "kwargs", ":", "# Default Values", "service", "=", "'lambda'", "lambda_function_name_arg", "=", "None", "aws_region_arg", "=", "None", "else", ":", "# Arguments were passed", "service", "=", "kwargs", ".", "get", "(", "'service'", ",", "'lambda'", ")", "lambda_function_name_arg", "=", "kwargs", ".", "get", "(", "'remote_aws_lambda_function_name'", ")", "aws_region_arg", "=", "kwargs", ".", "get", "(", "'remote_aws_region'", ")", "capture_response", "=", "kwargs", ".", "get", "(", "'capture_response'", ",", "False", ")", "def", "func_wrapper", "(", "func", ")", ":", "task_path", "=", "get_func_task_path", "(", "func", ")", "@", "wraps", "(", "func", ")", "def", "_run_async", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n This is the wrapping async function that replaces the function\n that is decorated with @task.\n Args:\n These are just passed through to @task's func\n\n Assuming a valid service is passed to task() and it is run\n inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),\n it dispatches the function to be run through the service variable.\n Otherwise, it runs the task synchronously.\n\n Returns:\n In async mode, the object returned includes state of the dispatch.\n For instance\n\n When outside of Lambda, the func passed to @task is run and we\n return the actual value.\n \"\"\"", "lambda_function_name", "=", "lambda_function_name_arg", "or", "os", ".", "environ", ".", "get", "(", "'AWS_LAMBDA_FUNCTION_NAME'", ")", "aws_region", "=", "aws_region_arg", "or", "os", ".", "environ", ".", "get", "(", "'AWS_REGION'", ")", "if", "(", "service", "in", "ASYNC_CLASSES", ")", "and", "(", "lambda_function_name", ")", ":", "send_result", "=", "ASYNC_CLASSES", "[", "service", "]", "(", "lambda_function_name", "=", "lambda_function_name", ",", "aws_region", "=", "aws_region", ",", "capture_response", "=", "capture_response", ")", ".", "send", "(", "task_path", ",", "args", ",", "kwargs", ")", "return", "send_result", "else", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "update_wrapper", "(", "_run_async", ",", "func", ")", "_run_async", ".", "service", "=", "service", "_run_async", ".", "sync", "=", "func", "return", "_run_async", "return", "func_wrapper", "(", "func", ")", "if", "func", "else", "func_wrapper" ]
Async task decorator so that running the wrapped function dispatches it asynchronously.

Args:
    func (function): the function to be wrapped
        Further requirements:
        func must be an independent top-level function.
             i.e. not a class method or an anonymous function
    service (str): either 'lambda' or 'sns'
    remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
    remote_aws_region (str): the name of a remote region to make lambda/sns calls against

Returns:
    A replacement function that dispatches func() to
    run asynchronously through the service in question
[ "Async", "task", "decorator", "so", "that", "running" ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L364-L439
train
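A small sketch of the two decorator forms task() accepts, bare and configured. Outside a Lambda process (no AWS_LAMBDA_FUNCTION_NAME in the environment) the wrapped functions run synchronously, so this is safe to run locally; the function bodies are invented for illustration.

```python
# Sketch of the two @task forms; runs synchronously outside Lambda.
from zappa.asynchronous import task

@task
def resize_image(key):
    return "resized:" + key

@task(service="sns", capture_response=False)
def audit_log(event):
    return "logged:" + str(event)

if __name__ == "__main__":
    print(resize_image("cat.png"))       # dispatched, or run inline locally
    print(resize_image.sync("cat.png"))  # always the original, synchronous function
    print(audit_log({"action": "login"}))
```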
Miserlou/Zappa
zappa/asynchronous.py
import_and_get_task
def import_and_get_task(task_path):
    """
    Given a modular path to a function, import that module
    and return the function.
    """
    module, function = task_path.rsplit('.', 1)
    app_module = importlib.import_module(module)
    app_function = getattr(app_module, function)
    return app_function
python
def import_and_get_task(task_path):
    """
    Given a modular path to a function, import that module
    and return the function.
    """
    module, function = task_path.rsplit('.', 1)
    app_module = importlib.import_module(module)
    app_function = getattr(app_module, function)
    return app_function
[ "def", "import_and_get_task", "(", "task_path", ")", ":", "module", ",", "function", "=", "task_path", ".", "rsplit", "(", "'.'", ",", "1", ")", "app_module", "=", "importlib", ".", "import_module", "(", "module", ")", "app_function", "=", "getattr", "(", "app_module", ",", "function", ")", "return", "app_function" ]
Given a modular path to a function, import that module and return the function.
[ "Given", "a", "modular", "path", "to", "a", "function", "import", "that", "module", "and", "return", "the", "function", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L453-L461
train
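import_and_get_task() is a thin wrapper over importlib; a self-contained equivalent, exercised against a standard-library path, looks like this:

```python
# Standalone equivalent of import_and_get_task(), resolving a dotted
# task path back to a callable.
import importlib

def resolve(task_path):
    module_name, func_name = task_path.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, func_name)

if __name__ == "__main__":
    join = resolve("os.path.join")  # any importable dotted path works
    print(join("a", "b"))           # -> 'a/b' on POSIX
```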
Miserlou/Zappa
zappa/asynchronous.py
get_func_task_path
def get_func_task_path(func):
    """
    Format the modular task path for a function via inspection.
    """
    module_path = inspect.getmodule(func).__name__
    task_path = '{module_path}.{func_name}'.format(
        module_path=module_path,
        func_name=func.__name__
    )
    return task_path
python
def get_func_task_path(func):
    """
    Format the modular task path for a function via inspection.
    """
    module_path = inspect.getmodule(func).__name__
    task_path = '{module_path}.{func_name}'.format(
        module_path=module_path,
        func_name=func.__name__
    )
    return task_path
[ "def", "get_func_task_path", "(", "func", ")", ":", "module_path", "=", "inspect", ".", "getmodule", "(", "func", ")", ".", "__name__", "task_path", "=", "'{module_path}.{func_name}'", ".", "format", "(", "module_path", "=", "module_path", ",", "func_name", "=", "func", ".", "__name__", ")", "return", "task_path" ]
Format the modular task path for a function via inspection.
[ "Format", "the", "modular", "task", "path", "for", "a", "function", "via", "inspection", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L464-L473
train
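And the inverse direction, a sketch of the inspection get_func_task_path() performs, checked against a standard-library function:

```python
# Sketch of get_func_task_path(): module name + function name gives the
# dotted path that import_and_get_task() can later resolve.
import inspect
import json

def func_task_path(func):
    return "{}.{}".format(inspect.getmodule(func).__name__, func.__name__)

print(func_task_path(json.dumps))  # -> json.dumps
```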
Miserlou/Zappa
zappa/asynchronous.py
get_async_response
def get_async_response(response_id):
    """
    Get the response from the async table
    """
    response = DYNAMODB_CLIENT.get_item(
        TableName=ASYNC_RESPONSE_TABLE,
        Key={'id': {'S': str(response_id)}}
    )
    if 'Item' not in response:
        return None

    return {
        'status': response['Item']['async_status']['S'],
        'response': json.loads(response['Item']['async_response']['S']),
    }
python
def get_async_response(response_id):
    """
    Get the response from the async table
    """
    response = DYNAMODB_CLIENT.get_item(
        TableName=ASYNC_RESPONSE_TABLE,
        Key={'id': {'S': str(response_id)}}
    )
    if 'Item' not in response:
        return None

    return {
        'status': response['Item']['async_status']['S'],
        'response': json.loads(response['Item']['async_response']['S']),
    }
[ "def", "get_async_response", "(", "response_id", ")", ":", "response", "=", "DYNAMODB_CLIENT", ".", "get_item", "(", "TableName", "=", "ASYNC_RESPONSE_TABLE", ",", "Key", "=", "{", "'id'", ":", "{", "'S'", ":", "str", "(", "response_id", ")", "}", "}", ")", "if", "'Item'", "not", "in", "response", ":", "return", "None", "return", "{", "'status'", ":", "response", "[", "'Item'", "]", "[", "'async_status'", "]", "[", "'S'", "]", ",", "'response'", ":", "json", ".", "loads", "(", "response", "[", "'Item'", "]", "[", "'async_response'", "]", "[", "'S'", "]", ")", ",", "}" ]
Get the response from the async table
[ "Get", "the", "response", "from", "the", "async", "table" ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L476-L490
train
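A local sketch of the unpacking get_async_response() does, using a canned DynamoDB-style item instead of a live DYNAMODB_CLIENT call; the field values are invented.

```python
# Offline sketch of get_async_response()'s unpacking logic.
import json

def unpack(response):
    if 'Item' not in response:
        return None
    return {
        'status': response['Item']['async_status']['S'],
        'response': json.loads(response['Item']['async_response']['S']),
    }

fake = {'Item': {'async_status': {'S': 'complete'},
                 'async_response': {'S': json.dumps({'result': 42})}}}
print(unpack(fake))  # {'status': 'complete', 'response': {'result': 42}}
print(unpack({}))    # None -- no item recorded for that response_id
```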
Miserlou/Zappa
zappa/asynchronous.py
LambdaAsyncResponse.send
def send(self, task_path, args, kwargs):
    """
    Create the message object and pass it to the actual sender.
    """
    message = {
        'task_path': task_path,
        'capture_response': self.capture_response,
        'response_id': self.response_id,
        'args': args,
        'kwargs': kwargs
    }
    self._send(message)
    return self
python
def send(self, task_path, args, kwargs):
    """
    Create the message object and pass it to the actual sender.
    """
    message = {
        'task_path': task_path,
        'capture_response': self.capture_response,
        'response_id': self.response_id,
        'args': args,
        'kwargs': kwargs
    }
    self._send(message)
    return self
[ "def", "send", "(", "self", ",", "task_path", ",", "args", ",", "kwargs", ")", ":", "message", "=", "{", "'task_path'", ":", "task_path", ",", "'capture_response'", ":", "self", ".", "capture_response", ",", "'response_id'", ":", "self", ".", "response_id", ",", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", "}", "self", ".", "_send", "(", "message", ")", "return", "self" ]
Create the message object and pass it to the actual sender.
[ "Create", "the", "message", "object", "and", "pass", "it", "to", "the", "actual", "sender", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L162-L174
train
Miserlou/Zappa
zappa/asynchronous.py
LambdaAsyncResponse._send
def _send(self, message):
    """
    Given a message, directly invoke the lambda function for this task.
    """
    message['command'] = 'zappa.asynchronous.route_lambda_task'
    payload = json.dumps(message).encode('utf-8')
    if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
        raise AsyncException("Payload too large for async Lambda call")
    self.response = self.client.invoke(
        FunctionName=self.lambda_function_name,
        InvocationType='Event',  # makes the call async
        Payload=payload
    )
    self.sent = (self.response.get('StatusCode', 0) == 202)
python
def _send(self, message):
    """
    Given a message, directly invoke the lambda function for this task.
    """
    message['command'] = 'zappa.asynchronous.route_lambda_task'
    payload = json.dumps(message).encode('utf-8')
    if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
        raise AsyncException("Payload too large for async Lambda call")
    self.response = self.client.invoke(
        FunctionName=self.lambda_function_name,
        InvocationType='Event',  # makes the call async
        Payload=payload
    )
    self.sent = (self.response.get('StatusCode', 0) == 202)
[ "def", "_send", "(", "self", ",", "message", ")", ":", "message", "[", "'command'", "]", "=", "'zappa.asynchronous.route_lambda_task'", "payload", "=", "json", ".", "dumps", "(", "message", ")", ".", "encode", "(", "'utf-8'", ")", "if", "len", "(", "payload", ")", ">", "LAMBDA_ASYNC_PAYLOAD_LIMIT", ":", "# pragma: no cover", "raise", "AsyncException", "(", "\"Payload too large for async Lambda call\"", ")", "self", ".", "response", "=", "self", ".", "client", ".", "invoke", "(", "FunctionName", "=", "self", ".", "lambda_function_name", ",", "InvocationType", "=", "'Event'", ",", "#makes the call async", "Payload", "=", "payload", ")", "self", ".", "sent", "=", "(", "self", ".", "response", ".", "get", "(", "'StatusCode'", ",", "0", ")", "==", "202", ")" ]
Given a message, directly invoke the lambda function for this task.
[ "Given", "a", "message", "directly", "invoke", "the", "lamdba", "function", "for", "this", "task", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L176-L189
train
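The payload-size guard in _send() is easy to exercise in isolation. The limit below is an assumed stand-in for the module's LAMBDA_ASYNC_PAYLOAD_LIMIT constant, not a verified AWS figure.

```python
# Standalone sketch of the _send() payload guard.
import json

LAMBDA_ASYNC_PAYLOAD_LIMIT = 128000  # assumed value in bytes, for illustration

def check_payload(message):
    payload = json.dumps(message).encode('utf-8')
    if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:
        raise ValueError("Payload too large for async Lambda call")
    return payload

msg = {'command': 'zappa.asynchronous.route_lambda_task',
       'task_path': 'myapp.tasks.do_work',  # hypothetical task path
       'args': [1], 'kwargs': {}}
print(len(check_payload(msg)), "bytes")
```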
Miserlou/Zappa
zappa/asynchronous.py
SnsAsyncResponse._send
def _send(self, message):
    """
    Given a message, publish to this topic.
    """
    message['command'] = 'zappa.asynchronous.route_sns_task'
    payload = json.dumps(message).encode('utf-8')
    if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
        raise AsyncException("Payload too large for SNS")
    self.response = self.client.publish(
        TargetArn=self.arn,
        Message=payload
    )
    self.sent = self.response.get('MessageId')
python
def _send(self, message):
    """
    Given a message, publish to this topic.
    """
    message['command'] = 'zappa.asynchronous.route_sns_task'
    payload = json.dumps(message).encode('utf-8')
    if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT:  # pragma: no cover
        raise AsyncException("Payload too large for SNS")
    self.response = self.client.publish(
        TargetArn=self.arn,
        Message=payload
    )
    self.sent = self.response.get('MessageId')
[ "def", "_send", "(", "self", ",", "message", ")", ":", "message", "[", "'command'", "]", "=", "'zappa.asynchronous.route_sns_task'", "payload", "=", "json", ".", "dumps", "(", "message", ")", ".", "encode", "(", "'utf-8'", ")", "if", "len", "(", "payload", ")", ">", "LAMBDA_ASYNC_PAYLOAD_LIMIT", ":", "# pragma: no cover", "raise", "AsyncException", "(", "\"Payload too large for SNS\"", ")", "self", ".", "response", "=", "self", ".", "client", ".", "publish", "(", "TargetArn", "=", "self", ".", "arn", ",", "Message", "=", "payload", ")", "self", ".", "sent", "=", "self", ".", "response", ".", "get", "(", "'MessageId'", ")" ]
Given a message, publish to this topic.
[ "Given", "a", "message", "publish", "to", "this", "topic", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L242-L254
train
Miserlou/Zappa
zappa/utilities.py
parse_s3_url
def parse_s3_url(url):
    """
    Parses S3 URL.

    Returns bucket (domain) and file (full path).
    """
    bucket = ''
    path = ''
    if url:
        result = urlparse(url)
        bucket = result.netloc
        path = result.path.strip('/')
    return bucket, path
python
def parse_s3_url(url):
    """
    Parses S3 URL.

    Returns bucket (domain) and file (full path).
    """
    bucket = ''
    path = ''
    if url:
        result = urlparse(url)
        bucket = result.netloc
        path = result.path.strip('/')
    return bucket, path
[ "def", "parse_s3_url", "(", "url", ")", ":", "bucket", "=", "''", "path", "=", "''", "if", "url", ":", "result", "=", "urlparse", "(", "url", ")", "bucket", "=", "result", ".", "netloc", "path", "=", "result", ".", "path", ".", "strip", "(", "'/'", ")", "return", "bucket", ",", "path" ]
Parses S3 URL. Returns bucket (domain) and file (full path).
[ "Parses", "S3", "URL", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L67-L79
train
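parse_s3_url() is pure string handling, so it can be checked directly; this Python 3 variant imports urlparse from urllib.parse, whereas the module resolves it elsewhere for 2/3 compatibility.

```python
# Self-contained Python 3 copy of parse_s3_url() with quick checks.
from urllib.parse import urlparse

def parse_s3_url(url):
    bucket, path = '', ''
    if url:
        result = urlparse(url)
        bucket = result.netloc
        path = result.path.strip('/')
    return bucket, path

print(parse_s3_url('s3://my-bucket/some/key.zip'))  # ('my-bucket', 'some/key.zip')
print(parse_s3_url(''))                             # ('', '')
```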
Miserlou/Zappa
zappa/utilities.py
string_to_timestamp
def string_to_timestamp(timestring):
    """
    Accepts a str, returns an int timestamp.
    """
    ts = None

    # Uses an extended version of Go's duration string.
    try:
        delta = durationpy.from_str(timestring)
        past = datetime.datetime.utcnow() - delta
        ts = calendar.timegm(past.timetuple())
        return ts
    except Exception as e:
        pass

    if ts:
        return ts
    # else:
    #     print("Unable to parse timestring.")
    return 0
python
def string_to_timestamp(timestring):
    """
    Accepts a str, returns an int timestamp.
    """
    ts = None

    # Uses an extended version of Go's duration string.
    try:
        delta = durationpy.from_str(timestring)
        past = datetime.datetime.utcnow() - delta
        ts = calendar.timegm(past.timetuple())
        return ts
    except Exception as e:
        pass

    if ts:
        return ts
    # else:
    #     print("Unable to parse timestring.")
    return 0
[ "def", "string_to_timestamp", "(", "timestring", ")", ":", "ts", "=", "None", "# Uses an extended version of Go's duration string.", "try", ":", "delta", "=", "durationpy", ".", "from_str", "(", "timestring", ")", "past", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "-", "delta", "ts", "=", "calendar", ".", "timegm", "(", "past", ".", "timetuple", "(", ")", ")", "return", "ts", "except", "Exception", "as", "e", ":", "pass", "if", "ts", ":", "return", "ts", "# else:", "# print(\"Unable to parse timestring.\")", "return", "0" ]
Accepts a str, returns an int timestamp.
[ "Accepts", "a", "str", "returns", "an", "int", "timestamp", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L91-L111
train
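string_to_timestamp() leans on the durationpy package that Zappa depends on; a trimmed sketch of the same conversion:

```python
# Sketch of the duration-string conversion; requires durationpy.
import calendar
import datetime
import durationpy

def to_timestamp(timestring):
    try:
        delta = durationpy.from_str(timestring)
    except Exception:
        return 0  # unparseable input falls through to 0, as above
    past = datetime.datetime.utcnow() - delta
    return calendar.timegm(past.timetuple())

print(to_timestamp('3h'))       # Unix timestamp three hours in the past
print(to_timestamp('garbage'))  # 0
```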
Miserlou/Zappa
zappa/utilities.py
detect_django_settings
def detect_django_settings():
    """
    Automatically try to discover Django settings files,
    return them as relative module paths.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, '*settings.py'):
            full = os.path.join(root, filename)
            if 'site-packages' in full:
                continue
            full = os.path.join(root, filename)
            package_path = full.replace(os.getcwd(), '')
            package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')

            matches.append(package_module)
    return matches
python
def detect_django_settings():
    """
    Automatically try to discover Django settings files,
    return them as relative module paths.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, '*settings.py'):
            full = os.path.join(root, filename)
            if 'site-packages' in full:
                continue
            full = os.path.join(root, filename)
            package_path = full.replace(os.getcwd(), '')
            package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')

            matches.append(package_module)
    return matches
[ "def", "detect_django_settings", "(", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "os", ".", "getcwd", "(", ")", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "'*settings.py'", ")", ":", "full", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "if", "'site-packages'", "in", "full", ":", "continue", "full", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "package_path", "=", "full", ".", "replace", "(", "os", ".", "getcwd", "(", ")", ",", "''", ")", "package_module", "=", "package_path", ".", "replace", "(", "os", ".", "sep", ",", "'.'", ")", ".", "split", "(", "'.'", ",", "1", ")", "[", "1", "]", ".", "replace", "(", "'.py'", ",", "''", ")", "matches", ".", "append", "(", "package_module", ")", "return", "matches" ]
Automatically try to discover Django settings files, return them as relative module paths.
[ "Automatically", "try", "to", "discover", "Django", "settings", "files", "return", "them", "as", "relative", "module", "paths", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L117-L134
train
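The interesting part of detect_django_settings() is the path-to-module conversion; isolated, it behaves like this:

```python
# Isolated sketch of the path-to-module conversion used above.
import os

def path_to_module(full, cwd):
    package_path = full.replace(cwd, '')
    return package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')

cwd = os.path.join(os.sep, 'home', 'me', 'proj')   # hypothetical project root
full = os.path.join(cwd, 'mysite', 'settings.py')
print(path_to_module(full, cwd))  # -> mysite.settings
```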
Miserlou/Zappa
zappa/utilities.py
detect_flask_apps
def detect_flask_apps():
    """
    Automatically try to discover Flask apps files,
    return them as relative module paths.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, '*.py'):
            full = os.path.join(root, filename)
            if 'site-packages' in full:
                continue

            full = os.path.join(root, filename)

            with io.open(full, 'r', encoding='utf-8') as f:
                lines = f.readlines()
                for line in lines:
                    app = None

                    # Kind of janky..
                    if '= Flask(' in line:
                        app = line.split('= Flask(')[0].strip()
                    if '=Flask(' in line:
                        app = line.split('=Flask(')[0].strip()

                    if not app:
                        continue

                    package_path = full.replace(os.getcwd(), '')
                    package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
                    app_module = package_module + '.' + app

                    matches.append(app_module)

    return matches
python
def detect_flask_apps():
    """
    Automatically try to discover Flask apps files,
    return them as relative module paths.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, '*.py'):
            full = os.path.join(root, filename)
            if 'site-packages' in full:
                continue

            full = os.path.join(root, filename)

            with io.open(full, 'r', encoding='utf-8') as f:
                lines = f.readlines()
                for line in lines:
                    app = None

                    # Kind of janky..
                    if '= Flask(' in line:
                        app = line.split('= Flask(')[0].strip()
                    if '=Flask(' in line:
                        app = line.split('=Flask(')[0].strip()

                    if not app:
                        continue

                    package_path = full.replace(os.getcwd(), '')
                    package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
                    app_module = package_module + '.' + app

                    matches.append(app_module)

    return matches
[ "def", "detect_flask_apps", "(", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "os", ".", "getcwd", "(", ")", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "'*.py'", ")", ":", "full", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "if", "'site-packages'", "in", "full", ":", "continue", "full", "=", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", "with", "io", ".", "open", "(", "full", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "for", "line", "in", "lines", ":", "app", "=", "None", "# Kind of janky..", "if", "'= Flask('", "in", "line", ":", "app", "=", "line", ".", "split", "(", "'= Flask('", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "'=Flask('", "in", "line", ":", "app", "=", "line", ".", "split", "(", "'=Flask('", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "not", "app", ":", "continue", "package_path", "=", "full", ".", "replace", "(", "os", ".", "getcwd", "(", ")", ",", "''", ")", "package_module", "=", "package_path", ".", "replace", "(", "os", ".", "sep", ",", "'.'", ")", ".", "split", "(", "'.'", ",", "1", ")", "[", "1", "]", ".", "replace", "(", "'.py'", ",", "''", ")", "app_module", "=", "package_module", "+", "'.'", "+", "app", "matches", ".", "append", "(", "app_module", ")", "return", "matches" ]
Automatically try to discover Flask apps files, return them as relative module paths.
[ "Automatically", "try", "to", "discover", "Flask", "apps", "files", "return", "them", "as", "relative", "module", "paths", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L136-L171
train
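The "kind of janky" line scan that detect_flask_apps() admits to can be exercised on its own against a few synthetic source lines:

```python
# Isolated sketch of the Flask-app line scan from detect_flask_apps().
def find_app_names(lines):
    apps = []
    for line in lines:
        app = None
        if '= Flask(' in line:
            app = line.split('= Flask(')[0].strip()
        if '=Flask(' in line:
            app = line.split('=Flask(')[0].strip()
        if app:
            apps.append(app)
    return apps

source = [
    "from flask import Flask",
    "app = Flask(__name__)",
    "api=Flask('api')",
]
print(find_app_names(source))  # ['app', 'api']
```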
Miserlou/Zappa
zappa/utilities.py
add_event_source
def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and add the event source.
    """
    event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
    # TODO: Detect changes in config and refine exists algorithm
    if not dry:
        if not event_source_obj.status(funk):
            event_source_obj.add(funk)
            return 'successful' if event_source_obj.status(funk) else 'failed'
        else:
            return 'exists'

    return 'dryrun'
python
def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and add the event source.
    """
    event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
    # TODO: Detect changes in config and refine exists algorithm
    if not dry:
        if not event_source_obj.status(funk):
            event_source_obj.add(funk)
            return 'successful' if event_source_obj.status(funk) else 'failed'
        else:
            return 'exists'

    return 'dryrun'
[ "def", "add_event_source", "(", "event_source", ",", "lambda_arn", ",", "target_function", ",", "boto_session", ",", "dry", "=", "False", ")", ":", "event_source_obj", ",", "ctx", ",", "funk", "=", "get_event_source", "(", "event_source", ",", "lambda_arn", ",", "target_function", ",", "boto_session", ",", "dry", "=", "False", ")", "# TODO: Detect changes in config and refine exists algorithm", "if", "not", "dry", ":", "if", "not", "event_source_obj", ".", "status", "(", "funk", ")", ":", "event_source_obj", ".", "add", "(", "funk", ")", "return", "'successful'", "if", "event_source_obj", ".", "status", "(", "funk", ")", "else", "'failed'", "else", ":", "return", "'exists'", "return", "'dryrun'" ]
Given an event_source dictionary, create the object and add the event source.
[ "Given", "an", "event_source", "dictionary", "create", "the", "object", "and", "add", "the", "event", "source", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L399-L413
train
Miserlou/Zappa
zappa/utilities.py
remove_event_source
def remove_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and remove the event source.
    """
    event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)

    # This is slightly dirty, but necessary for using Kappa this way.
    funk.arn = lambda_arn
    if not dry:
        rule_response = event_source_obj.remove(funk)
        return rule_response
    else:
        return event_source_obj
python
def remove_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and remove the event source.
    """
    event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)

    # This is slightly dirty, but necessary for using Kappa this way.
    funk.arn = lambda_arn
    if not dry:
        rule_response = event_source_obj.remove(funk)
        return rule_response
    else:
        return event_source_obj
[ "def", "remove_event_source", "(", "event_source", ",", "lambda_arn", ",", "target_function", ",", "boto_session", ",", "dry", "=", "False", ")", ":", "event_source_obj", ",", "ctx", ",", "funk", "=", "get_event_source", "(", "event_source", ",", "lambda_arn", ",", "target_function", ",", "boto_session", ",", "dry", "=", "False", ")", "# This is slightly dirty, but necessary for using Kappa this way.", "funk", ".", "arn", "=", "lambda_arn", "if", "not", "dry", ":", "rule_response", "=", "event_source_obj", ".", "remove", "(", "funk", ")", "return", "rule_response", "else", ":", "return", "event_source_obj" ]
Given an event_source dictionary, create the object and remove the event source.
[ "Given", "an", "event_source", "dictionary", "create", "the", "object", "and", "remove", "the", "event", "source", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L415-L428
train
Miserlou/Zappa
zappa/utilities.py
get_event_source_status
def get_event_source_status(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and get the event source status.
    """
    event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
    return event_source_obj.status(funk)
python
def get_event_source_status(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and get the event source status.
    """
    event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False)
    return event_source_obj.status(funk)
[ "def", "get_event_source_status", "(", "event_source", ",", "lambda_arn", ",", "target_function", ",", "boto_session", ",", "dry", "=", "False", ")", ":", "event_source_obj", ",", "ctx", ",", "funk", "=", "get_event_source", "(", "event_source", ",", "lambda_arn", ",", "target_function", ",", "boto_session", ",", "dry", "=", "False", ")", "return", "event_source_obj", ".", "status", "(", "funk", ")" ]
Given an event_source dictionary, create the object and get the event source status.
[ "Given", "an", "event_source", "dictionary", "create", "the", "object", "and", "get", "the", "event", "source", "status", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L430-L436
train
Miserlou/Zappa
zappa/utilities.py
check_new_version_available
def check_new_version_available(this_version):
    """
    Checks if a newer version of Zappa is available.

    Returns True if updateable, else False.
    """
    import requests

    pypi_url = 'https://pypi.python.org/pypi/Zappa/json'
    resp = requests.get(pypi_url, timeout=1.5)
    top_version = resp.json()['info']['version']

    return this_version != top_version
python
def check_new_version_available(this_version):
    """
    Checks if a newer version of Zappa is available.

    Returns True if updateable, else False.
    """
    import requests

    pypi_url = 'https://pypi.python.org/pypi/Zappa/json'
    resp = requests.get(pypi_url, timeout=1.5)
    top_version = resp.json()['info']['version']

    return this_version != top_version
[ "def", "check_new_version_available", "(", "this_version", ")", ":", "import", "requests", "pypi_url", "=", "'https://pypi.python.org/pypi/Zappa/json'", "resp", "=", "requests", ".", "get", "(", "pypi_url", ",", "timeout", "=", "1.5", ")", "top_version", "=", "resp", ".", "json", "(", ")", "[", "'info'", "]", "[", "'version'", "]", "return", "this_version", "!=", "top_version" ]
Checks if a newer version of Zappa is available.

Returns True if updateable, else False.
[ "Checks", "if", "a", "newer", "version", "of", "Zappa", "is", "available", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L442-L455
train
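Note that the comparison in check_new_version_available() is a plain inequality, so any mismatch with PyPI, including a local pre-release ahead of it, reads as an available update:

```python
# Offline illustration of the version check's inequality semantics.
def newer_available(this_version, pypi_version):
    return this_version != pypi_version

print(newer_available('0.48.0', '0.48.2'))       # True: genuinely behind
print(newer_available('0.48.2', '0.48.2'))       # False: up to date
print(newer_available('0.49.0.dev0', '0.48.2'))  # True, despite being ahead
```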
Miserlou/Zappa
zappa/utilities.py
validate_name
def validate_name(name, maxlen=80):
    """Validate name for AWS Lambda function.
    name: actual name (without `arn:aws:lambda:...:` prefix and without
        `:$LATEST`, alias or version suffix).
    maxlen: max allowed length for name without prefix and suffix.

    The value 80 was calculated from prefix with longest known region name
    and assuming that no alias or version would be longer than `$LATEST`.

    Based on AWS Lambda spec
    http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html

    Return: the name
    Raise: InvalidAwsLambdaName, if the name is invalid.
    """
    if not isinstance(name, basestring):
        msg = "Name must be of type string"
        raise InvalidAwsLambdaName(msg)
    if len(name) > maxlen:
        msg = "Name is longer than {maxlen} characters."
        raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
    if len(name) == 0:
        msg = "Name must not be empty string."
        raise InvalidAwsLambdaName(msg)
    if not re.match("^[a-zA-Z0-9-_]+$", name):
        msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
        raise InvalidAwsLambdaName(msg)
    return name
python
def validate_name(name, maxlen=80):
    """Validate name for AWS Lambda function.
    name: actual name (without `arn:aws:lambda:...:` prefix and without
        `:$LATEST`, alias or version suffix).
    maxlen: max allowed length for name without prefix and suffix.

    The value 80 was calculated from prefix with longest known region name
    and assuming that no alias or version would be longer than `$LATEST`.

    Based on AWS Lambda spec
    http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html

    Return: the name
    Raise: InvalidAwsLambdaName, if the name is invalid.
    """
    if not isinstance(name, basestring):
        msg = "Name must be of type string"
        raise InvalidAwsLambdaName(msg)
    if len(name) > maxlen:
        msg = "Name is longer than {maxlen} characters."
        raise InvalidAwsLambdaName(msg.format(maxlen=maxlen))
    if len(name) == 0:
        msg = "Name must not be empty string."
        raise InvalidAwsLambdaName(msg)
    if not re.match("^[a-zA-Z0-9-_]+$", name):
        msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -"
        raise InvalidAwsLambdaName(msg)
    return name
[ "def", "validate_name", "(", "name", ",", "maxlen", "=", "80", ")", ":", "if", "not", "isinstance", "(", "name", ",", "basestring", ")", ":", "msg", "=", "\"Name must be of type string\"", "raise", "InvalidAwsLambdaName", "(", "msg", ")", "if", "len", "(", "name", ")", ">", "maxlen", ":", "msg", "=", "\"Name is longer than {maxlen} characters.\"", "raise", "InvalidAwsLambdaName", "(", "msg", ".", "format", "(", "maxlen", "=", "maxlen", ")", ")", "if", "len", "(", "name", ")", "==", "0", ":", "msg", "=", "\"Name must not be empty string.\"", "raise", "InvalidAwsLambdaName", "(", "msg", ")", "if", "not", "re", ".", "match", "(", "\"^[a-zA-Z0-9-_]+$\"", ",", "name", ")", ":", "msg", "=", "\"Name can only contain characters from a-z, A-Z, 0-9, _ and -\"", "raise", "InvalidAwsLambdaName", "(", "msg", ")", "return", "name" ]
Validate name for AWS Lambda function.
name: actual name (without `arn:aws:lambda:...:` prefix and without
    `:$LATEST`, alias or version suffix).
maxlen: max allowed length for name without prefix and suffix.

The value 80 was calculated from prefix with longest known region name
and assuming that no alias or version would be longer than `$LATEST`.

Based on AWS Lambda spec
http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html

Return: the name
Raise: InvalidAwsLambdaName, if the name is invalid.
[ "Validate", "name", "for", "AWS", "Lambda", "function", ".", "name", ":", "actual", "name", "(", "without", "arn", ":", "aws", ":", "lambda", ":", "...", ":", "prefix", "and", "without", ":", "$LATEST", "alias", "or", "version", "suffix", ".", "maxlen", ":", "max", "allowed", "length", "for", "name", "without", "prefix", "and", "suffix", "." ]
3ccf7490a8d8b8fa74a61ee39bf44234f3567739
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L463-L490
train
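validate_name() has no AWS dependency at all, so it can be sketched and exercised verbatim; the tiny exception class stands in for InvalidAwsLambdaName, and str replaces the py2/py3 basestring used above.

```python
# Self-contained sketch of validate_name()'s rules.
import re

class InvalidAwsLambdaName(Exception):  # stand-in for the real exception
    pass

def validate_name(name, maxlen=80):
    if not isinstance(name, str):  # original uses basestring for py2 support
        raise InvalidAwsLambdaName("Name must be of type string")
    if len(name) > maxlen:
        raise InvalidAwsLambdaName("Name is longer than {} characters.".format(maxlen))
    if len(name) == 0:
        raise InvalidAwsLambdaName("Name must not be empty string.")
    if not re.match("^[a-zA-Z0-9-_]+$", name):
        raise InvalidAwsLambdaName("Name can only contain characters from a-z, A-Z, 0-9, _ and -")
    return name

print(validate_name("my-function_1"))  # returned unchanged
try:
    validate_name("bad.name")
except InvalidAwsLambdaName as e:
    print("rejected:", e)
```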