code
stringlengths
17
6.64M
def create_testing_instances(sentence, tokenizer, max_seq_length=128):
    """Create `TestInstance`s from raw text.

    Args:
      sentence: Raw input string.
      tokenizer: Tokenizer exposing a `tokenize(text)` method.
      max_seq_length: Maximum sequence length, including the two
        '[SOS]'/'[EOS]' boundary markers.

    Returns:
      A list containing one instance built from the (bounded) token list.
    """
    # Reserve two positions for the '[SOS]'/'[EOS]' boundary tokens.
    max_token_num = max_seq_length - 2
    tokens = tokenizer.tokenize(sentence)
    if len(tokens) > max_token_num:
        tokens = tokens[:max_token_num]
    # BUG FIX: the original compared with `is not`, i.e. object identity,
    # which is unreliable for strings; use `!=` equality instead.  Also
    # guard against an empty token list before indexing tokens[0]/[-1].
    if not tokens or tokens[0] != '[SOS]':
        tokens.insert(0, '[SOS]')
    if tokens[-1] != '[EOS]':
        tokens.append('[EOS]')
    instances = [create_instances_from_tokens(tokens)]
    return instances
def create_instances_from_tokens(tokens):
    """Builds and returns a single `TestingInstance` for one sentence's tokens."""
    return TestingInstance(tokens)
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
    """Checks whether the casing config is consistent with the checkpoint name."""
    # Nothing to validate without a checkpoint path.
    if not init_checkpoint:
        return

    # The released models all live in a directory named after the model,
    # e.g. ".../uncased_L-12_H-768_A-12/bert_model.ckpt".
    match = re.match('^.*?([A-Za-z0-9_-]+)/bert_model.ckpt', init_checkpoint)
    if match is None:
        return
    model_name = match.group(1)

    lower_models = [
        'uncased_L-24_H-1024_A-16', 'uncased_L-12_H-768_A-12',
        'multilingual_L-12_H-768_A-12', 'chinese_L-12_H-768_A-12',
    ]
    cased_models = [
        'cased_L-12_H-768_A-12', 'cased_L-24_H-1024_A-16',
        'multi_cased_L-12_H-768_A-12',
    ]

    # (actual_flag, case_name, opposite_flag) when the flag disagrees with
    # the checkpoint's casing; a model name is in at most one of the lists.
    mismatch = None
    if model_name in lower_models and not do_lower_case:
        mismatch = ('False', 'lowercased', 'True')
    elif model_name in cased_models and do_lower_case:
        mismatch = ('True', 'cased', 'False')

    if mismatch is not None:
        actual_flag, case_name, opposite_flag = mismatch
        raise ValueError(
            'You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. '
            'However, `%s` seems to be a %s model, so you should pass in '
            '`--do_lower_case=%s` so that the fine-tuning matches how the '
            'model was pre-training. If this error is wrong, please just '
            'comment out this check.'
            % (actual_flag, init_checkpoint, model_name, case_name,
               opposite_flag))
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    # Python 3: native `str` passes through, `bytes` is decoded.
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode('utf-8', 'ignore')
        raise ValueError('Unsupported string type: %s' % type(text))
    # Python 2: `str` (bytes) is decoded, `unicode` passes through.
    if six.PY2:
        if isinstance(text, str):
            return text.decode('utf-8', 'ignore')
        if isinstance(text, unicode):
            return text
        raise ValueError('Unsupported string type: %s' % type(text))
    raise ValueError('Not running on Python2 or Python 3?')
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""
    # Unlike convert_to_unicode, this returns the *native* str type of the
    # running interpreter: `str` on both major versions.
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode('utf-8', 'ignore')
        raise ValueError('Unsupported string type: %s' % type(text))
    if six.PY2:
        if isinstance(text, str):
            return text
        if isinstance(text, unicode):
            return text.encode('utf-8')
        raise ValueError('Unsupported string type: %s' % type(text))
    raise ValueError('Not running on Python2 or Python 3?')
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dict mapping token -> index."""
    vocab = collections.OrderedDict()
    index = 0
    with tf.gfile.GFile(vocab_file, 'r') as reader:
        while True:
            line = convert_to_unicode(reader.readline())
            # readline() returns '' only at EOF (blank lines are '\n').
            if not line:
                break
            vocab[line.strip()] = index
            index += 1
    return vocab
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    """Maps each token to its id via `vocab`; thin wrapper over convert_by_vocab."""
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    """Maps each id back to its token via `inv_vocab`; wrapper over convert_by_vocab."""
    return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    # str.split() with no separator already ignores leading/trailing
    # whitespace and collapses runs, so an explicit strip() is redundant.
    return text.split()
class FullTokenizer(object):
    """Runs end-to-end tokenization: basic tokenization followed by WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        # Inverse mapping (id -> token) for decoding.
        self.inv_vocab = {index: token for token, index in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        # Basic tokenization first, then WordPiece on each resulting token.
        return [
            sub_token
            for token in self.basic_tokenizer.tokenize(text)
            for sub_token in self.wordpiece_tokenizer.tokenize(token)
        ]

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
          do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # Surround CJK characters with spaces so each becomes its own token.
        text = self._tokenize_chinese_chars(text)
        split_tokens = []
        for token in whitespace_tokenize(text):
            if self.do_lower_case:
                token = self._run_strip_accents(token.lower())
            split_tokens.extend(self._run_split_on_punc(token))
        return whitespace_tokenize(' '.join(split_tokens))

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks
        # (category 'Mn'), which are then dropped.
        normalized = unicodedata.normalize('NFD', text)
        return ''.join(
            ch for ch in normalized if unicodedata.category(ch) != 'Mn')

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        groups = []
        start_new_word = True
        for ch in text:
            if _is_punctuation(ch):
                # Each punctuation character becomes its own token.
                groups.append([ch])
                start_new_word = True
            else:
                if start_new_word:
                    groups.append([])
                start_new_word = False
                groups[-1].append(ch)
        return [''.join(group) for group in groups]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        pieces = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                pieces.append(' %s ' % ch)
            else:
                pieces.append(ch)
        return ''.join(pieces)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # Codepoint ranges of the CJK Unified Ideograph blocks (hex forms
        # of the original decimal constants).
        cjk_ranges = (
            (0x4E00, 0x9FFF),    # CJK Unified Ideographs
            (0x3400, 0x4DBF),    # Extension A
            (0x20000, 0x2A6DF),  # Extension B
            (0x2A700, 0x2B73F),  # Extension C
            (0x2B740, 0x2B81F),  # Extension D
            (0x2B820, 0x2CEAF),  # Extension E
            (0xF900, 0xFAFF),    # Compatibility Ideographs
            (0x2F800, 0x2FA1F),  # Compatibility Ideographs Supplement
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        cleaned = []
        for ch in text:
            cp = ord(ch)
            # Drop NUL, the Unicode replacement char, and control chars.
            if cp == 0 or cp == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(' ' if _is_whitespace(ch) else ch)
        return ''.join(cleaned)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token='[UNK]', max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Uses a greedy longest-match-first algorithm against the given
        vocabulary. For example:
          input = "unaffable"
          output = ["un", "##aff", "##able"]

        Args:
          text: A single token or whitespace separated tokens. This should
            have already been passed through `BasicTokenizer`.

        Returns:
          A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            # Overly long tokens map straight to the unknown token.
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            pieces = self._greedy_split(chars)
            if pieces is None:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens

    def _greedy_split(self, chars):
        """Longest-match-first split of `chars`; None if any position fails."""
        pieces = []
        start = 0
        while start < len(chars):
            end = len(chars)
            match = None
            # Shrink the window from the right until the candidate
            # (prefixed with '##' for non-initial positions) is in vocab.
            while start < end:
                candidate = ''.join(chars[start:end])
                if start > 0:
                    candidate = '##' + candidate
                if candidate in self.vocab:
                    match = candidate
                    break
                end -= 1
            if match is None:
                return None
            pieces.append(match)
            start = end
        return pieces
def _is_whitespace(char): 'Checks whether `chars` is a whitespace character.' if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')): return True cat = unicodedata.category(char) if (cat == 'Zs'): return True return False
def _is_control(char): 'Checks whether `chars` is a control character.' if ((char == '\t') or (char == '\n') or (char == '\r')): return False cat = unicodedata.category(char) if cat.startswith('C'): return True return False
def _is_punctuation(char): 'Checks whether `chars` is a punctuation character.' cp = ord(char) if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))): return True cat = unicodedata.category(char) if cat.startswith('P'): return True return False
def load(in_file=None, format='tsv'):
    """Load a clustering from a file. By default the input file is a
    tab-separated listing of words and their cluster ID. Returns a dictionary
    of the clustering.

    Args:
        in_file (string): path to input file
        format (string): input file format (default: tsv)

    Returns:
        dict: word-to-tag mapping (empty for unknown formats)
    """
    mapping = {}
    if format == 'tsv':
        with open(in_file) as f:
            for line in f:
                tokens = line.split()
                # BUG FIX: skip blank or malformed lines instead of raising
                # IndexError on `tokens[0]` / `tokens[1]`.
                if len(tokens) >= 2:
                    mapping[tokens[0]] = int(tokens[1])
    return mapping
def save(mapping=None, out=None, format='tsv'):
    """Save a clustering (dictionary) to file. By default the output file is
    a tab-separated listing of words and their cluster ID.

    Args:
        mapping (dict): word-to-tag mapping
        out (string): path to output file
        format (string): output file format (default: tsv)
    """
    if format == 'tsv':
        with open(out, 'w') as outfile:
            # Alphabetical sort first, then a stable sort by cluster ID, so
            # words within a cluster remain alphabetized.
            for key in sorted(sorted(mapping), key=mapping.get):
                outfile.write('%s\t%s\n' % (key, mapping[key]))
def tag_string(mapping=None, text=None, unk=unk):
    """Tag a string with the corresponding cluster ID's. If a word is not
    found in the clustering, use unk.

    Args:
        mapping (dict): word-to-tag mapping
        text (string): the string to be tagged
        unk (string): what to label unknown/unseen words that are not in
            mapping (default: <unk>)

    Returns:
        string: sequence of tags
    """
    # PERFORMANCE FIX: collect tags in a list and join once instead of
    # repeated `+=` string concatenation (quadratic in sentence length).
    tags = []
    for word in text.split():
        if word in mapping:
            tags.append(str(mapping[word]))
        elif unk in mapping:
            # Unknown words take the cluster of the unk token, if present.
            tags.append(str(mapping[unk]))
        else:
            tags.append('<unk>')
    return ' '.join(tags)
def tag_stdin(mapping=None, unk=unk):
    """Calls tag_string() for each line in stdin and prints the result
    to stdout.

    Args:
        mapping (dict): word-to-tag mapping
        unk (string): what to label unknown/unseen words that are not in
            mapping (default: <unk>)
    """
    for line in sys.stdin:
        tagged = tag_string(mapping=mapping, text=line, unk=unk)
        print(tagged)
def cluster(text=None, in_file=None, classes=None, class_file=None, class_offset=None, forward_lambda=None, ngram_input=None, min_count=None, out=None, print_freqs=None, quiet=None, refine=None, rev_alternate=None, threads=None, tune_cycles=None, unidirectional=None, verbose=None, word_vectors=None):
    """
    Produce a clustering, given a textual input. There is one required
    argument (the training input text), and many optional arguments. The one
    required argument is either text or in_file. The argument text is a list
    of Python strings. The argument in_file is a path to a text file,
    consisting of preprocessed (eg. tokenized) one-sentence-per-line text.
    The use of text is probably not a good idea for large corpora.

    The other optional arguments are described by running the compiled
    clustercat binary with the --help argument, except that the leading --
    from the shell argument is removed, and - is replaced with _. So for
    example, instead of --tune-cycles 15, the Python function argument would
    be tune_cycles=15 .

    Returns a dictionary of the form { word : cluster_id } .
    """
    # Prefer the binary bundled two directories up; fall back to $PATH.
    cc_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    cc_bin = os.path.join(cc_dir, 'bin', 'clustercat')
    if os.path.isfile(cc_bin):
        cmd_str = [cc_bin]
    elif distutils.spawn.find_executable('clustercat'):
        cmd_str = ['clustercat']
    else:
        print('Error: Unable to access clustercat binary from either ', cc_dir,
              " or $PATH.\nIn the parent directory, first run 'make install', "
              'and then add $HOME/bin/ to your $PATH, by typing the following '
              "command:\necho 'PATH=$PATH:$HOME/bin' >> $HOME/.bashrc && "
              'source $HOME/.bashrc')
        # BUG FIX: use sys.exit() instead of the site-provided exit()
        # builtin, which is not guaranteed to exist in all interpreters.
        sys.exit(1)
    # Function-argument name -> clustercat CLI flag (value-carrying flags).
    clustercat_params = {'in_file': '--in', 'out': '--out', 'classes': '--classes', 'class_file': '--class-file', 'class_offset': '--class-offset', 'forward_lambda': '--forward-lambda', 'ngram_input': '--ngram-input', 'min_count': '--min-count', 'refine': '--refine', 'rev_alternate': '--rev-alternate', 'threads': '--threads', 'tune_cycles': '--tune-cycles', 'word_vectors': '--word-vectors'}
    # Boolean flags, appended without a value.
    boolean_params = {'print_freqs': '--print-freqs', 'quiet': '--quiet', 'unidirectional': '--unidirectional', 'verbose': '--verbose'}
    # NOTE: this loop inspects locals() to pick up the function arguments;
    # do not introduce new local variables above this point whose names
    # collide with keys of the two dicts above.
    for (arg, value) in locals().items():
        if (arg in boolean_params) and (value is True):
            cmd_str.append(boolean_params[arg])
        elif (arg in clustercat_params) and (value is not None):
            cmd_str.append(clustercat_params[arg])
            cmd_str.append(str(value))
    cmd_out = ''
    if text and (not in_file):
        # Feed the in-memory sentences to clustercat through a pipe.
        p1 = subprocess.Popen(['printf', '\n'.join(text)], stdout=subprocess.PIPE, universal_newlines=True)
        p2 = subprocess.Popen(cmd_str, stdin=p1.stdout, stdout=subprocess.PIPE, universal_newlines=True)
        p1.stdout.close()
        cmd_out = p2.communicate()[0]
    elif in_file and (not text):
        cmd_out = subprocess.check_output(cmd_str, universal_newlines=True)
    else:
        # Deliberately non-fatal: an empty mapping is returned below.
        print('Error: supply either text or in_file argument to clustercat.cluster(), but not both')
    clusters = {}
    for line in cmd_out.split('\n'):
        split_line = line.split('\t')
        try:
            clusters[split_line[0]] = int(split_line[1])
        except (IndexError, ValueError):
            # BUG FIX: was a bare `except: pass`, which swallowed every
            # exception; only skip blank/malformed output lines.
            pass
    return clusters
def main():
    """No real reason to use this as a standalone script. Just invoke the
    C-compiled binary for standalone applications. But here you go, anyways.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Clusters words, or tags them')
    # BUG FIX: `--in` parses into namespace attribute 'in', which is a
    # Python keyword and was never read, so the option silently did
    # nothing. Give it an explicit dest and actually use it below.
    parser.add_argument('-i', '--in', dest='in_file', help='Load input training file')
    parser.add_argument('-o', '--out', help='Save final mapping to file')
    parser.add_argument('-t', '--tag', help='Tag stdin input, using clustering in supplied argument')
    args = parser.parse_args()
    if args.tag:
        # Tagging mode: load an existing clustering and tag stdin.
        mapping = load(in_file=args.tag)
        tag_stdin(mapping=mapping)
    else:
        # Clustering mode: train from --in if given, else from stdin.
        if args.in_file:
            mapping = cluster(in_file=args.in_file)
        else:
            mapping = cluster(text=sys.stdin)
        if args.out:
            save(mapping=mapping, out=args.out)
        else:
            print(mapping)
# --------------------------------------------------------------------------
# Auto-generated protocol-buffer message stubs (caffe.proto, first section).
# Each class binds a generated descriptor (`_<NAME>`) and uses the protobuf
# `reflection.GeneratedProtocolMessageType` metaclass, which synthesizes all
# fields and methods from the descriptor at class-creation time.
# NOTE(review): `__metaclass__` is Python-2-only syntax and is ignored on
# Python 3 — presumably this file targets Python 2 / legacy protobuf;
# confirm before reuse.
class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE

class NetState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATE

class NetStateRule(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATERULE

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class TransformationParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TRANSFORMATIONPARAMETER

class AccuracyParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ACCURACYPARAMETER

class ArgMaxParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ARGMAXPARAMETER

class ConcatParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONCATPARAMETER

class ContrastiveLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONTRASTIVELOSSPARAMETER

class ConvolutionParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONVOLUTIONPARAMETER

class DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATAPARAMETER

class DropoutParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DROPOUTPARAMETER

class DummyDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DUMMYDATAPARAMETER

class EltwiseParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ELTWISEPARAMETER

class ThresholdParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _THRESHOLDPARAMETER

class HDF5DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5DATAPARAMETER

class HDF5OutputParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5OUTPUTPARAMETER

class HingeLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HINGELOSSPARAMETER

class ImageDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _IMAGEDATAPARAMETER

class InfogainLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INFOGAINLOSSPARAMETER

class InnerProductParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER

class LRNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LRNPARAMETER

class MemoryDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER

class MVNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MVNPARAMETER

class PoolingParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POOLINGPARAMETER

class PowerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER

class ReLUParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _RELUPARAMETER

class SigmoidParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SIGMOIDPARAMETER

class SliceParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SLICEPARAMETER

class SoftmaxParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOFTMAXPARAMETER

class TanHParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TANHPARAMETER

class WindowDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _WINDOWDATAPARAMETER

class V0LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V0LAYERPARAMETER
# --------------------------------------------------------------------------
# Auto-generated protocol-buffer message stubs (second generated section,
# newer caffe.proto revision: adds BlobShape, ParamSpec, LossParameter,
# EmbedParameter, ExpParameter, FlattenParameter, LogParameter,
# PythonParameter).
# NOTE(review): this section REDEFINES many class names from the section
# above (BlobProto, Datum, LayerParameter, ...); at import time the later
# definitions win. Presumably two generated files were concatenated —
# verify which revision callers expect before deduplicating.
class BlobShape(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBSHAPE

class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE

class NetState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATE

class NetStateRule(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATERULE

class ParamSpec(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PARAMSPEC

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class TransformationParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TRANSFORMATIONPARAMETER

class LossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LOSSPARAMETER

class AccuracyParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ACCURACYPARAMETER

class ArgMaxParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ARGMAXPARAMETER

class ConcatParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONCATPARAMETER

class ContrastiveLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONTRASTIVELOSSPARAMETER

class ConvolutionParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONVOLUTIONPARAMETER

class DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATAPARAMETER

class DropoutParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DROPOUTPARAMETER

class DummyDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DUMMYDATAPARAMETER

class EltwiseParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ELTWISEPARAMETER

class EmbedParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EMBEDPARAMETER

class ExpParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EXPPARAMETER

class FlattenParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FLATTENPARAMETER

class HDF5DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5DATAPARAMETER

class HDF5OutputParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5OUTPUTPARAMETER

class HingeLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HINGELOSSPARAMETER

class ImageDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _IMAGEDATAPARAMETER

class InfogainLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INFOGAINLOSSPARAMETER

class InnerProductParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER

class LogParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LOGPARAMETER

class LRNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LRNPARAMETER

class MemoryDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER

class MVNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MVNPARAMETER

class PoolingParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POOLINGPARAMETER

class PowerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER

class PythonParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PYTHONPARAMETER