Instruction
stringlengths
362
7.83k
output_code
stringlengths
1
945
Here is a snippet: <|code_start|> # Y_test = np.array([test_doc_labels[i] for i in test_doc_codes]) # # DBN # X_train = np.array(load_marshal(args.train_doc_codes)) # Y_train = np.array(load_marshal(args.train_doc_labels)) # X_test = np.array(load_marshal(args.test_doc_codes)) # Y_test = np.array(load_marshal(args.test_doc_labels)) seed = 7 np.random.seed(seed) val_idx = np.random.choice(range(X_train.shape[0]), args.n_val, replace=False) train_idx = list(set(range(X_train.shape[0])) - set(val_idx)) X_new_train = X_train[train_idx] Y_new_train = Y_train[train_idx] X_new_val = X_train[val_idx] Y_new_val = Y_train[val_idx] print 'train: %s, val: %s, test: %s' % (X_new_train.shape[0], X_new_val.shape[0], X_test.shape[0]) results = retrieval(X_new_train, Y_new_train, X_new_val, Y_new_val,\ fractions=[0.001], multilabel=args.multilabel) print 'precision on val set: %s' % results if not args.query_info: results = retrieval(X_train, Y_train, X_test, Y_test,\ fractions=[0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0], multilabel=args.multilabel) else: query_docs = load_corpus(args.query_info)['docs'] len_test = [sum(query_docs[i].values()) for i in test_doc_codes] <|code_end|> . 
Write the next line using the current file imports: import argparse import numpy as np import pdb;pdb.set_trace() from keras.utils import np_utils from autoencoder.testing.retrieval import retrieval, retrieval_by_doclength from autoencoder.utils.io_utils import load_json, load_marshal from autoencoder.preprocessing.preprocessing import load_corpus and context from other files: # Path: autoencoder/testing/retrieval.py # def retrieval(X_train, Y_train, X_test, Y_test, fractions=[0.01, 0.5, 1.0], multilabel=False): # db_size = len(X_train) # n_queries = len(X_test) # X_train = unitmatrix(X_train) # normalize # X_test = unitmatrix(X_test) # score = X_test.dot(X_train.T) # X_train = None # X_test = None # precisions = defaultdict(float) # # for idx in range(n_queries): # retrieval_idx = score[idx].argsort()[::-1] # target = Y_test[idx] # for fr in fractions: # ntop = int(fr * db_size) # pr = float(len([i for i in retrieval_idx[:ntop] if hit(Y_train[i], target, multilabel)])) / ntop # precisions[fr] += pr # precisions = dict([(x, y / n_queries) for x, y in precisions.iteritems()]) # # return sorted(precisions.items(), key=lambda d:d[0]) # # def retrieval_by_doclength(X_train, Y_train, X_test, Y_test, len_test, fraction=0.001, len_bin=600, multilabel=False): # X_train = unitmatrix(X_train) # normalize # X_test = unitmatrix(X_test) # score = X_test.dot(X_train.T) # precisions = defaultdict(list) # n_queries = len(X_test) # ntop = int(fraction * len(X_train)) # # bins = [50, 100, 200, 300, 500, 1000, 2000, 3000, 4000, 5000] # bins = [100, 120, 150, 200, 300, 1000, 1500, 2000, 4000] # # for idx in range(n_queries): # retrieval_idx = score[idx].argsort()[::-1] # pr = float(len([i for i in retrieval_idx[:ntop] if hit(Y_train[i], Y_test[idx], multilabel)])) / ntop # for each in bins: # if len_test[idx] < each: # precisions[each].append(pr) # break # import pdb;pdb.set_trace() # precisions = dict([(x, sum(y) / len(y)) for x, y in precisions.iteritems()]) # # return 
sorted(precisions.items(), key=lambda d:d[0]) # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def load_marshal(path_to_file): # try: # with open(path_to_file, 'r') as f: # data = m.load(f) # except Exception as e: # raise e # # return data # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus , which may include functions, classes, or code. Output only the next line.
results = retrieval_by_doclength(X_train, Y_train, X_test, Y_test, len_test, fraction=0.001, multilabel=args.multilabel)
Given snippet: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('train_doc_codes', type=str, help='path to the train doc codes file') parser.add_argument('train_doc_labels', type=str, help='path to the train doc labels file') parser.add_argument('test_doc_codes', type=str, help='path to the test doc codes file') parser.add_argument('test_doc_labels', type=str, help='path to the test doc labels file') parser.add_argument('-nv', '--n_val', type=int, default=1000, help='size of validation set (default 1000)') parser.add_argument('-qi', '--query_info', type=str, help='path to the query corpus (for geting doc length info)') parser.add_argument('-ml', '--multilabel', action='store_true', help='multilabel flag') args = parser.parse_args() # autoencoder <|code_end|> , continue by predicting the next line. Consider current file imports: import argparse import numpy as np import pdb;pdb.set_trace() from keras.utils import np_utils from autoencoder.testing.retrieval import retrieval, retrieval_by_doclength from autoencoder.utils.io_utils import load_json, load_marshal from autoencoder.preprocessing.preprocessing import load_corpus and context: # Path: autoencoder/testing/retrieval.py # def retrieval(X_train, Y_train, X_test, Y_test, fractions=[0.01, 0.5, 1.0], multilabel=False): # db_size = len(X_train) # n_queries = len(X_test) # X_train = unitmatrix(X_train) # normalize # X_test = unitmatrix(X_test) # score = X_test.dot(X_train.T) # X_train = None # X_test = None # precisions = defaultdict(float) # # for idx in range(n_queries): # retrieval_idx = score[idx].argsort()[::-1] # target = Y_test[idx] # for fr in fractions: # ntop = int(fr * db_size) # pr = float(len([i for i in retrieval_idx[:ntop] if hit(Y_train[i], target, multilabel)])) / ntop # precisions[fr] += pr # precisions = dict([(x, y / n_queries) for x, y in precisions.iteritems()]) # # return 
sorted(precisions.items(), key=lambda d:d[0]) # # def retrieval_by_doclength(X_train, Y_train, X_test, Y_test, len_test, fraction=0.001, len_bin=600, multilabel=False): # X_train = unitmatrix(X_train) # normalize # X_test = unitmatrix(X_test) # score = X_test.dot(X_train.T) # precisions = defaultdict(list) # n_queries = len(X_test) # ntop = int(fraction * len(X_train)) # # bins = [50, 100, 200, 300, 500, 1000, 2000, 3000, 4000, 5000] # bins = [100, 120, 150, 200, 300, 1000, 1500, 2000, 4000] # # for idx in range(n_queries): # retrieval_idx = score[idx].argsort()[::-1] # pr = float(len([i for i in retrieval_idx[:ntop] if hit(Y_train[i], Y_test[idx], multilabel)])) / ntop # for each in bins: # if len_test[idx] < each: # precisions[each].append(pr) # break # import pdb;pdb.set_trace() # precisions = dict([(x, sum(y) / len(y)) for x, y in precisions.iteritems()]) # # return sorted(precisions.items(), key=lambda d:d[0]) # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def load_marshal(path_to_file): # try: # with open(path_to_file, 'r') as f: # data = m.load(f) # except Exception as e: # raise e # # return data # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus which might include code, classes, or functions. Output only the next line.
train_doc_codes = load_json(args.train_doc_codes)
Next line prediction: <|code_start|> # Y_train = np.array([train_doc_labels[i] for i in train_doc_codes]) # X_test = np.r_[X_test] # Y_test = np.array([test_doc_labels[i] for i in test_doc_codes]) # # DBN # X_train = np.array(load_marshal(args.train_doc_codes)) # Y_train = np.array(load_marshal(args.train_doc_labels)) # X_test = np.array(load_marshal(args.test_doc_codes)) # Y_test = np.array(load_marshal(args.test_doc_labels)) seed = 7 np.random.seed(seed) val_idx = np.random.choice(range(X_train.shape[0]), args.n_val, replace=False) train_idx = list(set(range(X_train.shape[0])) - set(val_idx)) X_new_train = X_train[train_idx] Y_new_train = Y_train[train_idx] X_new_val = X_train[val_idx] Y_new_val = Y_train[val_idx] print 'train: %s, val: %s, test: %s' % (X_new_train.shape[0], X_new_val.shape[0], X_test.shape[0]) results = retrieval(X_new_train, Y_new_train, X_new_val, Y_new_val,\ fractions=[0.001], multilabel=args.multilabel) print 'precision on val set: %s' % results if not args.query_info: results = retrieval(X_train, Y_train, X_test, Y_test,\ fractions=[0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0], multilabel=args.multilabel) else: <|code_end|> . 
Use current file imports: (import argparse import numpy as np import pdb;pdb.set_trace() from keras.utils import np_utils from autoencoder.testing.retrieval import retrieval, retrieval_by_doclength from autoencoder.utils.io_utils import load_json, load_marshal from autoencoder.preprocessing.preprocessing import load_corpus) and context including class names, function names, or small code snippets from other files: # Path: autoencoder/testing/retrieval.py # def retrieval(X_train, Y_train, X_test, Y_test, fractions=[0.01, 0.5, 1.0], multilabel=False): # db_size = len(X_train) # n_queries = len(X_test) # X_train = unitmatrix(X_train) # normalize # X_test = unitmatrix(X_test) # score = X_test.dot(X_train.T) # X_train = None # X_test = None # precisions = defaultdict(float) # # for idx in range(n_queries): # retrieval_idx = score[idx].argsort()[::-1] # target = Y_test[idx] # for fr in fractions: # ntop = int(fr * db_size) # pr = float(len([i for i in retrieval_idx[:ntop] if hit(Y_train[i], target, multilabel)])) / ntop # precisions[fr] += pr # precisions = dict([(x, y / n_queries) for x, y in precisions.iteritems()]) # # return sorted(precisions.items(), key=lambda d:d[0]) # # def retrieval_by_doclength(X_train, Y_train, X_test, Y_test, len_test, fraction=0.001, len_bin=600, multilabel=False): # X_train = unitmatrix(X_train) # normalize # X_test = unitmatrix(X_test) # score = X_test.dot(X_train.T) # precisions = defaultdict(list) # n_queries = len(X_test) # ntop = int(fraction * len(X_train)) # # bins = [50, 100, 200, 300, 500, 1000, 2000, 3000, 4000, 5000] # bins = [100, 120, 150, 200, 300, 1000, 1500, 2000, 4000] # # for idx in range(n_queries): # retrieval_idx = score[idx].argsort()[::-1] # pr = float(len([i for i in retrieval_idx[:ntop] if hit(Y_train[i], Y_test[idx], multilabel)])) / ntop # for each in bins: # if len_test[idx] < each: # precisions[each].append(pr) # break # import pdb;pdb.set_trace() # precisions = dict([(x, sum(y) / len(y)) for x, y in 
precisions.iteritems()]) # # return sorted(precisions.items(), key=lambda d:d[0]) # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def load_marshal(path_to_file): # try: # with open(path_to_file, 'r') as f: # data = m.load(f) # except Exception as e: # raise e # # return data # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus . Output only the next line.
query_docs = load_corpus(args.query_info)['docs']
Continue the code snippet: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-l', '--label', type=str, required=True, help='path to the input label file') parser.add_argument('-c', '--corpus', type=str, required=True, help='path to the constructed corpus file') parser.add_argument('-o', '--output', type=str, required=True, help='path to the output file') args = parser.parse_args() <|code_end|> . Use current file imports: import argparse from autoencoder.datasets.reuters import extract_labels from autoencoder.utils.io_utils import load_json and context (classes, functions, or code) from other files: # Path: autoencoder/datasets/reuters.py # def extract_labels(docs, path, output): # # it will be fast if docs is a dict instead of a list # doc_labels = defaultdict(set) # with open(path, 'r') as f: # for line in f: # label, did, _ = line.strip('\n').split() # if did in docs: # doc_labels[did].add(label) # doc_labels = dict([(x, list(y)) for x, y in doc_labels.iteritems()]) # dump_json(doc_labels, output) # # return doc_labels # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data . Output only the next line.
extract_labels(load_json(args.corpus)['docs'], args.label, args.output)
Here is a snippet: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-l', '--label', type=str, required=True, help='path to the input label file') parser.add_argument('-c', '--corpus', type=str, required=True, help='path to the constructed corpus file') parser.add_argument('-o', '--output', type=str, required=True, help='path to the output file') args = parser.parse_args() <|code_end|> . Write the next line using the current file imports: import argparse from autoencoder.datasets.reuters import extract_labels from autoencoder.utils.io_utils import load_json and context from other files: # Path: autoencoder/datasets/reuters.py # def extract_labels(docs, path, output): # # it will be fast if docs is a dict instead of a list # doc_labels = defaultdict(set) # with open(path, 'r') as f: # for line in f: # label, did, _ = line.strip('\n').split() # if did in docs: # doc_labels[did].add(label) # doc_labels = dict([(x, list(y)) for x, y in doc_labels.iteritems()]) # dump_json(doc_labels, output) # # return doc_labels # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data , which may include functions, classes, or code. Output only the next line.
extract_labels(load_json(args.corpus)['docs'], args.label, args.output)
Predict the next line after this snippet: <|code_start|>from __future__ import absolute_import def get_doc_codes(model, bow, vocab, avg=True): vec = np.zeros(model.vector_size) count = 0 for idx in bow: word = vocab[int(idx)] val = bow[idx] if word in model: vec += model[word] * val count += val elif word.title() in model: vec += model[word.title()] * val count += val elif word.upper() in model: vec += model[word.upper()] * val count += val return vec / count if avg else vec def load_w2v(file): model = Word2Vec.load_word2vec_format(file, binary=True) return model def doc_word2vec(model, corpus, vocab, output, avg=True): doc_codes = {} for key, bow in corpus.iteritems(): vec = get_doc_codes(model, bow, vocab, avg) doc_codes[key] = vec.tolist() <|code_end|> using the current file's imports: import numpy as np from gensim.models import Word2Vec from ..utils.io_utils import dump_json and any relevant context from other files: # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
dump_json(doc_codes, output)
Given snippet: <|code_start|> model.save(filepath, overwrite=True) else: if self.verbose > 0: print('Epoch %05d: %s did not improve' % (epoch, self.monitor)) else: if self.verbose > 0: print('Epoch %05d: saving model to %s' % (epoch, filepath)) if self.save_weights_only: model.save_weights(filepath, overwrite=True) else: model.save(filepath, overwrite=True) class VisualWeights(Callback): def __init__(self, save_path, per_epoch=15): super(VisualWeights, self).__init__() self.per_epoch = per_epoch self.filename, self.ext = os.path.splitext(save_path) def on_epoch_end(self, epoch, logs=None): """Called at the end of an epoch. # Arguments epoch: integer, index of epoch. logs: dictionary of logs. """ if epoch % self.per_epoch == 0: weights = self.model.get_weights()[0] # weights /= np.max(np.abs(weights)) weights = unitmatrix(weights, axis=0) # normalize # weights[np.abs(weights) < 1e-2] = 0 <|code_end|> , continue by predicting the next line. Consider current file imports: import os import numpy as np import keras.backend as K import tensorflow as tf import warnings from keras.layers import Dense from keras.callbacks import Callback from keras.engine import Layer from keras import initializers from ..testing.visualize import heatmap from .op_utils import unitmatrix and context: # Path: autoencoder/testing/visualize.py # def heatmap(data, save_file='heatmap.png'): # ax = plt.figure().gca() # ax.yaxis.set_major_locator(MaxNLocator(integer=True)) # ax.yaxis.set_major_locator(MultipleLocator(5)) # plt.pcolor(data, cmap=plt.cm.jet) # plt.savefig(save_file) # # plt.show() # # Path: autoencoder/utils/op_utils.py # def unitmatrix(matrix, norm='l2', axis=1): # if norm == 'l1': # maxtrixlen = np.sum(np.abs(matrix), axis=axis) # if norm == 'l2': # maxtrixlen = np.linalg.norm(matrix, axis=axis) # # if np.any(maxtrixlen <= 0): # return matrix # else: # maxtrixlen = maxtrixlen.reshape(1, len(maxtrixlen)) if axis == 0 else maxtrixlen.reshape(len(maxtrixlen), 1) # return matrix / 
maxtrixlen which might include code, classes, or functions. Output only the next line.
heatmap(weights.T, '%s_%s%s'%(self.filename, epoch, self.ext))
Using the snippet: <|code_start|> model.save_weights(filepath, overwrite=True) else: model.save(filepath, overwrite=True) else: if self.verbose > 0: print('Epoch %05d: %s did not improve' % (epoch, self.monitor)) else: if self.verbose > 0: print('Epoch %05d: saving model to %s' % (epoch, filepath)) if self.save_weights_only: model.save_weights(filepath, overwrite=True) else: model.save(filepath, overwrite=True) class VisualWeights(Callback): def __init__(self, save_path, per_epoch=15): super(VisualWeights, self).__init__() self.per_epoch = per_epoch self.filename, self.ext = os.path.splitext(save_path) def on_epoch_end(self, epoch, logs=None): """Called at the end of an epoch. # Arguments epoch: integer, index of epoch. logs: dictionary of logs. """ if epoch % self.per_epoch == 0: weights = self.model.get_weights()[0] # weights /= np.max(np.abs(weights)) <|code_end|> , determine the next line of code. You have imports: import os import numpy as np import keras.backend as K import tensorflow as tf import warnings from keras.layers import Dense from keras.callbacks import Callback from keras.engine import Layer from keras import initializers from ..testing.visualize import heatmap from .op_utils import unitmatrix and context (class names, function names, or code) available: # Path: autoencoder/testing/visualize.py # def heatmap(data, save_file='heatmap.png'): # ax = plt.figure().gca() # ax.yaxis.set_major_locator(MaxNLocator(integer=True)) # ax.yaxis.set_major_locator(MultipleLocator(5)) # plt.pcolor(data, cmap=plt.cm.jet) # plt.savefig(save_file) # # plt.show() # # Path: autoencoder/utils/op_utils.py # def unitmatrix(matrix, norm='l2', axis=1): # if norm == 'l1': # maxtrixlen = np.sum(np.abs(matrix), axis=axis) # if norm == 'l2': # maxtrixlen = np.linalg.norm(matrix, axis=axis) # # if np.any(maxtrixlen <= 0): # return matrix # else: # maxtrixlen = maxtrixlen.reshape(1, len(maxtrixlen)) if axis == 0 else maxtrixlen.reshape(len(maxtrixlen), 1) # return matrix / 
maxtrixlen . Output only the next line.
weights = unitmatrix(weights, axis=0) # normalize
Predict the next line for this snippet: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-l', '--label', type=str, required=True, help='path to the input label file') parser.add_argument('-c', '--corpus', type=str, required=True, help='path to the constructed corpus file') parser.add_argument('-o', '--output', type=str, required=True, help='path to the output file') args = parser.parse_args() <|code_end|> with the help of current file imports: import argparse from autoencoder.datasets.wiki10plus import extract_labels from autoencoder.utils.io_utils import load_json and context from other files: # Path: autoencoder/datasets/wiki10plus.py # def extract_labels(docs, labels, output): # # it will be fast if docs is a dict instead of a list # doc_labels = {} # for name in docs: # doc_labels[name] = labels[name] # dump_json(doc_labels, output) # import pdb;pdb.set_trace() # return doc_labels # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data , which may contain function names, class names, or code. Output only the next line.
extract_labels(load_json(args.corpus)['docs'], load_json(args.label), args.output)
Here is a snippet: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-l', '--label', type=str, required=True, help='path to the input label file') parser.add_argument('-c', '--corpus', type=str, required=True, help='path to the constructed corpus file') parser.add_argument('-o', '--output', type=str, required=True, help='path to the output file') args = parser.parse_args() <|code_end|> . Write the next line using the current file imports: import argparse from autoencoder.datasets.wiki10plus import extract_labels from autoencoder.utils.io_utils import load_json and context from other files: # Path: autoencoder/datasets/wiki10plus.py # def extract_labels(docs, labels, output): # # it will be fast if docs is a dict instead of a list # doc_labels = {} # for name in docs: # doc_labels[name] = labels[name] # dump_json(doc_labels, output) # import pdb;pdb.set_trace() # return doc_labels # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data , which may include functions, classes, or code. Output only the next line.
extract_labels(load_json(args.corpus)['docs'], load_json(args.label), args.output)
Based on the snippet: <|code_start|> n_samples = X_docs.shape[0] np.random.seed(0) val_idx = np.random.choice(range(n_samples), args.n_val, replace=False) train_idx = list(set(range(n_samples)) - set(val_idx)) X_train = X_docs[train_idx] X_val = X_docs[val_idx] del X_docs # np.random.shuffle(X_docs) # n_val = args.n_val ## X_train = np.r_[X_docs[:-n_val]] ## X_val = np.r_[X_docs[-n_val:]] # X_train = np.r_[X_docs[:-n_val]] # del X_docs[:-n_val] # X_val = np.r_[X_docs] # del X_docs if args.noise: # X_train_noisy = X_docs_noisy[:-n_val] # X_val_noisy = X_docs_noisy[-n_val:] X_train_noisy = X_docs_noisy[train_idx] X_val_noisy = X_docs_noisy[val_idx] print 'added %s noise' % args.noise else: X_train_noisy = X_train X_val_noisy = X_val start = timeit.default_timer() <|code_end|> , predict the immediate next line with the help of imports: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context (classes, functions, sometimes code) from other files: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, 
idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) # # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." % norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. + vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
ae = AutoEncoder(n_vocab, args.n_dim, comp_topk=args.comp_topk, weights_file=args.load_weights)
Given snippet: <|code_start|> X_val = X_docs[val_idx] del X_docs # np.random.shuffle(X_docs) # n_val = args.n_val ## X_train = np.r_[X_docs[:-n_val]] ## X_val = np.r_[X_docs[-n_val:]] # X_train = np.r_[X_docs[:-n_val]] # del X_docs[:-n_val] # X_val = np.r_[X_docs] # del X_docs if args.noise: # X_train_noisy = X_docs_noisy[:-n_val] # X_val_noisy = X_docs_noisy[-n_val:] X_train_noisy = X_docs_noisy[train_idx] X_val_noisy = X_docs_noisy[val_idx] print 'added %s noise' % args.noise else: X_train_noisy = X_train X_val_noisy = X_val start = timeit.default_timer() ae = AutoEncoder(n_vocab, args.n_dim, comp_topk=args.comp_topk, weights_file=args.load_weights) ae.fit([X_train_noisy, X_train], [X_val_noisy, X_val], nb_epoch=args.n_epoch, \ batch_size=args.batch_size, feature_weights=None, contractive=args.contractive) print 'runtime: %ss' % (timeit.default_timer() - start) <|code_end|> , continue by predicting the next line. Consider current file imports: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., 
ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) # # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." % norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. + vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except 
Exception as e: # raise e which might include code, classes, or functions. Output only the next line.
if args.save_model:
Here is a snippet: <|code_start|>''' Created on Nov, 2016 @author: hugo ''' from __future__ import absolute_import def train(args): <|code_end|> . Write the next line using the current file imports: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context from other files: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) # # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." 
% norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. + vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e , which may include functions, classes, or code. Output only the next line.
corpus = load_corpus(args.input)
Based on the snippet: <|code_start|>''' Created on Nov, 2016 @author: hugo ''' from __future__ import absolute_import def train(args): corpus = load_corpus(args.input) n_vocab, docs = len(corpus['vocab']), corpus['docs'] corpus.clear() # save memory doc_keys = docs.keys() X_docs = [] for k in doc_keys: <|code_end|> , predict the immediate next line with the help of imports: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context (classes, functions, sometimes code) from other files: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) # # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. 
Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." % norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. + vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
X_docs.append(vecnorm(doc2vec(docs[k], n_vocab), 'logmax1', 0))
Given snippet: <|code_start|>''' Created on Nov, 2016 @author: hugo ''' from __future__ import absolute_import def train(args): corpus = load_corpus(args.input) n_vocab, docs = len(corpus['vocab']), corpus['docs'] corpus.clear() # save memory doc_keys = docs.keys() X_docs = [] for k in doc_keys: <|code_end|> , continue by predicting the next line. Consider current file imports: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) # # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." 
% norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. + vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e which might include code, classes, or functions. Output only the next line.
X_docs.append(vecnorm(doc2vec(docs[k], n_vocab), 'logmax1', 0))
Given the following code snippet before the placeholder: <|code_start|>''' Created on Nov, 2016 @author: hugo ''' from __future__ import absolute_import def train(args): corpus = load_corpus(args.input) n_vocab, docs = len(corpus['vocab']), corpus['docs'] corpus.clear() # save memory doc_keys = docs.keys() X_docs = [] for k in doc_keys: X_docs.append(vecnorm(doc2vec(docs[k], n_vocab), 'logmax1', 0)) del docs[k] X_docs = np.r_[X_docs] if args.noise == 'gs': <|code_end|> , predict the next line using imports from the current file: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context including class names, function names, and sometimes code from other files: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) # # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. 
The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." % norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. + vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
X_docs_noisy = add_gaussian_noise(X_docs, 0.1)
Continue the code snippet: <|code_start|>''' Created on Nov, 2016 @author: hugo ''' from __future__ import absolute_import def train(args): corpus = load_corpus(args.input) n_vocab, docs = len(corpus['vocab']), corpus['docs'] corpus.clear() # save memory doc_keys = docs.keys() X_docs = [] for k in doc_keys: X_docs.append(vecnorm(doc2vec(docs[k], n_vocab), 'logmax1', 0)) del docs[k] X_docs = np.r_[X_docs] if args.noise == 'gs': X_docs_noisy = add_gaussian_noise(X_docs, 0.1) elif args.noise == 'sp': X_docs_noisy = add_salt_pepper_noise(X_docs, 0.1) pass elif args.noise == 'mn': <|code_end|> . Use current file imports: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context (classes, functions, or code) from other files: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) 
# # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." % norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. + vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
X_docs_noisy = add_masking_noise(X_docs, 0.01)
Continue the code snippet: <|code_start|>''' Created on Nov, 2016 @author: hugo ''' from __future__ import absolute_import def train(args): corpus = load_corpus(args.input) n_vocab, docs = len(corpus['vocab']), corpus['docs'] corpus.clear() # save memory doc_keys = docs.keys() X_docs = [] for k in doc_keys: X_docs.append(vecnorm(doc2vec(docs[k], n_vocab), 'logmax1', 0)) del docs[k] X_docs = np.r_[X_docs] if args.noise == 'gs': X_docs_noisy = add_gaussian_noise(X_docs, 0.1) elif args.noise == 'sp': <|code_end|> . Use current file imports: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context (classes, functions, or code) from other files: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) # # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. 
The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." % norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. + vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
X_docs_noisy = add_salt_pepper_noise(X_docs, 0.1)
Here is a snippet: <|code_start|> if args.noise: # X_train_noisy = X_docs_noisy[:-n_val] # X_val_noisy = X_docs_noisy[-n_val:] X_train_noisy = X_docs_noisy[train_idx] X_val_noisy = X_docs_noisy[val_idx] print 'added %s noise' % args.noise else: X_train_noisy = X_train X_val_noisy = X_val start = timeit.default_timer() ae = AutoEncoder(n_vocab, args.n_dim, comp_topk=args.comp_topk, weights_file=args.load_weights) ae.fit([X_train_noisy, X_train], [X_val_noisy, X_val], nb_epoch=args.n_epoch, \ batch_size=args.batch_size, feature_weights=None, contractive=args.contractive) print 'runtime: %ss' % (timeit.default_timer() - start) if args.save_model: arch_file = args.save_model + '.arch' weights_file = args.save_model + '.weights' save_model(ae, arch_file, weights_file) print 'Saved model arch and weights file to %s and %s, respectively.' \ % (arch_file, weights_file) if args.output: train_doc_codes = ae.encoder.predict(X_train) val_doc_codes = ae.encoder.predict(X_val) doc_keys = np.array(doc_keys) <|code_end|> . 
Write the next line using the current file imports: import timeit import argparse import numpy as np from os import path from autoencoder.core.ae import AutoEncoder, load_model, save_model from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec, vocab_weights from autoencoder.utils.op_utils import vecnorm, add_gaussian_noise, add_masking_noise, add_salt_pepper_noise from autoencoder.utils.io_utils import dump_json and context from other files: # Path: autoencoder/core/ae.py # class AutoEncoder(object): # def __init__(self, input_size, dim, comp_topk=None, ctype=None, save_model='best_model'): # def build(self): # def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, contractive=None): # def save_ae_model(model, model_file): # def load_ae_model(model_file): # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def doc2vec(doc, dim): # vec = np.zeros(dim) # for idx, val in doc.items(): # vec[int(idx)] = val # # return vec # # def vocab_weights(vocab_dict, word_freq, max_=100., ratio=.75): # weights = np.zeros((len(vocab_dict), 1)) # # for word, idx in vocab_dict.items(): # weights[idx] = word_freq[str(idx)] # weights = np.clip(weights / max_, 0., 1.) # # return np.power(weights, ratio) # # Path: autoencoder/utils/op_utils.py # def vecnorm(vec, norm, epsilon=1e-3): # """ # Scale a vector to unit length. The only exception is the zero vector, which # is returned back unchanged. # """ # if norm not in ('prob', 'max1', 'logmax1'): # raise ValueError("'%s' is not a supported norm. Currently supported norms include 'prob',\ # 'max1' and 'logmax1'." % norm) # # if isinstance(vec, np.ndarray): # vec = np.asarray(vec, dtype=float) # if norm == 'prob': # veclen = np.sum(np.abs(vec)) + epsilon * len(vec) # smoothing # elif norm == 'max1': # veclen = np.max(vec) + epsilon # elif norm == 'logmax1': # vec = np.log10(1. 
+ vec) # veclen = np.max(vec) + epsilon # if veclen > 0.0: # return (vec + epsilon) / veclen # else: # return vec # else: # raise ValueError('vec should be ndarray, found: %s' % type(vec)) # # def add_gaussian_noise(X, corruption_ratio, range_=[0, 1]): # X_noisy = X + corruption_ratio * np.random.normal(loc=0.0, scale=1.0, size=X.shape) # X_noisy = np.clip(X_noisy, range_[0], range_[1]) # # return X_noisy # # def add_masking_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = 0 # # return X_noisy # # def add_salt_pepper_noise(X, fraction): # assert fraction >= 0 and fraction <= 1 # X_noisy = np.copy(X) # nrow, ncol = X.shape # n = int(ncol * fraction) # for i in range(nrow): # idx_noisy = np.random.choice(ncol, n, replace=False) # X_noisy[i, idx_noisy] = np.random.binomial(1, .5, n) # # return X_noisy # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e , which may include functions, classes, or code. Output only the next line.
dump_json(dict(zip(doc_keys[train_idx].tolist(), train_doc_codes.tolist())), args.output)
Based on the snippet: <|code_start|> # Y_train = load_pickle(args.train_doc_labels) # X_val = np.array(load_pickle(args.val_doc_codes)) # Y_val = load_pickle(args.val_doc_labels) # X_test = np.array(load_pickle(args.test_doc_codes)) # Y_test = load_pickle(args.test_doc_labels) if args.multilabel_clf: encoder = MultiLabelBinarizer() encoder.fit(Y_train + Y_val + Y_test) Y_train = encoder.transform(Y_train) Y_val = encoder.transform(Y_val) Y_test = encoder.transform(Y_test) else: Y = Y_train + Y_val + Y_test n_train = len(Y_train) n_val = len(Y_val) n_test = len(Y_test) encoder = LabelEncoder() Y = np_utils.to_categorical(encoder.fit_transform(Y)) Y_train = Y[:n_train] Y_val = Y[n_train:n_train + n_val] Y_test = Y[-n_test:] seed = 7 print 'train: %s, val: %s, test: %s' % (X_train.shape[0], X_val.shape[0], X_test.shape[0]) if args.multilabel_clf: results = multilabel_classifier(X_train, Y_train, X_val, Y_val, \ X_test, Y_test, nb_epoch=args.n_epoch, batch_size=args.batch_size, seed=seed) print 'f1 score on test set: macro_f1: %s, micro_f1: %s' % tuple(results) else: <|code_end|> , predict the immediate next line with the help of imports: import argparse import numpy as np import pdb;pdb.set_trace() from keras.utils import np_utils from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import MultiLabelBinarizer from sklearn.model_selection import ShuffleSplit from autoencoder.testing.classifier import multiclass_classifier, multilabel_classifier from autoencoder.utils.io_utils import load_json, load_pickle and context (classes, functions, sometimes code) from other files: # Path: autoencoder/testing/classifier.py # def multiclass_classifier(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7): # clf = softmax_network(X_train.shape[1], Y_train.shape[1]) # clf.fit(X_train, Y_train, # epochs=nb_epoch, # batch_size=batch_size, # shuffle=True, # validation_data=(X_val, Y_val), # callbacks=[ # 
ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01), # EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'), # ] # ) # acc = clf.test_on_batch(X_test, Y_test)[1] # # confusion matrix and precision-recall # true = np.argmax(Y_test,axis=1) # pred = np.argmax(clf.predict(X_test), axis=1) # print confusion_matrix(true, pred) # print classification_report(true, pred) # return acc # # def multilabel_classifier(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7): # clf = sigmoid_network(X_train.shape[1], Y_train.shape[1]) # clf.fit(X_train, Y_train, # nb_epoch=nb_epoch, # batch_size=batch_size, # shuffle=True, # validation_data=(X_val, Y_val), # callbacks=[ # ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01), # EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'), # ] # ) # pred = clf.predict(X_test) # pred = (pred > .5) * 1 # macro_f1 = f1_score(Y_test, pred, average='macro') # micro_f1 = f1_score(Y_test, pred, average='micro') # # return [macro_f1, micro_f1] # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def load_pickle(path_to_file): # try: # with open(path_to_file, 'r') as f: # data = pickle.load(f) # except Exception as e: # raise e # # return data . Output only the next line.
results = multiclass_classifier(X_train, Y_train, X_val, Y_val, \
Predict the next line after this snippet: <|code_start|> Y_test = [test_doc_labels[i] for i in test_doc_codes] # # DBN # X_train = np.array(load_pickle(args.train_doc_codes)) # Y_train = load_pickle(args.train_doc_labels) # X_val = np.array(load_pickle(args.val_doc_codes)) # Y_val = load_pickle(args.val_doc_labels) # X_test = np.array(load_pickle(args.test_doc_codes)) # Y_test = load_pickle(args.test_doc_labels) if args.multilabel_clf: encoder = MultiLabelBinarizer() encoder.fit(Y_train + Y_val + Y_test) Y_train = encoder.transform(Y_train) Y_val = encoder.transform(Y_val) Y_test = encoder.transform(Y_test) else: Y = Y_train + Y_val + Y_test n_train = len(Y_train) n_val = len(Y_val) n_test = len(Y_test) encoder = LabelEncoder() Y = np_utils.to_categorical(encoder.fit_transform(Y)) Y_train = Y[:n_train] Y_val = Y[n_train:n_train + n_val] Y_test = Y[-n_test:] seed = 7 print 'train: %s, val: %s, test: %s' % (X_train.shape[0], X_val.shape[0], X_test.shape[0]) if args.multilabel_clf: <|code_end|> using the current file's imports: import argparse import numpy as np import pdb;pdb.set_trace() from keras.utils import np_utils from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import MultiLabelBinarizer from sklearn.model_selection import ShuffleSplit from autoencoder.testing.classifier import multiclass_classifier, multilabel_classifier from autoencoder.utils.io_utils import load_json, load_pickle and any relevant context from other files: # Path: autoencoder/testing/classifier.py # def multiclass_classifier(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7): # clf = softmax_network(X_train.shape[1], Y_train.shape[1]) # clf.fit(X_train, Y_train, # epochs=nb_epoch, # batch_size=batch_size, # shuffle=True, # validation_data=(X_val, Y_val), # callbacks=[ # ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01), # EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'), # 
] # ) # acc = clf.test_on_batch(X_test, Y_test)[1] # # confusion matrix and precision-recall # true = np.argmax(Y_test,axis=1) # pred = np.argmax(clf.predict(X_test), axis=1) # print confusion_matrix(true, pred) # print classification_report(true, pred) # return acc # # def multilabel_classifier(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7): # clf = sigmoid_network(X_train.shape[1], Y_train.shape[1]) # clf.fit(X_train, Y_train, # nb_epoch=nb_epoch, # batch_size=batch_size, # shuffle=True, # validation_data=(X_val, Y_val), # callbacks=[ # ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01), # EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'), # ] # ) # pred = clf.predict(X_test) # pred = (pred > .5) * 1 # macro_f1 = f1_score(Y_test, pred, average='macro') # micro_f1 = f1_score(Y_test, pred, average='micro') # # return [macro_f1, micro_f1] # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def load_pickle(path_to_file): # try: # with open(path_to_file, 'r') as f: # data = pickle.load(f) # except Exception as e: # raise e # # return data . Output only the next line.
results = multilabel_classifier(X_train, Y_train, X_val, Y_val, \
Given the following code snippet before the placeholder: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('train_doc_codes', type=str, help='path to the train doc codes file') parser.add_argument('train_doc_labels', type=str, help='path to the train doc codes file') parser.add_argument('val_doc_codes', type=str, help='path to the train doc codes file') parser.add_argument('val_doc_labels', type=str, help='path to the train doc labels file') parser.add_argument('test_doc_codes', type=str, help='path to the test doc codes file') parser.add_argument('test_doc_labels', type=str, help='path to the test doc labels file') parser.add_argument('-ne', '--n_epoch', type=int, default=100, help='num of epoches (default 100)') parser.add_argument('-bs', '--batch_size', type=int, default=100, help='batch size (default 100)') parser.add_argument('-mlc', '--multilabel_clf', action='store_true', help='multilabel classification flag') args = parser.parse_args() # autoencoder <|code_end|> , predict the next line using imports from the current file: import argparse import numpy as np import pdb;pdb.set_trace() from keras.utils import np_utils from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import MultiLabelBinarizer from sklearn.model_selection import ShuffleSplit from autoencoder.testing.classifier import multiclass_classifier, multilabel_classifier from autoencoder.utils.io_utils import load_json, load_pickle and context including class names, function names, and sometimes code from other files: # Path: autoencoder/testing/classifier.py # def multiclass_classifier(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7): # clf = softmax_network(X_train.shape[1], Y_train.shape[1]) # clf.fit(X_train, Y_train, # epochs=nb_epoch, # batch_size=batch_size, # shuffle=True, # validation_data=(X_val, Y_val), # callbacks=[ # 
ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01), # EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'), # ] # ) # acc = clf.test_on_batch(X_test, Y_test)[1] # # confusion matrix and precision-recall # true = np.argmax(Y_test,axis=1) # pred = np.argmax(clf.predict(X_test), axis=1) # print confusion_matrix(true, pred) # print classification_report(true, pred) # return acc # # def multilabel_classifier(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7): # clf = sigmoid_network(X_train.shape[1], Y_train.shape[1]) # clf.fit(X_train, Y_train, # nb_epoch=nb_epoch, # batch_size=batch_size, # shuffle=True, # validation_data=(X_val, Y_val), # callbacks=[ # ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01), # EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'), # ] # ) # pred = clf.predict(X_test) # pred = (pred > .5) * 1 # macro_f1 = f1_score(Y_test, pred, average='macro') # micro_f1 = f1_score(Y_test, pred, average='micro') # # return [macro_f1, micro_f1] # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def load_pickle(path_to_file): # try: # with open(path_to_file, 'r') as f: # data = pickle.load(f) # except Exception as e: # raise e # # return data . Output only the next line.
train_doc_codes = load_json(args.train_doc_codes)
Continue the code snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.reuters import CorpusIterReuters # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=False) # print len([1 for x in corpus]) corpus_iter = lambda: ([word for word in sentence if word in vocab] for sentence in corpus) <|code_end|> . Use current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() from os import path from autoencoder.baseline.word2vec import Word2Vec, save_w2v, load_w2v from autoencoder.baseline.doc_word2vec import doc_word2vec from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context (classes, functions, or code) from other files: # Path: autoencoder/baseline/word2vec.py # class Word2Vec(object): # def __init__(self, dim, min_count=1, sg=1, hs=0, window=5, negative=5, epoches=5): # super(Word2Vec, self).__init__() # self.dim = dim # self.min_count = min_count # self.sg = sg # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # # def train(self, corpus): # self.model = word2vec.Word2Vec(size=self.dim, min_count=self.min_count,\ # window=self.window, workers=multiprocessing.cpu_count(), \ # sg=self.sg, hs=self.hs, negative=self.negative, 
iter=self.epoches) # self.model.build_vocab(corpus()) # self.model.train(corpus()) # # return self # # def save_w2v(model, outfile): # model.save_word2vec_format(outfile, binary=True) # # def load_w2v(mod_file): # return word2vec.Word2Vec.load_word2vec_format(mod_file, binary=True) # # Path: autoencoder/baseline/doc_word2vec.py # def doc_word2vec(model, corpus, vocab, output, avg=True): # doc_codes = {} # for key, bow in corpus.iteritems(): # vec = get_doc_codes(model, bow, vocab, avg) # doc_codes[key] = vec.tolist() # dump_json(doc_codes, output) # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # 
print count . Output only the next line.
w2v = Word2Vec(args.n_dim, window=args.window_size, \
Predict the next line after this snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.reuters import CorpusIterReuters # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=False) # print len([1 for x in corpus]) corpus_iter = lambda: ([word for word in sentence if word in vocab] for sentence in corpus) w2v = Word2Vec(args.n_dim, window=args.window_size, \ negative=args.negative, epoches=args.n_epoch) start = timeit.default_timer() w2v.train(corpus_iter) print 'runtime: %ss' % (timeit.default_timer() - start) <|code_end|> using the current file's imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() from os import path from autoencoder.baseline.word2vec import Word2Vec, save_w2v, load_w2v from autoencoder.baseline.doc_word2vec import doc_word2vec from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and any relevant context from other files: # Path: autoencoder/baseline/word2vec.py # class Word2Vec(object): # def __init__(self, dim, min_count=1, sg=1, hs=0, window=5, negative=5, epoches=5): # super(Word2Vec, self).__init__() # self.dim = dim # self.min_count = min_count # self.sg = sg # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # # def 
train(self, corpus): # self.model = word2vec.Word2Vec(size=self.dim, min_count=self.min_count,\ # window=self.window, workers=multiprocessing.cpu_count(), \ # sg=self.sg, hs=self.hs, negative=self.negative, iter=self.epoches) # self.model.build_vocab(corpus()) # self.model.train(corpus()) # # return self # # def save_w2v(model, outfile): # model.save_word2vec_format(outfile, binary=True) # # def load_w2v(mod_file): # return word2vec.Word2Vec.load_word2vec_format(mod_file, binary=True) # # Path: autoencoder/baseline/doc_word2vec.py # def doc_word2vec(model, corpus, vocab, output, avg=True): # doc_codes = {} # for key, bow in corpus.iteritems(): # vec = get_doc_codes(model, bow, vocab, avg) # doc_codes[key] = vec.tolist() # dump_json(doc_codes, output) # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if 
self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count . Output only the next line.
save_w2v(w2v.model, args.save_model)
Here is a snippet: <|code_start|>''' from __future__ import absolute_import # from autoencoder.datasets.reuters import CorpusIterReuters # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=False) # print len([1 for x in corpus]) corpus_iter = lambda: ([word for word in sentence if word in vocab] for sentence in corpus) w2v = Word2Vec(args.n_dim, window=args.window_size, \ negative=args.negative, epoches=args.n_epoch) start = timeit.default_timer() w2v.train(corpus_iter) print 'runtime: %ss' % (timeit.default_timer() - start) save_w2v(w2v.model, args.save_model) def test(args): corpus = load_corpus(args.corpus[0]) docs, vocab_dict = corpus['docs'], corpus['vocab'] <|code_end|> . 
Write the next line using the current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() from os import path from autoencoder.baseline.word2vec import Word2Vec, save_w2v, load_w2v from autoencoder.baseline.doc_word2vec import doc_word2vec from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context from other files: # Path: autoencoder/baseline/word2vec.py # class Word2Vec(object): # def __init__(self, dim, min_count=1, sg=1, hs=0, window=5, negative=5, epoches=5): # super(Word2Vec, self).__init__() # self.dim = dim # self.min_count = min_count # self.sg = sg # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # # def train(self, corpus): # self.model = word2vec.Word2Vec(size=self.dim, min_count=self.min_count,\ # window=self.window, workers=multiprocessing.cpu_count(), \ # sg=self.sg, hs=self.hs, negative=self.negative, iter=self.epoches) # self.model.build_vocab(corpus()) # self.model.train(corpus()) # # return self # # def save_w2v(model, outfile): # model.save_word2vec_format(outfile, binary=True) # # def load_w2v(mod_file): # return word2vec.Word2Vec.load_word2vec_format(mod_file, binary=True) # # Path: autoencoder/baseline/doc_word2vec.py # def doc_word2vec(model, corpus, vocab, output, avg=True): # doc_codes = {} # for key, bow in corpus.iteritems(): # vec = get_doc_codes(model, bow, vocab, avg) # doc_codes[key] = vec.tolist() # dump_json(doc_codes, output) # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # 
with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count , which may include functions, classes, or code. Output only the next line.
doc_codes = doc_word2vec(docs, revdict(vocab_dict), args.load_model, args.output, avg=True)
Based on the snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.reuters import CorpusIterReuters # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus def train(args): <|code_end|> , predict the immediate next line with the help of imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() from os import path from autoencoder.baseline.word2vec import Word2Vec, save_w2v, load_w2v from autoencoder.baseline.doc_word2vec import doc_word2vec from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context (classes, functions, sometimes code) from other files: # Path: autoencoder/baseline/word2vec.py # class Word2Vec(object): # def __init__(self, dim, min_count=1, sg=1, hs=0, window=5, negative=5, epoches=5): # super(Word2Vec, self).__init__() # self.dim = dim # self.min_count = min_count # self.sg = sg # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # # def train(self, corpus): # self.model = word2vec.Word2Vec(size=self.dim, min_count=self.min_count,\ # window=self.window, workers=multiprocessing.cpu_count(), \ # sg=self.sg, hs=self.hs, negative=self.negative, iter=self.epoches) # self.model.build_vocab(corpus()) # self.model.train(corpus()) # # return self # # def save_w2v(model, outfile): # model.save_word2vec_format(outfile, binary=True) # # def load_w2v(mod_file): # return word2vec.Word2Vec.load_word2vec_format(mod_file, binary=True) # # Path: autoencoder/baseline/doc_word2vec.py # def doc_word2vec(model, corpus, vocab, output, avg=True): # doc_codes = {} # for key, bow in corpus.iteritems(): # vec = get_doc_codes(model, bow, vocab, avg) # doc_codes[key] = vec.tolist() # dump_json(doc_codes, output) 
# # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count . Output only the next line.
vocab = load_json(args.vocab)
Given snippet: <|code_start|>@author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.reuters import CorpusIterReuters # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=False) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=False) # print len([1 for x in corpus]) corpus_iter = lambda: ([word for word in sentence if word in vocab] for sentence in corpus) w2v = Word2Vec(args.n_dim, window=args.window_size, \ negative=args.negative, epoches=args.n_epoch) start = timeit.default_timer() w2v.train(corpus_iter) print 'runtime: %ss' % (timeit.default_timer() - start) save_w2v(w2v.model, args.save_model) def test(args): <|code_end|> , continue by predicting the next line. 
Consider current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() from os import path from autoencoder.baseline.word2vec import Word2Vec, save_w2v, load_w2v from autoencoder.baseline.doc_word2vec import doc_word2vec from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context: # Path: autoencoder/baseline/word2vec.py # class Word2Vec(object): # def __init__(self, dim, min_count=1, sg=1, hs=0, window=5, negative=5, epoches=5): # super(Word2Vec, self).__init__() # self.dim = dim # self.min_count = min_count # self.sg = sg # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # # def train(self, corpus): # self.model = word2vec.Word2Vec(size=self.dim, min_count=self.min_count,\ # window=self.window, workers=multiprocessing.cpu_count(), \ # sg=self.sg, hs=self.hs, negative=self.negative, iter=self.epoches) # self.model.build_vocab(corpus()) # self.model.train(corpus()) # # return self # # def save_w2v(model, outfile): # model.save_word2vec_format(outfile, binary=True) # # def load_w2v(mod_file): # return word2vec.Word2Vec.load_word2vec_format(mod_file, binary=True) # # Path: autoencoder/baseline/doc_word2vec.py # def doc_word2vec(model, corpus, vocab, output, avg=True): # doc_codes = {} # for key, bow in corpus.iteritems(): # vec = get_doc_codes(model, bow, vocab, avg) # doc_codes[key] = vec.tolist() # dump_json(doc_codes, output) # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for 
line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count which might include code, classes, or functions. Output only the next line.
corpus = load_corpus(args.corpus[0])
Continue the code snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.reuters import CorpusIterReuters # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus <|code_end|> . Use current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() from os import path from autoencoder.baseline.word2vec import Word2Vec, save_w2v, load_w2v from autoencoder.baseline.doc_word2vec import doc_word2vec from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context (classes, functions, or code) from other files: # Path: autoencoder/baseline/word2vec.py # class Word2Vec(object): # def __init__(self, dim, min_count=1, sg=1, hs=0, window=5, negative=5, epoches=5): # super(Word2Vec, self).__init__() # self.dim = dim # self.min_count = min_count # self.sg = sg # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # # def train(self, corpus): # self.model = word2vec.Word2Vec(size=self.dim, min_count=self.min_count,\ # window=self.window, workers=multiprocessing.cpu_count(), \ # sg=self.sg, hs=self.hs, negative=self.negative, iter=self.epoches) # self.model.build_vocab(corpus()) # self.model.train(corpus()) # # return self # # def save_w2v(model, outfile): # model.save_word2vec_format(outfile, binary=True) # # def load_w2v(mod_file): # return word2vec.Word2Vec.load_word2vec_format(mod_file, binary=True) # # Path: autoencoder/baseline/doc_word2vec.py # def doc_word2vec(model, corpus, vocab, output, avg=True): # doc_codes = {} # for key, bow in corpus.iteritems(): # vec = get_doc_codes(model, bow, vocab, avg) # doc_codes[key] = 
vec.tolist() # dump_json(doc_codes, output) # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count . Output only the next line.
corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=False)
Here is a snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', type=str, required=True, help='path to the input corpus dir') parser.add_argument('-o', '--output', type=str, default='./', help='path to the output dir') parser.add_argument('-wl', '--whitelist', type=str, help='path to the whitelist file') args = parser.parse_args() if args.whitelist: white_list = load_json(args.whitelist) else: white_list = None <|code_end|> . Write the next line using the current file imports: import argparse from autoencoder.datasets.wiki10plus import xml2text from autoencoder.utils.io_utils import load_json and context from other files: # Path: autoencoder/datasets/wiki10plus.py # def xml2text(in_dir, out_dir, white_list=None): # # it will be fast if white_list is a dict instead of a list # files = get_all_files(in_dir, recursive=False) # count = 0 # for filename in files: # if white_list and not os.path.basename(filename) in white_list: # continue # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # extract_contents(text, os.path.join(out_dir, os.path.basename(filename))) # count += 1 # except Exception as e: # raise e # if count % 500 == 0: # print 'processed %s' % count # print 'processed %s docs, discarded %s docs' % (count, len(files) - count) # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data , which may include functions, classes, or code. Output only the next line.
xml2text(args.input, args.output, white_list)
Here is a snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', type=str, required=True, help='path to the input corpus dir') parser.add_argument('-o', '--output', type=str, default='./', help='path to the output dir') parser.add_argument('-wl', '--whitelist', type=str, help='path to the whitelist file') args = parser.parse_args() if args.whitelist: <|code_end|> . Write the next line using the current file imports: import argparse from autoencoder.datasets.wiki10plus import xml2text from autoencoder.utils.io_utils import load_json and context from other files: # Path: autoencoder/datasets/wiki10plus.py # def xml2text(in_dir, out_dir, white_list=None): # # it will be fast if white_list is a dict instead of a list # files = get_all_files(in_dir, recursive=False) # count = 0 # for filename in files: # if white_list and not os.path.basename(filename) in white_list: # continue # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # extract_contents(text, os.path.join(out_dir, os.path.basename(filename))) # count += 1 # except Exception as e: # raise e # if count % 500 == 0: # print 'processed %s' % count # print 'processed %s docs, discarded %s docs' % (count, len(files) - count) # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data , which may include functions, classes, or code. Output only the next line.
white_list = load_json(args.whitelist)
Given the code snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import def train_lda(corpus, vocab_dict, n_topics, n_iter, save_model): lda = LdaModel(corpus, num_topics=n_topics, id2word=vocab_dict, \ passes=n_iter, minimum_probability=1e-3) lda.save(save_model) return lda def generate_doc_codes(model, corpus, output): model.minimum_probability = 1e-3 n_topics = model.num_topics doc_codes = {} for key, doc_bow in corpus.iteritems(): code = np.zeros(n_topics) for idx, val in model[doc_bow]: code[idx] = val doc_codes[key] = code.tolist() <|code_end|> , generate the next line using the imports in this file: import numpy as np from gensim import corpora from gensim.models.ldamodel import LdaModel from ..utils.io_utils import dump_json from ..utils.op_utils import unitmatrix and context (functions, classes, or occasionally code) from other files: # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # Path: autoencoder/utils/op_utils.py # def unitmatrix(matrix, norm='l2', axis=1): # if norm == 'l1': # maxtrixlen = np.sum(np.abs(matrix), axis=axis) # if norm == 'l2': # maxtrixlen = np.linalg.norm(matrix, axis=axis) # # if np.any(maxtrixlen <= 0): # return matrix # else: # maxtrixlen = maxtrixlen.reshape(1, len(maxtrixlen)) if axis == 0 else maxtrixlen.reshape(len(maxtrixlen), 1) # return matrix / maxtrixlen . Output only the next line.
dump_json(doc_codes, output)
Continue the code snippet: <|code_start|> def generate_doc_codes(model, corpus, output): model.minimum_probability = 1e-3 n_topics = model.num_topics doc_codes = {} for key, doc_bow in corpus.iteritems(): code = np.zeros(n_topics) for idx, val in model[doc_bow]: code[idx] = val doc_codes[key] = code.tolist() dump_json(doc_codes, output) return doc_codes def show_topics(model, n_words_per_topic=10): n_topics = model.num_topics topics = [zip(*model.show_topic(i, n_words_per_topic))[0] for i in range(n_topics)] return topics def show_topics_prob(model, n_words_per_topic=10): n_topics = model.num_topics topics = [model.show_topic(i, n_words_per_topic) for i in range(n_topics)] return topics def calc_pairwise_cosine(model): n = model.num_topics weights = model.state.get_lambda() weights = np.apply_along_axis(lambda x: x / x.sum(), 1, weights) # get dist. <|code_end|> . Use current file imports: import numpy as np from gensim import corpora from gensim.models.ldamodel import LdaModel from ..utils.io_utils import dump_json from ..utils.op_utils import unitmatrix and context (classes, functions, or code) from other files: # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # Path: autoencoder/utils/op_utils.py # def unitmatrix(matrix, norm='l2', axis=1): # if norm == 'l1': # maxtrixlen = np.sum(np.abs(matrix), axis=axis) # if norm == 'l2': # maxtrixlen = np.linalg.norm(matrix, axis=axis) # # if np.any(maxtrixlen <= 0): # return matrix # else: # maxtrixlen = maxtrixlen.reshape(1, len(maxtrixlen)) if axis == 0 else maxtrixlen.reshape(len(maxtrixlen), 1) # return matrix / maxtrixlen . Output only the next line.
weights = unitmatrix(weights) # normalize
Here is a snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from keras.optimizers import Adam def retrieval(X_train, Y_train, X_test, Y_test, fractions=[0.01, 0.5, 1.0], multilabel=False): db_size = len(X_train) n_queries = len(X_test) <|code_end|> . Write the next line using the current file imports: import numpy as np import pdb;pdb.set_trace() from collections import defaultdict, Counter from keras.models import Sequential from keras.layers import Dense from keras.callbacks import EarlyStopping, ReduceLROnPlateau from autoencoder.utils.op_utils import unitmatrix and context from other files: # Path: autoencoder/utils/op_utils.py # def unitmatrix(matrix, norm='l2', axis=1): # if norm == 'l1': # maxtrixlen = np.sum(np.abs(matrix), axis=axis) # if norm == 'l2': # maxtrixlen = np.linalg.norm(matrix, axis=axis) # # if np.any(maxtrixlen <= 0): # return matrix # else: # maxtrixlen = maxtrixlen.reshape(1, len(maxtrixlen)) if axis == 0 else maxtrixlen.reshape(len(maxtrixlen), 1) # return matrix / maxtrixlen , which may include functions, classes, or code. Output only the next line.
X_train = unitmatrix(X_train) # normalize
Given the following code snippet before the placeholder: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import cached_stop_words = init_stopwords() class CorpusIter20News(object): def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): self.stem = stem self.with_docname = with_docname <|code_end|> , predict the next line using imports from the current file: import os from random import shuffle from collections import defaultdict from ..preprocessing.preprocessing import get_all_files, init_stopwords, tiny_tokenize and context including class names, function names, and sometimes code from other files: # Path: autoencoder/preprocessing/preprocessing.py # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem 
else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] . Output only the next line.
self.files = get_all_files(corpus_path, recursive)
Continue the code snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import <|code_end|> . Use current file imports: import os from random import shuffle from collections import defaultdict from ..preprocessing.preprocessing import get_all_files, init_stopwords, tiny_tokenize and context (classes, functions, or code) from other files: # Path: autoencoder/preprocessing/preprocessing.py # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] . Output only the next line.
cached_stop_words = init_stopwords()
Given snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import cached_stop_words = init_stopwords() class CorpusIter20News(object): def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): self.stem = stem self.with_docname = with_docname self.files = get_all_files(corpus_path, recursive) def __iter__(self): shuffle(self.files) count = 0 for filename in self.files: try: with open(filename, 'r') as fp: text = fp.read().lower() # remove punctuations, stopwords and *unnecessary digits*, stemming <|code_end|> , continue by predicting the next line. Consider current file imports: import os from random import shuffle from collections import defaultdict from ..preprocessing.preprocessing import get_all_files, init_stopwords, tiny_tokenize and context: # Path: autoencoder/preprocessing/preprocessing.py # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = 
EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] which might include code, classes, or functions. Output only the next line.
words = tiny_tokenize(text, self.stem, cached_stop_words)
Based on the snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', type=str, required=True, help='path to the input source file') parser.add_argument('--topn', type=int, default=25, help='keep only topn most frequent labels') parser.add_argument('-o', '--output', type=str, help='path to the output file') args = parser.parse_args() <|code_end|> , predict the immediate next line with the help of imports: import argparse from autoencoder.datasets.wikitag_extractor import extract_labels from autoencoder.utils.io_utils import dump_json and context (classes, functions, sometimes code) from other files: # Path: autoencoder/datasets/wikitag_extractor.py # def extract_labels(in_file, topn): # #XML PARSING # parser = xml.sax.make_parser() # parser.setContentHandler(XMLhandler()) # parser.parse(in_file) # # #NOTE in the xml file "css" and "files" also come in the <hash>....</hash>, so i remove them manually # labeldict.pop("css") # labeldict.pop("files") # # #TO TAKE LABELS WITH AT LEAST N OCCURENCES ONLY # freqs = Counter(labels) # pairs = sorted(freqs.items(), key=lambda item: item[1], reverse=True) # pairs = pairs[:topn] # newlabels = zip(*pairs)[0] # newlabeldict = {} # # #REMOVE EXTRA LABELS FROM LABEL DICTIONARY (HASH-LABEL MAPPING) # #ALSO CHECK IF WE LOST ANY DOCUMENT; IN CASE A DOCUMENT HAS 0 LABELS # # se = set(newlabels) # for i in labeldict: # newlabeldict[i] = list(se.intersection(labeldict[i])) # if len(newlabeldict[i]) == 0: # newlabeldict.pop(i) # # return newlabeldict # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
labeldict = extract_labels(args.input, args.topn)
Given the following code snippet before the placeholder: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', type=str, required=True, help='path to the input source file') parser.add_argument('--topn', type=int, default=25, help='keep only topn most frequent labels') parser.add_argument('-o', '--output', type=str, help='path to the output file') args = parser.parse_args() labeldict = extract_labels(args.input, args.topn) <|code_end|> , predict the next line using imports from the current file: import argparse from autoencoder.datasets.wikitag_extractor import extract_labels from autoencoder.utils.io_utils import dump_json and context including class names, function names, and sometimes code from other files: # Path: autoencoder/datasets/wikitag_extractor.py # def extract_labels(in_file, topn): # #XML PARSING # parser = xml.sax.make_parser() # parser.setContentHandler(XMLhandler()) # parser.parse(in_file) # # #NOTE in the xml file "css" and "files" also come in the <hash>....</hash>, so i remove them manually # labeldict.pop("css") # labeldict.pop("files") # # #TO TAKE LABELS WITH AT LEAST N OCCURENCES ONLY # freqs = Counter(labels) # pairs = sorted(freqs.items(), key=lambda item: item[1], reverse=True) # pairs = pairs[:topn] # newlabels = zip(*pairs)[0] # newlabeldict = {} # # #REMOVE EXTRA LABELS FROM LABEL DICTIONARY (HASH-LABEL MAPPING) # #ALSO CHECK IF WE LOST ANY DOCUMENT; IN CASE A DOCUMENT HAS 0 LABELS # # se = set(newlabels) # for i in labeldict: # newlabeldict[i] = list(se.intersection(labeldict[i])) # if len(newlabeldict[i]) == 0: # newlabeldict.pop(i) # # return newlabeldict # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
dump_json(labeldict, args.output)
Predict the next line after this snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', type=str, required=True, help='path to the input corpus file') parser.add_argument('-o', '--output', type=str, default='./', help='path to the output dir') parser.add_argument('-ts', '--test_split', type=float, required=True, help='fraction of the dataset to be used as test data') args = parser.parse_args() <|code_end|> using the current file's imports: import argparse from autoencoder.datasets.movie_review_data import construct_train_test_corpus and any relevant context from other files: # Path: autoencoder/datasets/movie_review_data.py # def construct_train_test_corpus(file, test_split, output, threshold=10, topn=20000): # train_data, train_labels, test_data, test_labels = load_data(file, test_split) # train_word_freq = count_words(train_data.values()) # # train_docs, vocab_dict, train_word_freq = construct_corpus(train_data, train_word_freq, True, threshold=threshold, topn=topn) # train_corpus = {'docs': train_docs, 'vocab': vocab_dict, 'word_freq': train_word_freq} # dump_json(train_corpus, os.path.join(output, 'train.corpus')) # print 'Generated training corpus' # dump_json(train_labels, os.path.join(output, 'train.labels')) # print 'Generated training labels' # # test_word_freq = count_words(test_data.values()) # test_docs, _, _ = construct_corpus(test_data, test_word_freq, False, vocab_dict=vocab_dict) # test_corpus = {'docs': test_docs, 'vocab': vocab_dict} # dump_json(test_corpus, os.path.join(output, 'test.corpus')) # print 'Generated test corpus' # dump_json(test_labels, os.path.join(output, 'test.labels')) # print 'Generated test labels' # import pdb;pdb.set_trace() . Output only the next line.
construct_train_test_corpus(args.input, args.test_split, args.output, threshold=10, topn=2000)
Given the code snippet: <|code_start|>''' Created on Nov, 2016 @author: hugo ''' from __future__ import absolute_import def train(args): <|code_end|> , generate the next line using the imports in this file: import argparse import timeit import math import numpy as np from os import path from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.utils.io_utils import dump_json, write_file from autoencoder.baseline.lda import train_lda, generate_doc_codes, load_model, show_topics, show_topics_prob, calc_pairwise_cosine, calc_pairwise_dev from autoencoder.testing.visualize import word_cloud from autoencoder.utils.op_utils import unitmatrix and context (functions, classes, or occasionally code) from other files: # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/baseline/lda.py # def train_lda(corpus, vocab_dict, n_topics, n_iter, save_model): # lda = LdaModel(corpus, num_topics=n_topics, id2word=vocab_dict, \ # passes=n_iter, minimum_probability=1e-3) # lda.save(save_model) # # return lda # # def generate_doc_codes(model, corpus, output): # model.minimum_probability = 1e-3 # n_topics = model.num_topics # doc_codes = {} # for key, doc_bow in corpus.iteritems(): # code = np.zeros(n_topics) # for idx, val in model[doc_bow]: # code[idx] = val # doc_codes[key] = code.tolist() # dump_json(doc_codes, output) # # return doc_codes # # def load_model(model_file): # return LdaModel.load(model_file) # # def show_topics(model, n_words_per_topic=10): # n_topics = model.num_topics # topics = [zip(*model.show_topic(i, 
n_words_per_topic))[0] for i in range(n_topics)] # # return topics # # def show_topics_prob(model, n_words_per_topic=10): # n_topics = model.num_topics # topics = [model.show_topic(i, n_words_per_topic) for i in range(n_topics)] # # return topics # # def calc_pairwise_cosine(model): # n = model.num_topics # weights = model.state.get_lambda() # weights = np.apply_along_axis(lambda x: x / x.sum(), 1, weights) # get dist. # weights = unitmatrix(weights) # normalize # score = [] # for i in range(n): # for j in range(i + 1, n): # score.append(np.arccos(weights[i].dot(weights[j]))) # # return np.mean(score), np.std(score) # # def calc_pairwise_dev(model): # # the average squared deviation from 0 (90 degree) # n = model.num_topics # weights = model.state.get_lambda() # weights = np.apply_along_axis(lambda x: x / x.sum(), 1, weights) # get dist. # weights = unitmatrix(weights) # normalize # score = 0. # for i in range(n): # for j in range(i + 1, n): # score += (weights[i].dot(weights[j]))**2 # # return np.sqrt(2. 
* score / n / (n - 1)) # # Path: autoencoder/testing/visualize.py # def word_cloud(word_embedding_matrix, vocab, s, save_file='scatter.png'): # words = [(i, vocab[i]) for i in s] # model = TSNE(n_components=2, random_state=0) # #Note that the following line might use a good chunk of RAM # tsne_embedding = model.fit_transform(word_embedding_matrix) # words_vectors = tsne_embedding[np.array([item[1] for item in words])] # # plt.subplots_adjust(bottom = 0.1) # plt.scatter( # words_vectors[:, 0], words_vectors[:, 1], marker='o', cmap=plt.get_cmap('Spectral')) # # for label, x, y in zip(s, words_vectors[:, 0], words_vectors[:, 1]): # plt.annotate( # label, # xy=(x, y), xytext=(-20, 20), # textcoords='offset points', ha='right', va='bottom', # fontsize=20, # # bbox=dict(boxstyle='round,pad=1.', fc='yellow', alpha=0.5), # arrowprops=dict(arrowstyle = '<-', connectionstyle='arc3,rad=0') # ) # plt.show() # # plt.savefig(save_file) # # Path: autoencoder/utils/op_utils.py # def unitmatrix(matrix, norm='l2', axis=1): # if norm == 'l1': # maxtrixlen = np.sum(np.abs(matrix), axis=axis) # if norm == 'l2': # maxtrixlen = np.linalg.norm(matrix, axis=axis) # # if np.any(maxtrixlen <= 0): # return matrix # else: # maxtrixlen = maxtrixlen.reshape(1, len(maxtrixlen)) if axis == 0 else maxtrixlen.reshape(len(maxtrixlen), 1) # return matrix / maxtrixlen . Output only the next line.
corpus = load_corpus(args.corpus)
Predict the next line after this snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('train_doc_codes', type=str, help='path to the train doc code file') parser.add_argument('train_doc_names', type=str, help='path to the train doc name file') parser.add_argument('val_doc_codes', type=str, help='path to the valid doc code file') parser.add_argument('val_doc_names', type=str, help='path to the valid doc name file') parser.add_argument('test_doc_codes', type=str, help='path to the test doc code file') parser.add_argument('test_doc_names', type=str, help='path to the test doc name file') parser.add_argument('out_dir', type=str, help='path to the output dir') args = parser.parse_args() train_doc_codes_path = args.train_doc_codes test_doc_codes_path = args.test_doc_codes <|code_end|> using the current file's imports: import os import sys import argparse import numpy as np import pdb;pdb.set_trace() from autoencoder.preprocessing.preprocessing import load_corpus, corpus2libsvm from autoencoder.utils.io_utils import load_file, dump_json and any relevant context from other files: # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def corpus2libsvm(docs, doc_labels, output): # '''Convert the corpus format to libsvm format. 
# ''' # data = [] # names = [] # for key, val in docs.iteritems(): # # label = doc_labels[key] # label = 0 # line = label if isinstance(label, list) else [str(label)] + ["%s:%s" % (int(x) + 1, y) for x, y in val.iteritems()] # data.append(line) # names.append(key) # write_file(data, output) # write_file(names, output + '.fnames') # return data, names # # Path: autoencoder/utils/io_utils.py # def load_file(file, float_=False): # data = [] # try: # with open(file, 'r') as datafile: # for line in datafile: # content = line.strip('\n').split() # if float_: # content = [float(x) for x in content] # data.append(content) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
train_doc_codes = load_file(train_doc_codes_path, True)
Predict the next line after this snippet: <|code_start|> parser.add_argument('out_dir', type=str, help='path to the output dir') args = parser.parse_args() train_doc_codes_path = args.train_doc_codes test_doc_codes_path = args.test_doc_codes train_doc_codes = load_file(train_doc_codes_path, True) train_doc_names = load_file(args.train_doc_names) val_doc_codes = load_file(args.val_doc_codes, True) val_doc_names = load_file(args.val_doc_names) test_doc_codes = load_file(test_doc_codes_path, True) test_doc_names = load_file(args.test_doc_names) assert len(train_doc_codes) == len(train_doc_names) assert len(val_doc_codes) == len(val_doc_names) assert len(test_doc_codes) == len(test_doc_names) new_train_doc_codes = {} new_test_doc_codes = {} for i in range(len(train_doc_names)): new_train_doc_codes[''.join(train_doc_names[i])] = train_doc_codes[i] del train_doc_codes for i in range(len(val_doc_names)): new_train_doc_codes[''.join(val_doc_names[i])] = val_doc_codes[i] del val_doc_codes for i in range(len(test_doc_names)): new_test_doc_codes[''.join(test_doc_names[i])] = test_doc_codes[i] del test_doc_codes out_dir = args.out_dir <|code_end|> using the current file's imports: import os import sys import argparse import numpy as np import pdb;pdb.set_trace() from autoencoder.preprocessing.preprocessing import load_corpus, corpus2libsvm from autoencoder.utils.io_utils import load_file, dump_json and any relevant context from other files: # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def corpus2libsvm(docs, doc_labels, output): # '''Convert the corpus format to libsvm format. 
# ''' # data = [] # names = [] # for key, val in docs.iteritems(): # # label = doc_labels[key] # label = 0 # line = label if isinstance(label, list) else [str(label)] + ["%s:%s" % (int(x) + 1, y) for x, y in val.iteritems()] # data.append(line) # names.append(key) # write_file(data, output) # write_file(names, output + '.fnames') # return data, names # # Path: autoencoder/utils/io_utils.py # def load_file(file, float_=False): # data = [] # try: # with open(file, 'r') as datafile: # for line in datafile: # content = line.strip('\n').split() # if float_: # content = [float(x) for x in content] # data.append(content) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
dump_json(new_train_doc_codes, os.path.join(out_dir, 'new_' + os.path.basename(train_doc_codes_path)))
Predict the next line for this snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('train_path', type=str, help='path to the train corpus file') parser.add_argument('test_path', type=str, help='path to the test corpus file') parser.add_argument('train_label', type=str, help='path to the train label file') parser.add_argument('test_label', type=str, help='path to the test label file') parser.add_argument('out_dir', type=str, help='path to the output dir') parser.add_argument('-nv', '--n_val', type=int, default=1000, help='validation set size') args = parser.parse_args() <|code_end|> with the help of current file imports: import os import sys import argparse import numpy as np import pdb;pdb.set_trace() from autoencoder.preprocessing.preprocessing import load_corpus, corpus2libsvm from autoencoder.utils.io_utils import load_json and context from other files: # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def corpus2libsvm(docs, doc_labels, output): # '''Convert the corpus format to libsvm format. # ''' # data = [] # names = [] # for key, val in docs.iteritems(): # # label = doc_labels[key] # label = 0 # line = label if isinstance(label, list) else [str(label)] + ["%s:%s" % (int(x) + 1, y) for x, y in val.iteritems()] # data.append(line) # names.append(key) # write_file(data, output) # write_file(names, output + '.fnames') # return data, names # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data , which may contain function names, class names, or code. Output only the next line.
docs = load_corpus(args.train_path)['docs'].items()
Given the following code snippet before the placeholder: <|code_start|> @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('train_path', type=str, help='path to the train corpus file') parser.add_argument('test_path', type=str, help='path to the test corpus file') parser.add_argument('train_label', type=str, help='path to the train label file') parser.add_argument('test_label', type=str, help='path to the test label file') parser.add_argument('out_dir', type=str, help='path to the output dir') parser.add_argument('-nv', '--n_val', type=int, default=1000, help='validation set size') args = parser.parse_args() docs = load_corpus(args.train_path)['docs'].items() test_docs = load_corpus(args.test_path)['docs'] np.random.seed(0) np.random.shuffle(docs) n_val = args.n_val train_docs = dict(docs[:-n_val]) val_docs = dict(docs[-n_val:]) # doc_labels = load_json(args.train_label) # test_labels = load_json(args.test_label) doc_labels = None test_labels = None <|code_end|> , predict the next line using imports from the current file: import os import sys import argparse import numpy as np import pdb;pdb.set_trace() from autoencoder.preprocessing.preprocessing import load_corpus, corpus2libsvm from autoencoder.utils.io_utils import load_json and context including class names, function names, and sometimes code from other files: # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # def corpus2libsvm(docs, doc_labels, output): # '''Convert the corpus format to libsvm format. 
# ''' # data = [] # names = [] # for key, val in docs.iteritems(): # # label = doc_labels[key] # label = 0 # line = label if isinstance(label, list) else [str(label)] + ["%s:%s" % (int(x) + 1, y) for x, y in val.iteritems()] # data.append(line) # names.append(key) # write_file(data, output) # write_file(names, output + '.fnames') # return data, names # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data . Output only the next line.
train = corpus2libsvm(train_docs, doc_labels, os.path.join(args.out_dir, 'train.libsvm'))
Predict the next line for this snippet: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('doc_codes_file', type=str, help='path to the input corpus file') parser.add_argument('doc_labels_file', type=str, help='path to the output doc codes file') parser.add_argument('cmd', choices=['pca', 'tsne'], help='plot cmd') parser.add_argument('-o', '--output', type=str, default='out.png', help='path to the output file') args = parser.parse_args() cmd = args.cmd.lower() classes_to_visual = {'ECAT': 'ECONOMICS', 'MCAT': 'MARKETS', 'CCAT': 'CORPORATE/INDUSTRIAL', 'GCAT': 'GOVERNMENT/SOCIAL'} if cmd == 'pca': <|code_end|> with the help of current file imports: import argparse from autoencoder.testing.visualize import reuters_visualize_pca_2d, reuters_visualize_tsne from autoencoder.utils.io_utils import load_json and context from other files: # Path: autoencoder/testing/visualize.py # def reuters_visualize_pca_2d(doc_codes, doc_labels, classes_to_visual, save_file): # """ # Visualize the input data on a 2D PCA plot. Depending on the number of components, # the plot will contain an X amount of subplots. # @param doc_codes: # @param number_of_components: The number of principal components for the PCA plot. 
# """ # # # markers = ["p", "s", "h", "H", "+", "x", "D"] # markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"] # # C = len(classes_to_visual) # while True: # if C <= len(markers): # break # markers += markers # # class_names = classes_to_visual.keys() # class_ids = dict(zip(class_names, range(C))) # class_names = set(class_names) # codes, labels = zip(*[(code, class_names.intersection(set(doc_labels[doc]))) for doc, code in doc_codes.items() if len(class_names.intersection(set(doc_labels[doc]))) == 1]) # # codes = [] # # labels = [] # # for doc, code in doc_codes.items(): # # y = set(doc_labels[doc]) # # x = list(class_names.intersection(y)) # # if x: # # codes.append(code) # # labels.append(x[0]) # # x = 0 # # pairs = [] # # for each in labels: # # if len(class_names.intersection(set(each))) > 1: # # x += 1 # # pairs.append(class_names.intersection(set(each))) # # print x # # # X = np.r_[list(codes)] # X = PCA(n_components=3).fit_transform(X) # plt.figure(figsize=(10, 10), facecolor='white') # # x_pc, y_pc = 0, 1 # # for c in class_names: # idx = get_indices(labels, c) # plt.plot(X[idx, x_pc], X[idx, y_pc], linestyle='None', alpha=0.6, marker=markers[class_ids[c]], # markersize=6, label=classes_to_visual[c]) # # plt.legend(c) # plt.title('Projected on the first 2 PCs') # plt.xlabel('PC %s' % x_pc) # plt.ylabel('PC %s' % y_pc) # legend = plt.legend(loc='upper center', shadow=True) # plt.savefig(save_file) # plt.show() # # def reuters_visualize_tsne(doc_codes, doc_labels, classes_to_visual, save_file): # """ # Visualize the input data on a 2D PCA plot. Depending on the number of components, # the plot will contain an X amount of subplots. # @param doc_codes: # @param number_of_components: The number of principal components for the PCA plot. 
# """ # # # markers = ["p", "s", "h", "H", "+", "x", "D"] # markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"] # # C = len(classes_to_visual) # while True: # if C <= len(markers): # break # markers += markers # # class_names = classes_to_visual.keys() # class_ids = dict(zip(class_names, range(C))) # class_names = set(class_names) # codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items() if class_names.intersection(set(doc_labels[doc]))]) # # X = np.r_[list(codes)] # tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000) # np.set_printoptions(suppress=True) # X = tsne.fit_transform(X) # # plt.figure(figsize=(10, 10), facecolor='white') # # for c in classes_to_visual.keys(): # idx = get_indices(labels, c) # plt.plot(X[idx, 0], X[idx, 1], linestyle='None', alpha=0.6, marker=markers[class_ids[c]], # markersize=6, label=classes_to_visual[c]) # legend = plt.legend(loc='upper center', shadow=True) # plt.title("tsne") # plt.savefig(save_file) # plt.show() # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data , which may contain function names, class names, or code. Output only the next line.
reuters_visualize_pca_2d(load_json(args.doc_codes_file), load_json(args.doc_labels_file), classes_to_visual, args.output)
Given the following code snippet before the placeholder: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('doc_codes_file', type=str, help='path to the input corpus file') parser.add_argument('doc_labels_file', type=str, help='path to the output doc codes file') parser.add_argument('cmd', choices=['pca', 'tsne'], help='plot cmd') parser.add_argument('-o', '--output', type=str, default='out.png', help='path to the output file') args = parser.parse_args() cmd = args.cmd.lower() classes_to_visual = {'ECAT': 'ECONOMICS', 'MCAT': 'MARKETS', 'CCAT': 'CORPORATE/INDUSTRIAL', 'GCAT': 'GOVERNMENT/SOCIAL'} if cmd == 'pca': reuters_visualize_pca_2d(load_json(args.doc_codes_file), load_json(args.doc_labels_file), classes_to_visual, args.output) elif cmd == 'tsne': <|code_end|> , predict the next line using imports from the current file: import argparse from autoencoder.testing.visualize import reuters_visualize_pca_2d, reuters_visualize_tsne from autoencoder.utils.io_utils import load_json and context including class names, function names, and sometimes code from other files: # Path: autoencoder/testing/visualize.py # def reuters_visualize_pca_2d(doc_codes, doc_labels, classes_to_visual, save_file): # """ # Visualize the input data on a 2D PCA plot. Depending on the number of components, # the plot will contain an X amount of subplots. # @param doc_codes: # @param number_of_components: The number of principal components for the PCA plot. 
# """ # # # markers = ["p", "s", "h", "H", "+", "x", "D"] # markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"] # # C = len(classes_to_visual) # while True: # if C <= len(markers): # break # markers += markers # # class_names = classes_to_visual.keys() # class_ids = dict(zip(class_names, range(C))) # class_names = set(class_names) # codes, labels = zip(*[(code, class_names.intersection(set(doc_labels[doc]))) for doc, code in doc_codes.items() if len(class_names.intersection(set(doc_labels[doc]))) == 1]) # # codes = [] # # labels = [] # # for doc, code in doc_codes.items(): # # y = set(doc_labels[doc]) # # x = list(class_names.intersection(y)) # # if x: # # codes.append(code) # # labels.append(x[0]) # # x = 0 # # pairs = [] # # for each in labels: # # if len(class_names.intersection(set(each))) > 1: # # x += 1 # # pairs.append(class_names.intersection(set(each))) # # print x # # # X = np.r_[list(codes)] # X = PCA(n_components=3).fit_transform(X) # plt.figure(figsize=(10, 10), facecolor='white') # # x_pc, y_pc = 0, 1 # # for c in class_names: # idx = get_indices(labels, c) # plt.plot(X[idx, x_pc], X[idx, y_pc], linestyle='None', alpha=0.6, marker=markers[class_ids[c]], # markersize=6, label=classes_to_visual[c]) # # plt.legend(c) # plt.title('Projected on the first 2 PCs') # plt.xlabel('PC %s' % x_pc) # plt.ylabel('PC %s' % y_pc) # legend = plt.legend(loc='upper center', shadow=True) # plt.savefig(save_file) # plt.show() # # def reuters_visualize_tsne(doc_codes, doc_labels, classes_to_visual, save_file): # """ # Visualize the input data on a 2D PCA plot. Depending on the number of components, # the plot will contain an X amount of subplots. # @param doc_codes: # @param number_of_components: The number of principal components for the PCA plot. 
# """ # # # markers = ["p", "s", "h", "H", "+", "x", "D"] # markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"] # # C = len(classes_to_visual) # while True: # if C <= len(markers): # break # markers += markers # # class_names = classes_to_visual.keys() # class_ids = dict(zip(class_names, range(C))) # class_names = set(class_names) # codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items() if class_names.intersection(set(doc_labels[doc]))]) # # X = np.r_[list(codes)] # tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000) # np.set_printoptions(suppress=True) # X = tsne.fit_transform(X) # # plt.figure(figsize=(10, 10), facecolor='white') # # for c in classes_to_visual.keys(): # idx = get_indices(labels, c) # plt.plot(X[idx, 0], X[idx, 1], linestyle='None', alpha=0.6, marker=markers[class_ids[c]], # markersize=6, label=classes_to_visual[c]) # legend = plt.legend(loc='upper center', shadow=True) # plt.title("tsne") # plt.savefig(save_file) # plt.show() # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data . Output only the next line.
reuters_visualize_tsne(load_json(args.doc_codes_file), load_json(args.doc_labels_file), classes_to_visual, args.output)
Given snippet: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('doc_codes_file', type=str, help='path to the input corpus file') parser.add_argument('doc_labels_file', type=str, help='path to the output doc codes file') parser.add_argument('cmd', choices=['pca', 'tsne'], help='plot cmd') parser.add_argument('-o', '--output', type=str, default='out.png', help='path to the output file') args = parser.parse_args() cmd = args.cmd.lower() classes_to_visual = {'ECAT': 'ECONOMICS', 'MCAT': 'MARKETS', 'CCAT': 'CORPORATE/INDUSTRIAL', 'GCAT': 'GOVERNMENT/SOCIAL'} if cmd == 'pca': <|code_end|> , continue by predicting the next line. Consider current file imports: import argparse from autoencoder.testing.visualize import reuters_visualize_pca_2d, reuters_visualize_tsne from autoencoder.utils.io_utils import load_json and context: # Path: autoencoder/testing/visualize.py # def reuters_visualize_pca_2d(doc_codes, doc_labels, classes_to_visual, save_file): # """ # Visualize the input data on a 2D PCA plot. Depending on the number of components, # the plot will contain an X amount of subplots. # @param doc_codes: # @param number_of_components: The number of principal components for the PCA plot. 
# """ # # # markers = ["p", "s", "h", "H", "+", "x", "D"] # markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"] # # C = len(classes_to_visual) # while True: # if C <= len(markers): # break # markers += markers # # class_names = classes_to_visual.keys() # class_ids = dict(zip(class_names, range(C))) # class_names = set(class_names) # codes, labels = zip(*[(code, class_names.intersection(set(doc_labels[doc]))) for doc, code in doc_codes.items() if len(class_names.intersection(set(doc_labels[doc]))) == 1]) # # codes = [] # # labels = [] # # for doc, code in doc_codes.items(): # # y = set(doc_labels[doc]) # # x = list(class_names.intersection(y)) # # if x: # # codes.append(code) # # labels.append(x[0]) # # x = 0 # # pairs = [] # # for each in labels: # # if len(class_names.intersection(set(each))) > 1: # # x += 1 # # pairs.append(class_names.intersection(set(each))) # # print x # # # X = np.r_[list(codes)] # X = PCA(n_components=3).fit_transform(X) # plt.figure(figsize=(10, 10), facecolor='white') # # x_pc, y_pc = 0, 1 # # for c in class_names: # idx = get_indices(labels, c) # plt.plot(X[idx, x_pc], X[idx, y_pc], linestyle='None', alpha=0.6, marker=markers[class_ids[c]], # markersize=6, label=classes_to_visual[c]) # # plt.legend(c) # plt.title('Projected on the first 2 PCs') # plt.xlabel('PC %s' % x_pc) # plt.ylabel('PC %s' % y_pc) # legend = plt.legend(loc='upper center', shadow=True) # plt.savefig(save_file) # plt.show() # # def reuters_visualize_tsne(doc_codes, doc_labels, classes_to_visual, save_file): # """ # Visualize the input data on a 2D PCA plot. Depending on the number of components, # the plot will contain an X amount of subplots. # @param doc_codes: # @param number_of_components: The number of principal components for the PCA plot. 
# """ # # # markers = ["p", "s", "h", "H", "+", "x", "D"] # markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"] # # C = len(classes_to_visual) # while True: # if C <= len(markers): # break # markers += markers # # class_names = classes_to_visual.keys() # class_ids = dict(zip(class_names, range(C))) # class_names = set(class_names) # codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items() if class_names.intersection(set(doc_labels[doc]))]) # # X = np.r_[list(codes)] # tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000) # np.set_printoptions(suppress=True) # X = tsne.fit_transform(X) # # plt.figure(figsize=(10, 10), facecolor='white') # # for c in classes_to_visual.keys(): # idx = get_indices(labels, c) # plt.plot(X[idx, 0], X[idx, 1], linestyle='None', alpha=0.6, marker=markers[class_ids[c]], # markersize=6, label=classes_to_visual[c]) # legend = plt.legend(loc='upper center', shadow=True) # plt.title("tsne") # plt.savefig(save_file) # plt.show() # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data which might include code, classes, or functions. Output only the next line.
reuters_visualize_pca_2d(load_json(args.doc_codes_file), load_json(args.doc_labels_file), classes_to_visual, args.output)
Predict the next line after this snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('train_doc_codes', type=str, help='path to the train doc code file') parser.add_argument('val_doc_codes', type=str, help='path to the valid doc code file') parser.add_argument('out_dir', type=str, help='path to the output dir') args = parser.parse_args() train_doc_codes_path = args.train_doc_codes <|code_end|> using the current file's imports: import os import sys import argparse import numpy as np import pdb;pdb.set_trace() from autoencoder.utils.io_utils import load_json, dump_json and any relevant context from other files: # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
train_doc_codes = load_json(train_doc_codes_path)
Given snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('train_doc_codes', type=str, help='path to the train doc code file') parser.add_argument('val_doc_codes', type=str, help='path to the valid doc code file') parser.add_argument('out_dir', type=str, help='path to the output dir') args = parser.parse_args() train_doc_codes_path = args.train_doc_codes train_doc_codes = load_json(train_doc_codes_path) val_doc_codes = load_json(args.val_doc_codes) # import pdb;pdb.set_trace() train_doc_codes.update(val_doc_codes) out_dir = args.out_dir <|code_end|> , continue by predicting the next line. Consider current file imports: import os import sys import argparse import numpy as np import pdb;pdb.set_trace() from autoencoder.utils.io_utils import load_json, dump_json and context: # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e which might include code, classes, or functions. Output only the next line.
dump_json(train_doc_codes, os.path.join(out_dir, 'new_' + os.path.basename(train_doc_codes_path)))
Predict the next line for this snippet: <|code_start|> parser.add_argument('-cv', '--cross_validation', type=int, help='k-fold cross validation') args = parser.parse_args() # autoencoder # train_doc_codes = load_json(args.train_doc_codes) # train_doc_labels = load_json(args.train_doc_labels) # test_doc_codes = load_json(args.test_doc_codes) # test_doc_labels = load_json(args.test_doc_labels) # X_train = np.r_[train_doc_codes.values()] # Y_train = np.array([train_doc_labels[i] for i in train_doc_codes]) # X_test = np.r_[test_doc_codes.values()] # Y_test = np.array([test_doc_labels[i] for i in test_doc_codes]) # # DBN X_train = np.array(load_pickle(args.train_doc_codes)) Y_train = load_pickle(args.train_doc_labels) X_test = np.array(load_pickle(args.test_doc_codes)) Y_test = load_pickle(args.test_doc_labels) seed = 7 np.random.seed(seed) if not args.cross_validation: val_idx = np.random.choice(range(X_train.shape[0]), args.n_val, replace=False) train_idx = list(set(range(X_train.shape[0])) - set(val_idx)) X_new_train = X_train[train_idx] Y_new_train = Y_train[train_idx] X_new_val = X_train[val_idx] Y_new_val = Y_train[val_idx] print 'train: %s, val: %s, test: %s' % (X_new_train.shape[0], X_new_val.shape[0], X_test.shape[0]) <|code_end|> with the help of current file imports: import argparse import numpy as np import pdb;pdb.set_trace() from keras.utils import np_utils from sklearn.model_selection import ShuffleSplit from autoencoder.testing.regression import neural_regression from autoencoder.utils.io_utils import load_json, load_pickle and context from other files: # Path: autoencoder/testing/regression.py # def neural_regression(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7): # reg = neural_network(X_train.shape[1]) # reg.fit(X_train, Y_train, # nb_epoch=nb_epoch, # batch_size=batch_size, # shuffle=True, # validation_data=(X_val, Y_val), # callbacks=[ # ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01), 
# EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'), # ] # ) # pred = reg.predict(X_test) # pred = np.reshape(pred, pred.shape[0]) # r2 = r2_score(Y_test, pred) # # return r2 # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def load_pickle(path_to_file): # try: # with open(path_to_file, 'r') as f: # data = pickle.load(f) # except Exception as e: # raise e # # return data , which may contain function names, class names, or code. Output only the next line.
results = neural_regression(X_new_train, Y_new_train, X_new_val, Y_new_val, \
Given the following code snippet before the placeholder: <|code_start|> @author: hugo ''' from __future__ import absolute_import def main(): parser = argparse.ArgumentParser() parser.add_argument('train_doc_codes', type=str, help='path to the train doc codes file') parser.add_argument('train_doc_labels', type=str, help='path to the train doc labels file') parser.add_argument('test_doc_codes', type=str, help='path to the test doc codes file') parser.add_argument('test_doc_labels', type=str, help='path to the test doc labels file') parser.add_argument('-nv', '--n_val', type=int, default=1000, help='size of validation set (default 1000)') parser.add_argument('-ne', '--n_epoch', type=int, default=100, help='num of epoches (default 100)') parser.add_argument('-bs', '--batch_size', type=int, default=100, help='batch size (default 100)') parser.add_argument('-cv', '--cross_validation', type=int, help='k-fold cross validation') args = parser.parse_args() # autoencoder # train_doc_codes = load_json(args.train_doc_codes) # train_doc_labels = load_json(args.train_doc_labels) # test_doc_codes = load_json(args.test_doc_codes) # test_doc_labels = load_json(args.test_doc_labels) # X_train = np.r_[train_doc_codes.values()] # Y_train = np.array([train_doc_labels[i] for i in train_doc_codes]) # X_test = np.r_[test_doc_codes.values()] # Y_test = np.array([test_doc_labels[i] for i in test_doc_codes]) # # DBN <|code_end|> , predict the next line using imports from the current file: import argparse import numpy as np import pdb;pdb.set_trace() from keras.utils import np_utils from sklearn.model_selection import ShuffleSplit from autoencoder.testing.regression import neural_regression from autoencoder.utils.io_utils import load_json, load_pickle and context including class names, function names, and sometimes code from other files: # Path: autoencoder/testing/regression.py # def neural_regression(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7): # reg 
= neural_network(X_train.shape[1]) # reg.fit(X_train, Y_train, # nb_epoch=nb_epoch, # batch_size=batch_size, # shuffle=True, # validation_data=(X_val, Y_val), # callbacks=[ # ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01), # EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'), # ] # ) # pred = reg.predict(X_test) # pred = np.reshape(pred, pred.shape[0]) # r2 = r2_score(Y_test, pred) # # return r2 # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def load_pickle(path_to_file): # try: # with open(path_to_file, 'r') as f: # data = pickle.load(f) # except Exception as e: # raise e # # return data . Output only the next line.
X_train = np.array(load_pickle(args.train_doc_codes))
Given snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus # from autoencoder.datasets.reuters import CorpusIterReuters def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=True) corpus_iter = lambda: (TaggedDocument([word for word in sentence if word in vocab], tag) for sentence, tag in corpus) <|code_end|> , continue by predicting the next line. Consider current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() import pdb;pdb.set_trace() from os import path from gensim.models.doc2vec import TaggedDocument from autoencoder.baseline.doc2vec import MyDoc2Vec, save_doc2vec, load_doc2vec, predict from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context: # Path: autoencoder/baseline/doc2vec.py # class MyDoc2Vec(object): # def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1): # super(MyDoc2Vec, self).__init__() # self.dim = dim # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # self.dm = dm # self.dm_concat = dm_concat # # def train(self, corpus): # self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \ # workers=multiprocessing.cpu_count(), hs=self.hs,\ # negative=self.negative, iter=1, dm=self.dm, 
dm_concat=self.dm_concat) # self.model.build_vocab(corpus()) # for each in range(self.epoches): # self.model.train(corpus()) # # return self # # def save_doc2vec(model, outfile): # model.save(outfile) # # def load_doc2vec(mod_file): # return Doc2Vec.load(mod_file) # # def predict(model, corpus): # doc_codes = {} # for doc_words, doc_name in corpus(): # doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist() # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count which might include code, classes, or functions. Output only the next line.
d2v = MyDoc2Vec(args.n_dim, window=args.window_size, \
Given snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus # from autoencoder.datasets.reuters import CorpusIterReuters def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=True) corpus_iter = lambda: (TaggedDocument([word for word in sentence if word in vocab], tag) for sentence, tag in corpus) d2v = MyDoc2Vec(args.n_dim, window=args.window_size, \ negative=args.negative, epoches=args.n_epoch, dm_concat=1) start = timeit.default_timer() d2v.train(corpus_iter) print 'runtime: %ss' % (timeit.default_timer() - start) <|code_end|> , continue by predicting the next line. 
Consider current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() import pdb;pdb.set_trace() from os import path from gensim.models.doc2vec import TaggedDocument from autoencoder.baseline.doc2vec import MyDoc2Vec, save_doc2vec, load_doc2vec, predict from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context: # Path: autoencoder/baseline/doc2vec.py # class MyDoc2Vec(object): # def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1): # super(MyDoc2Vec, self).__init__() # self.dim = dim # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # self.dm = dm # self.dm_concat = dm_concat # # def train(self, corpus): # self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \ # workers=multiprocessing.cpu_count(), hs=self.hs,\ # negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat) # self.model.build_vocab(corpus()) # for each in range(self.epoches): # self.model.train(corpus()) # # return self # # def save_doc2vec(model, outfile): # model.save(outfile) # # def load_doc2vec(mod_file): # return Doc2Vec.load(mod_file) # # def predict(model, corpus): # doc_codes = {} # for doc_words, doc_name in corpus(): # doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist() # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: 
autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count which might include code, classes, or functions. Output only the next line.
save_doc2vec(d2v.model, args.save_model)
Here is a snippet: <|code_start|> def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=True) corpus_iter = lambda: (TaggedDocument([word for word in sentence if word in vocab], tag) for sentence, tag in corpus) d2v = MyDoc2Vec(args.n_dim, window=args.window_size, \ negative=args.negative, epoches=args.n_epoch, dm_concat=1) start = timeit.default_timer() d2v.train(corpus_iter) print 'runtime: %ss' % (timeit.default_timer() - start) save_doc2vec(d2v.model, args.save_model) def test(args): vocab = load_json(args.vocab) # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=True) corpus_iter = lambda: (TaggedDocument([word for word in sentence if word in vocab], tag) for sentence, tag in corpus) <|code_end|> . 
Write the next line using the current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() import pdb;pdb.set_trace() from os import path from gensim.models.doc2vec import TaggedDocument from autoencoder.baseline.doc2vec import MyDoc2Vec, save_doc2vec, load_doc2vec, predict from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context from other files: # Path: autoencoder/baseline/doc2vec.py # class MyDoc2Vec(object): # def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1): # super(MyDoc2Vec, self).__init__() # self.dim = dim # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # self.dm = dm # self.dm_concat = dm_concat # # def train(self, corpus): # self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \ # workers=multiprocessing.cpu_count(), hs=self.hs,\ # negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat) # self.model.build_vocab(corpus()) # for each in range(self.epoches): # self.model.train(corpus()) # # return self # # def save_doc2vec(model, outfile): # model.save(outfile) # # def load_doc2vec(mod_file): # return Doc2Vec.load(mod_file) # # def predict(model, corpus): # doc_codes = {} # for doc_words, doc_name in corpus(): # doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist() # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: 
# raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count , which may include functions, classes, or code. Output only the next line.
d2v = load_doc2vec(args.load_model)
Continue the code snippet: <|code_start|>def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=True) corpus_iter = lambda: (TaggedDocument([word for word in sentence if word in vocab], tag) for sentence, tag in corpus) d2v = MyDoc2Vec(args.n_dim, window=args.window_size, \ negative=args.negative, epoches=args.n_epoch, dm_concat=1) start = timeit.default_timer() d2v.train(corpus_iter) print 'runtime: %ss' % (timeit.default_timer() - start) save_doc2vec(d2v.model, args.save_model) def test(args): vocab = load_json(args.vocab) # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=True) corpus_iter = lambda: (TaggedDocument([word for word in sentence if word in vocab], tag) for sentence, tag in corpus) d2v = load_doc2vec(args.load_model) <|code_end|> . 
Use current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() import pdb;pdb.set_trace() from os import path from gensim.models.doc2vec import TaggedDocument from autoencoder.baseline.doc2vec import MyDoc2Vec, save_doc2vec, load_doc2vec, predict from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context (classes, functions, or code) from other files: # Path: autoencoder/baseline/doc2vec.py # class MyDoc2Vec(object): # def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1): # super(MyDoc2Vec, self).__init__() # self.dim = dim # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # self.dm = dm # self.dm_concat = dm_concat # # def train(self, corpus): # self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \ # workers=multiprocessing.cpu_count(), hs=self.hs,\ # negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat) # self.model.build_vocab(corpus()) # for each in range(self.epoches): # self.model.train(corpus()) # # return self # # def save_doc2vec(model, outfile): # model.save(outfile) # # def load_doc2vec(mod_file): # return Doc2Vec.load(mod_file) # # def predict(model, corpus): # doc_codes = {} # for doc_words, doc_name in corpus(): # doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist() # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception 
as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count . Output only the next line.
doc_codes = predict(d2v, corpus_iter)
Given the code snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus # from autoencoder.datasets.reuters import CorpusIterReuters def train(args): <|code_end|> , generate the next line using the imports in this file: import argparse import timeit import numpy as np import pdb;pdb.set_trace() import pdb;pdb.set_trace() from os import path from gensim.models.doc2vec import TaggedDocument from autoencoder.baseline.doc2vec import MyDoc2Vec, save_doc2vec, load_doc2vec, predict from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context (functions, classes, or occasionally code) from other files: # Path: autoencoder/baseline/doc2vec.py # class MyDoc2Vec(object): # def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1): # super(MyDoc2Vec, self).__init__() # self.dim = dim # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # self.dm = dm # self.dm_concat = dm_concat # # def train(self, corpus): # self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \ # workers=multiprocessing.cpu_count(), hs=self.hs,\ # negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat) # self.model.build_vocab(corpus()) # for each in range(self.epoches): # self.model.train(corpus()) # # return self # # def save_doc2vec(model, outfile): # model.save(outfile) # # def load_doc2vec(mod_file): # return Doc2Vec.load(mod_file) # # def predict(model, corpus): # doc_codes = {} # for doc_words, doc_name in corpus(): # doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist() # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 
'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count . Output only the next line.
vocab = load_json(args.vocab)
Predict the next line for this snippet: <|code_start|> vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=True) corpus_iter = lambda: (TaggedDocument([word for word in sentence if word in vocab], tag) for sentence, tag in corpus) d2v = MyDoc2Vec(args.n_dim, window=args.window_size, \ negative=args.negative, epoches=args.n_epoch, dm_concat=1) start = timeit.default_timer() d2v.train(corpus_iter) print 'runtime: %ss' % (timeit.default_timer() - start) save_doc2vec(d2v.model, args.save_model) def test(args): vocab = load_json(args.vocab) # load corpus corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True) # corpus = CorpusIterMRD(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterWiki10plus(args.corpus[0], load_json(args.docnames), stem=True, with_docname=True) # corpus = CorpusIterReuters(args.corpus, load_json(args.docnames), with_docname=True) corpus_iter = lambda: (TaggedDocument([word for word in sentence if word in vocab], tag) for sentence, tag in corpus) d2v = load_doc2vec(args.load_model) doc_codes = predict(d2v, corpus_iter) <|code_end|> with the help of current file imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() import pdb;pdb.set_trace() from os import path from gensim.models.doc2vec import TaggedDocument from autoencoder.baseline.doc2vec import MyDoc2Vec, save_doc2vec, load_doc2vec, predict from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news 
import CorpusIter20News and context from other files: # Path: autoencoder/baseline/doc2vec.py # class MyDoc2Vec(object): # def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1): # super(MyDoc2Vec, self).__init__() # self.dim = dim # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # self.dm = dm # self.dm_concat = dm_concat # # def train(self, corpus): # self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \ # workers=multiprocessing.cpu_count(), hs=self.hs,\ # negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat) # self.model.build_vocab(corpus()) # for each in range(self.epoches): # self.model.train(corpus()) # # return self # # def save_doc2vec(model, outfile): # model.save(outfile) # # def load_doc2vec(mod_file): # return Doc2Vec.load(mod_file) # # def predict(model, corpus): # doc_codes = {} # for doc_words, doc_name in corpus(): # doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist() # # return doc_codes # # Path: autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for 
filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count , which may contain function names, class names, or code. Output only the next line.
dump_json(doc_codes, args.output)
Using the snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import # from autoencoder.datasets.movie_review_data import CorpusIterMRD # from autoencoder.datasets.wiki10plus import CorpusIterWiki10plus # from autoencoder.datasets.reuters import CorpusIterReuters def train(args): vocab = load_json(args.vocab) # import pdb;pdb.set_trace() # load corpus <|code_end|> , determine the next line of code. You have imports: import argparse import timeit import numpy as np import pdb;pdb.set_trace() import pdb;pdb.set_trace() from os import path from gensim.models.doc2vec import TaggedDocument from autoencoder.baseline.doc2vec import MyDoc2Vec, save_doc2vec, load_doc2vec, predict from autoencoder.utils.io_utils import load_json, dump_json, write_file from autoencoder.preprocessing.preprocessing import load_corpus from autoencoder.datasets.the20news import CorpusIter20News and context (class names, function names, or code) available: # Path: autoencoder/baseline/doc2vec.py # class MyDoc2Vec(object): # def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1): # super(MyDoc2Vec, self).__init__() # self.dim = dim # self.hs = hs # self.window = window # self.negative = negative # self.epoches = epoches # self.dm = dm # self.dm_concat = dm_concat # # def train(self, corpus): # self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \ # workers=multiprocessing.cpu_count(), hs=self.hs,\ # negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat) # self.model.build_vocab(corpus()) # for each in range(self.epoches): # self.model.train(corpus()) # # return self # # def save_doc2vec(model, outfile): # model.save(outfile) # # def load_doc2vec(mod_file): # return Doc2Vec.load(mod_file) # # def predict(model, corpus): # doc_codes = {} # for doc_words, doc_name in corpus(): # doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist() # # return doc_codes # # Path: 
autoencoder/utils/io_utils.py # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e # # Path: autoencoder/preprocessing/preprocessing.py # def load_corpus(corpus_path): # corpus = load_json(corpus_path) # # return corpus # # Path: autoencoder/datasets/the20news.py # class CorpusIter20News(object): # def __init__(self, corpus_path, recursive=False, stem=True, with_docname=False): # self.stem = stem # self.with_docname = with_docname # self.files = get_all_files(corpus_path, recursive) # # def __iter__(self): # shuffle(self.files) # count = 0 # for filename in self.files: # try: # with open(filename, 'r') as fp: # text = fp.read().lower() # # remove punctuations, stopwords and *unnecessary digits*, stemming # words = tiny_tokenize(text, self.stem, cached_stop_words) # count += 1 # if self.with_docname: # parent_name, child_name = os.path.split(filename) # doc_name = os.path.split(parent_name)[-1] + '_' + child_name # yield [words, [doc_name]] # else: # yield words # except Exception as e: # raise e # print count . Output only the next line.
corpus = CorpusIter20News(args.corpus[0], recursive=True, stem=True, with_docname=True)
Given the following code snippet before the placeholder: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-train', '--train_path', type=str, required=True, help='path to the training corpus') parser.add_argument('-test', '--test_path', type=str, required=True, help='path to the test corpus') parser.add_argument('-o', '--out_dir', type=str, required=True, help='path to the output dir') parser.add_argument('-threshold', '--threshold', type=int, default=5, help='word frequency threshold (default 5)') parser.add_argument('-topn', '--topn', type=int, default=2000, help='top n words (default 2000)') args = parser.parse_args() <|code_end|> , predict the next line using imports from the current file: import os import argparse from autoencoder.preprocessing.preprocessing import construct_train_test_corpus, generate_20news_doc_labels#, generate_8k_doc_labels and context including class names, function names, and sometimes code from other files: # Path: autoencoder/preprocessing/preprocessing.py # def construct_train_test_corpus(train_path, test_path, output, threshold=5, topn=None): # train_docs, vocab_dict, train_word_freq = construct_corpus(train_path, True, threshold=threshold, topn=topn, recursive=True) # train_corpus = {'docs': train_docs, 'vocab': vocab_dict, 'word_freq': train_word_freq} # dump_json(train_corpus, os.path.join(output, 'train.corpus')) # print 'Generated training corpus' # # test_docs, _, _ = construct_corpus(test_path, False, vocab_dict=vocab_dict, recursive=True) # test_corpus = {'docs': test_docs, 'vocab': vocab_dict} # dump_json(test_corpus, os.path.join(output, 'test.corpus')) # print 'Generated test corpus' # # return train_corpus, test_corpus # # def generate_20news_doc_labels(doc_names, output): # doc_labels = {} # for each in doc_names: # label = each.split('_')[0] # doc_labels[each] = label # # dump_json(doc_labels, output) # # return doc_labels . 
Output only the next line.
train_corpus, test_corpus = construct_train_test_corpus(args.train_path, args.test_path, args.out_dir, threshold=args.threshold, topn=args.topn)
Predict the next line after this snippet: <|code_start|>''' Created on Dec, 2016 @author: hugo ''' def main(): parser = argparse.ArgumentParser() parser.add_argument('-train', '--train_path', type=str, required=True, help='path to the training corpus') parser.add_argument('-test', '--test_path', type=str, required=True, help='path to the test corpus') parser.add_argument('-o', '--out_dir', type=str, required=True, help='path to the output dir') parser.add_argument('-threshold', '--threshold', type=int, default=5, help='word frequency threshold (default 5)') parser.add_argument('-topn', '--topn', type=int, default=2000, help='top n words (default 2000)') args = parser.parse_args() train_corpus, test_corpus = construct_train_test_corpus(args.train_path, args.test_path, args.out_dir, threshold=args.threshold, topn=args.topn) <|code_end|> using the current file's imports: import os import argparse from autoencoder.preprocessing.preprocessing import construct_train_test_corpus, generate_20news_doc_labels#, generate_8k_doc_labels and any relevant context from other files: # Path: autoencoder/preprocessing/preprocessing.py # def construct_train_test_corpus(train_path, test_path, output, threshold=5, topn=None): # train_docs, vocab_dict, train_word_freq = construct_corpus(train_path, True, threshold=threshold, topn=topn, recursive=True) # train_corpus = {'docs': train_docs, 'vocab': vocab_dict, 'word_freq': train_word_freq} # dump_json(train_corpus, os.path.join(output, 'train.corpus')) # print 'Generated training corpus' # # test_docs, _, _ = construct_corpus(test_path, False, vocab_dict=vocab_dict, recursive=True) # test_corpus = {'docs': test_docs, 'vocab': vocab_dict} # dump_json(test_corpus, os.path.join(output, 'test.corpus')) # print 'Generated test corpus' # # return train_corpus, test_corpus # # def generate_20news_doc_labels(doc_names, output): # doc_labels = {} # for each in doc_names: # label = each.split('_')[0] # doc_labels[each] = label # # 
dump_json(doc_labels, output) # # return doc_labels . Output only the next line.
train_labels = generate_20news_doc_labels(train_corpus['docs'].keys(), os.path.join(args.out_dir, 'train.labels'))
Using the snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import pattern = r'>([^<>]+)<' prog = re.compile(pattern) <|code_end|> , determine the next line of code. You have imports: import os import re import numpy as np import pdb;pdb.set_trace() from random import shuffle from collections import Counter from ..preprocessing.preprocessing import init_stopwords, tiny_tokenize_xml, tiny_tokenize, get_all_files, count_words from ..datasets.reuters import construct_corpus from ..utils.io_utils import dump_json and context (class names, function names, or code) available: # Path: autoencoder/preprocessing/preprocessing.py # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize_xml(text, stem=False, stop_words=[]): # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.encode(encoding='ascii', errors='ignore'))) if # not token.isdigit() and not token in stop_words] # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and 
not token in stop_words] # # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def count_words(docs): # # count the number of times a word appears in a corpus # word_freq = defaultdict(lambda: 0) # for each in docs: # for word, val in each.iteritems(): # word_freq[word] += val # # return word_freq # # Path: autoencoder/datasets/reuters.py # def construct_corpus(doc_word_freq, word_freq, training_phase, vocab_dict=None, threshold=5, topn=None): # if not (training_phase or isinstance(vocab_dict, dict)): # raise ValueError('vocab_dict must be provided if training_phase is set False') # # if training_phase: # vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn) # # docs = generate_bow(doc_word_freq, vocab_dict) # new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.iteritems() if word in vocab_dict]) # # return docs, vocab_dict, new_word_freq # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
cached_stop_words = init_stopwords()
Using the snippet: <|code_start|> self.stem = stem self.train_docs = train_docs self.with_docname = with_docname self.files = get_all_files(corpus_dir, False) def __iter__(self): shuffle(self.files) count = 0 for filename in self.files: doc_name = os.path.basename(filename) if not doc_name in self.train_docs: continue try: with open(filename, 'r') as fp: count += 1 text = fp.read().lower() # remove punctuations, stopwords and *unnecessary digits*, stemming words = tiny_tokenize(text, self.stem, cached_stop_words) if self.with_docname: yield [words, [doc_name]] else: yield words except Exception as e: raise e print count def extract_contents(text, out_file): if not isinstance(text, unicode): text = text.decode('utf-8') contents = ' '.join(prog.findall(text)) <|code_end|> , determine the next line of code. You have imports: import os import re import numpy as np import pdb;pdb.set_trace() from random import shuffle from collections import Counter from ..preprocessing.preprocessing import init_stopwords, tiny_tokenize_xml, tiny_tokenize, get_all_files, count_words from ..datasets.reuters import construct_corpus from ..utils.io_utils import dump_json and context (class names, function names, or code) available: # Path: autoencoder/preprocessing/preprocessing.py # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize_xml(text, stem=False, stop_words=[]): # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.encode(encoding='ascii', errors='ignore'))) if # not token.isdigit() and not token in stop_words] # # def tiny_tokenize(text, 
stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] # # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def count_words(docs): # # count the number of times a word appears in a corpus # word_freq = defaultdict(lambda: 0) # for each in docs: # for word, val in each.iteritems(): # word_freq[word] += val # # return word_freq # # Path: autoencoder/datasets/reuters.py # def construct_corpus(doc_word_freq, word_freq, training_phase, vocab_dict=None, threshold=5, topn=None): # if not (training_phase or isinstance(vocab_dict, dict)): # raise ValueError('vocab_dict must be provided if training_phase is set False') # # if training_phase: # vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn) # # docs = generate_bow(doc_word_freq, vocab_dict) # new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.iteritems() if word in vocab_dict]) # # return docs, vocab_dict, new_word_freq # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # 
json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
contents = tiny_tokenize_xml(contents, False, cached_stop_words)
Predict the next line after this snippet: <|code_start|>@author: hugo ''' from __future__ import absolute_import pattern = r'>([^<>]+)<' prog = re.compile(pattern) cached_stop_words = init_stopwords() class CorpusIterWiki10plus(object): def __init__(self, corpus_dir, train_docs, stem=True, with_docname=False): self.stem = stem self.train_docs = train_docs self.with_docname = with_docname self.files = get_all_files(corpus_dir, False) def __iter__(self): shuffle(self.files) count = 0 for filename in self.files: doc_name = os.path.basename(filename) if not doc_name in self.train_docs: continue try: with open(filename, 'r') as fp: count += 1 text = fp.read().lower() # remove punctuations, stopwords and *unnecessary digits*, stemming <|code_end|> using the current file's imports: import os import re import numpy as np import pdb;pdb.set_trace() from random import shuffle from collections import Counter from ..preprocessing.preprocessing import init_stopwords, tiny_tokenize_xml, tiny_tokenize, get_all_files, count_words from ..datasets.reuters import construct_corpus from ..utils.io_utils import dump_json and any relevant context from other files: # Path: autoencoder/preprocessing/preprocessing.py # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize_xml(text, stem=False, stop_words=[]): # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.encode(encoding='ascii', errors='ignore'))) if # not token.isdigit() and not token in stop_words] # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in 
wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] # # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def count_words(docs): # # count the number of times a word appears in a corpus # word_freq = defaultdict(lambda: 0) # for each in docs: # for word, val in each.iteritems(): # word_freq[word] += val # # return word_freq # # Path: autoencoder/datasets/reuters.py # def construct_corpus(doc_word_freq, word_freq, training_phase, vocab_dict=None, threshold=5, topn=None): # if not (training_phase or isinstance(vocab_dict, dict)): # raise ValueError('vocab_dict must be provided if training_phase is set False') # # if training_phase: # vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn) # # docs = generate_bow(doc_word_freq, vocab_dict) # new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.iteritems() if word in vocab_dict]) # # return docs, vocab_dict, new_word_freq # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # 
raise e . Output only the next line.
words = tiny_tokenize(text, self.stem, cached_stop_words)
Continue the code snippet: <|code_start|>''' Created on Jan, 2017 @author: hugo ''' from __future__ import absolute_import pattern = r'>([^<>]+)<' prog = re.compile(pattern) cached_stop_words = init_stopwords() class CorpusIterWiki10plus(object): def __init__(self, corpus_dir, train_docs, stem=True, with_docname=False): self.stem = stem self.train_docs = train_docs self.with_docname = with_docname <|code_end|> . Use current file imports: import os import re import numpy as np import pdb;pdb.set_trace() from random import shuffle from collections import Counter from ..preprocessing.preprocessing import init_stopwords, tiny_tokenize_xml, tiny_tokenize, get_all_files, count_words from ..datasets.reuters import construct_corpus from ..utils.io_utils import dump_json and context (classes, functions, or code) from other files: # Path: autoencoder/preprocessing/preprocessing.py # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize_xml(text, stem=False, stop_words=[]): # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.encode(encoding='ascii', errors='ignore'))) if # not token.isdigit() and not token in stop_words] # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return 
[EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] # # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def count_words(docs): # # count the number of times a word appears in a corpus # word_freq = defaultdict(lambda: 0) # for each in docs: # for word, val in each.iteritems(): # word_freq[word] += val # # return word_freq # # Path: autoencoder/datasets/reuters.py # def construct_corpus(doc_word_freq, word_freq, training_phase, vocab_dict=None, threshold=5, topn=None): # if not (training_phase or isinstance(vocab_dict, dict)): # raise ValueError('vocab_dict must be provided if training_phase is set False') # # if training_phase: # vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn) # # docs = generate_bow(doc_word_freq, vocab_dict) # new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.iteritems() if word in vocab_dict]) # # return docs, vocab_dict, new_word_freq # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
self.files = get_all_files(corpus_dir, False)
Next line prediction: <|code_start|> corpus = {} files = get_all_files(corpus_dir, False) cached_stop_words = [] # cached_stop_words = init_stopwords() count = 0 for filename in files: try: with open(filename, 'r') as fp: text = fp.read().lower() # remove punctuations, stopwords and *unnecessary digits*, stemming words = tiny_tokenize(text, stem, cached_stop_words) corpus[os.path.basename(filename)] = dict(Counter(words)) # doc-word frequency count += 1 except Exception as e: raise e if count % 500 == 0: print count corpus = corpus.items() np.random.seed(seed) np.random.shuffle(corpus) n_docs = len(corpus) train_data = dict(corpus[:-int(n_docs * test_split)]) test_data = dict(corpus[-int(n_docs * test_split):]) return train_data, test_data def construct_train_test_corpus(corpus_dir, test_split, output, threshold=10, topn=2000): train_data, test_data = load_data(corpus_dir, test_split) <|code_end|> . Use current file imports: (import os import re import numpy as np import pdb;pdb.set_trace() from random import shuffle from collections import Counter from ..preprocessing.preprocessing import init_stopwords, tiny_tokenize_xml, tiny_tokenize, get_all_files, count_words from ..datasets.reuters import construct_corpus from ..utils.io_utils import dump_json) and context including class names, function names, or small code snippets from other files: # Path: autoencoder/preprocessing/preprocessing.py # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize_xml(text, stem=False, stop_words=[]): # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # re.sub('[%s]' % re.escape(string.punctuation), ' ', 
text.encode(encoding='ascii', errors='ignore'))) if # not token.isdigit() and not token in stop_words] # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] # # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def count_words(docs): # # count the number of times a word appears in a corpus # word_freq = defaultdict(lambda: 0) # for each in docs: # for word, val in each.iteritems(): # word_freq[word] += val # # return word_freq # # Path: autoencoder/datasets/reuters.py # def construct_corpus(doc_word_freq, word_freq, training_phase, vocab_dict=None, threshold=5, topn=None): # if not (training_phase or isinstance(vocab_dict, dict)): # raise ValueError('vocab_dict must be provided if training_phase is set False') # # if training_phase: # vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn) # # docs = generate_bow(doc_word_freq, vocab_dict) # new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.iteritems() if word in vocab_dict]) # # return docs, 
vocab_dict, new_word_freq # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
train_word_freq = count_words(train_data.values())
Based on the snippet: <|code_start|> cached_stop_words = [] # cached_stop_words = init_stopwords() count = 0 for filename in files: try: with open(filename, 'r') as fp: text = fp.read().lower() # remove punctuations, stopwords and *unnecessary digits*, stemming words = tiny_tokenize(text, stem, cached_stop_words) corpus[os.path.basename(filename)] = dict(Counter(words)) # doc-word frequency count += 1 except Exception as e: raise e if count % 500 == 0: print count corpus = corpus.items() np.random.seed(seed) np.random.shuffle(corpus) n_docs = len(corpus) train_data = dict(corpus[:-int(n_docs * test_split)]) test_data = dict(corpus[-int(n_docs * test_split):]) return train_data, test_data def construct_train_test_corpus(corpus_dir, test_split, output, threshold=10, topn=2000): train_data, test_data = load_data(corpus_dir, test_split) train_word_freq = count_words(train_data.values()) <|code_end|> , predict the immediate next line with the help of imports: import os import re import numpy as np import pdb;pdb.set_trace() from random import shuffle from collections import Counter from ..preprocessing.preprocessing import init_stopwords, tiny_tokenize_xml, tiny_tokenize, get_all_files, count_words from ..datasets.reuters import construct_corpus from ..utils.io_utils import dump_json and context (classes, functions, sometimes code) from other files: # Path: autoencoder/preprocessing/preprocessing.py # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize_xml(text, stem=False, stop_words=[]): # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # re.sub('[%s]' % re.escape(string.punctuation), ' ', 
text.encode(encoding='ascii', errors='ignore'))) if # not token.isdigit() and not token in stop_words] # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] # # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def count_words(docs): # # count the number of times a word appears in a corpus # word_freq = defaultdict(lambda: 0) # for each in docs: # for word, val in each.iteritems(): # word_freq[word] += val # # return word_freq # # Path: autoencoder/datasets/reuters.py # def construct_corpus(doc_word_freq, word_freq, training_phase, vocab_dict=None, threshold=5, topn=None): # if not (training_phase or isinstance(vocab_dict, dict)): # raise ValueError('vocab_dict must be provided if training_phase is set False') # # if training_phase: # vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn) # # docs = generate_bow(doc_word_freq, vocab_dict) # new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.iteritems() if word in vocab_dict]) # # return docs, 
vocab_dict, new_word_freq # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
train_docs, vocab_dict, train_word_freq = construct_corpus(train_data, train_word_freq, True, threshold=threshold, topn=topn)
Next line prediction: <|code_start|> count = 0 for filename in files: try: with open(filename, 'r') as fp: text = fp.read().lower() # remove punctuations, stopwords and *unnecessary digits*, stemming words = tiny_tokenize(text, stem, cached_stop_words) corpus[os.path.basename(filename)] = dict(Counter(words)) # doc-word frequency count += 1 except Exception as e: raise e if count % 500 == 0: print count corpus = corpus.items() np.random.seed(seed) np.random.shuffle(corpus) n_docs = len(corpus) train_data = dict(corpus[:-int(n_docs * test_split)]) test_data = dict(corpus[-int(n_docs * test_split):]) return train_data, test_data def construct_train_test_corpus(corpus_dir, test_split, output, threshold=10, topn=2000): train_data, test_data = load_data(corpus_dir, test_split) train_word_freq = count_words(train_data.values()) train_docs, vocab_dict, train_word_freq = construct_corpus(train_data, train_word_freq, True, threshold=threshold, topn=topn) train_corpus = {'docs': train_docs, 'vocab': vocab_dict, 'word_freq': train_word_freq} <|code_end|> . 
Use current file imports: (import os import re import numpy as np import pdb;pdb.set_trace() from random import shuffle from collections import Counter from ..preprocessing.preprocessing import init_stopwords, tiny_tokenize_xml, tiny_tokenize, get_all_files, count_words from ..datasets.reuters import construct_corpus from ..utils.io_utils import dump_json) and context including class names, function names, or small code snippets from other files: # Path: autoencoder/preprocessing/preprocessing.py # def init_stopwords(): # try: # stopword_path = 'patterns/english_stopwords.txt' # cached_stop_words = load_stopwords(os.path.join(os.path.split(__file__)[0], stopword_path)) # print 'Loaded %s' % stopword_path # except: # from nltk.corpus import stopwords # cached_stop_words = stopwords.words("english") # print 'Loaded nltk.corpus.stopwords' # # return cached_stop_words # # def tiny_tokenize_xml(text, stem=False, stop_words=[]): # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.encode(encoding='ascii', errors='ignore'))) if # not token.isdigit() and not token in stop_words] # # def tiny_tokenize(text, stem=False, stop_words=[]): # words = [] # for token in wordpunct_tokenize(re.sub('[%s]' % re.escape(string.punctuation), ' ', \ # text.decode(encoding='UTF-8', errors='ignore'))): # if not token.isdigit() and not token in stop_words: # if stem: # try: # w = EnglishStemmer().stem(token) # except Exception as e: # w = token # else: # w = token # words.append(w) # # return words # # # return [EnglishStemmer().stem(token) if stem else token for token in wordpunct_tokenize( # # re.sub('[%s]' % re.escape(string.punctuation), ' ', text.decode(encoding='UTF-8', errors='ignore'))) if # # not token.isdigit() and not token in stop_words] # # def get_all_files(corpus_path, recursive=False): # if recursive: # return [os.path.join(root, file) for root, dirnames, filenames in 
os.walk(corpus_path) for file in filenames if os.path.isfile(os.path.join(root, file)) and not file.startswith('.')] # else: # return [os.path.join(corpus_path, filename) for filename in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, filename)) and not filename.startswith('.')] # # def count_words(docs): # # count the number of times a word appears in a corpus # word_freq = defaultdict(lambda: 0) # for each in docs: # for word, val in each.iteritems(): # word_freq[word] += val # # return word_freq # # Path: autoencoder/datasets/reuters.py # def construct_corpus(doc_word_freq, word_freq, training_phase, vocab_dict=None, threshold=5, topn=None): # if not (training_phase or isinstance(vocab_dict, dict)): # raise ValueError('vocab_dict must be provided if training_phase is set False') # # if training_phase: # vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn) # # docs = generate_bow(doc_word_freq, vocab_dict) # new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.iteritems() if word in vocab_dict]) # # return docs, vocab_dict, new_word_freq # # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e . Output only the next line.
dump_json(train_corpus, os.path.join(output, 'train.corpus'))
Based on the snippet: <|code_start|> try: word_count[vocab_dict[word]] = freq except: # word is not in vocab, i.e., this word should be discarded continue docs[key] = word_count return docs def build_vocab(word_freq, threshold=5, topn=None, start_idx=0): """ threshold only take effects when topn is None. words are indexed by overall frequency in the dataset. """ word_freq = sorted(word_freq.iteritems(), key=lambda d:d[1], reverse=True) if topn: word_freq = zip(*word_freq[:topn])[0] vocab_dict = dict(zip(word_freq, range(start_idx, len(word_freq) + start_idx))) else: idx = start_idx vocab_dict = {} for word, freq in word_freq: if freq < threshold: return vocab_dict vocab_dict[word] = idx idx += 1 return vocab_dict def construct_train_test_corpus(train_path, test_path, output, threshold=5, topn=None): train_docs, vocab_dict, train_word_freq = construct_corpus(train_path, True, threshold=threshold, topn=topn, recursive=True) train_corpus = {'docs': train_docs, 'vocab': vocab_dict, 'word_freq': train_word_freq} <|code_end|> , predict the immediate next line with the help of imports: import os import re import string import codecs import numpy as np from collections import defaultdict from nltk.tokenize import wordpunct_tokenize from nltk.stem.porter import PorterStemmer as EnglishStemmer from ..utils.io_utils import dump_json, load_json, write_file from nltk.corpus import stopwords and context (classes, functions, sometimes code) from other files: # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e . Output only the next line.
dump_json(train_corpus, os.path.join(output, 'train.corpus'))
Continue the code snippet: <|code_start|> # doc_name = os.path.basename(filename) parent_name, child_name = os.path.split(filename) doc_name = os.path.split(parent_name)[-1] + '_' + child_name for i in range(len(words)): # doc-word frequency try: doc_word_freq[doc_name][words[i]] += 1 except: doc_word_freq[doc_name][words[i]] = 1 # word frequency word_freq[words[i]] += 1 except Exception as e: raise e return word_freq, doc_word_freq def construct_corpus(corpus_path, training_phase, vocab_dict=None, threshold=5, topn=None, recursive=False): if not (training_phase or isinstance(vocab_dict, dict)): raise ValueError('vocab_dict must be provided if training_phase is set False') word_freq, doc_word_freq = load_data(corpus_path, recursive) if training_phase: vocab_dict = build_vocab(word_freq, threshold=threshold, topn=topn) docs = generate_bow(doc_word_freq, vocab_dict) new_word_freq = dict([(vocab_dict[word], freq) for word, freq in word_freq.iteritems() if word in vocab_dict]) return docs, vocab_dict, new_word_freq def load_corpus(corpus_path): <|code_end|> . Use current file imports: import os import re import string import codecs import numpy as np from collections import defaultdict from nltk.tokenize import wordpunct_tokenize from nltk.stem.porter import PorterStemmer as EnglishStemmer from ..utils.io_utils import dump_json, load_json, write_file from nltk.corpus import stopwords and context (classes, functions, or code) from other files: # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e . Output only the next line.
corpus = load_json(corpus_path)
Given the code snippet: <|code_start|> if freq < threshold: return vocab_dict vocab_dict[word] = idx idx += 1 return vocab_dict def construct_train_test_corpus(train_path, test_path, output, threshold=5, topn=None): train_docs, vocab_dict, train_word_freq = construct_corpus(train_path, True, threshold=threshold, topn=topn, recursive=True) train_corpus = {'docs': train_docs, 'vocab': vocab_dict, 'word_freq': train_word_freq} dump_json(train_corpus, os.path.join(output, 'train.corpus')) print 'Generated training corpus' test_docs, _, _ = construct_corpus(test_path, False, vocab_dict=vocab_dict, recursive=True) test_corpus = {'docs': test_docs, 'vocab': vocab_dict} dump_json(test_corpus, os.path.join(output, 'test.corpus')) print 'Generated test corpus' return train_corpus, test_corpus def corpus2libsvm(docs, doc_labels, output): '''Convert the corpus format to libsvm format. ''' data = [] names = [] for key, val in docs.iteritems(): # label = doc_labels[key] label = 0 line = label if isinstance(label, list) else [str(label)] + ["%s:%s" % (int(x) + 1, y) for x, y in val.iteritems()] data.append(line) names.append(key) <|code_end|> , generate the next line using the imports in this file: import os import re import string import codecs import numpy as np from collections import defaultdict from nltk.tokenize import wordpunct_tokenize from nltk.stem.porter import PorterStemmer as EnglishStemmer from ..utils.io_utils import dump_json, load_json, write_file from nltk.corpus import stopwords and context (functions, classes, or occasionally code) from other files: # Path: autoencoder/utils/io_utils.py # def dump_json(data, file): # try: # with open(file, 'w') as datafile: # json.dump(data, datafile) # except Exception as e: # raise e # # def load_json(file): # try: # with open(file, 'r') as datafile: # data = json.load(datafile) # except Exception as e: # raise e # # return data # # def write_file(data, file): # try: # with open(file, 'w') as datafile: # for line in data: # 
datafile.write(' '.join(line) + '\n') # except Exception as e: # raise e . Output only the next line.
write_file(data, output)
Using the snippet: <|code_start|> features: Number of neurons in each layer, and number of layers (given by length of sequence) + one layer for softmax. train: If model is being evaluated in training mode or not. init_fn: Initialization function used for dense layers. activation_fn: Activation function to be used for dense layers. masks: Masks of the layers in this model, in the same form as module params, or None. masked_layer_indices: The layer indices of layers in model to be masked. dropout_rate: Dropout rate, if 0 then dropout is not used (default). Returns: A tensor of shape (batch, num_classes), containing the logit output. """ batch_norm = flax.deprecated.nn.BatchNorm.partial( use_running_average=not train, momentum=0.99, epsilon=1e-5) depth = 1 + len(features) masks = masked.generate_model_masks(depth, masks, masked_layer_indices) # If inputs are in image dimensions, flatten image. inputs = inputs.reshape(inputs.shape[0], -1) for i, feature_num in enumerate(features): if f'MaskedModule_{i}' in masks: logging.info('Layer %d is masked in model', i) mask = masks[f'MaskedModule_{i}'] inputs = masked.masked(flax.deprecated.nn.Dense, mask)( inputs, features=feature_num, <|code_end|> , determine the next line of code. You have imports: import math import flax import jax.numpy as jnp from typing import Callable, Mapping, Optional, Sequence, Tuple from absl import logging from rigl.experimental.jax.pruning import init from rigl.experimental.jax.pruning import masked and context (class names, function names, or code) available: # Path: rigl/experimental/jax/pruning/init.py # def init(rng, shape, dtype=dtype): # if mask is None: # return base_init(rng, shape, dtype) # # # Find the ablated neurons in the mask, to determine correct fan_out. # neuron_weight_count = jnp.sum( # jnp.reshape(mask, (-1, mask.shape[-1])), axis=0) # non_zero_neurons = jnp.sum(neuron_weight_count != 0) # # # Special case of completely ablated weight matrix/layer. 
# if jnp.sum(non_zero_neurons) == 0: # print('Empty weight mask!') # return jnp.zeros(shape, dtype) # # # Neurons have different fan_in w/mask, build up initialization per-unit. # init_cols = [] # rng, *split_rngs = jax.random.split(rng, mask.shape[-1] + 1) # for i in range(mask.shape[-1]): # # Special case of ablated neuron. # if neuron_weight_count[i] == 0: # init_cols.append(jnp.zeros(shape[:-1] + (1,), dtype)) # continue # # # Fake shape of weight matrix with correct fan_in, and fan_out. # sparse_shape = (int(neuron_weight_count[i]), int(non_zero_neurons)) # # # Use only the first column of init from initializer, since faked fan_out. # init = base_init(split_rngs[i], sparse_shape, dtype)[Ellipsis, 0] # # # Expand out to full sparse array. # expanded_init = jnp.zeros( # mask[Ellipsis, i].shape, # dtype).flatten().at[jnp.where(mask[Ellipsis, i].flatten() == 1)].set(init) # expanded_init = jnp.reshape(expanded_init, mask[Ellipsis, i].shape) # init_cols.append(expanded_init[Ellipsis, jnp.newaxis]) # # return jnp.concatenate(init_cols, axis=-1) # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) . Output only the next line.
kernel_init=init.sparse_init(
Based on the snippet: <|code_start|> features = (32, 32), train=True, init_fn = flax.deprecated.nn.initializers.kaiming_normal, activation_fn = flax.deprecated.nn.relu, masks = None, masked_layer_indices = None, dropout_rate = 0.): """Applies fully-connected neural network to the inputs. Args: inputs: Input data with dimensions (batch, features), if features has more than one dimension, it is flattened. num_classes: Number of classes in the dataset. features: Number of neurons in each layer, and number of layers (given by length of sequence) + one layer for softmax. train: If model is being evaluated in training mode or not. init_fn: Initialization function used for dense layers. activation_fn: Activation function to be used for dense layers. masks: Masks of the layers in this model, in the same form as module params, or None. masked_layer_indices: The layer indices of layers in model to be masked. dropout_rate: Dropout rate, if 0 then dropout is not used (default). Returns: A tensor of shape (batch, num_classes), containing the logit output. """ batch_norm = flax.deprecated.nn.BatchNorm.partial( use_running_average=not train, momentum=0.99, epsilon=1e-5) depth = 1 + len(features) <|code_end|> , predict the immediate next line with the help of imports: import math import flax import jax.numpy as jnp from typing import Callable, Mapping, Optional, Sequence, Tuple from absl import logging from rigl.experimental.jax.pruning import init from rigl.experimental.jax.pruning import masked and context (classes, functions, sometimes code) from other files: # Path: rigl/experimental/jax/pruning/init.py # def init(rng, shape, dtype=dtype): # if mask is None: # return base_init(rng, shape, dtype) # # # Find the ablated neurons in the mask, to determine correct fan_out. # neuron_weight_count = jnp.sum( # jnp.reshape(mask, (-1, mask.shape[-1])), axis=0) # non_zero_neurons = jnp.sum(neuron_weight_count != 0) # # # Special case of completely ablated weight matrix/layer. 
# if jnp.sum(non_zero_neurons) == 0: # print('Empty weight mask!') # return jnp.zeros(shape, dtype) # # # Neurons have different fan_in w/mask, build up initialization per-unit. # init_cols = [] # rng, *split_rngs = jax.random.split(rng, mask.shape[-1] + 1) # for i in range(mask.shape[-1]): # # Special case of ablated neuron. # if neuron_weight_count[i] == 0: # init_cols.append(jnp.zeros(shape[:-1] + (1,), dtype)) # continue # # # Fake shape of weight matrix with correct fan_in, and fan_out. # sparse_shape = (int(neuron_weight_count[i]), int(non_zero_neurons)) # # # Use only the first column of init from initializer, since faked fan_out. # init = base_init(split_rngs[i], sparse_shape, dtype)[Ellipsis, 0] # # # Expand out to full sparse array. # expanded_init = jnp.zeros( # mask[Ellipsis, i].shape, # dtype).flatten().at[jnp.where(mask[Ellipsis, i].flatten() == 1)].set(init) # expanded_init = jnp.reshape(expanded_init, mask[Ellipsis, i].shape) # init_cols.append(expanded_init[Ellipsis, jnp.newaxis]) # # return jnp.concatenate(init_cols, axis=-1) # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) . Output only the next line.
masks = masked.generate_model_masks(depth, masks,
Predict the next line for this snippet: <|code_start|># coding=utf-8 # Copyright 2022 RigL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for weight_symmetry.datasets.cifar10.""" class CIFAR10DatasetTest(absltest.TestCase): """Test cases for CIFAR10 Dataset.""" def setUp(self): """Common setup routines/variables for test cases.""" super().setUp() self._batch_size = 16 self._batch_size_test = 10 self._shuffle_buffer_size = 8 <|code_end|> with the help of current file imports: from absl.testing import absltest from rigl.experimental.jax.datasets import cifar10 import numpy as np and context from other files: # Path: rigl/experimental/jax/datasets/cifar10.py # class CIFAR10Dataset(dataset_base.ImageDataset): # NAME: str = 'cifar10' # MEAN_RGB: Sequence[float] = [0.4914 * 255, 0.4822 * 255, 0.4465 * 255] # STDDEV_RGB: Sequence[float] = [0.2470 * 255, 0.2435 * 255, 0.2616 * 255] # def __init__(self, # batch_size, # batch_size_test, # shuffle_buffer_size = 1024, # seed = 42): # def preprocess( # self, data): , which may contain function names, class names, or code. Output only the next line.
self._dataset = cifar10.CIFAR10Dataset(
Predict the next line after this snippet: <|code_start|># # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Weight Symmetry: Train model with randomly shuffled sparse mask.""" # TODO: Refactor drivers to separate logic from flags/IO. experiment_dir = '{}/{}/'.format(FLAGS.experiment_dir, work_unit_id) logging.info('Saving experimental results to %s', experiment_dir) host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) <|code_end|> using the current file's imports: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and any relevant context from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] 
= { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: 
flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
dataset = dataset_factory.create_dataset(
Next line prediction: <|code_start|># limitations under the License. # Lint as: python3 """Weight Symmetry: Train model with randomly shuffled sparse mask.""" # TODO: Refactor drivers to separate logic from flags/IO. experiment_dir = '{}/{}/'.format(FLAGS.experiment_dir, work_unit_id) logging.info('Saving experimental results to %s', experiment_dir) host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', FLAGS.model, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape <|code_end|> . Use current file imports: (import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils) and context including class names, function names, or small code snippets from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # 
MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: 
rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
base_model, _ = model_factory.create_model(
Given the following code snippet before the placeholder: <|code_start|> host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', FLAGS.model, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape base_model, _ = model_factory.create_model( FLAGS.model, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.mask_randomseed) <|code_end|> , predict the next line using imports from the current file: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context including class names, function names, and sometimes code from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # 
Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: 
Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
mask = mask_factory.create_mask(FLAGS.mask_type, base_model, mask_rng,
Here is a snippet: <|code_start|> rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.mask_randomseed) mask = mask_factory.create_mask(FLAGS.mask_type, base_model, mask_rng, FLAGS.mask_sparsity) if jax.host_id() == 0: mask_stats = symmetry.get_mask_stats(mask) logging.info('Mask stats: %s', str(mask_stats)) for label, value in mask_stats.items(): try: summary_writer.scalar(f'mask/{label}', value, 0) # This is needed because permutations (long int) can't be cast to float32. except (OverflowError, ValueError): summary_writer.text(f'mask/{label}', str(value), 0) logging.error('Could not write mask/%s to tensorflow summary as float32' ', writing as string instead.', label) if FLAGS.dump_json: mask_stats['permutations'] = str(mask_stats['permutations']) utils.dump_dict_json( mask_stats, path.join(experiment_dir, 'mask_stats.json')) <|code_end|> . 
Write the next line using the current file imports: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = 
None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') , which may include functions, classes, or code. Output only the next line.
mask = masked.propagate_masks(mask)
Using the snippet: <|code_start|> jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', FLAGS.model, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape base_model, _ = model_factory.create_model( FLAGS.model, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.mask_randomseed) mask = mask_factory.create_mask(FLAGS.mask_type, base_model, mask_rng, FLAGS.mask_sparsity) if jax.host_id() == 0: <|code_end|> , determine the next line of code. You have imports: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context (class names, function names, or code) available: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, 
Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: 
rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
mask_stats = symmetry.get_mask_stats(mask)
Continue the code snippet: <|code_start|> rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes, masks=mask) if FLAGS.optimizer == 'Adam': optimizer = flax.optim.Adam( learning_rate=FLAGS.lr, weight_decay=FLAGS.weight_decay) elif FLAGS.optimizer == 'Momentum': optimizer = flax.optim.Momentum( learning_rate=FLAGS.lr, beta=FLAGS.momentum, weight_decay=FLAGS.weight_decay, nesterov=False) steps_per_epoch = dataset.get_train_len() // FLAGS.batch_size if FLAGS.lr_schedule == 'constant': lr_fn = lr_schedule.create_constant_learning_rate_schedule( FLAGS.lr, steps_per_epoch) elif FLAGS.lr_schedule == 'stepped': lr_schedule_steps = ast.literal_eval(FLAGS.lr_schedule_steps) lr_fn = lr_schedule.create_stepped_learning_rate_schedule( FLAGS.lr, steps_per_epoch, lr_schedule_steps) elif FLAGS.lr_schedule == 'cosine': lr_fn = lr_schedule.create_cosine_learning_rate_schedule( FLAGS.lr, steps_per_epoch, FLAGS.epochs) else: raise ValueError('Unknown LR schedule type {}'.format(FLAGS.lr_schedule)) if jax.host_id() == 0: <|code_end|> . 
Use current file imports: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context (classes, functions, or code) from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = 
None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
trainer = training.Trainer(
Next line prediction: <|code_start|> input_shape = (1,) + dataset.shape base_model, _ = model_factory.create_model( FLAGS.model, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.mask_randomseed) mask = mask_factory.create_mask(FLAGS.mask_type, base_model, mask_rng, FLAGS.mask_sparsity) if jax.host_id() == 0: mask_stats = symmetry.get_mask_stats(mask) logging.info('Mask stats: %s', str(mask_stats)) for label, value in mask_stats.items(): try: summary_writer.scalar(f'mask/{label}', value, 0) # This is needed because permutations (long int) can't be cast to float32. except (OverflowError, ValueError): summary_writer.text(f'mask/{label}', str(value), 0) logging.error('Could not write mask/%s to tensorflow summary as float32' ', writing as string instead.', label) if FLAGS.dump_json: mask_stats['permutations'] = str(mask_stats['permutations']) <|code_end|> . 
Use current file imports: (import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils) and context including class names, function names, or small code snippets from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( 
# mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
utils.dump_dict_json(
Predict the next line for this snippet: <|code_start|># # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for weight_symmetry.fixed_param.""" class FixedParamTest(absltest.TestCase): def test_run(self): """Tests if the driver for shuffled training runs correctly.""" experiment_dir = tempfile.mkdtemp() eval_flags = dict( epochs=1, experiment_dir=experiment_dir, ) with flagsaver.flagsaver(**eval_flags): <|code_end|> with the help of current file imports: import glob import tempfile from os import path from absl.testing import absltest from absl.testing import flagsaver from rigl.experimental.jax import fixed_param and context from other files: # Path: rigl/experimental/jax/fixed_param.py # def main(argv: List[str]): , which may contain function names, class names, or code. Output only the next line.
fixed_param.main([])
Next line prediction: <|code_start|># coding=utf-8 # Copyright 2022 RigL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Weight Symmetry: Train model with randomly sampled sparse mask.""" experiment_dir = path.join(FLAGS.experiment_dir, str(work_unit_id)) logging.info('Saving experimental results to %s', experiment_dir) host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) <|code_end|> . 
Use current file imports: (import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils) and context including class names, function names, or small code snippets from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( 
# mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
dataset = dataset_factory.create_dataset(
Here is a snippet: <|code_start|># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Weight Symmetry: Train model with randomly sampled sparse mask.""" experiment_dir = path.join(FLAGS.experiment_dir, str(work_unit_id)) logging.info('Saving experimental results to %s', experiment_dir) host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', FLAGS.model, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape <|code_end|> . 
Write the next line using the current file imports: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = 
None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') , which may include functions, classes, or code. Output only the next line.
base_model, _ = model_factory.create_model(
Given the following code snippet before the placeholder: <|code_start|> host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', FLAGS.model, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape base_model, _ = model_factory.create_model( FLAGS.model, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes, masked_layer_indices=FLAGS.masked_layer_indices) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.mask_randomseed) <|code_end|> , predict the next line using imports from the current file: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context including class names, function names, and sometimes code from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } 
# def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = 
None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
mask = mask_factory.create_mask(FLAGS.mask_type, base_model, mask_rng,
Here is a snippet: <|code_start|> num_classes=dataset.num_classes, masked_layer_indices=FLAGS.masked_layer_indices) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.mask_randomseed) mask = mask_factory.create_mask(FLAGS.mask_type, base_model, mask_rng, FLAGS.mask_sparsity) if jax.host_id() == 0: mask_stats = symmetry.get_mask_stats(mask) logging.info('Mask stats: %s', str(mask_stats)) for label, value in mask_stats.items(): try: summary_writer.scalar(f'mask/{label}', value, 0) # This is needed because permutations (long int) can't be cast to float32. except (OverflowError, ValueError): summary_writer.text(f'mask/{label}', str(value), 0) logging.error('Could not write mask/%s to tensorflow summary as float32' ', writing as string instead.', label) if FLAGS.dump_json: mask_stats['permutations'] = str(mask_stats['permutations']) utils.dump_dict_json( mask_stats, path.join(experiment_dir, 'mask_stats.json')) <|code_end|> . 
Write the next line using the current file imports: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = 
None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') , which may include functions, classes, or code. Output only the next line.
mask = masked.propagate_masks(mask)
Given snippet: <|code_start|> if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', FLAGS.model, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape base_model, _ = model_factory.create_model( FLAGS.model, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes, masked_layer_indices=FLAGS.masked_layer_indices) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.mask_randomseed) mask = mask_factory.create_mask(FLAGS.mask_type, base_model, mask_rng, FLAGS.mask_sparsity) if jax.host_id() == 0: <|code_end|> , continue by predicting the next line. Consider current file imports: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': 
mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def 
cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') which might include code, classes, or functions. Output only the next line.
mask_stats = symmetry.get_mask_stats(mask)
Given the following code snippet before the placeholder: <|code_start|> rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes, masks=mask) if FLAGS.optimizer == 'Adam': optimizer = flax.optim.Adam( learning_rate=FLAGS.lr, weight_decay=FLAGS.weight_decay) elif FLAGS.optimizer == 'Momentum': optimizer = flax.optim.Momentum( learning_rate=FLAGS.lr, beta=FLAGS.momentum, weight_decay=FLAGS.weight_decay, nesterov=False) steps_per_epoch = dataset.get_train_len() // FLAGS.batch_size if FLAGS.lr_schedule == 'constant': lr_fn = lr_schedule.create_constant_learning_rate_schedule( FLAGS.lr, steps_per_epoch) elif FLAGS.lr_schedule == 'stepped': lr_schedule_steps = ast.literal_eval(FLAGS.lr_schedule_steps) lr_fn = lr_schedule.create_stepped_learning_rate_schedule( FLAGS.lr, steps_per_epoch, lr_schedule_steps) elif FLAGS.lr_schedule == 'cosine': lr_fn = lr_schedule.create_cosine_learning_rate_schedule( FLAGS.lr, steps_per_epoch, FLAGS.epochs) else: raise ValueError(f'Unknown LR schedule type {FLAGS.lr_schedule}') if jax.host_id() == 0: <|code_end|> , predict the next line using imports from the current file: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context including class names, function names, and sometimes code from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 
'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: 
jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
trainer = training.Trainer(
Here is a snippet: <|code_start|> base_model, _ = model_factory.create_model( FLAGS.model, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes, masked_layer_indices=FLAGS.masked_layer_indices) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.mask_randomseed) mask = mask_factory.create_mask(FLAGS.mask_type, base_model, mask_rng, FLAGS.mask_sparsity) if jax.host_id() == 0: mask_stats = symmetry.get_mask_stats(mask) logging.info('Mask stats: %s', str(mask_stats)) for label, value in mask_stats.items(): try: summary_writer.scalar(f'mask/{label}', value, 0) # This is needed because permutations (long int) can't be cast to float32. except (OverflowError, ValueError): summary_writer.text(f'mask/{label}', str(value), 0) logging.error('Could not write mask/%s to tensorflow summary as float32' ', writing as string instead.', label) if FLAGS.dump_json: mask_stats['permutations'] = str(mask_stats['permutations']) <|code_end|> . 
Write the next line using the current file imports: import ast import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import mask_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/mask_factory.py # MASK_TYPES: Mapping[str, MaskFnType] = { # 'random': # masked.shuffled_mask, # 'per_neuron': # masked.shuffled_neuron_mask, # 'per_neuron_no_input_ablation': # masked.shuffled_neuron_no_input_ablation_mask, # 'symmetric': # masked.symmetric_mask, # } # def create_mask(mask_type, base_model, # rng, sparsity, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = 
None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') , which may include functions, classes, or code. Output only the next line.
utils.dump_dict_json(
Given snippet: <|code_start|># coding=utf-8 # Copyright 2022 RigL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements RigL.""" def get_all_layers(model, filter_fn=lambda _: True): """Gets all layers of a model and layers of a layer if it is a keras.Model.""" all_layers = [] for l in model.layers: if hasattr(l, 'layers'): all_layers.extend(get_all_layers(l, filter_fn=filter_fn)) elif filter_fn(l): all_layers.append(l) return all_layers def is_pruned(layer): <|code_end|> , continue by predicting the next line. 
Consider current file imports: import gin import tensorflow as tf from rigl.rigl_tf2 import utils and context: # Path: rigl/rigl_tf2/utils.py # FLAGS = flags.FLAGS # PRUNING_WRAPPER = pruning_wrapper.PruneLowMagnitude # PRUNED_LAYER_TYPES = (tf.keras.layers.Conv2D, tf.keras.layers.Dense) # def get_dataset(): # def get_pruning_params(mode='prune', # initial_sparsity=0.0, # final_sparsity=0.8, # begin_step=2000, # end_step=4000, # frequency=200): # def maybe_prune_layer(layer, params, filter_fn): # def get_network( # pruning_params, # input_shape, # num_classes, # activation = 'relu', # network_name = 'lenet5', # mask_init_path = None, # shuffle_mask = False, # weight_init_path = None, # weight_init_method = None, # weight_decay = 0., # noise_stddev = 0., # pruned_layer_types = PRUNED_LAYER_TYPES): # def get_optimizer(total_steps, # name = 'adam', # learning_rate = 0.001, # clipnorm = None, # clipvalue = None, # momentum = None): which might include code, classes, or functions. Output only the next line.
return isinstance(layer, utils.PRUNING_WRAPPER) and layer.trainable
Using the snippet: <|code_start|># coding=utf-8 # Copyright 2022 RigL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Weight Symmetry: Train models with fixed param, but diff. depth and width.""" experiment_dir = path.join(FLAGS.experiment_dir, str(work_unit_id)) logging.info('Saving experimental results to %s', experiment_dir) host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) <|code_end|> , determine the next line of code. 
You have imports: import ast import functools import operator import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context (class names, function names, or code) available: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/mnist_fc.py # def feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): # def apply(self, # inputs, # num_classes, # features = (32, 32), # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: 
rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
dataset = dataset_factory.create_dataset(
Based on the snippet: <|code_start|> # Lint as: python3 """Weight Symmetry: Train models with fixed param, but diff. depth and width.""" experiment_dir = path.join(FLAGS.experiment_dir, str(work_unit_id)) logging.info('Saving experimental results to %s', experiment_dir) host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', MODEL, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape input_len = functools.reduce(operator.mul, dataset.shape) <|code_end|> , predict the immediate next line with the help of imports: import ast import functools import operator import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context (classes, functions, sometimes code) from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/mnist_fc.py # def 
feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): # def apply(self, # inputs, # num_classes, # features = (32, 32), # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: 
Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
features = mnist_fc.feature_dim_for_param(
Using the snippet: <|code_start|> host_count = jax.host_count() local_device_count = jax.local_device_count() logging.info('Device count: %d, host count: %d, local device count: %d', jax.device_count(), host_count, local_device_count) if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', MODEL, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape input_len = functools.reduce(operator.mul, dataset.shape) features = mnist_fc.feature_dim_for_param( input_len, FLAGS.param_count, FLAGS.depth) logging.info('Model Configuration: %s', str(features)) <|code_end|> , determine the next line of code. You have imports: import ast import functools import operator import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context (class names, function names, or code) available: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/mnist_fc.py # def feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): # def apply(self, # inputs, # 
num_classes, # features = (32, 32), # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def 
cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
base_model, _ = model_factory.create_model(
Using the snippet: <|code_start|> rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape input_len = functools.reduce(operator.mul, dataset.shape) features = mnist_fc.feature_dim_for_param( input_len, FLAGS.param_count, FLAGS.depth) logging.info('Model Configuration: %s', str(features)) base_model, _ = model_factory.create_model( MODEL, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes, features=features) model_param_count = utils.count_param(base_model, ('kernel',)) logging.info( 'Model Config: param.: %d, depth: %d. max width: %d, min width: %d', model_param_count, len(features), max(features), min(features)) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.random_seed) <|code_end|> , determine the next line of code. You have imports: import ast import functools import operator import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context (class names, function names, or code) available: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/mnist_fc.py # def feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): 
# def apply(self, # inputs, # num_classes, # features = (32, 32), # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: 
rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
mask = masked.shuffled_mask(
Next line prediction: <|code_start|> features = mnist_fc.feature_dim_for_param( input_len, FLAGS.param_count, FLAGS.depth) logging.info('Model Configuration: %s', str(features)) base_model, _ = model_factory.create_model( MODEL, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes, features=features) model_param_count = utils.count_param(base_model, ('kernel',)) logging.info( 'Model Config: param.: %d, depth: %d. max width: %d, min width: %d', model_param_count, len(features), max(features), min(features)) logging.info('Generating random mask based on model') # Re-initialize the RNG to maintain same training pattern (as in prune code). mask_rng = jax.random.PRNGKey(FLAGS.random_seed) mask = masked.shuffled_mask( base_model, rng=mask_rng, sparsity=FLAGS.mask_sparsity) if jax.host_id() == 0: <|code_end|> . Use current file imports: (import ast import functools import operator import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils) and context including class names, function names, or small code snippets from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/mnist_fc.py # def feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): # def apply(self, # 
inputs, # num_classes, # features = (32, 32), # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def 
cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
mask_stats = symmetry.get_mask_stats(mask)
Next line prediction: <|code_start|> features=features, masks=mask) if FLAGS.opt == 'Adam': optimizer = flax.optim.Adam( learning_rate=FLAGS.lr, weight_decay=FLAGS.weight_decay) elif FLAGS.opt == 'Momentum': optimizer = flax.optim.Momentum( learning_rate=FLAGS.lr, beta=FLAGS.momentum, weight_decay=FLAGS.weight_decay, nesterov=False) else: raise ValueError('Unknown Optimizer: {}'.format(FLAGS.opt)) steps_per_epoch = dataset.get_train_len() // FLAGS.batch_size if FLAGS.lr_schedule == 'constant': lr_fn = lr_schedule.create_constant_learning_rate_schedule( FLAGS.lr, steps_per_epoch) elif FLAGS.lr_schedule == 'stepped': lr_schedule_steps = ast.literal_eval(FLAGS.lr_schedule_steps) lr_fn = lr_schedule.create_stepped_learning_rate_schedule( FLAGS.lr, steps_per_epoch, lr_schedule_steps) elif FLAGS.lr_schedule == 'cosine': lr_fn = lr_schedule.create_cosine_learning_rate_schedule( FLAGS.lr, steps_per_epoch, FLAGS.epochs) else: raise ValueError('Unknown LR schedule type {}'.format(FLAGS.lr_schedule)) if jax.host_id() == 0: <|code_end|> . 
Use current file imports: (import ast import functools import operator import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils) and context including class names, function names, or small code snippets from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/mnist_fc.py # def feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): # def apply(self, # inputs, # num_classes, # features = (32, 32), # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: 
rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
trainer = training.Trainer(
Given the code snippet: <|code_start|> if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter(experiment_dir) dataset = dataset_factory.create_dataset( FLAGS.dataset, FLAGS.batch_size, FLAGS.batch_size_test, shuffle_buffer_size=FLAGS.shuffle_buffer_size) logging.info('Training %s on the %s dataset...', MODEL, FLAGS.dataset) rng = jax.random.PRNGKey(FLAGS.random_seed) input_shape = (1,) + dataset.shape input_len = functools.reduce(operator.mul, dataset.shape) features = mnist_fc.feature_dim_for_param( input_len, FLAGS.param_count, FLAGS.depth) logging.info('Model Configuration: %s', str(features)) base_model, _ = model_factory.create_model( MODEL, rng, ((input_shape, jnp.float32),), num_classes=dataset.num_classes, features=features) <|code_end|> , generate the next line using the imports in this file: import ast import functools import operator import uuid import flax import jax import jax.numpy as jnp from os import path from typing import List, Sequence from absl import app from absl import flags from absl import logging from flax.metrics import tensorboard from flax.training import lr_schedule from rigl.experimental.jax.datasets import dataset_factory from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.models import model_factory from rigl.experimental.jax.pruning import masked from rigl.experimental.jax.pruning import symmetry from rigl.experimental.jax.training import training from rigl.experimental.jax.utils import utils and context (functions, classes, or occasionally code) from other files: # Path: rigl/experimental/jax/datasets/dataset_factory.py # DATASETS: Mapping[str, Type[dataset_base.Dataset]] = { # 'MNIST': mnist.MNISTDataset, # 'CIFAR10': cifar10.CIFAR10Dataset, # } # def create_dataset(name, *args, **kwargs): # # Path: rigl/experimental/jax/models/mnist_fc.py # def feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): # def apply(self, # inputs, # num_classes, # features = (32, 32), # 
train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/models/model_factory.py # MODELS: Mapping[str, Type[flax.deprecated.nn.Model]] = { # 'MNIST_CNN': mnist_cnn.MNISTCNN, # 'MNIST_FC': mnist_fc.MNISTFC, # 'CIFAR10_CNN': cifar10_cnn.CIFAR10CNN, # } # def create_model( # name, rng, # input_specs, **kwargs # ): # def update_model(model, # **kwargs): # # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) # # Path: rigl/experimental/jax/pruning/symmetry.py # def count_permutations_mask_layer( # mask_layer, # next_mask_layer = None, # parameter_key = 'kernel'): # def count_permutations_mask(mask): # def get_mask_stats(mask): # # Path: rigl/experimental/jax/training/training.py # LABELKEY = dataset_base.ImageDataset.LABELKEY # DATAKEY = dataset_base.ImageDataset.DATAKEY # def _shard_batch(xs): # def _prepare(x): # def train_step( # optimizer: flax.optim.Optimizer, batch: Mapping[str, jnp.array], # rng: Callable[[int], jnp.array], state: flax.deprecated.nn.Collection, # learning_rate_fn: Callable[[int], float] # ) -> Tuple[flax.optim.Optimizer, flax.deprecated.nn.Collection, float, float]: # def loss_fn( # model: flax.deprecated.nn.Model # ) -> Tuple[float, Tuple[flax.deprecated.nn.Collection, jnp.array]]: # def __init__( # self, # optimizer_def: flax.optim.OptimizerDef, # initial_model: flax.deprecated.nn.Model, # initial_state: flax.deprecated.nn.Collection, # dataset: jnp.array, # rng: Callable[[int], jnp.array] = None, # summary_writer: Optional[tf.summary.SummaryWriter] = None, # ): # class Trainer: # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def 
compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
model_param_count = utils.count_param(base_model, ('kernel',))
Based on the snippet: <|code_start|># Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for weight_symmetry.models.mnist_fc.""" PARAM_COUNT_PARAM: Sequence[str] = ('kernel',) class MNISTFCTest(parameterized.TestCase): """Tests the MNISTFC model.""" def setUp(self): super().setUp() self._rng = jax.random.PRNGKey(42) self._num_classes = 10 self._batch_size = 2 self._input_len = 28*28*1 self._input_shape = ((self._batch_size, self._input_len), jnp.float32) self._input = jnp.zeros((self._batch_size, self._input_len), jnp.float32) self._param_count = 1e7 def test_output_shapes(self): """Tests the output shape from the model.""" with flax.deprecated.nn.stateful() as initial_state: <|code_end|> , predict the immediate next line with the help of imports: from typing import Sequence from absl.testing import absltest from absl.testing import parameterized from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.utils import utils import flax import jax import jax.numpy as jnp and context (classes, functions, sometimes code) from other files: # Path: rigl/experimental/jax/models/mnist_fc.py # def feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): # def apply(self, # inputs, # num_classes, # features = (32, 32), # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # 
def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
_, initial_params = mnist_fc.MNISTFC.init_by_shape(
Continue the code snippet: <|code_start|> invalid_masks = { 'MaskedModule_0': { 'kernel': jnp.zeros((self._batch_size, 5 * 5 * 16)) } } with self.assertRaisesRegex( ValueError, 'Mask is invalid for model.'): mnist_fc.MNISTFC.init_by_shape( self._rng, (self._input_shape,), num_classes=self._num_classes, masks=invalid_masks) def _create_model(self, features): """Convenience fn to create a FLAX model .""" _, initial_params = mnist_fc.MNISTFC.init_by_shape( self._rng, (self._input_shape,), num_classes=self._num_classes, features=features) return flax.deprecated.nn.Model(mnist_fc.MNISTFC, initial_params) @parameterized.parameters(*range(1, 6)) def test_feature_dim_for_param_depth(self, depth): """Tests feature_dim_for_param with multiple depths.""" features = mnist_fc.feature_dim_for_param(self._input_len, self._param_count, depth) model = self._create_model(features) <|code_end|> . Use current file imports: from typing import Sequence from absl.testing import absltest from absl.testing import parameterized from rigl.experimental.jax.models import mnist_fc from rigl.experimental.jax.utils import utils import flax import jax import jax.numpy as jnp and context (classes, functions, or code) from other files: # Path: rigl/experimental/jax/models/mnist_fc.py # def feature_dim_for_param(input_len, # param_count, # depth, # depth_mult = 2.): # def apply(self, # inputs, # num_classes, # features = (32, 32), # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None, # dropout_rate = 0.): # class MNISTFC(flax.deprecated.nn.Module): # # Path: rigl/experimental/jax/utils/utils.py # def cross_entropy_loss(log_softmax_logits, # labels): # def compute_metrics(logits, # labels): # def _np_converter(obj): # def dump_dict_json(data_dict, path): # def count_param(model, # param_names): # def cosine_similarity(a, b): # def param_as_array(params): # def 
cosine_similarity_model(initial_model, # current_model): # def vector_difference_norm_model(initial_model, # current_model): # def pairwise_longest(iterable): # T = TypeVar('T') . Output only the next line.
total_size = utils.count_param(model, PARAM_COUNT_PARAM)
Given the code snippet: <|code_start|># coding=utf-8 # Copyright 2022 RigL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Pruning mask factory. Attributes: MaskFnType: A type alias for functions to create sparse masks. MASK_TYPES: Masks types that can be created. """ # A function to create a mask, takes as arguments: a flax model, JAX PRNG Key, # sparsity level as a float in [0, 1]. MaskFnType = Callable[ [flax.deprecated.nn.Model, Callable[[int], <|code_end|> , generate the next line using the imports in this file: from typing import Any, Callable, Mapping from rigl.experimental.jax.pruning import masked import flax import jax.numpy as jnp and context (functions, classes, or occasionally code) from other files: # Path: rigl/experimental/jax/pruning/masked.py # def masked(module, mask): # """Convenience function for masking a FLAX module with MaskedModule.""" # return MaskedModule.partial(wrapped_module=module, mask=mask) . Output only the next line.
jnp.array], float], masked.MaskType]
Continue the code snippet: <|code_start|># you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for weight_symmetry.models.mnist_cnn.""" class MNISTCNNTest(absltest.TestCase): """Tests the MNISTCNN model.""" def setUp(self): super().setUp() self._rng = jax.random.PRNGKey(42) self._num_classes = 10 self._batch_size = 2 self._input_shape = ((self._batch_size, 28, 28, 1), jnp.float32) self._input = jnp.zeros(*self._input_shape) def test_output_shapes(self): """Tests the output shapes of the model.""" with flax.deprecated.nn.stateful() as initial_state: <|code_end|> . Use current file imports: from absl.testing import absltest from rigl.experimental.jax.models import mnist_cnn import flax import jax import jax.numpy as jnp and context (classes, functions, or code) from other files: # Path: rigl/experimental/jax/models/mnist_cnn.py # class MNISTCNN(flax.deprecated.nn.Module): # def apply(self, # inputs, # num_classes, # filter_shape = (5, 5), # filters = (16, 32), # dense_size = 64, # train=True, # init_fn = flax.deprecated.nn.initializers.kaiming_normal, # activation_fn = flax.deprecated.nn.relu, # masks = None, # masked_layer_indices = None): . Output only the next line.
_, initial_params = mnist_cnn.MNISTCNN.init_by_shape(