text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def count_intersections(self, line_segments_b):
    """
    Count the intersections of two strokes with each other.

    Parameters
    ----------
    line_segments_b : list
        A list of line segments.

    Returns
    -------
    int
        The number of distinct intersection points between A and B.
    """
    # Collect intersection points in a set so duplicates count once.
    points = set()
    for segment_a in self.lineSegments:
        for segment_b in line_segments_b:
            points.update(get_segments_intersections(segment_a, segment_b))
    return len(points)
def get_area(self):
    """Return the area of this bounding box (width times height)."""
    width = self.p2.x - self.p1.x
    height = self.p2.y - self.p1.y
    return width * height
def get_center(self):
    """Return the center point of this bounding box as a ``Point``."""
    center_x = (self.p1.x + self.p2.x) / 2.0
    center_y = (self.p1.y + self.p2.y) / 2.0
    return Point(center_x, center_y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _list_ids(path_to_data): """List raw data IDs grouped by symbol ID from a pickle file ``path_to_data``."""
loaded = pickle.load(open(path_to_data, "rb")) raw_datasets = loaded['handwriting_datasets'] raw_ids = {} for raw_dataset in raw_datasets: raw_data_id = raw_dataset['handwriting'].raw_data_id if raw_dataset['formula_id'] not in raw_ids: raw_ids[raw_dataset['formula_id']] = [raw_data_id] else: raw_ids[raw_dataset['formula_id']].append(raw_data_id) for symbol_id in sorted(raw_ids): print("%i: %s" % (symbol_id, sorted(raw_ids[symbol_id])))
def _get_system(model_folder):
    """Return the preprocessing description, the feature description and the
    model description of the model in ``model_folder``.

    Parameters
    ----------
    model_folder : str
        Path to a folder that contains an ``info.yml`` model description.

    Returns
    -------
    tuple
        (preprocessing description, feature description, model description)
    """
    # Get model description
    model_description_file = os.path.join(model_folder, "info.yml")
    if not os.path.isfile(model_description_file):
        logging.error("You are probably not in the folder of a model, because "
                      "%s is not a file. (-m argument)",
                      model_description_file)
        sys.exit(-1)
    with open(model_description_file, 'r') as ymlfile:
        # safe_load: info.yml only holds plain mappings; avoids arbitrary
        # object construction that yaml.load (no Loader) allows.
        model_desc = yaml.safe_load(ymlfile)
    # Follow the description chain: model -> features -> preprocessing.
    feature_desc = _get_description(model_desc)
    preprocessing_desc = _get_description(feature_desc)
    return (preprocessing_desc, feature_desc, model_desc)
def display_data(raw_data_string, raw_data_id, model_folder, show_raw):
    """Print ``raw_data_id`` with the content ``raw_data_string`` after
    applying the preprocessing of ``model_folder`` to it.

    Parameters
    ----------
    raw_data_string : str
        The recording data (printed verbatim, then parsed by
        ``HandwrittenData``).
    raw_data_id : int
        Identifier of the recording (display only).
    model_folder : str
        Folder whose preprocessing / feature description is applied.
    show_raw : bool
        If True, display the recording before preprocessing.
    """
    print("## Raw Data (ID: %i)" % raw_data_id)
    print("```")
    print(raw_data_string)
    print("```")
    preprocessing_desc, feature_desc, _ = _get_system(model_folder)
    # Print model
    print("## Model")
    print("%s\n" % model_folder)
    # Get the preprocessing queue
    tmp = preprocessing_desc['queue']
    preprocessing_queue = preprocessing.get_preprocessing_queue(tmp)
    # Get feature values as list of floats, rounded to 3 decimal places
    tmp = feature_desc['features']
    feature_list = features.get_features(tmp)
    # Print preprocessing queue
    preprocessing.print_preprocessing_list(preprocessing_queue)
    features.print_featurelist(feature_list)
    # Get Handwriting
    recording = handwritten_data.HandwrittenData(raw_data_string,
                                                 raw_data_id=raw_data_id)
    if show_raw:
        recording.show()
    # Preprocessing mutates the recording in place, then features are
    # extracted from the cleaned strokes.
    recording.preprocessing(preprocessing_queue)
    feature_values = recording.feature_extraction(feature_list)
    feature_values = [round(el, 3) for el in feature_values]
    print("Features:")
    print(feature_values)
    # Get the list of data multiplication algorithms
    mult_queue = data_multiplication.get_data_multiplication_queue(
        feature_desc['data-multiplication'])
    # Multiply traing_set (dummy ids: this is a single ad-hoc recording)
    training_set = [{'id': 42,
                     'formula_id': 42,
                     'formula_in_latex': 'None',
                     'handwriting': recording}]
    training_set = create_ffiles.training_set_multiplication(training_set,
                                                             mult_queue)
    # Display it
    logging.info("Show %i recordings...", len(training_set))
    for recording in training_set:
        recording['handwriting'].show()
def main(list_ids, model, contact_server, raw_data_id, show_raw,
         mysql_cfg='mysql_online'):
    """Main function of view.py.

    Parameters
    ----------
    list_ids : bool
        If True, only list the available raw data IDs and return.
    model : str
        Path to the model folder.
    contact_server : bool
        If True, try to fetch the recording from the database first.
    raw_data_id : int
        ID of the recording to display.
    show_raw : bool
        Passed through to ``display_data``.
    mysql_cfg : str
        Name of the MySQL configuration section to use.
    """
    if list_ids:
        preprocessing_desc, _, _ = _get_system(model)
        raw_datapath = os.path.join(utils.get_project_root(),
                                    preprocessing_desc['data-source'])
        _list_ids(raw_datapath)
    else:
        if contact_server:
            data = _fetch_data_from_server(raw_data_id, mysql_cfg)
            print("hwrt version: %s" % hwrt.__version__)
            if data is not None:
                display_data(data['data'], data['id'], model, show_raw)
            else:
                logging.info("RAW_DATA_ID %i does not exist or "
                             "database connection did not work.",
                             raw_data_id)
        # The data was not on the server / the connection to the server did
        # not work. So try it again with the model data
        preprocessing_desc, _, _ = _get_system(model)
        raw_datapath = os.path.join(utils.get_project_root(),
                                    preprocessing_desc['data-source'])
        handwriting = _get_data_from_rawfile(raw_datapath, raw_data_id)
        if handwriting is None:
            logging.info("Recording with ID %i was not found in %s",
                         raw_data_id,
                         raw_datapath)
        else:
            print("hwrt version: %s" % hwrt.__version__)
            display_data(handwriting.raw_data_json,
                         handwriting.formula_id,
                         model,
                         show_raw)
def get_parameters(folder):
    """Get the parameters of the preprocessing done within `folder`.

    Parameters
    ----------
    folder : str
        Folder containing an ``info.yml`` preprocessing description.

    Returns
    -------
    tuple
        (path of raw data, path where preprocessed data gets stored,
        list of preprocessing algorithms)
    """
    # Read the model description file
    with open(os.path.join(folder, "info.yml"), 'r') as ymlfile:
        # safe_load: info.yml only contains plain data, and safe_load
        # protects against arbitrary object construction.
        preprocessing_description = yaml.safe_load(ymlfile)
    # Get the path of the raw data
    raw_datapath = os.path.join(utils.get_project_root(),
                                preprocessing_description['data-source'])
    # Get the path were the preprocessed file should be put
    outputpath = os.path.join(folder, "data.pickle")
    # Get the preprocessing queue
    tmp = preprocessing_description['queue']
    preprocessing_queue = preprocessing.get_preprocessing_queue(tmp)
    return (raw_datapath, outputpath, preprocessing_queue)
def create_preprocessed_dataset(path_to_data, outputpath,
                                preprocessing_queue):
    """Create a preprocessed dataset file by applying `preprocessing_queue`
    to `path_to_data`. The result will be stored in `outputpath`.

    Parameters
    ----------
    path_to_data : str
        Path to a pickle file with raw handwriting datasets.
    outputpath : str
        Path where the preprocessed pickle file gets written.
    preprocessing_queue : list
        Preprocessing objects, applied in order to every recording.
    """
    # Log everything
    logging.info("Data soure %s", path_to_data)
    logging.info("Output will be stored in %s", outputpath)
    tmp = "Preprocessing Queue:\n"
    for preprocessing_class in preprocessing_queue:
        tmp += str(preprocessing_class) + "\n"
    logging.info(tmp)
    # Load from pickled file
    if not os.path.isfile(path_to_data):
        # Interactive fallback: let the user pick a raw dataset, print the
        # project-relative path and abort so the model can be updated.
        logging.info(("'%s' does not exist. Please either abort this script "
                      "or update the data location."), path_to_data)
        raw_dataset_path = utils.choose_raw_dataset()
        # Get project-relative path
        raw_dataset_path = "raw-datasets" + \
                           raw_dataset_path.split("raw-datasets")[1]
        print(raw_dataset_path)
        sys.exit()  # TODO: Update model!
    logging.info("Start loading data...")
    loaded = pickle.load(open(path_to_data, "rb"))
    raw_datasets = loaded['handwriting_datasets']
    logging.info("Start applying preprocessing methods")
    start_time = time.time()
    for i, raw_dataset in enumerate(raw_datasets):
        if i % 10 == 0 and i > 0:
            utils.print_status(len(raw_datasets), i, start_time)
        # Do the work: preprocessing mutates each recording in place.
        raw_dataset['handwriting'].preprocessing(preprocessing_queue)
    sys.stdout.write("\r%0.2f%% (done)\033[K\n" % (100))
    print("")
    # Protocol 2 keeps the pickle readable from Python 2.
    pickle.dump({'handwriting_datasets': raw_datasets,
                 'formula_id2latex': loaded['formula_id2latex'],
                 'preprocessing_queue': preprocessing_queue},
                open(outputpath, "wb"),
                2)
def main(folder):
    """Glue code for preprocess_dataset: read the parameters, build the
    preprocessed dataset and record the run."""
    datapath, outpath, queue = get_parameters(folder)
    create_preprocessed_dataset(datapath, outpath, queue)
    utils.create_run_logfile(folder)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_index_formula_lookup(formula_id2index, feature_folder, index2latex): """ Create a lookup file where the index is mapped to the formula id and the LaTeX command. Parameters formula_id2index : dict feature_folder : str Path to a folder in which a feature file as well as an index2formula_id.csv is. index2latex : dict Maps an integer index to a LaTeX command """
index2formula_id = sorted(formula_id2index.items(), key=lambda n: n[1]) index2formula_file = os.path.join(feature_folder, "index2formula_id.csv") with open(index2formula_file, "w") as f: f.write("index,formula_id,latex\n") for formula_id, index in index2formula_id: f.write("%i,%i,%s\n" % (index, formula_id, index2latex[index]))
def main(feature_folder, create_learning_curve=False):
    """main function of create_ffiles.py

    Parameters
    ----------
    feature_folder : str
        Folder with an ``info.yml`` feature description.
    create_learning_curve : bool
        If True, ``make_hdf5`` additionally writes capped training files
        for a learning curve.
    """
    # Read the feature description file
    with open(os.path.join(feature_folder, "info.yml"), 'r') as ymlfile:
        feature_description = yaml.load(ymlfile)
    # Get preprocessed .pickle file from model description file
    path_to_data = os.path.join(utils.get_project_root(),
                                feature_description['data-source'])
    if os.path.isdir(path_to_data):
        path_to_data = os.path.join(path_to_data, "data.pickle")
    target_paths = {'traindata': os.path.join(feature_folder,
                                              "traindata.hdf5"),
                    'validdata': os.path.join(feature_folder,
                                              "validdata.hdf5"),
                    'testdata': os.path.join(feature_folder,
                                             "testdata.hdf5")}
    feature_list = features.get_features(feature_description['features'])
    mult_queue = data_multiplication.get_data_multiplication_queue(
        feature_description['data-multiplication'])
    # Set everything up for the creation of the 3 hdf5 (test, validation,
    # training).
    os.chdir(feature_folder)
    logging.info("Start creation of hdf5-files...")
    logging.info("Get sets from '%s' ...", path_to_data)
    (training_set, validation_set, test_set, formula_id2index,
     preprocessing_queue, index2latex) = get_sets(path_to_data)
    training_set = training_set_multiplication(training_set, mult_queue)
    _create_index_formula_lookup(formula_id2index, feature_folder,
                                 index2latex)
    # Output data for documentation
    print("Classes (nr of symbols): %i" % len(formula_id2index))
    preprocessing.print_preprocessing_list(preprocessing_queue)
    features.print_featurelist(feature_list)
    logging.info("Start creating hdf5 files")
    # Get the dimension of the feature vector
    input_features = sum(map(lambda n: n.get_dimension(), feature_list))
    # Traindata has to come first because of feature normalization
    for dataset_name, dataset, is_traindata in \
            [("traindata", training_set, True),
             ("testdata", test_set, False),
             ("validdata", validation_set, False)]:
        t0 = time.time()
        logging.info("Start preparing '%s' ...", dataset_name)
        prepared, translation = prepare_dataset(dataset,
                                                formula_id2index,
                                                feature_list,
                                                is_traindata)
        logging.info("%s length: %i", dataset_name, len(prepared))
        logging.info("start 'make_hdf5'x ...")
        make_hdf5(dataset_name, input_features, prepared,
                  os.path.join(feature_folder, target_paths[dataset_name]),
                  create_learning_curve)
        _create_translation_file(feature_folder,
                                 dataset_name,
                                 translation,
                                 formula_id2index)
        t1 = time.time() - t0
        logging.info("%s was written. Needed %0.2f seconds",
                     dataset_name, t1)
        # Free the prepared dataset before preparing the next one.
        gc.collect()
    utils.create_run_logfile(feature_folder)
def training_set_multiplication(training_set, mult_queue):
    """
    Multiply the training set by all methods listed in mult_queue.

    Parameters
    ----------
    training_set : list
        All recordings that will be used for training.
    mult_queue : list
        Algorithms that take one recording and generate one or more
        recordings from it.

    Returns
    -------
    list
        The multiplied training set (the input list if ``mult_queue``
        is empty).
    """
    logging.info("Multiply data...")
    for algorithm in mult_queue:
        new_training_set = []
        for recording in training_set:
            # Each algorithm maps one handwriting sample to >= 1 samples.
            for sample in algorithm(recording['handwriting']):
                new_training_set.append(
                    {'id': recording['id'],
                     'is_in_testset': 0,
                     'formula_id': recording['formula_id'],
                     'handwriting': sample,
                     'formula_in_latex': recording['formula_in_latex']})
        training_set = new_training_set
    # Bug fix: return the accumulated training set. The original returned
    # the loop-local list, which raised UnboundLocalError for an empty
    # mult_queue.
    return training_set
def _calculate_feature_stats(feature_list, prepared, serialization_file):  # pylint: disable=R0914
    """Calculate min, max and mean for each feature. Store it in object.

    Parameters
    ----------
    feature_list : list
        Feature objects; each gets ``mean``/``min``/``max`` attributes set.
    prepared : list of tuples
        (feature vector, label) pairs.
    serialization_file : str
        CSV file that receives one ``mean;range`` row per feature dimension.
    """
    # Create feature only list
    feats = [x for x, _ in prepared]  # Label is not necessary
    # Calculate all means / mins / maxs column-wise over all examples
    means = numpy.mean(feats, 0)
    mins = numpy.min(feats, 0)
    maxs = numpy.max(feats, 0)
    # Calculate, min, max and mean vector for each feature with
    # normalization
    start = 0
    # Python 2 needs binary mode for csv; Python 3 needs newline=''.
    mode = 'w'
    arguments = {'newline': ''}
    if sys.version_info.major < 3:
        mode += 'b'
        arguments = {}
    with open(serialization_file, mode, **arguments) as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=str(';'),
                                quotechar=str('"'),
                                quoting=csv.QUOTE_MINIMAL)
        for feature in feature_list:
            # Each feature owns a contiguous slice of the feature vector.
            end = start + feature.get_dimension()
            # append the data to the feature class
            feature.mean = numpy.array(means[start:end])
            feature.min = numpy.array(mins[start:end])
            feature.max = numpy.array(maxs[start:end])
            start = end
            # Serialize (mean, range) per dimension for later normalization.
            for mean, fmax, fmin in zip(feature.mean, feature.max,
                                        feature.min):
                spamwriter.writerow([mean, fmax - fmin])
def make_hdf5(dataset_name, feature_count, data, output_filename,
              create_learning_curve):
    """
    Create the hdf5 file.

    Parameters
    ----------
    dataset_name : str
        One of 'traindata', 'validdata', 'testdata'.
    feature_count : int
        Number of features per example.
    data : list of tuples
        Data in the format ('feature_string', 'label').
    output_filename : str
        Name of the file that hdf5_create will use to create the hdf5 file.
    create_learning_curve : bool
        If True (and ``dataset_name`` is 'traindata'), additionally write a
        series of capped training files for a learning curve.
    """
    # create raw data file for hdf5_create
    if dataset_name == "traindata" and create_learning_curve:
        max_trainingexamples = 501
        output_filename_save = output_filename
        steps = 10
        for trainingexamples in range(100, max_trainingexamples, steps):
            # adjust output_filename to encode the per-class cap
            tmp = output_filename_save.split(".")
            tmp[-2] += "-%i-examples" % trainingexamples
            output_filename = ".".join(map(str, tmp))
            # Make sure the data has not more than ``trainingexamples``
            # per label.
            seen_symbols = defaultdict(int)
            # Bug fix: accumulate the capped examples in a list. The
            # original reassigned a single tuple each iteration, so only
            # the last example survived.
            new_data = []
            for feature_string, label in data:
                if seen_symbols[label] < trainingexamples:
                    seen_symbols[label] += 1
                    new_data.append((feature_string, label))
            # Create the hdf5 file
            utils.create_hdf5(output_filename, feature_count, new_data)
    else:
        utils.create_hdf5(output_filename, feature_count, data)
def get_dataset():
    """Create a dataset for machine learning of segmentations.

    Returns
    -------
    tuple : (X, y, datasets)
        X is a list of tuples. Each tuple is a feature. y is a list of
        labels (0 for 'not in one symbol' and 1 for 'in symbol').
        datasets is the list of recordings the features were built from.
    """
    # Cached on disk: if the numpy dumps exist, reuse them.
    seg_data = "segmentation-X.npy"
    seg_labels = "segmentation-y.npy"
    # seg_ids = "segmentation-ids.npy"
    if os.path.isfile(seg_data) and os.path.isfile(seg_labels):
        X = numpy.load(seg_data)
        y = numpy.load(seg_labels)
        with open('datasets.pickle', 'rb') as f:
            datasets = pickle.load(f)
        return (X, y, datasets)
    datasets = get_segmented_raw_data()
    X, y = [], []
    for i, data in enumerate(datasets):
        if i % 10 == 0:
            logging.info("[Create Dataset] i=%i/%i", i, len(datasets))
        segmentation = json.loads(data['segmentation'])
        recording = json.loads(data['data'])
        X_symbol = [get_median_stroke_distance(recording)]
        # Skip recordings with missing timestamps.
        if len([p for s in recording for p in s if p['time'] is None]) > 0:
            continue
        # One training example per unordered pair of strokes; the label
        # says whether both strokes belong to the same symbol.
        combis = itertools.combinations(list(range(len(recording))), 2)
        for strokeid1, strokeid2 in combis:
            stroke1 = recording[strokeid1]
            stroke2 = recording[strokeid2]
            if len(stroke1) == 0 or len(stroke2) == 0:
                logging.debug("stroke len 0. Skip.")
                continue
            X.append(get_stroke_features(recording, strokeid1, strokeid2) +
                     X_symbol)
            same_symbol = (_get_symbol_index(strokeid1, segmentation) ==
                           _get_symbol_index(strokeid2, segmentation))
            y.append(int(same_symbol))
    X = numpy.array(X, dtype=numpy.float32)
    y = numpy.array(y, dtype=numpy.int32)
    # Persist the dataset so the next call takes the cached path above.
    numpy.save(seg_data, X)
    numpy.save(seg_labels, y)
    datasets = filter_recordings(datasets)
    with open('datasets.pickle', 'wb') as f:
        pickle.dump(datasets, f, protocol=pickle.HIGHEST_PROTOCOL)
    return (X, y, datasets)
def get_segmented_raw_data(top_n=10000):
    """Fetch data from the server.

    Parameters
    ----------
    top_n : int
        Number of data sets which get fetched from the server.

    Returns
    -------
    list of dict
        Rows with keys ``id``, ``data`` and ``segmentation``.
    """
    cfg = utils.get_database_configuration()
    mysql = cfg['mysql_online']
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor()
    # NOTE(review): the LIMIT is %-interpolated; safe only as long as
    # callers pass an int for top_n — consider a parameterized query.
    sql = ("SELECT `id`, `data`, `segmentation` "
           "FROM `wm_raw_draw_data` WHERE "
           "(`segmentation` IS NOT NULL OR `accepted_formula_id` IS NOT "
           "NULL) "
           "AND `wild_point_count` = 0 "
           "AND `stroke_segmentable` = 1 "
           "ORDER BY `id` LIMIT 0, %i") % top_n
    logging.info(sql)
    cursor.execute(sql)
    datasets = cursor.fetchall()
    logging.info("Fetched %i recordings. Add missing segmentations.",
                 len(datasets))
    for i in range(len(datasets)):
        if datasets[i]['segmentation'] is None:
            # No segmentation stored: assume the whole recording is one
            # symbol (all strokes in a single group).
            stroke_count = len(json.loads(datasets[i]['data']))
            if stroke_count > 10:
                print("Massive stroke count! %i" % stroke_count)
            datasets[i]['segmentation'] = str([[s for s in
                                                range(stroke_count)]])
    return datasets
def get_stroke_features(recording, strokeid1, strokeid2):
    """Get the features used to decide if two strokes belong to the same
    symbol or not.

    Parameters
    ----------
    recording : list
        A list of strokes.
    strokeid1 : int
    strokeid2 : int

    Returns
    -------
    list
        Features which could be useful to decide if stroke1 and stroke2
        belong to the same symbol.
    """
    stroke1 = recording[strokeid1]
    stroke2 = recording[strokeid2]
    assert isinstance(stroke1, list), "stroke1 is a %s" % type(stroke1)
    feature_vector = []
    # Per-stroke features: point coordinates, re-curvature and ink amount.
    for stroke in (stroke1, stroke2):
        hw = HandwrittenData(json.dumps([stroke]))
        extractors = [
            features.ConstantPointCoordinates(strokes=1,
                                              points_per_stroke=20,
                                              fill_empty_with=0),
            features.ReCurvature(strokes=1),
            features.Ink(),
        ]
        feature_vector += hw.feature_extraction(extractors)
    # Pairwise features.
    feature_vector.append(get_strokes_distance(stroke1, stroke2))  # Distance of strokes
    feature_vector.append(get_time_distance(stroke1, stroke2))  # Time in between
    feature_vector.append(abs(strokeid2 - strokeid1))  # Strokes in between
    # feature_vector.append(get_black_percentage())
    return feature_vector
def get_segmentation(recording,
                     single_clf,
                     single_stroke_clf,
                     stroke_segmented_classifier):
    """
    Get a list of segmentations of recording with the probability of the
    segmentation being correct.

    Parameters
    ----------
    recording : A list of lists
        Each sublist represents a stroke
    single_clf : object
        A classifier for single symbols
    single_stroke_clf : object
        A classifier which decides if a single stroke is a complete symbol
    stroke_segmented_classifier : object
        Classifier which decides if two strokes belong to one symbol or not

    Returns
    -------
    list of tuples :
        Segmentations together with their probabilities. Each probability
        has to be positive and the sum may not be bigger than 1.0.

    Examples
    --------
    >>> get_segmentation(...)
    [
      ([[0, 1], [2]], 0.8),
      ([[0], [1,2]], 0.1),
      ([[0,2], [1]], 0.05)
    ]
    """
    mst_wood = get_mst_wood(recording, single_clf)
    # NOTE(review): this early return short-circuits the whole function —
    # everything below it is unreachable (kept for reference / future work).
    return [(normalize_segmentation([mst['strokes'] for mst in mst_wood]),
             1.0)]
    # HandwrittenData(json.dumps(recording)).show()
    # return [([[i for i in range(len(recording))]], 1.0)]
    # #mst_wood = break_mst(mst, recording)  # TODO
    # for i in range(0, 2**len(points)):
    #     segmentation = get_segmentation_from_mst(mst, i)  # TODO
    X_symbol = [get_median_stroke_distance(recording)]
    # Pre-segment to 8 strokes
    # TODO: Take first 4 strokes and add strokes within their bounding box
    # TODO: What if that is more then 8 strokes?
    # -> Geometry
    # Build tree structure. A stroke `c` is the child of another stroke `p`,
    # if the bounding box of `c` is within the bounding box of `p`.
    # Problem: B <-> 13
    g_top_segmentations = [([], 1.0)]  # g_top_segmentations
    # range(int(math.ceil(float(len(recording))/8))):
    for chunk_part in mst_wood:
        # chunk = recording[8*chunk_part:8*(chunk_part+1)]
        chunk = [recording[stroke] for stroke in chunk_part['strokes']]
        # Segment after pre-segmentation
        prob = [[1.0 for _ in chunk] for _ in chunk]
        for strokeid1, strokeid2 in itertools.product(range(len(chunk)),
                                                      range(len(chunk))):
            if strokeid1 == strokeid2:
                continue
            X = get_stroke_features(chunk, strokeid1, strokeid2)
            X += X_symbol
            X = numpy.array([X], dtype=numpy.float32)
            prob[strokeid1][strokeid2] = stroke_segmented_classifier(X)
        # Top segmentations
        ts = list(partitions.get_top_segmentations(prob, 500))
        # NOTE(review): the inner `for i, symbol ...` loop shadows the
        # outer loop's `i`, so `ts[i][1]` below uses the inner index —
        # looks like a latent bug (dead code, so currently harmless).
        for i, segmentation in enumerate(ts):
            symbols = apply_segmentation(chunk, segmentation)
            min_top2 = partitions.TopFinder(1, find_min=True)
            for i, symbol in enumerate(symbols):
                predictions = single_clf.predict(symbol)
                min_top2.push("value-%i" % i,
                              predictions[0]['probability'] +
                              predictions[1]['probability'])
            ts[i][1] *= list(min_top2)[0][1]
        # for i, segmentation in enumerate(ts):
        #     ts[i][0] = update_segmentation_data(ts[i][0], 8*chunk_part)
        g_top_segmentations = merge_segmentations(g_top_segmentations,
                                                  ts,
                                                  chunk_part['strokes'])
    return [(normalize_segmentation(seg),
             probability) for seg, probability in g_top_segmentations]
def break_mst(mst, i):
    """
    Break mst into multiple MSTs by removing one node i.

    Parameters
    ----------
    mst : dict
        'mst' is a symmetrical square matrix, 'strokes' the node labels.
    i : int
        Index of the mst where to break.

    Returns
    -------
    list of dictionarys ('mst' and 'strokes' are the keys)
    """
    # Disconnect node i by zeroing its row and column (mutates mst in place).
    for j in range(len(mst['mst'])):
        mst['mst'][i][j] = 0
        mst['mst'][j][i] = 0
    _, components = scipy.sparse.csgraph.connected_components(mst['mst'])
    # NOTE(review): the loops below rebind `i`, shadowing the parameter —
    # the assert message's %i therefore reports a loop index, not the
    # removed node.
    comp_indices = {}
    for el in set(components):
        comp_indices[el] = {'strokes': [], 'strokes_i': []}
    for i, comp_nr in enumerate(components):
        comp_indices[comp_nr]['strokes'].append(mst['strokes'][i])
        comp_indices[comp_nr]['strokes_i'].append(i)
    mst_wood = []
    for key in comp_indices:
        # Extract the submatrix for this connected component.
        matrix = []
        for i, line in enumerate(mst['mst']):
            line_add = []
            if i not in comp_indices[key]['strokes_i']:
                continue
            for j, el in enumerate(line):
                if j in comp_indices[key]['strokes_i']:
                    line_add.append(el)
            matrix.append(line_add)
        assert len(matrix) > 0, \
            ("len(matrix) == 0 (strokes: %s, mst=%s, i=%i)" %
             (comp_indices[key]['strokes'], mst, i))
        assert len(matrix) == len(matrix[0]), \
            ("matrix was %i x %i, but should be square" %
             (len(matrix), len(matrix[0])))
        assert len(matrix) == len(comp_indices[key]['strokes']), \
            (("stroke length was not equal to matrix length "
              "(strokes=%s, len(matrix)=%i)") %
             (comp_indices[key]['strokes'], len(matrix)))
        mst_wood.append({'mst': matrix,
                         'strokes': comp_indices[key]['strokes']})
    return mst_wood
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_out_of_order(segmentation): """ Check if a given segmentation is out of order. Examples -------- False False True """
last_stroke = -1 for symbol in segmentation: for stroke in symbol: if last_stroke > stroke: return True last_stroke = stroke return False
def get_bb_intersections(recording):
    """
    Get all intersections of the bounding boxes of strokes.

    Parameters
    ----------
    recording : list of lists of integers

    Returns
    -------
    numpy.ndarray
        A symmetrical boolean matrix which indicates if two (grown)
        bounding boxes intersect.
    """
    stroke_count = len(recording)
    # Grow each box by 20% once up front instead of recomputing per pair.
    boxes = [geometry.get_bounding_box(stroke).grow(0.2)
             for stroke in recording]
    intersections = numpy.zeros((stroke_count, stroke_count), dtype=bool)
    for i in range(stroke_count - 1):
        for j in range(i + 1, stroke_count):
            hit = geometry.do_bb_intersect(boxes[i], boxes[j])
            intersections[i][j] = hit
            intersections[j][i] = hit
    return intersections
def p_strokes(symbol, count):
    """
    Get the probability of a written `symbol` having `count` strokes.

    Parameters
    ----------
    symbol : str
        LaTeX command
    count : int, >= 1

    Returns
    -------
    float
        In [0.0, 1.0]
    """
    global stroke_prob
    assert count >= 1
    # Returned when no probability is recorded for (symbol, count); small
    # but non-zero so downstream products don't collapse to exactly 0.
    epsilon = 0.00000001
    if stroke_prob is None:
        # Lazily load the stroke-count statistics shipped with the package.
        misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
        stroke_prob_file = os.path.join(misc_path,
                                        'prob_stroke_count_by_symbol.yml')
        with open(stroke_prob_file, 'r') as stream:
            # safe_load: the file only contains plain mappings/numbers.
            stroke_prob = yaml.safe_load(stream)
    # EAFP-style lookup with a double default instead of nested ifs.
    return stroke_prob.get(symbol, {}).get(count, epsilon)
def _add_hypotheses_assuming_new_stroke(self,
                                        new_stroke,
                                        stroke_nr,
                                        new_beam):
    """
    Get new guesses by assuming new_stroke is a new symbol.

    Parameters
    ----------
    new_stroke : list of dicts
        Each dict represents a point.
    stroke_nr : int
        Number of the stroke for segmentation.
    new_beam : beam object
        Receives the newly created hypotheses.
    """
    # Classify the single new stroke; keep only the m best guesses.
    guesses = single_clf.predict({'data': [new_stroke],
                                  'id': None})[:self.m]
    for hyp in self.hypotheses:
        new_geometry = deepcopy(hyp['geometry'])
        most_right = new_geometry
        # NOTE(review): this condition looks inverted — the 'right'-chain
        # walk only happens when there are NO symbols yet, while a
        # non-empty hypothesis discards the walked chain. Confirm intent.
        if len(hyp['symbols']) == 0:
            while 'right' in most_right:
                most_right = most_right['right']
            most_right['right'] = {'symbol_index': len(hyp['symbols']),
                                   'right': None}
        else:
            most_right = {'symbol_index': len(hyp['symbols']),
                          'right': None}
        for guess in guesses:
            sym = {'symbol': guess['semantics'],
                   'probability': guess['probability']}
            # Extend segmentation/symbols with the new single-stroke symbol.
            new_seg = deepcopy(hyp['segmentation'])
            new_seg.append([stroke_nr])
            new_sym = deepcopy(hyp['symbols'])
            new_sym.append(sym)
            b = {'segmentation': new_seg,
                 'symbols': new_sym,
                 'geometry': new_geometry,
                 'probability': None
                 }
            # spacial_rels = []  # TODO
            # for s1_indices, s2_indices in zip(b['segmentation'],
            #                                   b['segmentation'][1:]):
            #     tmp = [new_beam.history['data'][el] for el in s1_indices]
            #     s1 = HandwrittenData(json.dumps(tmp))
            #     tmp = [new_beam.history['data'][el] for el in s2_indices]
            #     s2 = HandwrittenData(json.dumps(tmp))
            #     rel = spacial_relationship.estimate(s1, s2)
            #     spacial_rels.append(rel)
            # b['geometry'] = spacial_rels
            new_beam.hypotheses.append(b)
def add_stroke(self, new_stroke):
    """
    Update the beam so that it considers `new_stroke`.

    When a `new_stroke` comes, it can either belong to a symbol for which
    at least one other stroke was already made or belong to a symbol for
    which `new_stroke` is the first stroke.

    The number of hypotheses after q strokes without pruning is

        f: N_0 -> N_0
        f(0) = 1
        f(1) = m
        f(q) = f(q-1)*(m+n)

    The number of time the single symbol classifier has to be called, when
    already q hypotheses exist:

        f_s: N_0 -> N_0
        f_s(q) = q*n + 1 (upper bound)

    Parameters
    ----------
    new_stroke : list of dicts
        Each dict represents a point.
    """
    global single_clf
    if len(self.hypotheses) == 0:  # Don't put this in the constructor!
        # Seed with the empty hypothesis on the very first stroke.
        self.hypotheses = [{'segmentation': [],
                            'symbols': [],
                            'geometry': {},
                            'probability': Decimal(1)
                            }]
    stroke_nr = len(self.history['data'])
    new_history = deepcopy(self.history)
    new_history['data'].append(new_stroke)
    new_beam = Beam()
    new_beam.history = new_history
    # Avoid re-classifying a segmentation that was already evaluated.
    evaluated_segmentations = []
    # Get new guesses by assuming new_stroke belongs to an already begun
    # symbol
    had_multisymbol = False
    for hyp in self.hypotheses:
        # Add stroke to last n symbols (seperately)
        for i in range(min(self.n, len(hyp['segmentation']))):
            # Build stroke data: all strokes of the candidate symbol plus
            # the new stroke.
            new_strokes = {'data': [], 'id': -1}
            for stroke_index in hyp['segmentation'][-(i+1)]:
                curr_stroke = self.history['data'][stroke_index]
                new_strokes['data'].append(curr_stroke)
            new_strokes['data'].append(new_stroke)
            new_seg = deepcopy(hyp['segmentation'])
            new_seg[-(i+1)].append(stroke_nr)
            if new_seg in evaluated_segmentations:
                continue
            else:
                evaluated_segmentations.append(new_seg)
            # Predict this new collection of strokes
            guesses = single_clf.predict(new_strokes)[:self.m]
            for guess in guesses:
                if guess['semantics'].split(";")[1] == "::MULTISYMBOL::":
                    # This was a wrong segmentation. Ignore it.
                    had_multisymbol = True
                    continue
                sym = {'symbol': guess['semantics'],
                       'probability': guess['probability']}
                new_sym = deepcopy(hyp['symbols'])
                new_sym[-(i+1)] = sym
                b = {'segmentation': new_seg,
                     'symbols': new_sym,
                     'geometry': deepcopy(hyp['geometry']),
                     'probability': None
                     }
                new_beam.hypotheses.append(b)
    # Also branch on "new_stroke starts a new symbol" when the beam is
    # fresh or a merge attempt produced a multi-symbol classification.
    if len(self.hypotheses) <= 1 or had_multisymbol:
        self._add_hypotheses_assuming_new_stroke(new_stroke,
                                                 stroke_nr,
                                                 new_beam)
    for hyp in new_beam.hypotheses:
        hyp['probability'] = _calc_hypothesis_probability(hyp)
    # Get probability again
    # Get geometry of each beam entry
    # TODO
    # Update probabilities
    # TODO
    # Normalize to sum=1
    self.hypotheses = new_beam.hypotheses
    self.history = new_beam.history
    self._prune()
    # Renormalize the surviving hypotheses' probabilities via softmax.
    new_probs = softmax([h['probability'] for h in self.hypotheses])
    for hyp, prob in zip(self.hypotheses, new_probs):
        hyp['probability'] = prob
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _prune(self): """Shorten hypotheses to the best k ones."""
self.hypotheses = sorted(self.hypotheses, key=lambda e: e['probability'], reverse=True)[:self.k]
def get_matrices():
    """
    Get the matrices from a pickled files.

    Returns
    -------
    list
        List of dicts with keys 'storage' (numpy array) and 'name'
        (the name of the pickled input-storage entry, e.g. 'W' or 'b').
    """
    # NOTE(review): the pickled object appears to be a compiled Theano
    # function (has ``input_storage`` with CudaNdarray entries) — confirm.
    with open('hwrt/misc/is_one_symbol_classifier.pickle', 'rb') as f:
        a = pickle.load(f)
    arrays = []
    for el1 in a.input_storage:
        for el2 in el1.__dict__['storage']:
            if isinstance(el2, cuda.CudaNdarray):
                # Copy GPU data into a host numpy array.
                arrays.append({'storage': numpy.asarray(el2),
                               'name': el1.name})
            else:
                logging.warning("was type %s. Do nothing." % type(el2))
            logging.debug(el1.name)
    return arrays
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_model_tar(matrices, tarname="model-cuda-converted.tar"): """ Create a tar file which contains the model. Parameters matrices : list tarname : str Target file which will be created. """
# Write one HDF5 file per weight matrix ('W') / bias vector ('b'), then
# bundle them into a plain (uncompressed) tar archive.
filenames = []
for layer in range(len(matrices)):
    if matrices[layer]['name'] == 'W':
        weights = matrices[layer]['storage']
        # Bug fix: use integer division. On Python 3, `layer / 2` is a
        # float and would produce filenames like 'W0.5.hdf5'.
        weights_file = h5py.File('W%i.hdf5' % (layer // 2), 'w')
        weights_file.create_dataset(weights_file.id.name, data=weights)
        weights_file.close()
        filenames.append('W%i.hdf5' % (layer // 2))
    elif matrices[layer]['name'] == 'b':
        b = matrices[layer]['storage']
        bfile = h5py.File('b%i.hdf5' % (layer // 2), 'w')
        bfile.create_dataset(bfile.id.name, data=b)
        bfile.close()
        filenames.append('b%i.hdf5' % (layer // 2))
with tarfile.open(tarname, "w:") as tar:
    for name in filenames:
        tar.add(name)
# Remove temporary files which are now in the tar file
for filename in filenames:
    os.remove(filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_python_version(): """Check if the currently running Python version is new enough."""
# Report whether the running interpreter satisfies the minimum version.
required = (2, 7)
current = sys.version_info
required_str = "%s.%s" % (required[0], required[1])
if current >= required:
    print("Python version... %sOK%s (found %s, requires %s)" %
          (Bcolors.OKGREEN, Bcolors.ENDC,
           str(platform.python_version()), required_str))
else:
    print("Python version... %sFAIL%s (found %s, requires %s)" %
          (Bcolors.FAIL, Bcolors.ENDC, str(current), str(required)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """Execute all checks."""
# Run the individual environment checks; each prints its own report.
check_python_version()
check_python_modules()
check_executables()
home = os.path.expanduser("~")
print("\033[1mCheck files\033[0m")  # ANSI bold section header
rcfile = os.path.join(home, ".hwrtrc")
# The user configuration file is required for most hwrt commands.
if os.path.isfile(rcfile):
    print("~/.hwrtrc... %sFOUND%s" %
          (Bcolors.OKGREEN, Bcolors.ENDC))
else:
    print("~/.hwrtrc... %sNOT FOUND%s" %
          (Bcolors.FAIL, Bcolors.ENDC))
# Show where the packaged misc resources live (useful for debugging).
misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
print("misc-path: %s" % misc_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge(d1, d2): """Merge two raw datasets into one. Parameters d1 : dict d2 : dict Returns ------- dict """
if d1['formula_id2latex'] is None: formula_id2latex = {} else: formula_id2latex = d1['formula_id2latex'].copy() formula_id2latex.update(d2['formula_id2latex']) handwriting_datasets = d1['handwriting_datasets'] for dataset in d2['handwriting_datasets']: handwriting_datasets.append(dataset) return {'formula_id2latex': formula_id2latex, 'handwriting_datasets': handwriting_datasets}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_file_consistent(local_path_file, md5_hash): """Check if file is there and if the md5_hash is correct."""
return os.path.isfile(local_path_file) and \ hashlib.md5(open(local_path_file, 'rb').read()).hexdigest() == md5_hash
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """Main part of the download script."""
# Read config file. This has to get updated via git
project_root = utils.get_project_root()
infofile = os.path.join(project_root, "raw-datasets/info.yml")
logging.info("Read '%s'...", infofile)
with open(infofile, 'r') as ymlfile:
    datasets = yaml.load(ymlfile)
for dataset in datasets:
    local_path_file = os.path.join(project_root, dataset['online_path'])
    i = 0
    # Retry the download up to 3 times until the MD5 checksum matches.
    while not is_file_consistent(local_path_file, dataset['md5']) and i < 3:
        if os.path.isfile(local_path_file):
            local_file_size = os.path.getsize(local_path_file)
            logging.info("MD5 codes differ. ")
            logging.info("The file size of the downloaded file is %s.",
                         utils.sizeof_fmt(local_file_size))
        logging.info("Download the file '%s'...", dataset['online_path'])
        urllib.urlretrieve(dataset['url'], local_path_file)
        i += 1
    # Bug fix: the old check `if i < 10` was always true (i never exceeds
    # 3), so success was reported even after three failed downloads.
    # Verify the checksum instead.
    if is_file_consistent(local_path_file, dataset['md5']):
        logging.info("Found '%s'.", dataset['online_path'])
    else:
        logging.warning("Could not get a consistent copy of '%s'.",
                        dataset['online_path'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_model(): """ Load a n-gram language model for mathematics in ARPA format which gets shipped with hwrt. Returns ------- A NgramLanguageModel object """
logging.info("Load language model...")
# The ARPA model ships inside the package as a bz2-compressed tarball;
# unpack it into a fresh temporary directory.
ngram_arpa_t = pkg_resources.resource_filename('hwrt',
                                               'misc/ngram.arpa.tar.bz2')
with tarfile.open(ngram_arpa_t, 'r:bz2') as tar:
    tarfolder = tempfile.mkdtemp()
    tar.extractall(path=tarfolder)
ngram_arpa_f = os.path.join(tarfolder, 'ngram.arpa')
with open(ngram_arpa_f) as f:
    content = f.read()
ngram_model = NgramLanguageModel()
ngram_model.load_from_arpa_str(content)
# NOTE(review): `tarfolder` is never removed, so each call leaks a
# temporary directory — consider shutil.rmtree(tarfolder) here.
return ngram_model
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_from_arpa_str(self, arpa_str): """ Initialize N-gram model by reading an ARPA language model string. Parameters arpa_str : str A string in ARPA language model file format """
# Line-by-line state machine over the ARPA format:
#   header -> "\data\" section (one "ngram N=count" line per order)
#   -> "\N-grams:" blocks with "<log10prob>\t<w1>...<wN>" lines
#   -> "\end\", after which only "info: ..." lines are honoured.
# in_ngram_block == 0 means "not inside an n-gram block yet".
data_found = False
end_found = False
in_ngram_block = 0
for i, line in enumerate(arpa_str.split("\n")):
    if not end_found:
        if not data_found:
            if "\\data\\" in line:
                data_found = True
        else:
            if in_ngram_block == 0:
                if line.startswith("ngram"):
                    # e.g. "ngram 2=4321" -> order n=2.
                    # NOTE(review): `count` is stored as a string, not
                    # an int — confirm consumers expect that.
                    ngram_type, count = line.split("=")
                    _, n = ngram_type.split(" ")
                    n = int(n)
                    self.ngrams[n] = {'data': {}, 'count': count}
                elif line.startswith("\\"):
                    # "\N-grams:" marks the start of an n-gram block.
                    n = int(line.split("-")[0][1:])
                    in_ngram_block = n
                else:
                    continue  # Empty line
            elif in_ngram_block > 0:
                if "\\end\\" in line:
                    end_found = True
                elif line.startswith("\\"):
                    n = int(line.split("-")[0][1:])
                    in_ngram_block = n
                elif len(line) <= 1:
                    continue
                else:
                    # Data line: log10 probability, then n tokens.
                    data = line.split("\t")
                    probability = Decimal(data[0])
                    ngram = data[1:]
                    if len(ngram) != n:
                        raise Exception(("ARPA language file is "
                                         "inconsistant. Line %i has "
                                         "only %i items, but should "
                                         "have %i items.") % (i, len(ngram), n))
                    # Store the probability in a nested dict keyed by
                    # each token of the n-gram in order.
                    rest = ngram
                    append_to = self.ngrams[n]['data']
                    while len(rest) > 1:
                        first, rest = rest[0], rest[1:]
                        if first not in append_to:
                            append_to[first] = {}
                        append_to = append_to[first]
                    if rest[0] in append_to:
                        raise Exception(("Duplicate entry for "
                                         "ngram %s") % ngram)
                    append_to[rest[0]] = probability
    else:
        if line.startswith("info: "):
            logging.info(line[6:])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_probability(self, sentence): """ Calculate the probability of a sentence, given this language model. Parameters sentence : list A list of strings / tokens. """
# Stored values are log10 probabilities, so convert back via 10**x.
if len(sentence) == 1:
    return Decimal(10)**self.get_unigram_log_prob(sentence)
elif len(sentence) == 2:
    return Decimal(10)**self.get_bigram_log_prob(sentence)
else:
    # For longer sentences, sum trigram log-probs over a sliding
    # window of size three.
    log_prob = Decimal(0.0)
    for w1, w2, w3 in zip(sentence, sentence[1:], sentence[2:]):
        log_prob += self.get_trigram_log_prob((w1, w2, w3))
    log_prob = Decimal(log_prob)
    return Decimal(10)**log_prob
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_dir(sample_dir): """Evaluate all recordings in `sample_dir`. Parameters sample_dir : string The path to a directory with *.inkml files. Returns ------- list of dictionaries Each dictionary contains the keys 'filename' and 'results', where 'results' itself is a list of dictionaries. Each of the results has the keys 'latex' and 'probability' """
results = [] if sample_dir[-1] == "/": sample_dir = sample_dir[:-1] for filename in glob.glob("%s/*.inkml" % sample_dir): results.append(evaluate_inkml(filename)) return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_inkml(inkml_file_path): """Evaluate an InkML file. Parameters inkml_file_path : string path to an InkML file Returns ------- dictionary The dictionary contains the keys 'filename' and 'results', where 'results' itself is a list of dictionaries. Each of the results has the keys 'semantics' (which contains the latex command) and 'probability' """
logging.info("Start evaluating '%s'...", inkml_file_path)
ret = {'filename': inkml_file_path}
# Parse the InkML file into a recording object ...
recording = inkml.read(inkml_file_path)
# ... and feed the recognizer its pointlist serialized as JSON.
results = evaluate(json.dumps(recording.get_sorted_pointlist()),
                   result_format='LaTeX')
ret['results'] = results
return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_output_csv(evaluation_results, filename='results.csv'): """Generate the evaluation results in the format Parameters evaluation_results : list of dictionaries Each dictionary contains the keys 'filename' and 'results', where 'results' itself is a list of dictionaries. Each of the results has the keys 'latex' and 'probability' Examples -------- MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1 MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100 """
with open(filename, 'w') as f:
    for result in evaluation_results:
        # ',' is the CSV separator, so a literal comma symbol must be
        # escaped as the token 'COMMA'.
        # Bug fix: the original wrote
        # `result['results']['semantics'] = 'COMMA'`, indexing a *list*
        # with a string key (TypeError); mutate the entry instead.
        for entry in result['results']:
            if entry['semantics'] == ',':
                entry['semantics'] = 'COMMA'
        f.write("%s, " % result['filename'])
        f.write(", ".join([entry['semantics']
                           for entry in result['results']]))
        f.write("\n")
        f.write("%s, " % "scores")
        f.write(", ".join([str(entry['probability'])
                           for entry in result['results']]))
        f.write("\n")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_project_configuration(): """Get project configuration as dictionary."""
home = os.path.expanduser("~") rcfile = os.path.join(home, ".hwrtrc") if not os.path.isfile(rcfile): create_project_configuration(rcfile) with open(rcfile, 'r') as ymlfile: cfg = yaml.load(ymlfile) return cfg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_project_configuration(filename): """Create a project configuration file which contains a configuration that might make sense."""
# Write a sensible default configuration as YAML to `filename`.
home_dir = os.path.expanduser("~")
default_config = {
    'root': os.path.join(home_dir, "hwr-experiments"),
    'nntoolkit': None,
    'dropbox_app_key': None,
    'dropbox_app_secret': None,
    'dbconfig': os.path.join(home_dir, "hwrt-config/db.config.yml"),
    'data_analyzation_queue': [{'Creator': None}],
    'worker_api_key': '1234567890abc',
    'environment': 'development',
}
with open(filename, 'w') as f:
    yaml.dump(default_config, f, default_flow_style=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_project_root(): """Get the project root folder as a string."""
cfg = get_project_configuration() # At this point it can be sure that the configuration file exists # Now make sure the project structure exists for dirname in ["raw-datasets", "preprocessed", "feature-files", "models", "reports"]: directory = os.path.join(cfg['root'], dirname) if not os.path.exists(directory): os.makedirs(directory) raw_yml_path = pkg_resources.resource_filename('hwrt', 'misc/') # TODO: How to check for updates if it already exists? raw_data_dst = os.path.join(cfg['root'], "raw-datasets/info.yml") if not os.path.isfile(raw_data_dst): raw_yml_pkg_src = os.path.join(raw_yml_path, "info.yml") shutil.copy(raw_yml_pkg_src, raw_data_dst) # Make sure small-baseline folders exists for dirname in ["models/small-baseline", "feature-files/small-baseline", "preprocessed/small-baseline"]: directory = os.path.join(cfg['root'], dirname) if not os.path.exists(directory): os.makedirs(directory) # Make sure small-baseline yml files exist paths = [("preprocessed/small-baseline/", "preprocessing-small-info.yml"), ("feature-files/small-baseline/", "feature-small-info.yml"), ("models/small-baseline/", "model-small-info.yml")] for dest, src in paths: raw_data_dst = os.path.join(cfg['root'], "%s/info.yml" % dest) if not os.path.isfile(raw_data_dst): raw_yml_pkg_src = os.path.join(raw_yml_path, src) shutil.copy(raw_yml_pkg_src, raw_data_dst) return cfg['root']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_template_folder(): """Get path to the folder where th HTML templates are."""
cfg = get_project_configuration()
# Lazily add a 'templates' entry (pointing at the packaged templates)
# the first time it is missing, and persist the whole config back to
# ~/.hwrtrc so later calls hit the cached value.
if 'templates' not in cfg:
    home = os.path.expanduser("~")
    rcfile = os.path.join(home, ".hwrtrc")
    cfg['templates'] = pkg_resources.resource_filename('hwrt',
                                                       'templates/')
    with open(rcfile, 'w') as f:
        yaml.dump(cfg, f, default_flow_style=False)
return cfg['templates']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_database_config_file(): """Get the absolute path to the database configuration file."""
cfg = get_project_configuration() if 'dbconfig' in cfg: if os.path.isfile(cfg['dbconfig']): return cfg['dbconfig'] else: logging.info("File '%s' was not found. Adjust 'dbconfig' in your " "~/.hwrtrc file.", cfg['dbconfig']) else: logging.info("No database connection file found. " "Specify 'dbconfig' in your ~/.hwrtrc file.") return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_database_configuration(): """Get database configuration as dictionary."""
db_config = get_database_config_file() if db_config is None: return None with open(db_config, 'r') as ymlfile: cfg = yaml.load(ymlfile) return cfg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def input_int_default(question="", default=0): """A function that works for both, Python 2.x and Python 3.x. It asks the user for input and returns it as a string. """
answer = input_string(question) if answer == "" or answer == "yes": return default else: return int(answer)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_run_logfile(folder): """Create a 'run.log' within folder. This file contains the time of the latest successful run. """
# Record the current UTC time as the last successful run.
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
log_path = os.path.join(folder, "run.log")
with open(log_path, "w") as f:
    f.write("timestamp: '%s'" % timestamp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def choose_raw_dataset(currently=""): """Let the user choose a raw dataset. Return the absolute path."""
folder = os.path.join(get_project_root(), "raw-datasets")
files = [os.path.join(folder, name) for name in os.listdir(folder)
         if name.endswith(".pickle")]
default = -1
# Print a numbered menu; the entry matching `currently` (by basename)
# becomes the default and is highlighted in bold (ANSI escape).
for i, filename in enumerate(files):
    if os.path.basename(currently) == os.path.basename(filename):
        default = i
    if i != default:
        print("[%i]\t%s" % (i, os.path.basename(filename)))
    else:
        print("\033[1m[%i]\033[0m\t%s" % (i, os.path.basename(filename)))
i = input_int_default("Choose a dataset by number: ", default)
return files[i]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_readable_time(t): """ Format the time to a readable format. Parameters t : int Time in ms Returns ------- string """
ms = t % 1000 t -= ms t /= 1000 s = t % 60 t -= s t /= 60 minutes = t % 60 t -= minutes t /= 60 if t != 0: return "%ih, %i minutes %is %ims" % (t, minutes, s, ms) elif minutes != 0: return "%i minutes %is %ims" % (minutes, s, ms) elif s != 0: return "%is %ims" % (s, ms) else: return "%ims" % ms
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default_model(): """Get a path for a default value for the model. Start searching in the current directory."""
project_root = get_project_root()
models_dir = os.path.join(project_root, "models")
curr_dir = os.getcwd()
# If the CWD is strictly inside the models directory, assume the user
# means the model they are currently in; otherwise pick the most
# recent model folder.
if os.path.commonprefix([models_dir, curr_dir]) == models_dir and \
   curr_dir != models_dir:
    latest_model = curr_dir
else:
    latest_model = get_latest_folder(models_dir)
return latest_model
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_adjusted_model_for_percentages(model_src, model_use): """Replace logreg layer by sigmoid to get probabilities."""
# Start from a byte copy of the source model, then overwrite it with a
# variant whose final logreg layer is swapped for sigmoid so the
# network emits per-class probabilities.
shutil.copyfile(model_src, model_use)
with open(model_src) as src_file:
    model_text = src_file.read()
with open(model_use, "w") as dst_file:
    dst_file.write(model_text.replace("logreg", "sigmoid"))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_hdf5(output_filename, feature_count, data): """ Create a HDF5 feature files. Parameters output_filename : string name of the HDF5 file that will be created feature_count : int dimension of all features combined data : list of tuples list of (x, y) tuples, where x is the feature vector of dimension ``feature_count`` and y is a label. """
import h5py
logging.info("Start creating of %s hdf file", output_filename)
# Split (features, label) pairs into parallel arrays, validating the
# feature dimension along the way.
xs, ys = [], []
for features, label in data:
    assert len(features) == feature_count, \
        "Expected %i features, got %i features" % \
        (feature_count, len(features))
    xs.append(features)
    ys.append(int(label))
# The File object is a context manager, so it is closed automatically.
with h5py.File(output_filename, 'w') as hdf_file:
    hdf_file.create_dataset("data", data=xs, dtype='float32')
    hdf_file.create_dataset("labels", data=ys, dtype='int32')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_model(model_file): """Load a model by its file. This includes the model itself, but also the preprocessing queue, the feature list and the output semantics. """
# Extract tar with tarfile.open(model_file) as tar: tarfolder = tempfile.mkdtemp() tar.extractall(path=tarfolder) from . import features from . import preprocessing # Get the preprocessing with open(os.path.join(tarfolder, "preprocessing.yml"), 'r') as ymlfile: preprocessing_description = yaml.load(ymlfile) preprocessing_queue = preprocessing.get_preprocessing_queue( preprocessing_description['queue']) # Get the features with open(os.path.join(tarfolder, "features.yml"), 'r') as ymlfile: feature_description = yaml.load(ymlfile) feature_str_list = feature_description['features'] feature_list = features.get_features(feature_str_list) # Get the model import nntoolkit.utils model = nntoolkit.utils.get_model(model_file) output_semantics_file = os.path.join(tarfolder, 'output_semantics.csv') output_semantics = nntoolkit.utils.get_outputs(output_semantics_file) # Cleanup shutil.rmtree(tarfolder) return (preprocessing_queue, feature_list, model, output_semantics)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_model_single_recording_preloaded(preprocessing_queue, feature_list, model, output_semantics, recording, recording_id=None): """ Evaluate a model for a single recording, after everything has been loaded. Parameters preprocessing_queue : list List of all preprocessing objects. feature_list : list List of all feature objects. model : dict Neural network model. output_semantics : list List that defines what an output means. recording : string in JSON format The handwritten recording in JSON format. recording_id : int or None For debugging purposes. """
# Pipeline: wrap the raw JSON -> preprocess -> extract feature vector
# -> run the network -> map outputs to semantics.
handwriting = handwritten_data.HandwrittenData(recording,
                                               raw_data_id=recording_id)
handwriting.preprocessing(preprocessing_queue)
x = handwriting.feature_extraction(feature_list)
import nntoolkit.evaluate
model_output = nntoolkit.evaluate.get_model_output(model, [x])
return nntoolkit.evaluate.get_results(model_output, output_semantics)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_model_single_recording_preloaded_multisymbol(preprocessing_queue, feature_list, model, output_semantics, recording): """ Evaluate a model for a single recording, after everything has been loaded. Multiple symbols are recognized. Parameters preprocessing_queue : list List of all preprocessing objects. feature_list : list List of all feature objects. model : dict Neural network model. output_semantics : List that defines what an output means. recording : The handwritten recording in JSON format. """
import json
import nntoolkit.evaluate
recording = json.loads(recording)
logging.info(("## start (%i strokes)" % len(recording)) + "#" * 80)
hypotheses = []
# [[{'score': 0.123, symbols: [123, 123]}]  # split0
#  []]  # Split i...
# For every way of segmenting the strokes, classify each segment and
# combine the per-segment candidates into full hypotheses.
for split in get_possible_splits(len(recording)):
    recording_segmented = segment_by_split(split, recording)
    cur_split_results = []
    for i, symbol in enumerate(recording_segmented):
        handwriting = handwritten_data.HandwrittenData(json.dumps(symbol))
        handwriting.preprocessing(preprocessing_queue)
        x = handwriting.feature_extraction(feature_list)
        model_output = nntoolkit.evaluate.get_model_output(model, [x])
        results = nntoolkit.evaluate.get_results(model_output,
                                                 output_semantics)
        results = results[:10]
        # Drop candidates below 1 % probability to bound the product.
        cur_split_results.append([el for el in results
                                  if el['probability'] >= 0.01])
        # serve.show_results(results, n=10)
    # Now that I have all symbols of this split, I have to get all
    # combinations of the hypothesis
    import itertools
    for hyp in itertools.product(*cur_split_results):
        hypotheses.append({'score': reduce(lambda x, y: x*y,
                                           [s['probability']
                                            for s in hyp])*len(hyp)/len(recording),
                           'symbols': [s['semantics'] for s in hyp],
                           'min_part': min([s['probability']
                                            for s in hyp]),
                           'segmentation': split})
hypotheses = sorted(hypotheses, key=lambda n: n['min_part'],
                    reverse=True)[:10]
for i, hyp in enumerate(hypotheses):
    if hyp['score'] > 0.001:
        logging.info("%0.4f: %s (seg: %s)", hyp['score'], hyp['symbols'],
                     hyp['segmentation'])
# NOTE(review): this returns the results of the *last* model_output
# evaluated above, ignoring the ranked hypotheses — looks unintended;
# confirm before relying on the return value.
return nntoolkit.evaluate.get_results(model_output, output_semantics)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_model_single_recording_multisymbol(model_file, recording): """ Evaluate a model for a single recording where possibly multiple symbols are. Parameters model_file : string Model file (.tar) recording : The handwritten recording. """
# Load the model bundle once, then delegate. NOTE(review): despite the
# name, this calls the *single*-symbol preloaded evaluator, not the
# multisymbol one — confirm that is intentional.
(preprocessing_queue, feature_list, model,
 output_semantics) = load_model(model_file)
logging.info("multiple symbol mode")
logging.info(recording)
results = evaluate_model_single_recording_preloaded(preprocessing_queue,
                                                    feature_list,
                                                    model,
                                                    output_semantics,
                                                    recording)
return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate_model(recording, model_folder, verbose=False): """Evaluate model for a single recording."""
from . import preprocess_dataset
from . import features
# Walk the recognizer pipeline folders in order (preprocessed ->
# feature-files -> model); each branch feeds state (handwriting, x,
# output_filename) into the next iteration.
for target_folder in get_recognizer_folders(model_folder):
    # The source is later than the target. That means we need to
    # refresh the target
    if "preprocessed" in target_folder:
        logging.info("Start applying preprocessing methods...")
        t = target_folder
        _, _, preprocessing_queue = preprocess_dataset.get_parameters(t)
        handwriting = handwritten_data.HandwrittenData(recording)
        if verbose:
            handwriting.show()
        handwriting.preprocessing(preprocessing_queue)
        if verbose:
            logging.debug("After preprocessing: %s",
                          handwriting.get_sorted_pointlist())
            handwriting.show()
    elif "feature-files" in target_folder:
        logging.info("Create feature file...")
        infofile_path = os.path.join(target_folder, "info.yml")
        with open(infofile_path, 'r') as ymlfile:
            feature_description = yaml.load(ymlfile)
        feature_str_list = feature_description['features']
        feature_list = features.get_features(feature_str_list)
        feature_count = sum(map(lambda n: n.get_dimension(),
                                feature_list))
        x = handwriting.feature_extraction(feature_list)
        # Create hdf5
        _, output_filename = tempfile.mkstemp(suffix='.hdf5', text=True)
        create_hdf5(output_filename, feature_count, [(x, 0)])
    elif "model" in target_folder:
        logfile, model_use = _evaluate_model_single_file(target_folder,
                                                         output_filename)
        return logfile
    else:
        logging.info("'%s' not found", target_folder)
# NOTE(review): these cleanup calls are only reached when no "model"
# folder was seen (the model branch returns early), so the temp hdf5
# and model_use files leak on the normal path — confirm and fix.
os.remove(output_filename)
os.remove(model_use)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_index2latex(model_description): """ Get a dictionary that maps indices to LaTeX commands. Parameters model_description : string A model description file that points to a feature folder where an `index2formula_id.csv` has to be. Returns ------- dictionary : Maps indices to LaTeX commands """
index2latex = {} translation_csv = os.path.join(get_project_root(), model_description["data-source"], "index2formula_id.csv") with open(translation_csv) as csvfile: csvreader = csv.DictReader(csvfile, delimiter=',', quotechar='"') for row in csvreader: index2latex[int(row['index'])] = row['latex'] return index2latex
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_online_symbol_data(database_id): """Get from the server."""
import pymysql
import pymysql.cursors
cfg = get_database_configuration()
mysql = cfg['mysql_online']
connection = pymysql.connect(host=mysql['host'],
                             user=mysql['user'],
                             passwd=mysql['passwd'],
                             db=mysql['db'],
                             cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
# NOTE(review): SQL is built via %-interpolation. %i forces an int so
# injection risk is limited, but a parameterized query
# (cursor.execute(sql, (database_id,))) would be safer.
sql = ("SELECT `id`, `formula_in_latex`, `unicode_dec`, `font`, "
       "`font_style` FROM `wm_formula` WHERE `id` =%i") % database_id
cursor.execute(sql)
datasets = cursor.fetchall()
# Exactly one row is expected for a valid id; anything else -> None.
if len(datasets) == 1:
    return datasets[0]
else:
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def classify_single_recording(raw_data_json, model_folder, verbose=False): """ Get the classification as a list of tuples. The first value is the LaTeX code, the second value is the probability. """
evaluation_file = evaluate_model(raw_data_json, model_folder, verbose) with open(os.path.join(model_folder, "info.yml")) as ymlfile: model_description = yaml.load(ymlfile) index2latex = get_index2latex(model_description) # Map line to probabilites for LaTeX commands with open(evaluation_file) as f: probabilities = f.read() probabilities = map(float, probabilities.split(" ")) results = [] for index, probability in enumerate(probabilities): results.append((index2latex[index], probability)) results = sorted(results, key=lambda n: n[1], reverse=True) return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_objectlist(description, config_key, module): """ Take a description and return a list of classes. Parameters description : list of dictionaries Each dictionary has only one entry. The key is the name of a class. The value of that entry is a list of dictionaries again. Those dictionaries are paramters. Returns ------- List of objects. """
object_list = [] for feature in description: for feat, params in feature.items(): feat = get_class(feat, config_key, module) if params is None: object_list.append(feat()) else: parameters = {} for dicts in params: for param_name, param_value in dicts.items(): parameters[param_name] = param_value object_list.append(feat(**parameters)) # pylint: disable=W0142 return object_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_class(name, config_key, module): """Get the class by its name as a string."""
# First look for the class among the built-in module's members ...
clsmembers = inspect.getmembers(module, inspect.isclass)
for string_name, act_class in clsmembers:
    if string_name == name:
        return act_class
# Check if the user has specified a plugin and if the class is in there
cfg = get_project_configuration()
if config_key in cfg:
    modname = os.path.splitext(os.path.basename(cfg[config_key]))[0]
    if os.path.isfile(cfg[config_key]):
        # imp.load_source executes the user's plugin file.
        usermodule = imp.load_source(modname, cfg[config_key])
        clsmembers = inspect.getmembers(usermodule, inspect.isclass)
        for string_name, act_class in clsmembers:
            if string_name == name:
                return act_class
    else:
        logging.warning("File '%s' does not exist. Adjust ~/.hwrtrc.",
                        cfg['data_analyzation_plugins'])
# Unknown everywhere -> None; callers must handle this.
logging.debug("Unknown class '%s'.", name)
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mysql_cfg(): """ Get the appropriate MySQL configuration """
environment = get_project_configuration()['environment'] cfg = get_database_configuration() if environment == 'production': mysql = cfg['mysql_online'] else: mysql = cfg['mysql_dev'] return mysql
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def softmax(w, t=1.0): """Calculate the softmax of a list of numbers w. Parameters w : list of numbers Returns ------- a list of the same length as w of non-negative numbers Examples -------- array([ 0.47502081, 0.52497919]) array([ 0.42555748, 0.57444252]) array([ 9.99981542e-01, 1.84578933e-05]) array([ 4.53978687e-05, 9.99954602e-01]) """
# Work in Decimal for extra precision on extreme inputs.
# NOTE(review): numpy.exp on an object array presumably dispatches to
# Decimal.exp() element-wise — confirm on the numpy version in use.
w = [Decimal(el) for el in w]
e = numpy.exp(numpy.array(w) / Decimal(t))
dist = e / numpy.sum(e)  # normalize so the entries sum to 1
return dist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_beam_cache_directory(): """ Get a directory where pickled Beam Data can be stored. Create that directory, if it doesn't exist. Returns ------- str Path to the directory """
home = os.path.expanduser("~") cache_dir = os.path.join(home, '.hwrt-beam-cache') if not os.path.exists(cache_dir): os.makedirs(cache_dir) return cache_dir
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_beam(secret_uuid): """ Get a beam from the session with `secret_uuid`. Parameters secret_uuid : str Returns ------- The beam object if it exists, otherwise `None`. """
beam_dir = get_beam_cache_directory() beam_filename = os.path.join(beam_dir, secret_uuid) if os.path.isfile(beam_filename): with open(beam_filename, 'rb') as handle: beam = pickle.load(handle) return beam else: return None
def is_valid_uuid(uuid_to_test, version=4):
    """Check whether ``uuid_to_test`` is a valid UUID string.

    Parameters
    ----------
    uuid_to_test : str
    version : {1, 2, 3, 4}

    Returns
    -------
    bool
        True iff ``uuid_to_test`` parses as a UUID and round-trips to
        exactly the same string.
    """
    try:
        parsed = UUID(uuid_to_test, version=version)
    except ValueError:
        return False
    # Reject strings that parse but are not in canonical form.
    return str(parsed) == uuid_to_test
def prepare_table(table):
    """Make ``table`` 'symmetric': zero the diagonal and fill the lower-left
    triangle with the complementary (reverse) probability of the upper-right
    one.
    """
    size = len(table)
    for row_idx in range(size):
        # Every row must be as long as the table is tall (square matrix).
        assert len(table[row_idx]) == size
        for col_idx in range(size):
            if row_idx == col_idx:
                table[row_idx][col_idx] = 0.0
            elif row_idx > col_idx:
                table[row_idx][col_idx] = 1 - table[col_idx][row_idx]
    return table
def neclusters(l, K):
    """Yield the partitions of list ``l`` into ``K`` parts, skipping any
    partition that contains an empty part.

    >>> list(neclusters([0, 1, 2], 2))
    [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]]]
    """
    for candidate in clusters(l, K):
        if all(candidate):
            yield candidate
def all_segmentations(l):
    """Yield every segmentation of the list ``l``.

    The number of segmentations grows like the Bell numbers
    (https://oeis.org/A000110); for ``len(l) == 14`` it is 190,899,322.
    """
    for part_count in range(1, len(l) + 1):
        for segmentation in neclusters(l, part_count):
            yield segmentation
def q(segmentation, s1, s2):
    """Return True iff strokes ``s1`` and ``s2`` belong to the same symbol
    of ``segmentation``.
    """
    return find_index(segmentation, s1) == find_index(segmentation, s2)
def score_segmentation(segmentation, table):
    """Return the probability score of ``segmentation`` given the pairwise
    stroke-probability ``table``.
    """
    stroke_count = sum(len(symbol) for symbol in segmentation)
    score = 1
    # Multiply, for every stroke pair, the probability that the pair is
    # together (upper triangle) or apart (lower triangle).
    for first in range(stroke_count):
        for second in range(first + 1, stroke_count):
            if q(segmentation, first, second):
                score *= table[first][second]
            else:
                score *= table[second][first]
    return score
def push(self, element, value):
    """Insert ``element`` with its ``value``, keeping only the best
    ``self.n`` entries.

    Entries are kept sorted by value (descending, or ascending when
    ``self.find_min`` is set); entries beyond ``self.n`` are dropped.
    """
    position = 0
    for index, entry in enumerate(self.tops):
        stored_value = entry[1]
        if self.find_min:
            worse_or_equal = stored_value <= value
        else:
            worse_or_equal = stored_value >= value
        if worse_or_equal:
            position = index + 1
    self.tops.insert(position, [element, value])
    self.tops = self.tops[:self.n]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _array2cstr(arr): """ Serializes a numpy array to a compressed base64 string """
out = StringIO() np.save(out, arr) return b64encode(out.getvalue())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _str2array(d): """ Reconstructs a numpy array from a plain-text string """
if type(d) == list: return np.asarray([_str2array(s) for s in d]) ins = StringIO(d) return np.loadtxt(ins)
def create_output_semantics(model_folder, outputs):
    """Create an 'output_semantics.csv' file which contains information
    what the output of the single output neurons mean.

    Parameters
    ----------
    model_folder : str
        Folder where the model description file ('info.yml') is.
    outputs : int
        Number of output neurons.
    """
    # NOTE(review): 'wb' is a Python 2 idiom; under Python 3 the csv module
    # needs text mode ('w', newline='') -- confirm the targeted version.
    with open('output_semantics.csv', 'wb') as csvfile:
        model_description_file = os.path.join(model_folder, "info.yml")
        with open(model_description_file, 'r') as ymlfile:
            model_description = yaml.load(ymlfile)
        logging.info("Start fetching translation dict...")
        # Maps output neuron index -> data describing the symbol.
        translation_dict = utils.get_index2data(model_description)
        spamwriter = csv.writer(csvfile, delimiter=';',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for output_index in range(outputs):
            if output_index in translation_dict:
                # Add more information:
                # 1. ID in my system
                # 2. latex
                # 3. unicode code point
                # 4. font
                # 5. font style
                spamwriter.writerow(translation_dict[output_index])
            else:
                # No semantics known for this neuron; write a placeholder row.
                print("No data for %i." % output_index)
                spamwriter.writerow(["output %i" % output_index])
def elementtree_to_dict(element):
    """Convert an xml ElementTree node to a dictionary.

    The node's text is stored under 'text', its XML attributes become keys,
    and children are converted recursively (repeated tags become lists).
    """
    result = {}
    if getattr(element, 'text', None) is not None:
        result['text'] = element.text
    result.update(element.items())  # the element's XML attributes
    for child in list(element):
        child_dict = elementtree_to_dict(child)
        if child.tag not in result:
            result[child.tag] = child_dict
        elif isinstance(result[child.tag], list):
            result[child.tag].append(child_dict)
        else:
            # Second occurrence of this tag: promote the entry to a list.
            result[child.tag] = [result[child.tag], child_dict]
    return result
def strip_end(text, suffix):
    """Return ``text`` with ``suffix`` removed from its end, if present."""
    if text.endswith(suffix):
        # len-based slice instead of a negative index so an empty suffix
        # is handled correctly.
        return text[:len(text) - len(suffix)]
    return text
def formula_to_dbid(formula_str, backslash_fix=False):
    """Convert a LaTeX formula to the database index.

    Parameters
    ----------
    formula_str : string
        The formula as LaTeX code.
    backslash_fix : boolean
        If this is set to true, then it will be checked if the same formula
        exists with a preceeding backslash.

    Returns
    -------
    int :
        The database index.
    """
    global __formula_to_dbid_cache
    if __formula_to_dbid_cache is None:
        # First call: build the latex -> id cache from the database.
        mysql = utils.get_mysql_cfg()
        connection = pymysql.connect(host=mysql['host'],
                                     user=mysql['user'],
                                     passwd=mysql['passwd'],
                                     db=mysql['db'],
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor)
        cursor = connection.cursor()
        # Get all formulas that should get examined
        sql = ("SELECT `id`, `formula_in_latex` FROM `wm_formula` ")
        cursor.execute(sql)
        formulas = cursor.fetchall()
        __formula_to_dbid_cache = {}
        for fm in formulas:
            __formula_to_dbid_cache[fm['formula_in_latex']] = fm['id']
    if formula_str in __formula_to_dbid_cache:
        return __formula_to_dbid_cache[formula_str]
    elif backslash_fix and ('\\%s' % formula_str) in __formula_to_dbid_cache:
        # Retry with a leading backslash (e.g. 'alpha' -> '\alpha').
        return __formula_to_dbid_cache['\\%s' % formula_str]
    else:
        # Unknown formula: insert it into the database and cache its new id.
        logging.info("Symbol '%s' was not found. Add it to write-math.com.",
                     formula_str)
        mysql = utils.get_mysql_cfg()
        connection = pymysql.connect(host=mysql['host'],
                                     user=mysql['user'],
                                     passwd=mysql['passwd'],
                                     db=mysql['db'],
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor)
        cursor = connection.cursor()
        sql = ("INSERT INTO `wm_formula` (`user_id`, `formula_name`, "
               "`formula_in_latex`, "
               "`mode`, `package`) VALUES ("
               "'10', %s, %s, 'bothmodes', NULL);")
        if len(formula_str) < 20:
            logging.info("Insert formula %s.", formula_str)
        cursor.execute(sql, (formula_str, formula_str))
        connection.commit()
        # insert_id() is the auto-increment id of the row just inserted.
        __formula_to_dbid_cache[formula_str] = connection.insert_id()
        return __formula_to_dbid_cache[formula_str]
def insert_recording(hw):
    """Insert recording ``hw`` into the `wm_raw_draw_data` table and link
    its per-symbol segmentation via `insert_symbol_mapping`.

    Parameters
    ----------
    hw : object
        A handwriting recording with at least ``user_id``,
        ``raw_data_json``, ``symbol_stream`` and ``segmentation``.
    """
    mysql = utils.get_mysql_cfg()
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    try:
        cursor = connection.cursor()
        sql = ("INSERT INTO `wm_raw_draw_data` ("
               "`user_id`, "
               "`data`, "
               "`md5data`, "
               "`creation_date`, "
               "`device_type`, "
               "`accepted_formula_id`, "
               "`secret`, "
               "`ip`, "
               "`segmentation`, "
               "`internal_id`, "
               "`description` "
               ") VALUES (%s, %s, MD5(data), "
               "%s, %s, %s, %s, %s, %s, %s, %s);")
        # Optional attributes fall back to neutral defaults via getattr.
        data = (hw.user_id,
                hw.raw_data_json,
                getattr(hw, 'creation_date', None),
                getattr(hw, 'device_type', ''),
                getattr(hw, 'formula_id', None),
                getattr(hw, 'secret', ''),
                getattr(hw, 'ip', None),
                str(getattr(hw, 'segmentation', '')),
                getattr(hw, 'internal_id', ''),
                getattr(hw, 'description', ''))
        cursor.execute(sql, data)
        connection.commit()
        # Link each recognized symbol to the strokes that form it.
        for symbol_id, strokes in zip(hw.symbol_stream, hw.segmentation):
            insert_symbol_mapping(cursor.lastrowid,
                                  symbol_id,
                                  hw.user_id,
                                  strokes)
        logging.info("Insert raw data.")
    except pymysql.err.IntegrityError as e:
        # Presumably a duplicate recording (same md5) -- TODO confirm the
        # unique constraint this relies on.
        print("Error: {} (can probably be ignored)".format(e))
def insert_symbol_mapping(raw_data_id, symbol_id, user_id, strokes):
    """Insert one symbol-to-strokes mapping into `wm_partial_answer`.

    Parameters
    ----------
    raw_data_id : int
    symbol_id : int
    user_id : int
    strokes : list of int
        Indices of the strokes that form this symbol.
    """
    mysql = utils.get_mysql_cfg()
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor()
    sql = ("INSERT INTO `wm_partial_answer` "
           "(`recording_id`, `symbol_id`, `strokes`, `user_id`, "
           "`is_accepted`) "
           "VALUES (%s, %s, %s, %s, 1);")
    # Strokes are stored as a comma-separated list of stroke indices.
    data = (raw_data_id,
            symbol_id,
            ",".join([str(stroke) for stroke in strokes]),
            user_id)
    cursor.execute(sql, data)
    connection.commit()
def filter_label(label, replace_by_similar=True):
    """Replace labels that clash with LaTeX naming by simple strings."""
    bad_names = ['celsius', 'degree', 'ohm', 'venus', 'mars', 'astrosun',
                 'fullmoon', 'leftmoon', 'female', 'male', 'checked',
                 'diameter', 'sun', 'Bowtie', 'sqrt',
                 'cong', 'copyright', 'dag', 'parr', 'notin', 'dotsc',
                 'mathds', 'mathfrak']
    if not any(label[1:].startswith(name) for name in bad_names):
        return label
    # A few clashing commands have a well-known LaTeX replacement.
    if replace_by_similar and label == '\\dag':
        return '\\dagger'
    if replace_by_similar and label == '\\diameter':
        return '\\O'
    # Otherwise just drop the leading backslash.
    return label[1:]
def analyze_feature(raw_datasets, feature, basename="aspect_ratios"):
    """Apply ``feature`` to all recordings in ``raw_datasets``.

    Store the results in two files: ``basename + '.raw'`` keeps every
    single value, ``basename + '.csv'`` aggregates mean and standard
    deviation per symbol.

    Parameters
    ----------
    raw_datasets : List of dictionaries
        Each dictionary is a raw_dataset.
    feature : An instance of the feature class type
        The `feature` which gets analyzed on `raw_datasets`.
    basename : string
        Name for the file in which the data gets written.
    """
    # Prepare files
    csv_file = dam.prepare_file(basename + '.csv')
    raw_file = dam.prepare_file(basename + '.raw')
    csv_file = open(csv_file, 'a')
    raw_file = open(raw_file, 'a')
    csv_file.write("label,mean,std\n")  # Write header
    raw_file.write("latex,raw_data_id,value\n")  # Write header
    print_data = []
    for _, datasets in dam.sort_by_formula_id(raw_datasets).items():
        values = []
        for data in datasets:
            # feature(...) returns a list; only its first value is analyzed.
            value = feature(data)[0]
            values.append(value)
            raw_file.write("%s,%i,%0.2f\n" % (datasets[0].formula_in_latex,
                                              data.raw_data_id,
                                              value))
        label = filter_label(datasets[0].formula_in_latex)
        print_data.append((label, numpy.mean(values), numpy.std(values)))
    # Sort the data by highest mean, descending
    print_data = sorted(print_data, key=lambda n: n[1], reverse=True)
    # Write data to file
    for label, mean, std in print_data:
        csv_file.write("%s,%0.2f,%0.2f\n" % (label, mean, std))
    csv_file.close()
    # NOTE(review): raw_file is never closed here -- verify whether that is
    # intentional (the handle leaks until garbage collection).
def main(handwriting_datasets_file, analyze_features):
    """Start the creation of the wanted metric.

    Parameters
    ----------
    handwriting_datasets_file : str
        Path to a pickle file with a 'handwriting_datasets' key.
    analyze_features : bool
        If True, run the predefined list of feature analyses.
    """
    # Load from pickled file
    logging.info("Start loading data '%s' ...", handwriting_datasets_file)
    # NOTE(review): open() without 'rb' works for pickle only on Python 2
    # -- confirm the targeted Python version.
    loaded = pickle.load(open(handwriting_datasets_file))
    raw_datasets = loaded['handwriting_datasets']
    logging.info("%i datasets loaded.", len(raw_datasets))
    logging.info("Start analyzing...")
    if analyze_features:
        # (feature instance, output file name) pairs to analyze.
        featurelist = [(features.AspectRatio(), "aspect_ratio.csv"),
                       (features.ReCurvature(1), "re_curvature.csv"),
                       (features.Height(), "height.csv"),
                       (features.Width(), "width.csv"),
                       (features.Time(), "time.csv"),
                       (features.Ink(), "ink.csv"),
                       (features.StrokeCount(), "stroke-count.csv")]
        for feat, filename in featurelist:
            logging.info("create %s...", filename)
            analyze_feature(raw_datasets, feat, filename)
    # Analyze everything specified in configuration
    cfg = utils.get_project_configuration()
    if 'data_analyzation_queue' in cfg:
        metrics = dam.get_metrics(cfg['data_analyzation_queue'])
        for metric in metrics:
            logging.info("Start metric %s...", str(metric))
            metric(raw_datasets)
    else:
        logging.info("No 'data_analyzation_queue' in ~/.hwrtrc")
def remove_matching_braces(latex):
    """If ``latex`` is surrounded by one pair of matching braces, remove
    them.  They are not necessary.

    Parameters
    ----------
    latex : string

    Returns
    -------
    string

    Examples
    --------
    >>> remove_matching_braces('{2+2}')
    '2+2'
    >>> remove_matching_braces('{2+2')
    '{2+2'
    """
    if not (latex.startswith('{') and latex.endswith('}')):
        return latex
    depth = 1
    for char in latex[1:-1]:
        if char == '{':
            depth += 1
        elif char == '}':
            depth -= 1
        if depth == 0:
            # The opening brace closed before the end: braces don't match.
            return latex
    return latex[1:-1]
def read_folder(folder):
    """Parse every ``.ink`` file in ``folder``.

    Parameters
    ----------
    folder : string
        Path to a folder.

    Returns
    -------
    list :
        One parsed recording per ``.ink`` file in the given folder.
    """
    pattern = os.path.join(folder, '*.ink')
    return [parse_scg_ink_file(filename) for filename in glob.glob(pattern)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_colors(segmentation): """Get a list of colors which is as long as the segmentation. Parameters segmentation : list of lists Returns ------- list A list of colors. """
symbol_count = len(segmentation) num_colors = symbol_count # See http://stackoverflow.com/a/20298116/562769 color_array = [ "#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059", "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87", "#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80", "#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100", "#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F", "#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09", "#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66", "#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C", "#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81", "#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00", "#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700", "#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329", "#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C", "#83AB58", "#001C1E", "#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800", "#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4", "#1E0200", "#5B4E51", "#C895C5", "#320033", "#FF6832", "#66E1D3", "#CFCDAC", "#D0AC94", "#7ED379", "#012C58"] # Apply a little trick to make sure we have enough colors, no matter # how many symbols are in one recording. # This simply appends the color array as long as necessary to get enough # colors new_array = color_array[:] while len(new_array) <= num_colors: new_array += color_array return new_array[:num_colors]
def fix_times(self):
    """Repair broken time stamps in the recording.

    Some recordings have missing (``None``) times.  Replace each missing
    time with the latest valid time seen so far (the smallest non-negative
    valid time for leading gaps) so nothing after loading a handwritten
    recording breaks.
    """
    pointlist = self.get_pointlist()
    # Only consider real time stamps; on Python 3, min() over a list that
    # contains None raises a TypeError.
    times = [point['time']
             for stroke in pointlist
             for point in stroke
             if point['time'] is not None]
    times_min = max(min(times), 0) if times else 0  # never None or negative
    for i, stroke in enumerate(pointlist):
        for j, point in enumerate(stroke):
            if point['time'] is None:
                pointlist[i][j]['time'] = times_min
            else:
                # Remember the latest valid time for subsequent gaps.
                times_min = point['time']
    self.raw_data_json = json.dumps(pointlist)
def get_pointlist(self):
    """Deserialize the raw JSON recording into a list of strokes.

    Returns
    -------
    list :
        A list of strokes.  Each stroke is a list of dictionaries
        {'x': 123, 'y': 42, 'time': 1337}.
    """
    try:
        pointlist = json.loads(self.raw_data_json)
    except Exception:
        # Log the offending payload before propagating the parse error.
        logging.debug("pointStrokeList: strokelistP")
        logging.debug(self.raw_data_json)
        logging.debug("didn't work")
        raise
    if not pointlist:
        logging.warning("Pointlist was empty. Search for '" +
                        self.raw_data_json + "' in `wm_raw_draw_data`.")
    return pointlist
def get_sorted_pointlist(self):
    """Return the recording with points ordered by time within each stroke
    and strokes ordered by the time of their first point.

    Returns
    -------
    list
        A list of all strokes in the recording.  Each stroke is a list of
        dicts {'time': 123, 'x': 45, 'y': 67}.
    """
    strokes = [sorted(stroke, key=lambda point: point['time'])
               for stroke in self.get_pointlist()]
    strokes.sort(key=lambda stroke: stroke[0]['time'])
    return strokes
def set_pointlist(self, pointlist):
    """Overwrite the stored recording.

    Parameters
    ----------
    pointlist : a list of strokes; each stroke is a list of points
        Every point is a dictionary with 'x', 'y', 'time'.
    """
    # Exactly a list (not a subclass) with at least one stroke is required.
    assert type(pointlist) is list, \
        "pointlist is not of type list, but %r" % type(pointlist)
    assert len(pointlist) >= 1, \
        "The pointlist of formula_id %i is %s" % (self.formula_id,
                                                  self.get_pointlist())
    self.raw_data_json = json.dumps(pointlist)
def get_bounding_box(self):
    """Return the bounding box of the recording as a dict with the keys
    minx/maxx, miny/maxy and mint/maxt.
    """
    pointlist = self.get_pointlist()
    # Seed the bounds with the very first point of the recording.
    first = pointlist[0][0]
    bounds = {"minx": first["x"], "maxx": first["x"],
              "miny": first["y"], "maxy": first["y"],
              "mint": first["time"], "maxt": first["time"]}
    for stroke in pointlist:
        for point in stroke:
            bounds["minx"] = min(bounds["minx"], point["x"])
            bounds["maxx"] = max(bounds["maxx"], point["x"])
            bounds["miny"] = min(bounds["miny"], point["y"])
            bounds["maxy"] = max(bounds["maxy"], point["y"])
            bounds["mint"] = min(bounds["mint"], point["time"])
            bounds["maxt"] = max(bounds["maxt"], point["time"])
    return bounds
def get_bitmap(self, time=None, size=32, store_path=None):
    """Get a bitmap of the object at a given instance of time.

    If time is `None`, then the bitmap is generated for the last point
    in time.

    Parameters
    ----------
    time : int or None
        Only segments whose points were recorded up to ``time`` are drawn.
    size : int
        Size in pixels. The resulting bitmap will be (size x size).
    store_path : None or str
        If this is set, then the image will be saved there.

    Returns
    -------
    numpy array :
        Greyscale png image
    """
    # bitmap_width = int(self.get_width()*size) + 2
    # bitmap_height = int(self.get_height()*size) + 2
    img = Image.new('L', (size, size), 'black')
    draw = ImageDraw.Draw(img, 'L')
    bb = self.get_bounding_box()
    for stroke in self.get_sorted_pointlist():
        # Draw the line segments between consecutive points of the stroke.
        for p1, p2 in zip(stroke, stroke[1:]):
            # Skip segments that were drawn after the requested instant.
            if time is not None and \
               (p1['time'] > time or p2['time'] > time):
                continue
            # Scale recording coordinates into pixel space; max(..., 1)
            # guards against division by zero for degenerate recordings.
            y_from = int((-bb['miny'] + p1['y']) /
                         max(self.get_height(), 1)*size)
            x_from = int((-bb['minx'] + p1['x']) /
                         max(self.get_width(), 1)*size)
            y_to = int((-bb['miny'] + p2['y']) /
                       max(self.get_height(), 1)*size)
            x_to = int((-bb['minx'] + p2['x']) /
                       max(self.get_width(), 1)*size)
            draw.line([x_from, y_from, x_to, y_to],
                      fill='#ffffff',
                      width=1)
    del draw
    if store_path is not None:
        img.save(store_path)
    return numpy.asarray(img)
def preprocessing(self, algorithms):
    """Apply preprocessing algorithms to this recording.

    Parameters
    ----------
    algorithms : a list of objects
        Preprocessing algorithms which get applied in order; each one is
        called with this recording and may modify it in place.
    """
    assert type(algorithms) is list
    for step in algorithms:
        step(self)
def feature_extraction(self, algorithms):
    """Run every feature algorithm on this recording and concatenate the
    resulting feature values into one flat list.

    Every algorithm has to return its features as a list whose length
    matches its declared dimension.
    """
    assert type(algorithms) is list
    collected = []
    for algorithm in algorithms:
        extracted = algorithm(self)
        # Guard against algorithms whose output size disagrees with their
        # declared dimension.
        assert len(extracted) == algorithm.get_dimension(), \
            "Expected %i features from algorithm %s, got %i features" % \
            (algorithm.get_dimension(), str(algorithm), len(extracted))
        collected += extracted
    return collected
def show(self):
    """Show the data graphically in a new pop-up window."""
    # prevent the following error:
    # '_tkinter.TclError: no display name and no $DISPLAY environment
    # variable'
    # import matplotlib
    # matplotlib.use('GTK3Agg', warn=False)
    import matplotlib.pyplot as plt
    pointlist = self.get_pointlist()
    if 'pen_down' in pointlist[0][0]:
        assert len(pointlist) > 1, \
            "Lenght of pointlist was %i. Got: %s" % (len(pointlist),
                                                     pointlist)
        # Create a new pointlist that models pen-down strokes and pen
        # up strokes
        new_pointlist = []
        last_pendown_state = None
        stroke = []
        for point in pointlist[0]:
            if last_pendown_state is None:
                last_pendown_state = point['pen_down']
            # A change of the pen state starts a new stroke.
            if point['pen_down'] != last_pendown_state:
                new_pointlist.append(stroke)
                last_pendown_state = point['pen_down']
                stroke = []
            else:
                stroke.append(point)
        new_pointlist.append(stroke)  # add the last stroke
        pointlist = new_pointlist
    _, ax = plt.subplots()
    ax.set_title("Raw data id: %s, "
                 "Formula_id: %s" % (str(self.raw_data_id),
                                     str(self.formula_id)))
    colors = _get_colors(self.segmentation)
    # Draw each symbol's strokes in that symbol's color.
    for symbols, color in zip(self.segmentation, colors):
        for stroke_index in symbols:
            stroke = pointlist[stroke_index]
            xs, ys = [], []
            for p in stroke:
                xs.append(p['x'])
                ys.append(p['y'])
            # Pen-up movement is drawn with 'x' markers, pen-down with 'o'.
            if "pen_down" in stroke[0] and stroke[0]["pen_down"] is False:
                plt.plot(xs, ys, '-x', color=color)
            else:
                plt.plot(xs, ys, '-o', color=color)
    # Screen coordinates grow downwards, so flip the y axis.
    plt.gca().invert_yaxis()
    ax.set_aspect('equal')
    plt.show()
def count_single_dots(self):
    """Count all strokes of this recording that consist of a single dot."""
    return sum(1 for stroke in self.get_pointlist() if len(stroke) == 1)
def to_single_symbol_list(self):
    """Split this HandwrittenData object into a list of HandwrittenData
    objects, one per symbol of the segmentation.

    Returns
    -------
    list of HandwrittenData objects
    """
    # Older recordings may lack a symbol stream; use None labels then.
    symbol_stream = getattr(self, 'symbol_stream',
                            [None for symbol in self.segmentation])
    pointlist = self.get_sorted_pointlist()
    single_symbols = []
    for stroke_indices, label in zip(self.segmentation, symbol_stream):
        strokes = [pointlist[stroke_index]
                   for stroke_index in stroke_indices]
        single_symbols.append(HandwrittenData(json.dumps(strokes),
                                              formula_id=label))
    return single_symbols
def get_git_postversion(addon_dir):
    """ return the addon version number, with a developmental version
    increment if there were git commits in the addon_dir after the last
    version change.

    If the last change to the addon correspond to the version number in the
    manifest it is used as is for the python package version. Otherwise a
    counter is incremented for each commit and resulting version number has
    the following form: [8|9].0.x.y.z.1devN, N being the number of git
    commits since the version change.

    Note: we use .99.devN because:
    * pip ignores .postN by design (https://github.com/pypa/pip/issues/2872)
    * x.y.z.devN is anterior to x.y.z

    Note: we don't put the sha1 of the commit in the version number because
    this is not PEP 440 compliant and is therefore misinterpreted by pip.
    """
    addon_dir = os.path.realpath(addon_dir)
    last_version = read_manifest(addon_dir).get('version', '0.0.0')
    last_version_parsed = parse_version(last_version)
    if not is_git_controlled(addon_dir):
        return last_version
    # Uncommitted changes count as one pending commit.
    if get_git_uncommitted(addon_dir):
        uncommitted = True
        count = 1
    else:
        uncommitted = False
        count = 0
    last_sha = None
    git_root = get_git_root(addon_dir)
    # Walk the history (newest first) until the manifest's version differs
    # from the current one, counting the commits made since that change.
    for sha in git_log_iterator(addon_dir):
        try:
            manifest = read_manifest_from_sha(sha, addon_dir, git_root)
        except NoManifestFound:
            break
        version = manifest.get('version', '0.0.0')
        version_parsed = parse_version(version)
        if version_parsed != last_version_parsed:
            break
        if last_sha is None:
            # Newest commit with the current version: not counted itself.
            last_sha = sha
        else:
            count += 1
    if not count:
        return last_version
    if last_sha:
        return last_version + ".99.dev%s" % count
    if uncommitted:
        return last_version + ".dev1"
    # if everything is committed, the last commit
    # must have the same version as current,
    # so last_sha must be set and we'll never reach this branch
    return last_version
def _get_odoo_version_info(addons_dir, odoo_version_override=None):
    """Detect the Odoo version from an addons directory.

    Raises
    ------
    DistutilsSetupError
        If the installable addons disagree about the Odoo version.
    """
    odoo_version_info = None
    for addon in os.listdir(addons_dir):
        addon_dir = os.path.join(addons_dir, addon)
        if not is_installable_addon(addon_dir):
            continue
        manifest = read_manifest(addon_dir)
        _, _, addon_odoo_version_info = _get_version(
            addon_dir, manifest, odoo_version_override,
            git_post_version=False)
        # All installable addons must agree on one Odoo version.
        if odoo_version_info is not None and \
                odoo_version_info != addon_odoo_version_info:
            raise DistutilsSetupError("Not all addons are for the same "
                                      "odoo version in %s (error detected "
                                      "in %s)" % (addons_dir, addon))
        odoo_version_info = addon_odoo_version_info
    return odoo_version_info