code stringlengths 17 6.64M |
|---|
def get_plot_params(plot_params, show_score_diffs, diff):
    """
    Merge user-supplied plotting parameters onto the library defaults.

    Parameters
    ----------
    plot_params: dict
        User overrides; any key present here replaces the default wholesale
    show_score_diffs: bool
        Default for whether score-difference components are shown
    diff: float
        Total shift score difference (accepted for interface compatibility;
        not used when building the defaults here)

    Returns
    -------
    dict of all plotting parameters, defaults overridden by `plot_params`
    """
    default_names = ['Text 1', 'Text 2']
    defaults = {
        'all_pos_contributions': False,
        'alpha_fade': 0.35,
        'bar_linewidth': 0.25,
        'bar_type_space_scaling': 0.015,
        'bar_width': 0.8,
        'cumulative_xlabel': None,
        'cumulative_xticklabels': None,
        'cumulative_xticks': None,
        'cumulative_ylabel': None,
        'detailed': True,
        'dpi': 200,
        'every_nth_ytick': 5,
        'height': 15,
        'invisible_spines': [],
        'label_fontsize': 13,
        'missing_symbol': '*',
        'pos_cumulative_inset': [0.19, 0.12, 0.175, 0.175],
        'pos_text_size_inset': [0.81, 0.12, 0.08, 0.08],
        'remove_xticks': False,
        'remove_yticks': False,
        'score_colors': {
            'all_pos_neg': '#9E75B7',
            'all_pos_pos': '#FECC5D',
            'neg_s': '#9E75B7',
            'neg_s_neg_p': '#C4CAFC',
            'neg_s_pos_p': '#2F7CCE',
            'neg_total': '#9E75B7',
            'pos_s': '#FECC5D',
            'pos_s_neg_p': '#FDFFD2',
            'pos_s_pos_p': '#FFFF80',
            'pos_total': '#FECC5D',
            'total': '#707070',
        },
        'serif': False,
        'show_score_diffs': show_score_diffs,
        'show_total': True,
        'system_names': default_names,
        'tick_format': '{:.1f}',
        'tight': True,
        'title_fontsize': 18,
        'width': 7,
        'width_scaling': 1.2,
        'xlabel': 'Score shift $\\delta \\Phi_{\\tau}$ (%)',
        'xlabel_fontsize': 20,
        'xtick_fontsize': 14,
        'y_margin': 0.005,
        'ylabel': 'Rank',
        'ylabel_fontsize': 20,
        'ytick_fontsize': 14,
    }
    # Symbols are derived from the *default* system names; a user-supplied
    # 'symbols' entry in plot_params replaces this dict entirely
    defaults['symbols'] = {
        'all_pos_neg': default_names[0],
        'all_pos_pos': default_names[1],
        'neg_s': u'▽',
        'neg_s_neg_p': u'-↓',
        'neg_s_pos_p': u'-↑',
        'neg_total': '',
        'pos_s': u'△',
        'pos_s_neg_p': u'+↓',
        'pos_s_pos_p': u'+↑',
        'pos_total': '',
        'total': '$\\Sigma$',
    }
    return {**defaults, **plot_params}
|
def set_serif():
    """Switch Matplotlib to serif fonts (including math text) globally."""
    serif_settings = {'font.family': 'serif', 'mathtext.fontset': 'dejavuserif'}
    for option, value in serif_settings.items():
        rcParams[option] = value
|
def get_bar_dims(type_scores, norm, plot_params):
    """
    Gets the height and location of every bar needed to plot each type's
    contribution.

    Parameters
    ----------
    type_scores: list of tuples
        Tuples of the form (type, p_diff, s_diff, p_avg, s_ref_diff, shift_score)
        for every type scored in the two systems, i.e. the detailed output of
        a Shift object's `get_shift_scores`
    norm: float
        The factor by which to normalize all the component scores
    plot_params: dict
        Dictionary of plotting parameters. Here, `all_pos_contributions` is used

    Returns
    -------
    dict with nine keys: `p_solid_heights`, `s_solid_bases`, `s_solid_heights`,
    `p_fade_heights`, `p_fade_bases`, `s_fade_bases`, `s_fade_heights`,
    `total_heights`, `label_heights`; values are per-type lists of bar
    dimensions. 'p' refers to the p_diff component, 's' to the s_diff
    component; 'solid' is the non-alpha-faded part; 'base' is the bottom of a
    bar and 'height' is relative to it. The p_solid base is always 0 so it is
    omitted. `total_heights` is the overall contribution used by simple (not
    detailed) shift graphs; `label_heights` is where labels go after
    accounting for counteracting components.
    """
    dim_keys = ('p_solid_heights', 's_solid_bases', 's_solid_heights',
                'p_fade_heights', 'p_fade_bases', 's_fade_bases',
                's_fade_heights', 'total_heights', 'label_heights')
    dims = {key: [] for key in dim_keys}
    for _, p_diff, s_diff, p_avg, s_ref_diff, _ in type_scores:
        c_p = 100 * p_diff * s_ref_diff / norm
        c_s = 100 * p_avg * s_diff / norm
        combined = c_p + c_s
        # When showing all contributions as positive, flip those from types
        # whose frequency decreased
        if plot_params['all_pos_contributions'] and p_diff <= 0:
            dims['total_heights'].append(-combined)
        else:
            dims['total_heights'].append(combined)
        if np.sign(s_ref_diff * p_diff) * np.sign(s_diff) == 1:
            # Components reinforce: stack them solidly, no faded overlap
            updates = {'p_solid_heights': c_p, 's_solid_bases': c_p,
                       's_solid_heights': c_s, 'label_heights': combined,
                       'p_fade_bases': 0, 'p_fade_heights': 0,
                       's_fade_bases': 0, 's_fade_heights': 0}
        elif abs(c_p) > abs(c_s):
            # p dominates: the net bar is p-colored, the cancelled part fades
            updates = {'p_solid_heights': combined, 'p_fade_bases': combined,
                       'p_fade_heights': -c_s, 's_fade_heights': c_s,
                       'label_heights': c_p, 's_solid_bases': 0,
                       's_solid_heights': 0, 's_fade_bases': 0}
        else:
            # s dominates: the net bar is s-colored, the cancelled part fades
            updates = {'s_solid_heights': combined, 'p_fade_heights': c_p,
                       's_fade_bases': combined, 's_fade_heights': -c_p,
                       'label_heights': c_s, 'p_solid_heights': 0,
                       's_solid_bases': 0, 'p_fade_bases': 0}
        for key, value in updates.items():
            dims[key].append(value)
    return dims
|
def get_bar_colors(type_scores, plot_params):
    """
    Returns the component colors of each type's contribution bars.

    Parameters
    ----------
    type_scores: list of tuples
        Tuples of the form (type, p_diff, s_diff, p_avg, s_ref_diff, shift_score)
        for every type scored in the two systems, i.e. the detailed output of
        a Shift object's `get_shift_scores`
    plot_params: dict
        Dictionary of plotting parameters. Here, `all_pos_contributions` and
        `score_colors` are used

    Returns
    -------
    dict with keys `p`, `s`, and `total`; values are per-type lists of colors
    for the p_diff component, the s_diff component, and the overall bar
    (used by simple, non-detailed, shift graphs) respectively
    """
    colors = plot_params['score_colors']
    bar_colors = {'p': [], 's': [], 'total': []}
    for _, p_diff, s_diff, p_avg, s_ref_diff, _ in type_scores:
        contribution = p_diff * s_ref_diff + p_avg * s_diff
        # Overall bar color: by sign of the total contribution, or by sign of
        # the frequency change when all contributions are plotted as positive
        if not plot_params['all_pos_contributions']:
            total_key = 'pos_total' if contribution > 0 else 'neg_total'
        else:
            total_key = 'all_pos_pos' if p_diff > 0 else 'all_pos_neg'
        bar_colors['total'].append(colors[total_key])
        # Frequency component color: keyed by sign of score-vs-reference and
        # sign of the frequency difference
        s_part = 'pos_s' if s_ref_diff > 0 else 'neg_s'
        p_part = 'pos_p' if p_diff > 0 else 'neg_p'
        bar_colors['p'].append(colors[s_part + '_' + p_part])
        # Score-difference component color: keyed by sign of the score change
        bar_colors['s'].append(colors['pos_s' if s_diff > 0 else 'neg_s'])
    return bar_colors
|
def plot_contributions(ax, top_n, bar_dims, bar_colors, plot_params):
    """
    Plots all of the type contributions as horizontal bars.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph
    top_n: int
        The number of types being plotted on the shift graph
    bar_dims: dict
        Per-type bar dimensions as returned by `get_bar_dims`
    bar_colors: dict
        Per-type bar colors as returned by `get_bar_colors`
    plot_params: dict
        Dictionary of plotting parameters. Here, `alpha_fade`, `bar_width`,
        `detailed`, and `bar_linewidth` are used

    Returns
    -------
    ax, with the contribution bars drawn
    """
    n_bars = min(top_n, len(bar_dims['total_heights']))
    # Bars fill the top of the rank axis when fewer than top_n types exist
    ys = range(top_n - n_bars + 1, top_n + 1)
    width = plot_params['bar_width']
    shared = {'align': 'center', 'zorder': 10,
              'edgecolor': ['black'] * n_bars,
              'linewidth': plot_params['bar_linewidth']}
    if plot_params['detailed']:
        fade = plot_params['alpha_fade']
        # Solid components first, then the alpha-faded counteracting parts
        ax.barh(ys, bar_dims['p_solid_heights'], width,
                color=bar_colors['p'], **shared)
        ax.barh(ys, bar_dims['s_solid_heights'], width,
                left=bar_dims['s_solid_bases'], color=bar_colors['s'], **shared)
        ax.barh(ys, bar_dims['p_fade_heights'], width,
                left=bar_dims['p_fade_bases'], color=bar_colors['p'],
                alpha=fade, **shared)
        ax.barh(ys, bar_dims['s_fade_heights'], width,
                left=bar_dims['s_fade_bases'], color=bar_colors['s'],
                alpha=fade, **shared)
    else:
        ax.barh(ys, bar_dims['total_heights'], width,
                color=bar_colors['total'], **shared)
    return ax
|
def get_bar_order(plot_params):
    """
    Gets which cumulative bars to show at the top of the graph given what
    level of detail is being specified.

    Parameters
    ----------
    plot_params: dict
        Dictionary of plotting parameters. Here, `all_pos_contributions`,
        `detailed`, `show_score_diffs`, and `show_total` are used

    Returns
    -------
    list of strs indicating which cumulative bars to show
    """
    if plot_params['detailed']:
        order = ['neg_s_neg_p', 'neg_s_pos_p', 'pos_s_neg_p', 'pos_s_pos_p']
        if plot_params['show_score_diffs']:
            order = ['neg_s', 'pos_s'] + order
    elif plot_params['all_pos_contributions']:
        order = ['all_pos_pos', 'all_pos_neg']
    else:
        order = ['neg_total', 'pos_total']
    if plot_params['show_total']:
        order.insert(0, 'total')
    return order
|
def plot_total_contribution_sums(ax, total_comp_sums, bar_order, top_n, bar_dims, plot_params):
    """
    Plots the cumulative contribution bars at the top of the shift graph.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph
    total_comp_sums: dict
        Six per-component contribution totals, as returned by
        `get_shift_component_sums`
    bar_order: list of strs
        Which cumulative bars to show, as returned by `get_bar_order`
    top_n: int
        The number of types being plotted on the shift graph
    bar_dims: dict
        Per-type bar dimensions as returned by `get_bar_dims`
    plot_params: dict
        Dictionary of plotting parameters. Here, `all_pos_contributions`,
        `show_total`, `score_colors`, `bar_width`, and `bar_linewidth` are used

    Returns
    -------
    (ax, comp_bar_heights, bar_order) where comp_bar_heights are the scaled
    heights of the plotted cumulative bars
    """
    heights = []
    for bar in bar_order:
        if bar == 'total':
            height = 0  # placeholder, replaced by the grand total below
        elif bar == 'neg_total':
            height = (total_comp_sums['neg_s'] + total_comp_sums['neg_s_pos_p']
                      + total_comp_sums['pos_s_neg_p'])
        elif bar == 'pos_total':
            height = (total_comp_sums['pos_s'] + total_comp_sums['neg_s_neg_p']
                      + total_comp_sums['pos_s_pos_p'])
        elif bar == 'all_pos_pos':
            totals = np.array(bar_dims['total_heights'])
            height = np.sum(totals[totals > 0])
        elif bar == 'all_pos_neg':
            totals = np.array(bar_dims['total_heights'])
            height = np.sum(totals[totals < 0])
        else:
            height = total_comp_sums[bar]
        heights.append(height)
    if 'total' in bar_order:
        heights[bar_order.index('total')] = sum(heights)
    # Rescale the cumulative bars so the largest matches the largest type bar
    if plot_params['all_pos_contributions']:
        max_bar_height = np.max(np.abs(bar_dims['total_heights']))
    else:
        max_bar_height = np.max(np.abs(bar_dims['label_heights']))
    scaling = max_bar_height / np.max(np.abs(heights))
    heights = [scaling * h for h in heights]
    # The total bar (if shown) sits on its own row; the rest come in pairs
    if plot_params['show_total']:
        min_y = top_n + 3.5
        ys = [top_n + 2]
    else:
        min_y = top_n + 2
        ys = []
    for pair_n in range(len(heights) // 2):
        ys += 2 * [min_y + 1.5 * pair_n]
    comp_colors = [plot_params['score_colors'][bar] for bar in bar_order]
    ax.barh(ys, heights, plot_params['bar_width'], align='center',
            color=comp_colors, linewidth=plot_params['bar_linewidth'],
            edgecolor=['black'] * len(heights))
    return (ax, heights, bar_order)
|
def get_bar_type_space(ax, plot_params):
    """
    Gets the amount of space to place between the ends of bars and their
    labels, proportional to the current width of the x-axis.
    """
    x_min, x_max = ax.get_xlim()
    x_width = 2 * max(abs(x_min), abs(x_max))
    return plot_params['bar_type_space_scaling'] * x_width
|
def set_bar_labels(ax, top_n, type_labels, full_bar_heights, comp_bar_heights, plot_params):
    """
    Sets the labels on the end of each type's contribution bar and on the
    cumulative bars at the top of the graph.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph
    top_n: int
        The number of types being plotted on the shift graph
    type_labels: list of strs
        Sorted list of labels to plot on the shift graph
    full_bar_heights: list of floats
        Bar ends at which to place the type contribution labels
    comp_bar_heights: list of floats
        Bar ends at which to place the cumulative contribution labels
    plot_params: dict
        Dictionary of plotting parameters. Here, `show_total`,
        `label_fontsize`, and `bar_type_space_scaling` are used

    Returns
    -------
    ax, with labels drawn and the x-axis widened to fit them
    """
    n_types = len(full_bar_heights)
    all_bar_ends = full_bar_heights + comp_bar_heights
    bar_type_space = get_bar_type_space(ax, plot_params)
    # y positions mirror those used when the cumulative bars were drawn
    if plot_params['show_total']:
        min_y = top_n + 3.5
        top_heights = [top_n + 2]
    else:
        min_y = top_n + 2
        top_heights = []
    for pair_n in range(len(comp_bar_heights) // 2):
        top_heights += 2 * [min_y + 1.5 * pair_n]
    bar_heights = list(range(top_n - n_types + 1, top_n + 1)) + top_heights
    fontsize = plot_params['label_fontsize']
    text_objs = []
    for bar_n, bar_end in enumerate(all_bar_ends):
        # Labels go just past the end of the bar, on its outward side
        if bar_end < 0:
            ha = 'right'
            offset = -bar_type_space
        else:
            ha = 'left'
            offset = bar_type_space
        text_objs.append(ax.text(bar_end + offset, bar_heights[bar_n],
                                 type_labels[bar_n], ha=ha, va='center',
                                 fontsize=fontsize, zorder=5))
    # Widen the x-axis so the newly placed labels fit inside the plot
    return adjust_axes_for_labels(ax, full_bar_heights, comp_bar_heights,
                                  text_objs, bar_type_space, plot_params)
|
def adjust_axes_for_labels(ax, bar_ends, comp_bars, text_objs, bar_type_space, plot_params):
    """
    Attempts to readjust the x-axis limits to account for newly plotted labels.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph
    bar_ends: list of floats
        Ends of the type contribution bars
    comp_bars: list of floats
        Ends of the cumulative contribution bars
    text_objs: list of Matplotlib text objects
        Label texts after being plotted on the ax
    bar_type_space: float
        How much space sits between bar ends and labels
    plot_params: dict
        Dictionary of plotting parameters. Here, `width_scaling` is used

    Returns
    -------
    ax, with symmetric x-limits wide enough for the longest bar+label
    """
    renderer = ax.figure.canvas.get_renderer()
    lengths = []
    for bar_end, text_obj in zip(bar_ends, text_objs):
        # Measure the rendered label in data coordinates
        bbox = ax.transData.inverted().transform(
            text_obj.get_window_extent(renderer=renderer))
        text_length = abs(bbox[0][0] - bbox[1][0])
        if bar_end > 0:
            lengths.append(bar_end + text_length + bar_type_space)
        else:
            lengths.append(bar_end - text_length - bar_type_space)
    lengths += [abs(b) for b in comp_bars]
    max_length = plot_params['width_scaling'] * max(abs(l) for l in lengths)
    ax.set_xlim((-max_length, max_length))
    return ax
|
def set_ticks(ax, top_n, plot_params):
    """
    Sets ticks and tick labels of the shift graph.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph
    top_n: int
        The number of types being plotted on the shift graph
    plot_params: dict
        Dictionary of plotting parameters. Here, `all_pos_contributions`,
        `tick_format`, `every_nth_ytick`, `xtick_fontsize`, `ytick_fontsize`,
        `remove_xticks`, and `remove_yticks` are used

    Returns
    -------
    ax, with ticks and labels set
    """
    fmt = plot_params['tick_format']
    # When all contributions are plotted as positive, hide the sign of the
    # left (negative) half of the x-axis
    if plot_params['all_pos_contributions']:
        x_labels = [fmt.format(abs(t)) for t in ax.get_xticks()]
    else:
        x_labels = [fmt.format(t) for t in ax.get_xticks()]
    ax.set_xticklabels(x_labels, fontsize=plot_params['xtick_fontsize'])
    # y-axis counts rank from the top, so labels run in reverse
    step = plot_params['every_nth_ytick']
    y_ticks = list(range(1, top_n, step)) + [top_n]
    y_labels = [str(n) for n in range(top_n, 1, -step)] + ['1']
    ax.set_yticks(y_ticks)
    ax.set_yticklabels(y_labels, fontsize=plot_params['ytick_fontsize'])
    if plot_params['remove_xticks']:
        remove_xaxis_ticks(ax)
    if plot_params['remove_yticks']:
        remove_yaxis_ticks(ax)
    return ax
|
def set_spines(ax, plot_params):
    """
    Sets spines of the shift graph to be invisible if chosen by the user.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph
    plot_params: dict
        Dictionary of plotting parameters. Here `invisible_spines` is used

    Returns
    -------
    ax, with the requested spines hidden
    """
    valid_spines = {'left', 'right', 'top', 'bottom'}
    for spine in plot_params['invisible_spines'] or []:
        if spine in valid_spines:
            ax.spines[spine].set_visible(False)
        else:
            print('invalid spine argument')
    return ax
|
def remove_yaxis_ticks(ax, major=True, minor=True):
    """Hide all y-axis tick marks (major and/or minor) on the shift graph."""
    ticks = []
    if major:
        ticks += ax.yaxis.get_major_ticks()
    if minor:
        ticks += ax.yaxis.get_minor_ticks()
    for tic in ticks:
        tic.tick1line.set_visible(False)
        tic.tick2line.set_visible(False)
|
def remove_xaxis_ticks(ax, major=True, minor=True):
    """Hide all x-axis tick marks (major and/or minor) on the shift graph."""
    ticks = []
    if major:
        ticks += ax.xaxis.get_major_ticks()
    if minor:
        ticks += ax.xaxis.get_minor_ticks()
    for tic in ticks:
        tic.tick1line.set_visible(False)
        tic.tick2line.set_visible(False)
|
def get_cumulative_inset(f, type2shift_score, top_n, normalization, plot_params):
    """
    Plots the cumulative contribution inset on the shift graph.

    Parameters
    ----------
    f: Matplotlib figure
        Current figure of the shift graph
    type2shift_score: dict
        Keys are types and values are their total shift score
    top_n: int
        The number of types being plotted on the shift graph
    normalization: str
        The type of normalization being used on the shift scores, either
        'variation' (sum of abs values of scores) or 'trajectory' (sum of
        scores)
    plot_params: dict
        Dictionary of plotting parameters. Here, `pos_cumulative_inset`,
        `cumulative_xlabel`, `cumulative_ylabel`, `cumulative_xticks`, and
        `cumulative_xticklabels` are used. NOTE: default values for missing
        cumulative label/tick entries are written back into this dict, i.e.
        the caller's dict is mutated in place

    Returns
    -------
    f, with the cumulative contribution inset added
    """
    inset_pos = plot_params['pos_cumulative_inset']
    # Sort scores by decreasing magnitude (largest contributors first) and
    # scale to percentages; under 'variation' the cumulative sum is over
    # absolute scores, otherwise over the signed scores
    if (normalization == 'variation'):
        scores = sorted([(100 * np.abs(s)) for s in type2shift_score.values()], key=(lambda x: abs(x)), reverse=True)
        if (plot_params['cumulative_xlabel'] is None):
            plot_params['cumulative_xlabel'] = '$\\sum | \\delta \\Phi_{\\tau} |$'
    else:
        scores = sorted([(100 * s) for s in type2shift_score.values()], key=(lambda x: abs(x)), reverse=True)
        if (plot_params['cumulative_xlabel'] is None):
            plot_params['cumulative_xlabel'] = '$\\sum \\delta \\Phi_{\\tau}$'
    cum_scores = np.cumsum(scores)
    (left, bottom, width, height) = inset_pos
    in_ax = f.add_axes([left, bottom, width, height])
    # Rank (log-scaled y) vs cumulative score
    in_ax.semilogy(cum_scores, range(1, (len(cum_scores) + 1)), '-', color='black', linewidth=0.5, markersize=1.2)
    in_ax.set_xlim((min(cum_scores), max(cum_scores)))
    in_ax.set_ylim((1, (len(cum_scores) + 1)))
    in_ax.margins(x=0, y=0)
    # Invert the y-axis so rank 1 appears at the top of the inset
    (y_min, y_max) = in_ax.get_ylim()
    in_ax.set_ylim((y_max, y_min))
    # Default ticks depend on the sign of the total (last cumulative) score
    total_score = cum_scores[(- 1)]
    if (np.sign(total_score) == 1):
        if (plot_params['cumulative_xticks'] is None):
            plot_params['cumulative_xticks'] = [0, 25, 50, 75, 100]
        if (plot_params['cumulative_xticklabels'] is None):
            plot_params['cumulative_xticklabels'] = ['0', '', '50', '', '100']
    else:
        if (plot_params['cumulative_xticks'] is None):
            plot_params['cumulative_xticks'] = [(- 100), (- 75), (- 50), (- 25), 0]
        if (plot_params['cumulative_xticklabels'] is None):
            plot_params['cumulative_xticklabels'] = ['-100', '', '-50', '', '0']
    in_ax.set_xticks(plot_params['cumulative_xticks'])
    in_ax.set_xticklabels(plot_params['cumulative_xticklabels'], fontsize=11)
    # NOTE(review): `tick.label` was deprecated/removed in newer Matplotlib
    # (use `tick.label1`) — confirm the supported Matplotlib version
    for tick in in_ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(11)
    # Horizontal rule marking the top_n cutoff shown in the main graph
    (x_min, x_max) = in_ax.get_xlim()
    in_ax.hlines(top_n, x_min, x_max, linestyle='-', color='black', linewidth=0.5)
    in_ax.set_xlabel(plot_params['cumulative_xlabel'], fontsize=12)
    in_ax.set_ylabel(plot_params['cumulative_ylabel'], fontsize=12)
    # Transparent background so the inset overlays the main axes cleanly
    in_ax.patch.set_alpha(0)
    return f
|
def get_text_size_inset(f, type2freq_1, type2freq_2, plot_params):
    """
    Plots the relative text size inset on the shift graph.

    Parameters
    ----------
    f: Matplotlib figure
        Current figure of the shift graph
    type2freq_1, type2freq_2: dict
        Keys are types, values are their frequencies
    plot_params: dict
        Dictionary of plotting parameters. Here, `system_names` and
        `pos_text_size_inset` are used

    Returns
    -------
    f, with the text size inset added
    """
    # Normalize both totals by the larger one so the bigger text has length 1
    size_1 = sum(type2freq_1.values())
    size_2 = sum(type2freq_2.values())
    largest = max(size_1, size_2)
    rel_sizes = [size_1 / largest, size_2 / largest]
    left, bottom, width, height = plot_params['pos_text_size_inset']
    in_ax = f.add_axes([left, bottom, width, height])
    in_ax.barh([0.6, 0.4], rel_sizes, 0.1, color='#707070', linewidth=0.5,
               edgecolor=(['black'] * 2), tick_label=plot_params['system_names'])
    in_ax.set_ylim((0, 1))
    in_ax.text(0.5, 0.75, 'Text Size:', horizontalalignment='center', fontsize=14)
    for tick in in_ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(12)
    # Strip axis decorations so only the two bars and their labels remain
    in_ax.tick_params(axis='y', length=0)
    for side in ['left', 'right', 'top', 'bottom']:
        in_ax.spines[side].set_visible(False)
    in_ax.get_xaxis().set_visible(False)
    in_ax.set_alpha(0)
    return f
|
class Shift():
"\n    Shift object for calculating weighted scores of two systems of types,\n    and the shift between them\n\n    Parameters\n    ----------\n    type2freq_1, type2freq_2: dict\n        Keys are types of a system and values are frequencies of those types\n    type2score_1, type2score_2: dict or str, optional\n        If dict, types are keys and values are scores associated with each\n        type. If str, the name of a score lexicon included in Shifterator.\n        If None and other type2score is None, defaults to uniform scores\n        across types. Otherwise defaults to the other type2score dict\n    reference_value: str or float, optional\n        The reference score to use to partition scores into two different\n        regimes. If 'average', uses the average score according to type2freq_1\n        and type2score_1. If None and a lexicon is selected for type2score,\n        uses the respective middle point in that lexicon's scale. Otherwise\n        if None, uses zero as the reference point\n    handle_missing_scores: str, optional\n        If 'error', throws an error whenever a word has a score in one score\n        dictionary but not the other. If 'exclude', excludes any word that is\n        missing a score in one score dictionary from all word shift\n        calculations, regardless if it may have a score in the other dictionary.\n        If 'adopt' and the score is missing in one dictionary, then uses the\n        score from the other dictionary if it is available\n    stop_lens: iterable of 2-tuples, optional\n        Denotes intervals of scores that should be excluded from word shifts\n        calculations. Types with scores in this range will be excluded from word\n        shift calculations\n    stop_words: set, optional\n        Denotes words that should be excluded from calculation of word shifts\n    normalization: str, optional\n        If 'variation', normalizes shift scores so that the sum of\n        their absolute values sums to 1. If 'trajectory', normalizes\n        them so that the sum of shift scores is 1 or -1. The trajectory\n        normalization cannot be applied if the total shift score is 0, so\n        scores are left unnormalized if the total is 0 and 'trajectory' is\n        specified\n    "
def __init__(self, type2freq_1, type2freq_2, type2score_1=None, type2score_2=None, reference_value=None, handle_missing_scores='error', stop_lens=None, stop_words=None, normalization='variation'):
    """See the class docstring for parameter details."""
    # Resolve the two score dictionaries: fall back to the other dictionary
    # when only one is given, or to uniform scores when neither is
    lex_ref = None
    if type2score_1 is not None and type2score_2 is not None:
        self.type2score_1, lex_ref = helper.get_score_dictionary(type2score_1)
        self.type2score_2, _ = helper.get_score_dictionary(type2score_2)
        # Score differences are only meaningful when the two lexicons differ
        self.show_score_diffs = type2score_1 != type2score_2
    elif type2score_1 is not None:
        self.type2score_1, lex_ref = helper.get_score_dictionary(type2score_1)
        self.type2score_2 = self.type2score_1
        self.show_score_diffs = False
    elif type2score_2 is not None:
        self.type2score_2, lex_ref = helper.get_score_dictionary(type2score_2)
        self.type2score_1 = self.type2score_2
        self.show_score_diffs = False
    else:
        self.type2score_1 = {t: 1 for t in type2freq_1}
        self.type2score_2 = {t: 1 for t in type2freq_2}
        self.show_score_diffs = False
    self.handle_missing_scores = handle_missing_scores
    self.stop_lens = [] if stop_lens is None else stop_lens
    self.stop_words = set() if stop_words is None else stop_words
    # Filter stop words / stop lens and reconcile missing scores
    preprocessed = helper.preprocess_words_scores(type2freq_1, self.type2score_1,
                                                  type2freq_2, self.type2score_2,
                                                  self.stop_lens, self.stop_words,
                                                  self.handle_missing_scores)
    (self.type2freq_1, self.type2freq_2, self.type2score_1, self.type2score_2,
     self.types, self.filtered_types, self.no_score_types,
     self.adopted_score_types) = preprocessed[:8]
    # Reference score: explicit value, weighted average, lexicon midpoint,
    # or zero, in that order of precedence
    if reference_value is not None:
        if reference_value == 'average':
            self.reference_value = self.get_weighted_score(self.type2freq_1,
                                                           self.type2score_1)
        else:
            self.reference_value = reference_value
    elif lex_ref is not None:
        self.reference_value = lex_ref
    else:
        self.reference_value = 0
    self.normalization = normalization
    # Populate the per-type shift score attributes up front
    self.get_shift_scores(details=False)
def get_weighted_score(self, type2freq, type2score):
    """
    Calculate the frequency-weighted average score of a system.

    Parameters
    ----------
    type2freq: dict
        Keys are types and values are frequencies
    type2score: dict
        Keys are types and values are scores

    Returns
    -------
    float or None
        The weighted average score, or None if the two dictionaries share
        no types
    """
    common_types = set(type2freq).intersection(type2score)
    if not common_types:
        return None
    f_total = sum(type2freq[t] for t in common_types)
    s_weighted = sum(type2freq[t] * type2score[t] for t in common_types)
    return s_weighted / f_total
def get_shift_scores(self, details=False):
    """
    Calculates the type shift scores between the two systems.

    Parameters
    ----------
    details: bool
        If True, also returns each of the major components of every type's
        shift score along with the overall shift scores; otherwise only the
        overall shift scores are returned

    Returns
    -------
    If details is True, a tuple (type2p_diff, type2s_diff, type2p_avg,
    type2s_ref_diff, type2shift_score):
        type2p_diff: relative frequency differences, p_i,2 - p_i,1
        type2s_diff: score differences, s_i,2 - s_i,1
        type2p_avg: average relative frequencies, 0.5 * (p_i,1 + p_i,2)
        type2s_ref_diff: deviation from the reference score,
            0.5 * (s_i,2 + s_i,1) - s_ref
        type2shift_score: normalized overall shift scores
    Otherwise only type2shift_score is returned. Also sets all of these as
    attributes on the object, along with `diff` (the unnormalized total)
    and `norm` (the normalization factor)
    """
    s_ref = self.reference_value
    # Relative frequencies within each system, restricted to scored types;
    # a type absent from one system contributes zero frequency there
    freq_total_1 = sum(freq for t, freq in self.type2freq_1.items() if t in self.types)
    freq_total_2 = sum(freq for t, freq in self.type2freq_2.items() if t in self.types)
    type2p_1 = {t: self.type2freq_1[t] / freq_total_1 if t in self.type2freq_1 else 0
                for t in self.types}
    type2p_2 = {t: self.type2freq_2[t] / freq_total_2 if t in self.type2freq_2 else 0
                for t in self.types}
    type2p_avg = {}
    type2p_diff = {}
    type2s_diff = {}
    type2s_ref_diff = {}
    type2shift_score = {}
    for t in self.types:
        p_1, p_2 = type2p_1[t], type2p_2[t]
        s_1, s_2 = self.type2score_1[t], self.type2score_2[t]
        type2p_avg[t] = 0.5 * (p_1 + p_2)
        type2p_diff[t] = p_2 - p_1
        type2s_diff[t] = s_2 - s_1
        type2s_ref_diff[t] = 0.5 * (s_2 + s_1) - s_ref
        # Shift score = frequency component + score-difference component
        type2shift_score[t] = (type2p_diff[t] * type2s_ref_diff[t]
                               + type2s_diff[t] * type2p_avg[t])
    total_diff = sum(type2shift_score.values())
    self.diff = total_diff
    # Normalization factor; a zero total makes 'trajectory' ill-defined
    if total_diff == 0:
        warnings.warn('Score normalization is not well-defined because the total score diff is 0. Setting norm to 1')
        self.norm = 1
    elif self.normalization == 'variation':
        self.norm = sum(abs(s) for s in type2shift_score.values())
    elif self.normalization == 'trajectory':
        self.norm = abs(total_diff)
    else:
        self.norm = 1
    type2shift_score = {t: score / self.norm for t, score in type2shift_score.items()}
    self.type2p_diff = type2p_diff
    self.type2s_diff = type2s_diff
    self.type2p_avg = type2p_avg
    self.type2s_ref_diff = type2s_ref_diff
    self.type2shift_score = type2shift_score
    if details:
        return (type2p_diff, type2s_diff, type2p_avg, type2s_ref_diff, type2shift_score)
    return type2shift_score
def get_shift_component_sums(self):
    """
    Calculates the cumulative contribution of each component of the different
    kinds of shift scores.

    Returns
    -------
    dict
        Dictionary with six keys, one for each of the different component
        contributions: pos_s_pos_p, pos_s_neg_p, neg_s_pos_p, neg_s_neg_p,
        pos_s, neg_s. Values are the total contribution from that component
        across all types
    """
    # Ensure shift scores are available. BUG FIX: the previous code assigned
    # the raw return of get_shift_scores(details=True) — a tuple of dicts —
    # to shift_scores and then tried to unpack it as 6-tuples, which would
    # raise on that path. Recompute if needed, then build the per-type rows
    # uniformly from the populated attributes
    if self.type2shift_score is None:
        self.get_shift_scores(details=False)
    shift_scores = [(t, self.type2p_diff[t], self.type2s_diff[t],
                     self.type2p_avg[t], self.type2s_ref_diff[t],
                     self.type2shift_score[t]) for t in self.type2s_diff]
    pos_s_pos_p = 0
    pos_s_neg_p = 0
    neg_s_pos_p = 0
    neg_s_neg_p = 0
    pos_s = 0
    neg_s = 0
    for (_, p_diff, s_diff, p_avg, s_ref_diff, _) in shift_scores:
        # Frequency component, bucketed by the signs of the reference score
        # deviation and the frequency difference
        if s_ref_diff > 0:
            if p_diff > 0:
                pos_s_pos_p += p_diff * s_ref_diff
            else:
                pos_s_neg_p += p_diff * s_ref_diff
        elif p_diff > 0:
            neg_s_pos_p += p_diff * s_ref_diff
        else:
            neg_s_neg_p += p_diff * s_ref_diff
        # Score-difference component, bucketed by its own sign
        if s_diff > 0:
            pos_s += p_avg * s_diff
        else:
            neg_s += p_avg * s_diff
    return {'pos_s_pos_p': pos_s_pos_p, 'pos_s_neg_p': pos_s_neg_p,
            'neg_s_pos_p': neg_s_pos_p, 'neg_s_neg_p': neg_s_neg_p,
            'pos_s': pos_s, 'neg_s': neg_s}
def get_shift_graph(self, ax=None, top_n=50, text_size_inset=True, cumulative_inset=True, show_plot=True, filename=None, **kwargs):
    """Plot the shift graph between two systems of types.

    Parameters
    ----------
    ax: matplotlib.pyplot.axes.Axes, optional
        Axes to draw figure onto. Will create new axes if none are given.
    top_n: int, optional
        Display the top_n types as sorted by their absolute contribution to
        the difference between systems
    cumulative_inset: bool, optional
        Whether to show an inset showing the cumulative contributions to the
        shift by ranked types
    text_size_inset: bool, optional
        Whether to show an inset showing the relative sizes of each system
    show_plot: bool, optional
        Whether to show plot when it is done being rendered
    filename: str, optional
        If not None, name of the file for saving the shift graph

    Returns
    -------
    ax
        Matplotlib ax of shift graph. Displays shift graph if show_plot=True
    """
    # Merge caller-supplied styling options into the plotting defaults.
    kwargs = plotting.get_plot_params(kwargs, self.show_score_diffs, self.diff)
    # Rank all types by |shift score|, keep the top_n, and reverse so the
    # largest contribution is drawn at the top of the graph.
    type_scores = [(t, self.type2p_diff[t], self.type2s_diff[t], self.type2p_avg[t], self.type2s_ref_diff[t], self.type2shift_score[t]) for t in self.type2s_diff]
    type_scores = sorted(type_scores, key=(lambda x: abs(x[(- 1)])), reverse=True)[:top_n]
    type_scores.reverse()
    # Bar geometry and colors for each type's contribution components.
    bar_dims = plotting.get_bar_dims(type_scores, self.norm, kwargs)
    bar_colors = plotting.get_bar_colors(type_scores, kwargs)
    if (ax is None):
        (_, ax) = plt.subplots(figsize=(kwargs['width'], kwargs['height']))
    ax.margins(kwargs['y_margin'])
    # Per-type contribution bars.
    ax = plotting.plot_contributions(ax, top_n, bar_dims, bar_colors, kwargs)
    # Summary bars showing the total contribution of each component class.
    total_comp_sums = self.get_shift_component_sums()
    bar_order = plotting.get_bar_order(kwargs)
    (ax, comp_bar_heights, bar_order) = plotting.plot_total_contribution_sums(ax, total_comp_sums, bar_order, top_n, bar_dims, kwargs)
    # Label each bar; types whose score was adopted from the other system
    # are marked with the configured missing symbol.
    type_labels = [t for (t, _, _, _, _, _) in type_scores]
    m_sym = kwargs['missing_symbol']
    type_labels = [((t + m_sym) if (t in self.adopted_score_types) else t) for t in type_labels]
    bar_labels = [kwargs['symbols'][b] for b in bar_order]
    labels = (type_labels + bar_labels)
    if kwargs['serif']:
        plotting.set_serif()
    if kwargs['detailed']:
        ax = plotting.set_bar_labels(ax, top_n, labels, bar_dims['label_heights'], comp_bar_heights, kwargs)
    else:
        ax = plotting.set_bar_labels(ax, top_n, labels, bar_dims['total_heights'], comp_bar_heights, kwargs)
    # Reference lines: zero axis, a separator above the type bars, and
    # (optionally) another above the total bar.
    ax.axvline(0, ls='-', color='black', lw=1.0, zorder=20)
    ax.axhline((top_n + 1), ls='-', color='black', lw=0.7, zorder=20)
    if kwargs['show_total']:
        ax.axhline((top_n + 2.75), ls='-', color='black', lw=0.5, zorder=20)
    if cumulative_inset:
        plotting.get_cumulative_inset(ax.figure, self.type2shift_score, top_n, self.normalization, kwargs)
    if text_size_inset:
        plotting.get_text_size_inset(ax.figure, self.type2freq_1, self.type2freq_2, kwargs)
    ax = plotting.set_ticks(ax, top_n, kwargs)
    ax = plotting.set_spines(ax, kwargs)
    ax.set_xlabel(kwargs['xlabel'], fontsize=kwargs['xlabel_fontsize'])
    ax.set_ylabel(kwargs['ylabel'], fontsize=kwargs['ylabel_fontsize'])
    # Default title reports each system's weighted average score.
    if ('title' not in kwargs):
        s_avg_1 = self.get_weighted_score(self.type2freq_1, self.type2score_1)
        s_avg_2 = self.get_weighted_score(self.type2freq_2, self.type2score_2)
        title = (((((('{}: '.format(kwargs['system_names'][0]) + '$\\Phi_{avg}=$') + '{0:.2f}'.format(s_avg_1)) + '\n') + '{}: '.format(kwargs['system_names'][1])) + '$\\Phi_{avg}=$') + '{0:.2f}'.format(s_avg_2))
        kwargs['title'] = title
    ax.set_title(kwargs['title'], fontsize=kwargs['title_fontsize'])
    if kwargs['tight']:
        # tight_layout can warn spuriously when insets are present; silence it.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            plt.tight_layout()
    if (filename is not None):
        plt.savefig(filename, dpi=kwargs['dpi'])
    if show_plot:
        plt.show()
    return ax
|
class WeightedAvgShift(Shift):
    """Shift object for calculating weighted scores of two systems of types,
    and the shift between them.

    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types
    type2score_1, type2score_2: dict or str, optional
        If dict, types are keys and values are scores associated with each
        type. If str, the name of a score lexicon included in Shifterator.
        If None and the other type2score is None, defaults to uniform scores
        across types. Otherwise defaults to the other type2score dict
    reference_value: str or float, optional
        The reference score to use to partition scores into two different
        regimes. If 'average', uses the average score according to
        type2freq_1 and type2score_1. If None and a lexicon is selected for
        type2score, uses the respective middle point in that lexicon's
        scale. Otherwise if None, uses zero as the reference point
    handle_missing_scores: str, optional
        If 'error', throws an error whenever a word has a score in one score
        dictionary but not the other. If 'exclude', excludes any word that
        is missing a score in one score dictionary from all word shift
        calculations. If 'adopt' and the score is missing in one dictionary,
        uses the score from the other dictionary if it is available
    stop_lens: iterable of 2-tuples, optional
        Denotes intervals of scores that should be excluded from word shift
        calculations
    stop_words: set, optional
        Denotes words that should be excluded from word shift calculations
    normalization: str, optional
        If 'variation', normalizes shift scores so that the sum of their
        absolute values sums to 1. If 'trajectory', normalizes them so that
        the sum of shift scores is 1 or -1. The trajectory normalization
        cannot be applied if the total shift score is 0, so scores are left
        unnormalized if the total is 0 and 'trajectory' is specified
    """

    # NOTE(review): stop_words=set() is a mutable default argument; safe only
    # if the base class never mutates it -- confirm before relying on it.
    def __init__(self, type2freq_1, type2freq_2, type2score_1=None, type2score_2=None, reference_value=None, handle_missing_scores='error', stop_lens=None, stop_words=set(), normalization='variation'):
        # Pure pass-through: the base Shift class implements the whole
        # weighted-average shift calculation.
        super().__init__(type2freq_1=type2freq_1, type2freq_2=type2freq_2, type2score_1=type2score_1, type2score_2=type2score_2, reference_value=reference_value, handle_missing_scores=handle_missing_scores, stop_lens=stop_lens, stop_words=stop_words, normalization=normalization)
|
class ProportionShift(Shift):
    """Shift object for calculating differences in proportions of types
    across two systems.

    Parameters
    __________
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types
    """

    def __init__(self, type2freq_1, type2freq_2):
        # Copy so the caller's dicts are untouched, then give every type
        # appearing in either system an explicit (possibly zero) frequency
        # in both systems.
        freqs_1 = type2freq_1.copy()
        freqs_2 = type2freq_2.copy()
        for t in set(freqs_1.keys()).union(freqs_2.keys()):
            freqs_1.setdefault(t, 0)
            freqs_2.setdefault(t, 0)
        super().__init__(type2freq_1=freqs_1, type2freq_2=freqs_2, type2score_1=None, type2score_2=None, reference_value=0, handle_missing_scores='error', stop_lens=None, stop_words=None, normalization='variation')

    def get_shift_graph(self, top_n=50, show_plot=True, detailed=False, text_size_inset=True, cumulative_inset=True, title=None, filename=None, **kwargs):
        """Render the proportion shift graph; see Shift.get_shift_graph."""
        # Default to an empty title and no total bar for proportion shifts.
        if title is None:
            title = ''
        return super().get_shift_graph(top_n=top_n, text_size_inset=text_size_inset, cumulative_inset=cumulative_inset, detailed=detailed, show_plot=show_plot, filename=filename, show_total=False, title=title, **kwargs)
|
class EntropyShift(Shift):
    """Shift object for calculating the shift in entropy between two systems.

    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types
    base: float, optional
        Base of the logarithm for calculating entropy
    alpha: float, optional
        The parameter for the generalized Tsallis entropy. Setting `alpha=1`
        recovers the Shannon entropy. Higher `alpha` emphasizes more common
        types, lower `alpha` emphasizes less common types.
        For details: https://en.wikipedia.org/wiki/Tsallis_entropy
    reference_value: str or float, optional
        The reference score to use to partition scores into two different
        regimes. If 'average', uses the average score according to
        type2freq_1 and type2score_1. Otherwise, uses zero as the reference
        point
    normalization: str, optional
        If 'variation', normalizes shift scores so that the sum of their
        absolute values sums to 1. If 'trajectory', normalizes them so that
        the sum of shift scores is 1 or -1. The trajectory normalization
        cannot be applied if the total shift score is 0, so scores are left
        unnormalized if the total is 0 and 'trajectory' is specified
    """

    def __init__(self, type2freq_1, type2freq_2, base=2, alpha=1, reference_value=0, normalization='variation'):
        # Work on copies so the caller's frequency dicts are never mutated.
        freqs_1 = type2freq_1.copy()
        freqs_2 = type2freq_2.copy()
        probs_1 = entropy.get_relative_freqs(freqs_1)
        probs_2 = entropy.get_relative_freqs(freqs_2)
        (scores_1, scores_2) = entropy.get_entropy_scores(probs_1, probs_2, base, alpha)
        super().__init__(type2freq_1=freqs_1, type2freq_2=freqs_2, type2score_1=scores_1, type2score_2=scores_2, handle_missing_scores='error', stop_lens=None, stop_words=None, reference_value=reference_value, normalization=normalization)
        self.type2p_1 = probs_1
        self.type2p_2 = probs_2
        self.alpha = alpha

    def get_shift_graph(self, top_n=50, show_plot=True, detailed=False, text_size_inset=True, cumulative_inset=True, filename=None, **kwargs):
        """Render the entropy shift graph; see Shift.get_shift_graph."""
        return super().get_shift_graph(top_n=top_n, text_size_inset=text_size_inset, cumulative_inset=cumulative_inset, detailed=detailed, show_plot=show_plot, filename=filename, **kwargs)
|
class KLDivergenceShift(Shift):
    """Shift object for calculating the Kullback-Leibler divergence (KLD)
    between two systems.

    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types.
        The KLD will be computed with respect to type2freq_1, i.e.
        D(T2 || T1). For the KLD to be well defined, all types must have
        nonzero frequencies in both type2freq_1 and type2freq_2
    base: float, optional
        Base of the logarithm for calculating entropy
    reference_value: str or float, optional
        The reference score used to partition scores into two regimes
    normalization: str, optional
        If 'variation', normalizes shift scores so that the sum of their
        absolute values sums to 1. If 'trajectory', normalizes them so that
        the sum of shift scores is 1 or -1. The trajectory normalization
        cannot be applied if the total shift score is 0, so scores are left
        unnormalized if the total is 0 and 'trajectory' is specified
    """

    def __init__(self, type2freq_1, type2freq_2, base=2, reference_value=0, normalization='variation'):
        # D(T2 || T1) is undefined for types with zero probability in system
        # 1, i.e. types appearing only in type2freq_2.
        types_1 = set(type2freq_1.keys())
        types_2 = set(type2freq_2.keys())
        if (len(types_2.difference(types_1)) > 0):
            err = ('There are types that appear in type2freq_2 but not type2freq_1:' + 'the KL divergence is not well-defined')
            raise ValueError(err)
        type2freq_1 = type2freq_1.copy()
        type2freq_2 = type2freq_2.copy()
        type2p_1 = entropy.get_relative_freqs(type2freq_1)
        type2p_2 = entropy.get_relative_freqs(type2freq_2)
        (type2s_1, type2s_2) = entropy.get_entropy_scores(type2p_1, type2p_2, base, alpha=1)
        # NOTE(review): type2freq_2 is deliberately passed for *both*
        # frequency slots and the two score dicts are swapped
        # (type2score_1=type2s_2, type2score_2=type2s_1). This appears to be
        # how D(T2 || T1) is realized as a word shift -- confirm against the
        # upstream shifterator implementation before "fixing" it.
        super().__init__(type2freq_1=type2freq_2, type2freq_2=type2freq_2, type2score_1=type2s_2, type2score_2=type2s_1, handle_missing_scores='error', stop_lens=None, stop_words=None, reference_value=reference_value, normalization=normalization)
        self.type2p_1 = type2p_1
        self.type2p_2 = type2p_2

    def get_shift_graph(self, top_n=50, show_plot=True, detailed=False, text_size_inset=True, cumulative_inset=True, title=None, filename=None, **kwargs):
        # Default to an empty title rather than the averaged-score title the
        # base class would otherwise build.
        if (title is None):
            title = ''
        ax = super().get_shift_graph(top_n=top_n, text_size_inset=text_size_inset, cumulative_inset=cumulative_inset, detailed=detailed, show_plot=show_plot, title=title, filename=filename, **kwargs)
        return ax
|
class JSDivergenceShift(Shift):
    """Shift object for calculating the Jensen-Shannon divergence (JSD)
    between two systems.

    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types
    weight_1, weight_2: float
        Relative weights of type2freq_1 and type2freq_2 when constructing
        their mixed distribution. Should sum to 1
    base: float, optional
        Base of the logarithm for calculating entropy
    alpha: float, optional
        The parameter for the generalized Tsallis entropy. Setting `alpha=1`
        recovers the Shannon entropy. Higher `alpha` emphasizes more common
        types, lower `alpha` emphasizes less common types.
        For details: https://en.wikipedia.org/wiki/Tsallis_entropy
    reference_value: str or float, optional
        The reference score to use to partition scores into two different
        regimes. Defaults to zero as the reference point
    normalization: str, optional
        If 'variation', normalizes shift scores so that the sum of their
        absolute values sums to 1. If 'trajectory', normalizes them so that
        the sum of shift scores is 1 or -1. The trajectory normalization
        cannot be applied if the total shift score is 0, so scores are left
        unnormalized if the total is 0 and 'trajectory' is specified

    Raises
    ------
    ValueError
        If weight_1 + weight_2 does not sum to 1 (within a small tolerance).
    """

    def __init__(self, type2freq_1, type2freq_2, base=2, weight_1=0.5, weight_2=0.5, alpha=1, reference_value=0, normalization='variation'):
        # Bug fix: compare with a small tolerance instead of exact float
        # equality -- pairs such as (0.7, 0.3) do not sum to exactly 1.0 in
        # binary floating point and were previously rejected spuriously.
        if abs((weight_1 + weight_2) - 1) > 1e-09:
            raise ValueError('weight_1 and weight_2 do not sum to 1')
        # Work on copies so the caller's frequency dicts are never mutated.
        type2freq_1 = type2freq_1.copy()
        type2freq_2 = type2freq_2.copy()
        type2p_1 = entropy.get_relative_freqs(type2freq_1)
        type2p_2 = entropy.get_relative_freqs(type2freq_2)
        (type2m, type2s_1, type2s_2) = entropy.get_jsd_scores(type2p_1, type2p_2, weight_1=weight_1, weight_2=weight_2, base=base, alpha=alpha)
        super().__init__(type2freq_1=type2freq_1, type2freq_2=type2freq_2, type2score_1=type2s_1, type2score_2=type2s_2, reference_value=reference_value, handle_missing_scores='error', normalization=normalization, stop_lens=None, stop_words=None)
        self.type2p_1 = type2p_1
        self.type2p_2 = type2p_2
        self.type2m = type2m  # scores of the mixture distribution
        self.alpha = alpha

    def get_shift_graph(self, top_n=50, show_plot=True, detailed=False, text_size_inset=True, cumulative_inset=True, title=None, filename=None, **kwargs):
        """Render the JSD shift graph; see Shift.get_shift_graph.

        When alpha == 1 and the reference value is 0, the graph is drawn in
        all-positive-contributions mode.
        """
        all_pos_contributions = ((self.alpha == 1) and (self.reference_value == 0))
        if title is None:
            title = ''
        return super().get_shift_graph(top_n=top_n, text_size_inset=text_size_inset, cumulative_inset=cumulative_inset, detailed=detailed, show_plot=show_plot, filename=filename, title=title, all_pos_contributions=all_pos_contributions, **kwargs)
|
def test_jsd_shift_1():
    # Smoke test: render a JSD shift graph for example systems 1A/2A.
    JSDivergenceShift(system_1_a, system_2_a).get_shift_graph(system_names=['1A', '2A'])
|
def test_entropy_shift_1():
    # Smoke test: render an entropy shift graph for example systems 1A/2A.
    EntropyShift(system_1_a, system_2_a).get_shift_graph(system_names=['1A', '2A'])
|
def test_tsallis_shift_plus_1():
    # Smoke test: Tsallis entropy shift with alpha > 1 on systems 1A/2A.
    EntropyShift(system_1_a, system_2_a, alpha=2).get_shift_graph(system_names=['1A', '2A'])
|
def test_tsallis_shift_minus_1():
    # Smoke test: Tsallis entropy shift with alpha < 1 on systems 1A/2A.
    EntropyShift(system_1_a, system_2_a, alpha=0.5).get_shift_graph(system_names=['1A', '2A'])
|
def test_jsd_shift_2():
    # Smoke test: render a JSD shift graph for example systems 1B/2B.
    JSDivergenceShift(system_1_b, system_2_b).get_shift_graph(system_names=['1B', '2B'])
|
def test_entropy_shift_2():
    # Smoke test: render an entropy shift graph for example systems 1B/2B.
    EntropyShift(system_1_b, system_2_b).get_shift_graph(system_names=['1B', '2B'])
|
def test_tsallis_shift_plus_2():
    # Smoke test: Tsallis entropy shift with alpha > 1 on systems 1B/2B.
    EntropyShift(system_1_b, system_2_b, alpha=2).get_shift_graph(system_names=['1B', '2B'])
|
def test_tsallis_shift_minus_2():
    # Smoke test: Tsallis entropy shift with alpha < 1 on systems 1B/2B.
    EntropyShift(system_1_b, system_2_b, alpha=0.5).get_shift_graph(system_names=['1B', '2B'])
|
class EvaluationTap(Tap):
    # Paths to the gold test/dev splits (JSON lines).
    dataset_test_path: str
    dataset_dev_path: str
    # Paths to model predictions for the test/dev splits.
    prediction_test_path: str
    prediction_dev_path: str
    # Also compute bootstrap-based evaluation.
    evaluate_bootstrap: bool = False
    # Optional example filter; filter_prediction accepts
    # 'multiple_supporting_sentences' or 'distant'.
    filtering: Optional[str] = None
    # Aggregate sentence-level predictions to document level before scoring.
    document_level: bool = False
|
def binarize_scores(threshold: float, scores: list[float]) -> list[int]:
    """Return 1 for each score strictly greater than threshold, else 0."""
    return [int(score > threshold) for score in scores]
|
def get_evaluation_scores(score_name: Literal[('f1', 'accuracy', 'balanced_accuracy')], test_labels: list[int], dev_labels: list[int], test_scores: list[float], dev_scores: list[float], provided_threshold: Optional[float]=None, threshold_min: float=0.0, threshold_max: float=1.0) -> dict:
    """Tune a decision threshold on the dev split (unless one is provided)
    and report the chosen metric on the test split.

    Args:
        score_name: which metric to optimize/report.
        test_labels, dev_labels: binary gold labels per split.
        test_scores, dev_scores: prediction scores aligned with the labels.
        provided_threshold: if given, skip tuning and use this threshold.
        threshold_min, threshold_max: range swept during tuning; 100 evenly
            spaced thresholds in [threshold_min, threshold_max) are tried.

    Returns:
        dict with the test metric(s), the chosen 'threshold', and the dev
        sweep under 'dev' ('scores' and 'thresholds' lists; empty when a
        threshold was provided).
    """
    threshold: float = 0.0
    max_score: float = 0.0
    score_list: list[float] = []
    thresholds_list: list[float] = []
    if (provided_threshold is None):
        # Fix: the metric-function lookup dict was rebuilt on every loop
        # iteration; it is loop-invariant, so select the function once.
        score_func = {'f1': f1_score, 'accuracy': accuracy_score, 'balanced_accuracy': balanced_accuracy_score}[score_name]
        for th in np.arange(threshold_min, threshold_max, step=((threshold_max - threshold_min) / 100)):
            score = score_func(y_true=dev_labels, y_pred=binarize_scores(th, dev_scores))
            score_list.append(score)
            thresholds_list.append(th)
            # Strict inequality: ties keep the earliest (smallest) threshold.
            if (max_score < score):
                threshold = th
                max_score = score
    else:
        threshold = provided_threshold
    output_dict = {}
    if (score_name == 'f1'):
        # Report precision/recall alongside f1 for the binary positive class.
        prfs = precision_recall_fscore_support(y_true=test_labels, y_pred=binarize_scores(threshold, test_scores), average='binary')
        output_dict.update({'f1': prfs[2], 'precision': prfs[0], 'recall': prfs[1]})
    elif (score_name == 'accuracy'):
        accuracy = accuracy_score(y_true=test_labels, y_pred=binarize_scores(threshold, test_scores))
        output_dict.update({'accuracy': accuracy})
    elif (score_name == 'balanced_accuracy'):
        ba = balanced_accuracy_score(y_true=test_labels, y_pred=binarize_scores(threshold, test_scores))
        output_dict.update({'balanced_accuracy': ba})
    output_dict['threshold'] = threshold
    output_dict['dev'] = {'scores': score_list, 'thresholds': thresholds_list}
    return output_dict
|
def filter_prediction(datasets: dict[(DevTest, list[RawData])], predictions: dict[(DevTest, dict[(str, dict)])], filtering_type: Optional[str]=None) -> tuple[(dict[(DevTest, list[RawData])], dict[(DevTest, dict[(str, dict)])])]:
    'This filtering is not used in the final version of WiCE paper.'
    if filtering_type is None:
        return (datasets, predictions)
    kept_datasets: dict[(DevTest, list[RawData])] = {}
    kept_predictions: dict[(DevTest, dict[(str, dict)])] = {}
    for split in ['dev', 'test']:
        kept_datasets[split] = []
        kept_predictions[split] = {}
        for example in datasets[split]:
            # Non-supported examples are always kept; the filters only
            # restrict the (partially) supported ones.
            if example['label'] == 'not_supported':
                keep = True
            elif filtering_type == 'multiple_supporting_sentences':
                # Keep claims that require >= 2 sentences in some supporting set.
                keep = max([len(s) for s in example['supporting_sentences']]) >= 2
            elif filtering_type == 'distant':
                # Keep claims whose tightest supporting set spans >= 5 sentences.
                min_dist = 100
                for s in example['supporting_sentences']:
                    min_dist = min(min_dist, (max(s) - min(s)))
                keep = min_dist >= 5
            else:
                raise ValueError(f'{filtering_type} is not a valid value of filtering_type')
            if keep:
                kept_datasets[split].append(example)
                article_id = example['meta']['id']
                kept_predictions[split][article_id] = predictions[split][article_id]
    return (kept_datasets, kept_predictions)
|
def convert_to_document_level(predictions: dict[(str, float)]):
    """Aggregate sentence-level scores to document level, keeping the minimum
    score among a document's sentences (ids are '<article>_<sentence>')."""
    doc_scores: dict[(str, float)] = {}
    for sentence_id, score in predictions.items():
        article_id = '_'.join(sentence_id.split('_')[:(- 1)])
        previous = doc_scores.get(article_id)
        doc_scores[article_id] = score if previous is None else min(previous, score)
    return doc_scores
|
def make_prediction_list(datasets: dict[(DevTest, list[RawData])], predictions: dict[(DevTest, dict[(str, dict)])]) -> tuple[(dict[(DevTest, list[int])], dict[(DevTest, list[float])])]:
    """Convert dataset and predictions to aligned lists of labels and scores."""
    labels_by_split: dict[(DevTest, list[int])] = {}
    scores_by_split: dict[(DevTest, list[float])] = {}
    for split in ['dev', 'test']:
        labels_by_split[split] = []
        scores_by_split[split] = []
        split_predictions = predictions[split]
        for example in datasets[split]:
            example_id = example['meta']['id']
            # Only examples that actually received a prediction are included.
            if example_id in split_predictions:
                scores_by_split[split].append(split_predictions[example_id])
                labels_by_split[split].append(label_str_to_int[example['label']])
    return (labels_by_split, scores_by_split)
|
def get_list_of_prediction_scores_for_target_labels(target_labels: list[int], label_list: list[int], score_list: list[float]) -> list[float]:
    """Return the prediction scores whose gold label is in target_labels."""
    return [score for idx, score in enumerate(score_list) if label_list[idx] in target_labels]
|
def evaluate_entailment_classification(labels_dict_of_list: dict[(DevTest, list[int])], scores_dict_of_list: dict[(DevTest, list[float])], provided_thresholds_dict: Optional[dict]=None):
    """Evaluate binary entailment classification over several label pairings.

    For each pairing (x vs y) of the gold labels {0, 1, 2}, group x is
    treated as the positive class (y_true=1) and group y as negative;
    ROC/AUROC is computed on the test split and a decision threshold is
    tuned on the dev split for f1 / accuracy / balanced accuracy (unless
    thresholds are provided).

    Args:
        labels_dict_of_list: gold labels per split ('dev'/'test').
        scores_dict_of_list: prediction scores per split, aligned with labels.
        provided_thresholds_dict: optional precomputed thresholds keyed by
            score name and pairing key (same layout as the returned dict).

    Returns:
        dict with 'label_distribution', 'roc', 'f1', 'accuracy', and
        'balanced_accuracy' entries keyed by pairing strings like '0_vs_1'.
    """
    evaluation_output: dict = {'label_distribution': {}, 'roc': {}, 'f1': {}, 'accuracy': {}, 'balanced_accuracy': {}}
    # Count test examples per gold label.
    for label in [0, 1, 2]:
        evaluation_output['label_distribution'][label] = np.sum((np.array(labels_dict_of_list['test']) == label)).item()
    # Pairings: (positive label group x, negative label group y).
    for (x, y) in [[[0], [1]], [[1], [2]], [[0], [2]], [[0], [1, 2]]]:
        y_true_dict: dict[(DevTest, list[int])] = {}
        y_score_dict: dict[(DevTest, list[float])] = {}
        for split in ['dev', 'test']:
            prediction_scores_for_data_with_target_labels = []
            for target_labels in [x, y]:
                prediction_scores_for_data_with_target_labels.append(get_list_of_prediction_scores_for_target_labels(target_labels=target_labels, label_list=labels_dict_of_list[split], score_list=scores_dict_of_list[split]))
            y_true_dict[split] = []
            # Group 0 (labels x) becomes the positive class (1); group 1
            # (labels y) becomes the negative class (0).
            for binary in [0, 1]:
                y_true_dict[split] += ([[1, 0][binary]] * len(prediction_scores_for_data_with_target_labels[binary]))
            y_score_dict[split] = (prediction_scores_for_data_with_target_labels[0] + prediction_scores_for_data_with_target_labels[1])
        label_key = f"{','.join(map(str, x))}_vs_{','.join(map(str, y))}"
        (fpr, tpr, _) = roc_curve(y_true=y_true_dict['test'], y_score=y_score_dict['test'])
        try:
            auroc = roc_auc_score(y_true=y_true_dict['test'], y_score=y_score_dict['test'])
        except ValueError:
            # roc_auc_score raises when only one class is present; mark with -1.
            auroc = (- 1)
        evaluation_output['roc'][label_key] = {'roc': auroc, 'fpr': fpr.tolist(), 'tpr': tpr.tolist()}
        for score_name in ['f1', 'accuracy', 'balanced_accuracy']:
            if (provided_thresholds_dict is not None):
                provided_threshold = provided_thresholds_dict[score_name][label_key]['threshold']
            elif (score_name == 'f1'):
                # Tune the threshold on dev for f1 ...
                provided_threshold = None
            else:
                # ... then reuse the f1-tuned threshold for the accuracy metrics.
                provided_threshold = evaluation_output['f1'][label_key]['threshold']
            evaluation_output[score_name][label_key] = get_evaluation_scores(score_name=score_name, test_labels=y_true_dict['test'], dev_labels=y_true_dict['dev'], test_scores=y_score_dict['test'], dev_scores=y_score_dict['dev'], provided_threshold=provided_threshold)
    return evaluation_output
|
def get_article_for_bootstrap(dataset: list[dict], article_ids_list: list[str]) -> list[dict]:
    """Look up article records by id, in the order (and multiplicity) given
    by article_ids_list (bootstrap resampling may repeat ids)."""
    id_to_article = {article['meta']['id']: article for article in dataset}
    return [id_to_article[article_id] for article_id in article_ids_list]
|
class PostprocessTap(Tap):
    # JSONL file of entailment-model inputs that the predictions correspond to.
    entailment_input_jsonl_path: str
    # Text file of raw prediction scores (presumably one per line -- confirm
    # against the postprocessing caller).
    prediction_txt_path: str
    # Number of evaluations to run (TODO confirm exact meaning at call site).
    evaluation_num: int = 100
|
class GPTEvalTap(Tap):
    # OpenAI model used for GPT-based evaluation.
    model: Literal[('gpt-3.5-turbo-0613', 'gpt-4-0613')]
    # Evaluate at claim or subclaim granularity.
    claim_subclaim: Literal[('claim', 'subclaim')]
    # Dataset split to evaluate on.
    split: Literal[('dev', 'test')] = 'test'
    # Number of evaluations to run (TODO confirm exact meaning at call site).
    evaluation_num: int = 100
|
def process_gpt_output(gpt_output: dict) -> float:
    """Postprocess the GPT output and return the entailment score:
    {"supported": 1.0, "partially_supported": 0.5, "not_supported": 0.0,
    "invalid": -1.0}.

    The input format is {"prompt": prompt (str), "response": response (str)};
    the response is expected to contain an <answer>...</answer> span.
    """
    try:
        response: str = gpt_output['response']
        answer = response.split('<answer>')[1].split('</answer>')[0]
        return {'supported': 1.0, 'partially_supported': 0.5, 'not_supported': 0.0}[answer]
    except (KeyError, IndexError):
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit. Only parsing failures should yield -1.0: missing
        # 'response' key, missing <answer> tags, or an unknown answer label.
        print('parse failed')
        print(gpt_output['response'])
        return (- 1.0)
|
def get_gpt_prompt(claim: str, evidence_list: list[str], line_idx: list[int]) -> str:
    """Build the GPT evaluation prompt: the claim plus evidence sentences
    wrapped in <sentence_i> tags carrying their original line indices."""
    assert (len(evidence_list) == len(line_idx)), f'{len(evidence_list)} != {len(line_idx)}, {line_idx}'
    tagged_sentences = [f' <sentence_{idx}>{sentence}</sentence_{idx}>' for (idx, sentence) in zip(line_idx, evidence_list)]
    return GPT_PROMPT.format(claim=claim, evidence='\n'.join(tagged_sentences))
|
class PreprocessTap(Tap):
    # Dataset split to preprocess (e.g. 'train', 'dev', 'test').
    split: str
    # Preprocess at claim or subclaim granularity.
    claim_type: Literal[('claim', 'subclaim')]
    # Maximum number of whitespace-separated words per evidence chunk.
    word_num_in_chunk: int = 256
    # Whether to include surrounding context for claims / evidence.
    add_claim_context: bool = False
    add_evidence_context: bool = False
    # Input dataset directory and output directory for entailment inputs.
    dataset_dir: Path = Path('../../data/entailment_retrieval/')
    output_dir: Path = Path('../entailment_inputs/')

    def process_args(self):
        # CLI overrides arrive as strings; coerce both directories to Path.
        self.dataset_dir = Path(self.dataset_dir)
        self.output_dir = Path(self.output_dir)
|
def split_into_chunks(article_sentences: list[str], article_indices: list[int], word_num_in_chunk: int) -> Chunks:
    """Split an article (list of sentences) into overlapping chunks without
    splitting in the middle of a sentence.

    Args:
        article_sentences: list of sentences in an article
        article_indices: original sentence indices aligned with
            article_sentences, used to report which sentences each chunk covers
        word_num_in_chunk: maximum number of words in each chunk

    Returns:
        Chunks: the chunk texts plus, per chunk, the original sentence
        indices it covers; consecutive chunks overlap by roughly half a
        chunk's sentences.
    """
    chunks_list: list[str] = []
    # Each dict records the [start, end] sentence positions of one chunk.
    sentence_idx_list_of_dict: list[dict[(Literal[('start', 'end')], int)]] = [{'start': 0}]
    sentence_idx: int = 0
    cur_chunk: list[str] = []
    while True:
        cur_chunk.append(article_sentences[sentence_idx])
        if (sentence_idx == (len(article_sentences) - 1)):
            # Reached the last sentence: flush whatever is accumulated.
            if (len(cur_chunk) > 0):
                chunks_list.append(cur_chunk)
                sentence_idx_list_of_dict[(- 1)]['end'] = sentence_idx
            else:
                # Nothing pending: drop the dangling 'start' record.
                sentence_idx_list_of_dict = sentence_idx_list_of_dict[:(- 1)]
            break
        sentence_idx += 1
        cur_chunk_len = np.sum([len(sent.split()) for sent in cur_chunk])
        if (cur_chunk_len >= word_num_in_chunk):
            if (len(cur_chunk) == 1):
                # A single sentence already exceeds the limit; keep it as its
                # own (over-long) chunk and warn.
                warnings.warn(f'''chunk is longer than provided limit:
{cur_chunk[0]}''')
                sentence_idx_list_of_dict[(- 1)]['end'] = (sentence_idx - 1)
            else:
                # Drop the sentence that pushed us over the limit; it will
                # start the next chunk instead.
                cur_chunk = cur_chunk[:(- 1)]
                sentence_idx_list_of_dict[(- 1)]['end'] = (sentence_idx - 2)
            chunks_list.append(cur_chunk)
            # Rewind by about half the chunk so consecutive chunks overlap.
            sentence_idx -= ((len(cur_chunk) - 1) // 2)
            cur_chunk = []
            sentence_idx_list_of_dict.append({'start': sentence_idx})
    # Map chunk-local positions back to the original article indices.
    sentence_idx_list = [[article_indices[idx] for idx in range(sentence_idx_dict['start'], (sentence_idx_dict['end'] + 1), 1)] for sentence_idx_dict in sentence_idx_list_of_dict]
    for (ck, idx_list) in zip(chunks_list, sentence_idx_list):
        assert (len(ck) == len(idx_list)), f'{len(ck)} != {len(idx_list)}'
    return Chunks(chunks_list=chunks_list, sentence_idx_list=sentence_idx_list)
|
def get_chunk_label(article_label: ThreeLabels, supporting_sentences_list: list[list[int]], chunk_idx_list: list[int]) -> ThreeLabels:
    """Derive a chunk's label from the article label and the supporting
    sentence sets: 'supported' if the chunk fully contains some supporting
    set, 'partially_supported' if it overlaps one, else 'not_supported'."""
    if article_label == 'not_supported':
        return 'not_supported'
    chunk_idx_set = set(chunk_idx_list)
    any_overlap = False
    for supporting in supporting_sentences_list:
        overlap = chunk_idx_set.intersection(supporting)
        # Fully-contained supporting set => the chunk inherits 'supported'.
        if article_label == 'supported' and len(overlap) == len(supporting):
            return 'supported'
        if overlap:
            any_overlap = True
    return 'partially_supported' if any_overlap else 'not_supported'
|
def get_chunk_stats(chunks_list: list[ProcessedData]):
    """Count chunks per entailment label, always reporting all three labels
    (zero counts included)."""
    label_counts = Counter((chunk['label'] for chunk in chunks_list))
    return {label: label_counts[label] for label in ['supported', 'partially_supported', 'not_supported']}
|
def get_balanced_output_list_for_evidence_context_data(output_list: list[ProcessedData]):
    """If args.add_evidence_context, we only include sentences, so there are
    too many non-supported cases; balance the dataset by randomly discarding
    non-supported cases down to the number of partially-supported ones."""
    buckets = {'supported': [], 'partially_supported': [], 'not_supported': []}
    for example in output_list:
        buckets[example['label']].append(example)
    balanced: list[dict] = buckets['supported'] + buckets['partially_supported']
    random.shuffle(buckets['not_supported'])
    balanced.extend(buckets['not_supported'][:len(buckets['partially_supported'])])
    return balanced
|
def check_oracle_chunk(chunk_idx: list[int], oracle_idx: list[int]) -> None:
    """Validate the containment relation between a chunk's sentence indices
    and the ground-truth oracle indices.

    If len(chunk_idx) >= len(oracle_idx), every oracle index must appear in
    chunk_idx; otherwise every chunk index must appear in oracle_idx.

    Raises:
        AssertionError: if the expected subset relation does not hold.

    Note: the return annotation previously claimed ``bool`` but nothing was
    ever returned; it is corrected to ``None`` to match actual behavior.
    """
    if (len(chunk_idx) >= len(oracle_idx)):
        assert set(oracle_idx).issubset(set(chunk_idx)), f'{set(oracle_idx)} is not subset of {set(chunk_idx)}'
    else:
        assert set(chunk_idx).issubset(set(oracle_idx)), f'{set(chunk_idx)} is not subset of {set(oracle_idx)}'
|
class RawData(TypedDict):
    # Gold entailment label for the claim.
    label: ThreeLabels
    # Each inner list is one set of sentence indices that jointly support the claim.
    supporting_sentences: list[list[int]]
    # The claim text.
    claim: str
    # Evidence article as a list of sentences.
    evidence: list[str]
    # Metadata; includes an 'id' key used elsewhere in this file.
    meta: dict
|
class Chunks(TypedDict):
    # Each chunk as a list of sentences (produced by split_into_chunks).
    chunks_list: list[list[str]]
    # For each chunk, the original article sentence indices it covers.
    sentence_idx_list: list[list[int]]
|
class ProcessedData(TypedDict):
    # Entailment label for this (claim, evidence-chunk) pair.
    label: str
    # The claim text.
    claim: str
    # Evidence chunk as a single string.
    evidence: str
    # String-valued metadata.
    meta: dict[(str, str)]
|
@nox.session
def format(session):
    # Auto-format the utils/ tree in place with yapf, then chain the lint
    # session. (The name shadows the `format` builtin, but nox derives the
    # CLI session name from it, so it is kept.)
    session.run('yapf', '-i', '-p', '--recursive', 'utils', external=True)
    session.notify('lint')
|
@nox.session
def format_check(session):
    # Diff-mode yapf: fail when reformatting would change anything.
    # NOTE(review): relies on session.run's return value being falsy on a
    # formatting diff -- confirm against nox's API.
    assert session.run('yapf', '-d', '-p', '--recursive', 'utils', external=True)
|
@nox.session
def lint(session):
    # Run pylint over utils/, then chain the type-check session.
    session.run('pylint', 'utils/', external=True)
    session.notify('type_check')
|
@nox.session
def type_check(session):
    # Static type checking of utils/ with mypy.
    session.run('mypy', 'utils/', external=True)
|
def get_mutator_so_path(database):
    """Return the path to the compiled custom-mutator shared object for the
    given database. MariaDB reuses the MySQL mutator library."""
    lib_name = 'mysql' if database == 'mariadb' else database
    return f'{ROOTPATH}/build/lib{lib_name}_mutator.so'
|
def get_config_path(database):
    """Return the path to the per-database Squirrel YAML config file."""
    return f'{ROOTPATH}/data/config_{database}.yml'
|
def set_env(database):
    """Configure the AFL++/Squirrel environment variables for fuzzing the
    given database target."""
    fuzz_env = {
        'AFL_CUSTOM_MUTATOR_ONLY': '1',
        'AFL_DISABLE_TRIM': '1',
        'AFL_FAST_CAL': '1',
        'AFL_CUSTOM_MUTATOR_LIBRARY': get_mutator_so_path(database),
        'SQUIRREL_CONFIG': get_config_path(database),
    }
    os.environ.update(fuzz_env)
|
def run(database, input_dir, output_dir=None, config_file=None, fuzzer=None):
    """Launch an AFL++ fuzzing run against the given database target.

    Args:
        database: target DBMS name; must be one of the supported DBMS.
        input_dir: AFL seed corpus directory.
        output_dir: AFL output directory (default '/tmp/fuzz').
        config_file: Squirrel config path (default per-database config).
        fuzzer: path to afl-fuzz (default the bundled AFLplusplus build).
    """
    if (database not in DBMS):
        print(f'Unsupported database. The supported ones are {DBMS}')
        return
    if (not output_dir):
        output_dir = '/tmp/fuzz'
    if (not config_file):
        config_file = get_config_path(database)
    if (not fuzzer):
        fuzzer = f'{ROOTPATH}/AFLplusplus/afl-fuzz'
    if (not os.path.exists(config_file)):
        print('Invalid path for config file')
        # Bug fix: previously fell through and launched the fuzzer anyway.
        return
    if (not os.path.exists(fuzzer)):
        print('Invalid path for afl-fuzz')
        # Bug fix: previously fell through and launched the fuzzer anyway.
        return
    set_env(database)
    # Random master-instance id so repeated runs do not collide.
    output_id = str(uuid.uuid4())[:10]
    if (database == 'sqlite'):
        cmd = f'{fuzzer} -i {input_dir} -o {output_dir} -M {output_id} -- /home/ossfuzz @@'
    else:
        cmd = f'{fuzzer} -i {input_dir} -o {output_dir} -M {output_id} -t 60000 -- {ROOTPATH}/build/db_driver'
    os.system(cmd)
|
def read_json_line(path):
    """Read a JSON-lines file and return the list of parsed records."""
    with open(path, 'r') as f:
        return [json.loads(line) for line in f]
|
def write_json_line(data, path):
    """Write records to a JSON-lines file, one JSON document per line."""
    with open(path, 'w') as f:
        f.writelines((json.dumps(record) + '\n') for record in data)
    return None
|
def acquire_from_twitter_api(input_data):
    """Fetch full tweets for a list of records via the Twitter REST API.

    Args:
        input_data: iterable of dicts, each with an 'id' key holding a tweet id.

    Returns:
        (tweets_by_API, wrong_ones): raw JSON tweets successfully fetched,
        and [record, exception] pairs for the ids that failed.

    NOTE(review): reads credentials from a module-level `args` object, and
    `tweepy.TweepError` only exists in tweepy < 4 -- confirm the pinned
    tweepy version.
    """
    auth = tweepy.OAuthHandler(args.API_key, args.API_secret_key)
    auth.set_access_token(args.access_token, args.access_token_secret)
    # JSONParser makes API calls return plain dicts; wait_on_rate_limit
    # sleeps through rate-limit windows instead of raising.
    api = tweepy.API(auth, parser=tweepy.parsers.JSONParser(), wait_on_rate_limit=True)
    tweets_by_API = []
    wrong_ones = []
    for (idx, i) in enumerate(input_data):
        # Progress logging every 500 ids.
        if ((idx % 500) == 0):
            print('[I] number of ids processed:', idx)
        try:
            # tweet_mode='extended' returns the full (untruncated) text.
            tweets_by_API.append(api.get_status(i['id'], tweet_mode='extended'))
        except tweepy.TweepError as e:
            wrong_ones.append([i, e])
    return (tweets_by_API, wrong_ones)
|
def writeJSONLine(data, path):
    """Write records to a JSON-lines file, one JSON document per line.

    NOTE(review): duplicates write_json_line elsewhere in this file; kept
    because existing callers use this camelCase name.
    """
    lines = [('%s\n' % json.dumps(record)) for record in data]
    with open(path, 'w') as f:
        f.write(''.join(lines))
    return None
|
def read_jsonl_datafile(data_file):
    """Read a JSON-lines data file, skipping blank lines."""
    with open(data_file, 'r') as reader:
        return [json.loads(stripped) for line in reader if (stripped := line.strip())]
|
def get_label_for_key_from_annotation(key, annotation, candidate_chunk):
    """Return (label, tagged_chunks) for one question key of one annotation.

    label is 1 iff *candidate_chunk* appears among the gold chunks for *key*
    (with 'AUTHOR OF THE TWEET' added when the annotators tagged 'I'/'i' for a
    person-style question).

    Bug fix: the original appended 'AUTHOR OF THE TWEET' directly to the list
    stored in *annotation*, so repeated calls with the same annotation dict
    (one per candidate chunk) accumulated duplicates. We now work on a copy
    and never mutate the caller's data.
    """
    tagged_chunks = annotation[key]
    label = 0
    if tagged_chunks:
        tagged_chunks = list(tagged_chunks)  # defensive copy — do not mutate input
        if ((key in ['name', 'who_cure', 'close_contact', 'opinion']) and (('I' in tagged_chunks) or ('i' in tagged_chunks))):
            tagged_chunks.append('AUTHOR OF THE TWEET')
        if candidate_chunk in tagged_chunks:
            label = 1
    return (label, tagged_chunks)
|
def get_tagged_label_for_key_from_annotation(key, annotation):
    """Return the raw annotation entry stored under *key*, unchanged."""
    return annotation[key]
|
def get_label_from_tagged_label(tagged_label):
    """Map an annotator label string to a binary 0/1 label.

    Exact matches are checked first, then prefix rules in a fixed order
    ('not_effective' / 'no_opinion' must be tested before 'effective').
    Unknown labels abort the process.
    """
    exact = {'Not Specified': 0, 'Yes': 1, 'Male': 1, 'Female': 1}
    if tagged_label in exact:
        return exact[tagged_label]
    # Order matters: negative prefixes shadow the 'effective' prefix.
    for prefix, value in (('no_cure', 0), ('not_effective', 0), ('no_opinion', 0), ('effective', 1)):
        if tagged_label.startswith(prefix):
            return value
    print(f'Unknown tagged_label {tagged_label}')
    exit()
|
def find_text_to_tweet_tokens_mapping(text, tweet_tokens):
    """Align *text* with its tokenization.

    Returns one list per token holding the character positions (into *text*)
    of that token's characters. Whitespace in *text* is skipped. On any
    character mismatch the context is printed and the process exits. A final
    assert verifies every token was fully consumed.
    """
    tok_idx = 0
    char_in_tok = 0
    mapping = [list()]
    for (pos, ch) in enumerate(text):
        if ch.isspace():
            continue
        # Current token exhausted — advance to the next token's bucket.
        if char_in_tok == len(tweet_tokens[tok_idx]):
            tok_idx += 1
            char_in_tok = 0
            mapping.append(list())
        if ch == tweet_tokens[tok_idx][char_in_tok]:
            mapping[tok_idx].append(pos)
            char_in_tok += 1
        else:
            print('Wrong mapping:')
            print(text)
            print(tweet_tokens)
            print(pos, f'{text[(pos - 1)]};{ch};{text[(pos + 1)]}')
            print(tok_idx, char_in_tok, f';{tweet_tokens[tok_idx][char_in_tok]};')
            exit()
    assert (((len(tweet_tokens) - 1) == tok_idx) and (len(tweet_tokens[tok_idx]) == char_in_tok))
    return mapping
|
def get_tweet_tokens_from_tags(tags):
    """Strip the three trailing '/'-separated tag fields from each token."""
    return ' '.join(tag.rsplit('/', 3)[0] for tag in tags.split())
|
def make_instances_from_dataset(dataset):
    """Convert annotated tweets into per-question labeled candidate-chunk instances.

    Each dataset entry holds the tweet text, annotator answers ('annotation'),
    candidate chunk character offsets, and POS tags. For every question tag and
    every candidate chunk this builds a tuple
    (text, chunk, chunk_id, chunk_start_text_id, chunk_end_text_id,
     tokenized_tweet, tweet_with_chunk_masked_by_Q_TOKEN, gold_chunks, label).

    Returns (task_instances_dict, (gold_labels_stats, gold_labels_unique_tweets),
    question_keys_and_tags).
    """
    task_instances_dict = dict()
    question_keys_and_tags = list()
    # Discover the part2 question keys from the first annotation's schema.
    dummy_annotation = dataset[0]['annotation']
    for key in dummy_annotation.keys():
        if (key.startswith('part2-') and key.endswith('.Response')):
            question_tag = key.replace('part2-', '').replace('.Response', '')
            question_keys_and_tags.append((question_tag, key))
    question_keys_and_tags.sort(key=(lambda tup: tup[0]))
    question_tags = [question_tag for (question_tag, question_key) in question_keys_and_tags]
    question_keys = [question_key for (question_tag, question_key) in question_keys_and_tags]
    # Split the single 'gender' question into two binary sub-tasks that share
    # the same underlying annotation key.
    if ('gender' in question_tags):
        gender_index = question_tags.index('gender')
        question_tags[gender_index] = 'gender_female'
        question_tags.insert(gender_index, 'gender_male')
        question_keys.insert(gender_index, question_keys[gender_index])
        question_keys_and_tags = list(zip(question_tags, question_keys))
    task_instances_dict = {question_tag: list() for (question_tag, question_key) in question_keys_and_tags}
    gold_labels_stats = {question_tag: dict() for (question_tag, question_key) in question_keys_and_tags}
    gold_labels_unique_tweets = {question_tag: dict() for (question_tag, question_key) in question_keys_and_tags}
    skipped_chunks = 0
    ignore_ones = []  # diagnostics for chunks whose offsets could not be mapped to tokens
    for annotated_data in dataset:
        id = annotated_data['id']
        annotation = annotated_data['annotation']
        text = annotated_data['text'].strip()
        candidate_chunks_offsets = annotated_data['candidate_chunks_offsets']
        candidate_chunks_from_text = [text[c[0]:c[1]] for c in candidate_chunks_offsets]
        tags = annotated_data['tags']
        tweet_tokens = get_tweet_tokens_from_tags(tags)
        # Sanity check: tokenization must cover exactly the non-space characters.
        try:
            assert (re.sub('\\s+', '', text) == re.sub('\\s+', '', tweet_tokens))
        except AssertionError:
            logging.error(f"Tweet and tokenized tweets don't match in id:{id}")
            text_without_spaces = re.sub('\\s+', '', text)
            logging.error(f'Tweets without spaces: {text_without_spaces}')
            tweet_tokens_without_spaces = re.sub('\\s+', '', tweet_tokens)
            logging.error(f'Tokens without spaces: {tweet_tokens_without_spaces}')
            exit()
        tweet_tokens = tweet_tokens.split()
        # Character position of every token character within the raw text.
        tweet_tokens_char_mapping = find_text_to_tweet_tokens_mapping(text, tweet_tokens)
        # Convert character-offset chunks to (start_token, end_token) spans.
        candidate_chunks_offsets_from_tweet_tokens = list()
        ignore_flags = list()
        for (chunk_start_idx, chunk_end_idx) in candidate_chunks_offsets:
            ignore_flag = False
            chunk_start_token_idx = None
            chunk_end_token_idx = None
            for (token_idx, tweet_token_char_mapping) in enumerate(tweet_tokens_char_mapping):
                if (chunk_start_idx in tweet_token_char_mapping):
                    chunk_start_token_idx = token_idx
                if ((chunk_end_idx - 1) in tweet_token_char_mapping):
                    chunk_end_token_idx = (token_idx + 1)
            # Chunk boundary fell inside a token — record diagnostics and skip it.
            if ((chunk_start_token_idx == None) or (chunk_end_token_idx == None)):
                logging.error(f'''Tweet id:{id}
Couldn't find chunk tokens for chunk offsets [{chunk_start_idx}, {chunk_end_idx}]:{text[chunk_start_idx:chunk_end_idx]};''')
                logging.error(f'Found chunk start and end token idx [{chunk_start_token_idx}, {chunk_end_token_idx}]')
                logging.error(f'Ignoring this chunk')
                ignore_flag = True
                ignore_info = {}
                ignore_info['id'] = id
                ignore_info['problem_chunk_id'] = [chunk_start_idx, chunk_end_idx]
                ignore_info['problem_chunk_text'] = text[chunk_start_idx:chunk_end_idx]
                ignore_info['whole_mapping'] = []
                for i in tweet_tokens_char_mapping:
                    if (len(i) == 1):
                        ignore_info['whole_mapping'].append([i, text[i[0]]])
                    else:
                        ignore_info['whole_mapping'].append([i, text[i[0]:(i[(- 1)] + 1)]])
                ignore_info['flag'] = 'NOT_FIND_ERROR'
                ignore_ones.append(ignore_info)
            ignore_flags.append(ignore_flag)
            candidate_chunks_offsets_from_tweet_tokens.append((chunk_start_token_idx, chunk_end_token_idx))
        candidate_chunks_from_tokens = [' '.join(tweet_tokens[c[0]:c[1]]) for c in candidate_chunks_offsets_from_tweet_tokens]
        '\n\t\tfor chunk_text, chunk_token, ignore_flag in zip(candidate_chunks_from_text, candidate_chunks_from_tokens, ignore_flags):\n\t\t\tif ignore_flag:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tassert re.sub("\\s+", "", chunk_text) == re.sub("\\s+", "", chunk_token)\n\t\t\texcept AssertionError:\n\t\t\t\tlogging.error(f"Chunk and text is not matching the chunk in tokenized tweet")\n\t\t\t\tchunk_text_without_spaces = re.sub("\\s+", "", chunk_text)\n\t\t\t\tchunk_token_without_spaces = re.sub("\\s+", "", chunk_token)\n\t\t\t\tlogging.error(f"Chunk from text without spaces: {chunk_text_without_spaces}")\n\t\t\t\tlogging.error(f"Chunk from tokens without spaces: {chunk_token_without_spaces}")\n\t\t\t\texit()\n\t\t'
        # Drop every chunk that failed the offset-to-token mapping above.
        candidate_chunks_from_text = [e for (ignore_flag, e) in zip(ignore_flags, candidate_chunks_from_text) if (not ignore_flag)]
        candidate_chunks_from_tokens = [e for (ignore_flag, e) in zip(ignore_flags, candidate_chunks_from_tokens) if (not ignore_flag)]
        candidate_chunks_offsets = [e for (ignore_flag, e) in zip(ignore_flags, candidate_chunks_offsets) if (not ignore_flag)]
        candidate_chunks_offsets_from_tweet_tokens = [e for (ignore_flag, e) in zip(ignore_flags, candidate_chunks_offsets_from_tweet_tokens) if (not ignore_flag)]
        chunk_char_offsets_to_token_idxs_mapping = {(offset[0], offset[1]): (c[0], c[1]) for (offset, c) in zip(candidate_chunks_offsets, candidate_chunks_offsets_from_tweet_tokens)}
        # Rewrite gold answers: character-offset answers become token strings.
        annotation_tweet_tokens = dict()
        for (key, value) in annotation.items():
            if (value == 'NO_CONSENSUS'):
                new_assignments = ['Not Specified']
            else:
                new_assignments = list()
                for assignment in value:
                    if (type(assignment) == list):
                        gold_chunk_token_idxs = chunk_char_offsets_to_token_idxs_mapping[tuple(assignment)]
                        new_assignment = ' '.join(tweet_tokens[gold_chunk_token_idxs[0]:gold_chunk_token_idxs[1]])
                        new_assignments.append(new_assignment)
                    else:
                        new_assignments.append(assignment)
            annotation_tweet_tokens[key] = new_assignments
        # Replace URL-like tokens with the URL_TOKEN placeholder.
        final_tweet_tokens = [(URL_TOKEN if (e.startswith('http') or ('twitter.com' in e) or e.startswith('www.')) else e) for e in tweet_tokens]
        final_candidate_chunks_with_token_id = [(f'{c[0]}_{c[1]}', ' '.join(tweet_tokens[c[0]:c[1]]), c) for c in candidate_chunks_offsets_from_tweet_tokens]
        for (question_tag, question_key) in question_keys_and_tags:
            # Person/location questions also get a synthetic author chunk.
            # NOTE(review): this appends to the shared list once per question
            # tag, so author chunks accumulate across iterations — the
            # duplicate-chunk check below is what keeps them from being reused.
            if (question_tag in ['name', 'close_contact', 'who_cure', 'opinion']):
                final_candidate_chunks_with_token_id.append(['author_chunk', 'AUTHOR OF THE TWEET', [0, 0]])
            elif (question_tag in ['where', 'recent_travel']):
                final_candidate_chunks_with_token_id.append(['near_author_chunk', 'AUTHOR OF THE TWEET', [0, 0]])
            current_candidate_chunks = set()
            for candidate_chunk_with_id in final_candidate_chunks_with_token_id:
                candidate_chunk_id = candidate_chunk_with_id[0]
                candidate_chunk = candidate_chunk_with_id[1]
                if (candidate_chunk.lower() == 'coronavirus'):
                    continue
                chunk_start_id = candidate_chunk_with_id[2][0]
                chunk_start_text_id = tweet_tokens_char_mapping[chunk_start_id][0]
                chunk_end_id = candidate_chunk_with_id[2][1]
                chunk_end_text_id = (tweet_tokens_char_mapping[(chunk_end_id - 1)][(- 1)] + 1)
                if (candidate_chunk == 'AUTHOR OF THE TWEET'):
                    pass
                else:
                    if (chunk_end_id > len(tweet_tokens)):
                        continue
                    candidate_chunk = ' '.join(final_tweet_tokens[chunk_start_id:chunk_end_id])
                # Skip chunks already emitted for this (tweet, question) pair.
                if (candidate_chunk in current_candidate_chunks):
                    skipped_chunks += 1
                    continue
                else:
                    current_candidate_chunks.add(candidate_chunk)
                if (question_tag in ['relation', 'gender_male', 'gender_female', 'believe', 'binary-relation', 'binary-symptoms', 'symptoms', 'opinion']):
                    # Tweet-level (categorical) questions: one label per tweet.
                    special_tagged_chunks = get_tagged_label_for_key_from_annotation(question_key, annotation_tweet_tokens)
                    try:
                        assert (len(special_tagged_chunks) == 1)
                    except AssertionError:
                        logging.error(f'for question_tag {question_tag} the special_tagged_chunks = {special_tagged_chunks}')
                        exit()
                    tagged_label = special_tagged_chunks[0]
                    if (tagged_label == 'No'):
                        tagged_label = 'Not Specified'
                    if (question_tag in ['gender_male', 'gender_female']):
                        gender = ('Male' if (question_tag == 'gender_male') else 'Female')
                        if (gender == tagged_label):
                            special_question_label = get_label_from_tagged_label(tagged_label)
                        else:
                            special_question_label = 0
                    else:
                        special_question_label = get_label_from_tagged_label(tagged_label)
                    if (question_tag == 'opinion'):
                        tagged_chunks = []
                        if (candidate_chunk == 'AUTHOR OF THE TWEET'):
                            question_label = 1
                            tagged_chunks.append('AUTHOR OF THE TWEET')
                        else:
                            question_label = 0
                    else:
                        # Combine the tweet-level label with the name-slot match.
                        (question_label, tagged_chunks) = get_label_for_key_from_annotation('part2-name.Response', annotation_tweet_tokens, candidate_chunk)
                        question_label = (question_label & special_question_label)
                        if (question_label == 0):
                            tagged_chunks = []
                else:
                    # Span-extraction questions: label is chunk membership in gold.
                    (question_label, tagged_chunks) = get_label_for_key_from_annotation(question_key, annotation_tweet_tokens, candidate_chunk)
                tokenized_tweet = ' '.join(final_tweet_tokens)
                task_instances_dict[question_tag].append((text, candidate_chunk, candidate_chunk_id, chunk_start_text_id, chunk_end_text_id, tokenized_tweet, ' '.join(((final_tweet_tokens[:chunk_start_id] + [Q_TOKEN]) + final_tweet_tokens[chunk_end_id:])), tagged_chunks, question_label))
                gold_labels_stats[question_tag].setdefault(question_label, 0)
                gold_labels_stats[question_tag][question_label] += 1
                gold_labels_unique_tweets[question_tag].setdefault(question_label, set())
                gold_labels_unique_tweets[question_tag][question_label].add(tokenized_tweet)
    logging.info(f'Total skipped chunks:{skipped_chunks} n_question tags:{len(question_keys_and_tags)}')
    # Replace tweet sets with their counts for the statistics summary.
    for (question_tag, question_key) in question_keys_and_tags:
        label_unique_tweets = gold_labels_unique_tweets[question_tag]
        label_unique_tweets_counts = dict()
        for (label, tweets) in label_unique_tweets.items():
            label_unique_tweets_counts[label] = len(tweets)
        gold_labels_unique_tweets[question_tag] = label_unique_tweets_counts
    logging.info('Gold label instances statistics:')
    log_list(gold_labels_stats.items())
    logging.info('Gold label tweets statistics:')
    log_list(gold_labels_unique_tweets.items())
    tag_statistics = (gold_labels_stats, gold_labels_unique_tweets)
    question_tag_gold_chunks = [(qt + '_gold_chunks') for qt in question_tags]
    question_tag_gold_labels = [(qt + '_label') for qt in question_tags]
    return (task_instances_dict, tag_statistics, question_keys_and_tags)
|
def main():
    """Read annotated tweets, build labeled instances and pickle the results.

    NOTE: a second `main` defined later in this file shadows this one.
    """
    logging.info(f'Reading annotations from {args.data_file} file...')
    annotations = read_jsonl_datafile(args.data_file)
    logging.info(f'Total annotations:{len(annotations)}')
    logging.info(f'Creating labeled data instances from annotations...')
    print(annotations[0].keys())
    (instances, statistics, keys_and_tags) = make_instances_from_dataset(annotations)
    logging.info(f'Saving all the instances, statistics and labels in {args.save_file}')
    save_in_pickle((instances, statistics, keys_and_tags), args.save_file)
|
def print_list(l):
    """Print each element of *l* on its own line, followed by a blank line."""
    for element in l:
        print(element)
    print()
|
def log_list(l):
    """Log each element of *l* at INFO level, followed by an empty line."""
    for element in l:
        logging.info(element)
    logging.info('')
|
def save_in_pickle(save_object, save_file):
    """Pickle *save_object* into the file at *save_file*."""
    with open(save_file, 'wb') as sink:
        pickle.dump(save_object, sink)
|
def load_from_pickle(pickle_file):
    """Unpickle and return the object stored in *pickle_file*."""
    with open(pickle_file, 'rb') as source:
        return pickle.load(source)
|
def save_in_json(save_dict, save_file):
    """Serialize *save_dict* as JSON into the file at *save_file*."""
    with open(save_file, 'w') as sink:
        json.dump(save_dict, sink)
|
def load_from_json(json_file):
    """Parse and return the JSON document stored in *json_file*."""
    with open(json_file, 'r') as source:
        return json.load(source)
|
def read_json_line(path):
    """Parse a JSON-Lines file into a list (duplicate definition in this file)."""
    with open(path, 'r') as fh:
        return list(map(json.loads, fh))
|
def write_json_line(data, path):
    """Write records as JSON lines (duplicate definition in this file)."""
    serialized = [('%s\n' % json.dumps(record)) for record in data]
    with open(path, 'w') as out:
        out.writelines(serialized)
    return None
|
def make_dir_if_not_exists(directory):
    """Create *directory* (including parents) if it does not already exist.

    Bug fix: the original check-then-create pattern raced with concurrent
    creators — `os.makedirs` could raise FileExistsError if the directory
    appeared between the check and the call. `exist_ok=True` makes the
    operation idempotent; the log line is still emitted only when the
    directory was absent at check time.
    """
    if not os.path.exists(directory):
        logging.info('Creating new directory: {}'.format(directory))
    os.makedirs(directory, exist_ok=True)
|
def extract_instances_for_current_subtask(task_instances, sub_task):
    """Look up the instance list stored for one sub-task."""
    return task_instances[sub_task]
|
def get_multitask_instances_for_valid_tasks(task_instances, tag_statistics):
    """Merge per-subtask instances into multitask instances.

    A subtask is kept only when it has both labels and at least
    MIN_POS_SAMPLES_THRESHOLD positive examples. Every (text, chunk, ...)
    instance then carries a dict mapping each kept subtask to its
    (gold_chunk, label) pair; subtasks missing for an instance default
    to ([], 0).
    """
    valid_subtasks = []
    for tag in task_instances.keys():
        stats = tag_statistics[0][tag]
        if (len(stats) > 1) and (stats[1] >= MIN_POS_SAMPLES_THRESHOLD):
            valid_subtasks.append(tag)
    per_text = dict()
    text_order = []  # preserve first-seen order of tweets
    for tag in valid_subtasks:
        for (text, chunk, chunk_id, start_id, end_id, tok_tweet, masked_tweet, gold_chunk, label) in task_instances[tag]:
            instance = (text, chunk, chunk_id, start_id, end_id, tok_tweet, masked_tweet)
            if text not in per_text:
                text_order.append(text)
                per_text[text] = dict()
            per_text[text].setdefault(instance, dict())
            per_text[text][instance][tag] = (gold_chunk, label)
    # Fill in default labels so every instance covers every valid subtask.
    for text in text_order:
        for (instance, labels_by_tag) in per_text[text].items():
            for tag in valid_subtasks:
                if tag not in labels_by_tag:
                    labels_by_tag[tag] = ([], 0)
            assert len(labels_by_tag) == len(valid_subtasks)
            per_text[text][instance] = labels_by_tag
    multitask_instances = []
    for text in text_order:
        for (instance, labels_by_tag) in per_text[text].items():
            multitask_instances.append((*instance, labels_by_tag))
    return (multitask_instances, valid_subtasks)
|
def split_multitask_instances_in_train_dev_test(multitask_instances, TRAIN_RATIO=0.6, DEV_RATIO=0.15):
    """Split instances into train/dev/test by tweet, so all instances of one
    tweet land in the same split. Tweets keep first-seen order; the first
    TRAIN_RATIO go to train, the next DEV_RATIO to dev, the rest to test.
    """
    seen = set()
    tweet_order = []
    for (tweet, _c, _cid, _s, _e, _tt, _mt, _lab) in multitask_instances:
        if tweet not in seen:
            seen.add(tweet)
            tweet_order.append(tweet)
    cut_train = int(len(tweet_order) * TRAIN_RATIO)
    cut_dev = cut_train + int(len(tweet_order) * DEV_RATIO)
    segment_of = dict()
    for tweet in tweet_order[:cut_train]:
        segment_of[tweet] = 'train'
    for tweet in tweet_order[cut_train:cut_dev]:
        segment_of[tweet] = 'dev'
    for tweet in tweet_order[cut_dev:]:
        segment_of[tweet] = 'test'
    buckets = {'train': list(), 'dev': list(), 'test': list()}
    for instance in multitask_instances:
        buckets[segment_of[instance[0]]].append(instance)
    return (buckets['train'], buckets['dev'], buckets['test'])
|
def split_instances_in_train_dev_test(instances, TRAIN_RATIO=0.6, DEV_RATIO=0.15):
    """Split 9-tuple instances into train/dev/test grouped by tweet.

    Same policy as split_multitask_instances_in_train_dev_test: tweets keep
    first-seen order and every instance of a tweet goes to a single split.
    """
    seen = set()
    tweet_order = []
    for (tweet, _c, _cid, _s, _e, _tt, _mt, _g, _lab) in instances:
        if tweet not in seen:
            seen.add(tweet)
            tweet_order.append(tweet)
    cut_train = int(len(tweet_order) * TRAIN_RATIO)
    cut_dev = cut_train + int(len(tweet_order) * DEV_RATIO)
    segment_of = dict()
    for tweet in tweet_order[:cut_train]:
        segment_of[tweet] = 'train'
    for tweet in tweet_order[cut_train:cut_dev]:
        segment_of[tweet] = 'dev'
    for tweet in tweet_order[cut_dev:]:
        segment_of[tweet] = 'test'
    buckets = {'train': list(), 'dev': list(), 'test': list()}
    for instance in instances:
        buckets[segment_of[instance[0]]].append(instance)
    return (buckets['train'], buckets['dev'], buckets['test'])
|
def log_data_statistics(data):
    """Log instance counts and return (total, positives, negatives).

    Expects 9-tuple instances whose last element is the binary label.
    """
    total = len(data)
    logging.info(f'Total instances in the data = {total}')
    positives = sum(lab for (_t, _c, _cid, _s, _e, _tt, _mt, _g, lab) in data)
    logging.info(f'Positive labels = {positives} Negative labels = {(total - positives)}')
    return (total, positives, (total - positives))
|
def normalize_answer(s):
    """Lower-case *s*, strip punctuation, drop articles (a/an/the) and
    collapse whitespace — the SQuAD-style answer normalization."""
    lowered = s.lower()
    punctuation = set(string.punctuation)
    no_punc = ''.join(ch for ch in lowered if ch not in punctuation)
    # Articles are removed after punctuation so e.g. "the," still matches.
    no_articles = re.sub('\\b(a|an|the)\\b', ' ', no_punc, flags=re.UNICODE)
    return ' '.join(no_articles.split())
|
def get_tokens(s):
    """Tokenize *s* after normalization; empty or None input yields []."""
    if not s:
        return []
    return normalize_answer(s).split()
|
def compute_exact(a_gold, a_pred):
    """Return 1 when the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
|
def compute_f1(a_gold, a_pred):
    """Token-level F1 between gold and predicted answers (SQuAD style).

    Both-empty counts as a perfect match; one-empty scores 0.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    overlap = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(overlap.values())
    if not gold_toks or not pred_toks:
        # F1 is 1 only when both token lists are empty.
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = (1.0 * num_same) / len(pred_toks)
    recall = (1.0 * num_same) / len(gold_toks)
    return ((2 * precision) * recall) / (precision + recall)
|
def get_raw_scores(data, prediction_scores, positive_only=False):
    """Compute exact-match and F1 percentages for chunk predictions.

    Instances are grouped by tweet text; for each tweet the set of chunks
    predicted above 0.5 is compared against the gold chunk set. When
    *positive_only* is True, tweets without gold chunks are excluded.
    Returns (exact_match_pct, f1_pct, num_scored_items).
    """
    predicted_chunks_for_each_instance = dict()
    # State per tweet: (best chunk, best score, predicted chunk set, gold chunk set).
    for ((text, chunk, chunk_id, chunk_start_text_id, chunk_end_text_id, tokenized_tweet, tokenized_tweet_with_masked_chunk, gold_chunk, label), prediction_score) in zip(data, prediction_scores):
        original_text = text
        predicted_chunks_for_each_instance.setdefault(original_text, ('', 0.0, set(), set()))
        (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks) = predicted_chunks_for_each_instance[original_text]
        if (gold_chunk != ['Not Specified']):
            gold_chunks = gold_chunks.union(set(gold_chunk))
        # Scores above 0.5 are accepted as predictions; otherwise we only
        # track the highest-scoring chunk seen so far.
        if (prediction_score > 0.5):
            predicted_chunks.add(chunk)
            current_predicted_chunk_score = prediction_score
            current_predicted_chunk = chunk
            predicted_chunks_for_each_instance[original_text] = (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks)
        elif (prediction_score > current_predicted_chunk_score):
            current_predicted_chunk_score = prediction_score
            current_predicted_chunk = chunk
            predicted_chunks_for_each_instance[original_text] = (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks)
    total = 0.0
    (exact_scores, f1_scores) = (0.0, 0.0)
    for (original_text, (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks)) in predicted_chunks_for_each_instance.items():
        if (len(gold_chunks) > 0):
            if (len(predicted_chunks) > 0):
                # Each predicted chunk scores against its best-matching gold chunk.
                for predicted_chunk in predicted_chunks:
                    (best_exact_score, best_f1_score) = (0.0, 0.0)
                    for gold_chunk in gold_chunks:
                        best_exact_score = max(best_exact_score, compute_exact(gold_chunk, predicted_chunk))
                        best_f1_score = max(best_f1_score, compute_f1(gold_chunk, predicted_chunk))
                    exact_scores += best_exact_score
                    f1_scores += best_f1_score
                    total += 1.0
            else:
                # Gold exists but nothing predicted: counts as a zero-score item.
                (best_exact_score, best_f1_score) = (0.0, 0.0)
                exact_scores += best_exact_score
                f1_scores += best_f1_score
                total += 1.0
        elif ((len(gold_chunks) == 0) and (not positive_only)):
            if (len(predicted_chunks) > 0):
                # False-positive predictions each contribute a zero-score item.
                for i in range(len(predicted_chunks)):
                    (best_exact_score, best_f1_score) = (0.0, 0.0)
                    exact_scores += best_exact_score
                    f1_scores += best_f1_score
                    total += 1.0
            else:
                # Correctly predicted "nothing": full credit.
                (best_exact_score, best_f1_score) = (1.0, 1.0)
                exact_scores += best_exact_score
                f1_scores += best_f1_score
                total += 1.0
    if (total == 0):
        predictions_exact_score = total
        predictions_f1_score = total
    else:
        predictions_exact_score = ((exact_scores * 100.0) / total)
        predictions_f1_score = ((f1_scores * 100.0) / total)
    return (predictions_exact_score, predictions_f1_score, total)
|
def get_TP_FP_FN(data, prediction_scores, THRESHOLD=0.5):
    """Compute chunk-level precision/recall/F1 at a score threshold.

    Instances are grouped by tweet text; chunks scoring above THRESHOLD form
    the predicted set, and gold chunks (with label == 1) the reference set.
    A predicted chunk counts TP when present in the gold set, FP otherwise;
    each unrecovered gold chunk counts FN. Returns (F1, P, R, TP, FP, FN).
    """
    predicted_chunks_for_each_instance = dict()
    # State per tweet: (best chunk, best score, predicted chunk set, gold chunk set).
    for ((text, chunk, chunk_id, chunk_start_text_id, chunk_end_text_id, tokenized_tweet, tokenized_tweet_with_masked_chunk, gold_chunk, label), prediction_score) in zip(data, prediction_scores):
        original_text = text
        predicted_chunks_for_each_instance.setdefault(original_text, ('', 0.0, set(), set()))
        (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks) = predicted_chunks_for_each_instance[original_text]
        # Only positively-labeled, specified gold chunks enter the reference set.
        if ((gold_chunk != ['Not Specified']) and (label == 1)):
            gold_chunks = gold_chunks.union(set(gold_chunk))
            predicted_chunks_for_each_instance[original_text] = (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks)
        if (prediction_score > THRESHOLD):
            predicted_chunks.add(chunk)
            current_predicted_chunk_score = prediction_score
            current_predicted_chunk = chunk
            predicted_chunks_for_each_instance[original_text] = (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks)
        elif (prediction_score > current_predicted_chunk_score):
            # Track the best sub-threshold chunk (kept in state, not used below).
            current_predicted_chunk_score = prediction_score
            current_predicted_chunk = chunk
            predicted_chunks_for_each_instance[original_text] = (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks)
    (TP, FP, FN) = (0.0, 0.0, 0.0)
    total_gold_chunks = 0
    for (original_text, (current_predicted_chunk, current_predicted_chunk_score, predicted_chunks, gold_chunks)) in predicted_chunks_for_each_instance.items():
        total_gold_chunks += len(gold_chunks)
        if (len(gold_chunks) > 0):
            if (len(predicted_chunks) > 0):
                for predicted_chunk in predicted_chunks:
                    if (predicted_chunk in gold_chunks):
                        TP += 1
                    else:
                        FP += 1
                for gold_chunk in gold_chunks:
                    if (gold_chunk not in predicted_chunks):
                        FN += 1
        elif (len(predicted_chunks) > 0):
            # No gold chunks: every prediction is a false positive.
            for predicted_chunk in predicted_chunks:
                FP += 1
    if ((TP + FP) == 0):
        P = 0.0
    else:
        P = (TP / (TP + FP))
    if ((TP + FN) == 0):
        R = 0.0
    else:
        R = (TP / (TP + FN))
    if ((P + R) == 0):
        F1 = 0.0
    else:
        F1 = (((2.0 * P) * R) / (P + R))
    return (F1, P, R, TP, FP, FN)
|
def read_json_line(path):
    """Parse a JSON-Lines file into a list (duplicate definition in this file)."""
    records = []
    with open(path, 'r') as source:
        records.extend(json.loads(entry) for entry in source)
    return records
|
def main():
    """Score system predictions against gold annotations, per question tag.

    Reads two JSON-Lines files from the module-level `args` (prediction and
    golden), aligns them by 'id', and prints precision/recall/F1 of the
    predicted chunks for every annotation key found in the first golden line.
    NOTE: this redefines the `main` declared earlier in this file.
    """
    system_predictions = read_json_line(args.prediction)
    golden_predictions = read_json_line(args.golden)
    golden_predictions_dict = {}
    for each_line in golden_predictions:
        golden_predictions_dict[each_line['id']] = each_line
    # The set of question keys is taken from the first golden annotation.
    question_tag = golden_predictions[0]['golden_annotation']
    result = []
    for each_task in question_tag:
        curr_task = {}
        (TP, FP, FN) = (0.0, 0.0, 0.0)
        for each_line in system_predictions:
            # Case-fold chunks and drop the 'Not Specified' placeholder.
            curr_sys_pred = [i.lower() for i in each_line['predicted_annotation'][each_task] if (i != 'Not Specified')]
            curr_golden_ann = [i.lower() for i in golden_predictions_dict[each_line['id']]['golden_annotation'][each_task] if (i != 'Not Specified')]
            if (len(curr_golden_ann) > 0):
                for predicted_chunk in curr_sys_pred:
                    if (predicted_chunk in curr_golden_ann):
                        TP += 1
                    else:
                        FP += 1
                for gold_chunk in curr_golden_ann:
                    if (gold_chunk not in curr_sys_pred):
                        FN += 1
            elif (len(curr_sys_pred) > 0):
                # No gold chunks: every prediction is a false positive.
                for predicted_chunk in curr_sys_pred:
                    FP += 1
        if ((TP + FP) == 0):
            P = 0.0
        else:
            P = (TP / (TP + FP))
        if ((TP + FN) == 0):
            R = 0.0
        else:
            R = (TP / (TP + FN))
        if ((P + R) == 0):
            F1 = 0.0
        else:
            F1 = (((2.0 * P) * R) / (P + R))
        curr_task['F1'] = F1
        curr_task['P'] = P
        curr_task['R'] = R
        curr_task['TP'] = TP
        curr_task['FP'] = FP
        curr_task['FN'] = FN
        N = (TP + FN)
        curr_task['N'] = N
        result.append(curr_task)
        print(each_task.replace('.Response', ''))
        print('P:', curr_task['P'], 'R:', curr_task['R'], 'F1:', curr_task['F1'])
        print('=======')
|
def read_json_line(path):
    """Parse a JSON-Lines file into a list (duplicate definition in this file)."""
    with open(path, 'r') as fp:
        raw_lines = fp.readlines()
    return [json.loads(raw) for raw in raw_lines]
|
def format_checker_each_file(category, input_data_path):
    """Validate one prediction file for the given event *category*.

    Checks that the file is JSON-Lines with exactly 500 records, each carrying
    'id' and 'predicted_annotation' (a dict of lists) with the slot count
    expected for the category. Raises AssertionError on violations.

    Bug fix: the original used a bare `except:` that set input_data to None
    and then crashed on `len(None)` with an unrelated TypeError; we now print
    the hint and re-raise the original parsing error.
    """
    print('[I] Checking', category.upper(), 'category')
    try:
        input_data = read_json_line(input_data_path)
    except Exception:
        print('[ERROR] check your file format, should be .jsonl')
        raise
    assert (len(input_data) == 500), 'check the number of predictions, should be 500'
    # Expected number of annotation slots per event category.
    expected_slots = {'positive': 9, 'negative': 7, 'can_not_test': 5, 'death': 5, 'cure': 3}
    for each_line in input_data:
        curr_keys = each_line.keys()
        assert ('id' in curr_keys), 'input missing id field'
        assert ('predicted_annotation' in curr_keys), 'input missing predicted annotations'
        for each_pred in each_line['predicted_annotation'].items():
            assert isinstance(each_pred[1], list), (each_pred[0] + ' contains prediction with no list format')
        if category in expected_slots:
            assert (len(each_line['predicted_annotation']) == expected_slots[category]), 'check number of slots'
    print('[I] You have passed the format checker for', category.upper(), 'category')
    return None
|
def format_checker(input_folder_path):
    """Validate all five category prediction files under *input_folder_path*.

    The category is parsed from each filename's trailing '-<category>.jsonl'
    segment; each file is then checked with format_checker_each_file.
    """
    prediction_files = glob.glob((input_folder_path + '*.jsonl'))
    assert (len(prediction_files) == 5), 'missing prediction files - should be 5 files'
    valid_categories = ['positive', 'negative', 'can_not_test', 'death', 'cure']
    for file_path in prediction_files:
        category = file_path.split('/')[(- 1)].split('-')[(- 1)].replace('.jsonl', '')
        assert (category in valid_categories), 'check your event category name.'
        format_checker_each_file(category, file_path)
    return None
|
def calPR(true_label, conf_score):
    """Compute the precision/recall curve for binary labels.

    Pairs each confidence score with its label, sorts by score descending,
    and sweeps the threshold, appending one (precision, recall) point per
    example. Returns (precision, recall) as lists; both empty when there is
    no positive label (a warning is printed in that case).
    """
    ranked = sorted(zip(conf_score, true_label), key=(lambda pair: pair[0]), reverse=True)
    tp = 0
    fp = 0
    precision = []
    recall = []
    f1 = []
    if (np.sum(true_label) == 0):
        print('[WARNING] no true label')
    else:
        n_positives = np.sum(true_label)  # invariant across the sweep
        for (_score, label) in ranked:
            if (label == 1):
                tp += 1
            else:
                fp += 1
            p_val = (float(tp) / (tp + fp))
            r_val = (float(tp) / n_positives)
            if ((p_val == 0) and (r_val == 0)):
                f1_val = 0
            else:
                f1_val = ((2 * (p_val * r_val)) / (p_val + r_val))
            precision.append(p_val)
            recall.append(r_val)
            f1.append(f1_val)
    return (precision, recall)
|
def printTopFeatures(train_ngram_dict, lr):
    """Print the 50 highest-weighted n-gram features of a trained linear model.

    *train_ngram_dict* maps token -> column index; *lr* must expose `coef_`
    (as from sklearn's LogisticRegression). Assumes at least 50 features.
    """
    index_to_token = {idx: tok for (tok, idx) in train_ngram_dict.items()}
    ranked = sorted(
        ((index_to_token[i], lr.coef_[0][i]) for i in range(len(train_ngram_dict))),
        key=(lambda pair: pair[1]),
        reverse=True,
    )
    for i in range(50):
        print(ranked[i][0], ('%.2f' % ranked[i][1]))
    return None
|
def convertToSparseMatrix(features_idx, features_dict):
    """Convert per-row token-index lists into a CSR count matrix.

    Returns a dict with 'idx' ([row, col] pairs), 'sparse_matrix' (CSR of
    shape (n_rows, vocab_size)), 'elements' (all-ones data), and 'dim'.
    NOTE(review): 'dim' records len(features_dict) + 1 columns while the
    matrix itself is built with len(features_dict) — preserved as-is; confirm
    which one downstream consumers rely on.
    """
    location = [[row_idx, token] for (row_idx, row) in enumerate(features_idx) for token in row]
    rows = [pair[0] for pair in location]
    cols = [pair[1] for pair in location]
    elements = [1] * len(location)  # one count per (row, token) occurrence
    dim = [len(features_idx), (len(features_dict) + 1)]
    sparse_matrix = csr_matrix((elements, (rows, cols)), shape=(len(features_idx), len(features_dict)))
    return {
        'idx': location,
        'sparse_matrix': sparse_matrix,
        'elements': elements,
        'dim': dim,
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.