import sys
import os
import json
import math
import time
import pickle
import hashlib
from collections import Counter, defaultdict
from typing import List, Any

import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.decomposition import PCA

from openai import OpenAI
from openai.lib._pydantic import to_strict_json_schema
from pydantic import BaseModel, ValidationError

from gram2vec import vectorizer
from gram2vec.feature_locator import find_feature_spans

from utils.llm_feat_utils import generate_feature_spans_cached
from utils.gram2vec_feat_utils import get_shorthand, get_fullform

CACHE_DIR = "datasets/embeddings_cache"
G2V_CACHE = "datasets/gram2vec_cache"
ZOOM_CACHE = "datasets/zoom_cache/features_cache.json"
REGION_CACHE = "datasets/region_cache/regions_cache.pkl"
SUMMARY_CACHE = "datasets/summary_cache/summaries.json"

os.makedirs(CACHE_DIR, exist_ok=True)
os.makedirs(G2V_CACHE, exist_ok=True)
os.makedirs(os.path.dirname(ZOOM_CACHE), exist_ok=True)
os.makedirs(os.path.dirname(REGION_CACHE), exist_ok=True)

CACHE_VERSION = 1

class style_analysis_schema(BaseModel):
    features: list[str]
    spans: dict[str, dict[str, list[str]]]


class FeatureIdentificationSchema(BaseModel):
    features: list[str]


class SpanExtractionSchema(BaseModel):
    spans: dict[str, dict[str, list[str]]]


class StyleSummarySchema(BaseModel):
    summary_paragraph: str

def compute_g2v_features(clustered_authors_df: pd.DataFrame, task_authors_df: pd.DataFrame = None, text_clm: str = 'fullText'):
    """
    Computes gram2vec feature vectors for each author and attaches them to the DataFrame
    as a 'g2v_vector' column (a mapping from feature name to z-normalized value).

    Returns a tuple (background authors' vectors, task authors' vectors); the second
    element is None when no task_authors_df is given.
    """
    if task_authors_df is not None:
        print("Concatenating task authors and background corpus authors")
        print(f"Number of task authors: {len(task_authors_df)}")
        print(f"Task author IDs: {task_authors_df.authorID.tolist()}")
        print(f"Number of background corpus authors: {len(clustered_authors_df)}")
        clustered_authors_df = pd.concat([task_authors_df, clustered_authors_df])
        print(f"Number of authors after concatenation: {len(clustered_authors_df)}")

    # Each row holds a list of documents; join them into one text per author.
    author_texts = ['\n\n'.join(x) for x in clustered_authors_df[text_clm].tolist()]
    print(f"Number of author_texts: {len(author_texts)}")

    # Cache key: MD5 over the column name and the exact texts being vectorized.
    serialized = json.dumps({
        "col": text_clm,
        "texts": author_texts
    }, sort_keys=True, ensure_ascii=False)
    digest = hashlib.md5(serialized.encode("utf-8")).hexdigest()
    cache_path = os.path.join(G2V_CACHE, f"{digest}.pkl")

    if os.path.exists(cache_path):
        print(f"\n\n\n\033[1m\033[92m>>> Cache hit for {cache_path} <<<\033[0m\n")
        with open(cache_path, "rb") as f:
            clustered_authors_df = pickle.load(f)
    else:
        print(f"\n\n\n\033[1m\033[91m>>> Cache miss for {cache_path} => Computing fresh! <<<\033[0m\n")

        g2v_feats_df = vectorizer.from_documents(author_texts, batch_size=8)

        print(f"Number of g2v feature rows: {len(g2v_feats_df)}")
        print(f"Number of author IDs: {len(clustered_authors_df.authorID.tolist())}")
        print(f"Number of g2v feature vectors: {len(g2v_feats_df.to_numpy().tolist())}")

        # Duplicate author IDs would silently collapse rows in the dict below.
        ids = clustered_authors_df.authorID.tolist()
        counter = Counter(ids)
        duplicates = [k for k, v in counter.items() if v > 1]
        print(f"Duplicate authorIDs: {duplicates}")
        print(f"Number of duplicates: {len(ids) - len(set(ids))}")

        author_to_g2v_feats = {author_id: vec for author_id, vec in zip(ids, g2v_feats_df.to_numpy().tolist())}
        print(f"Number of authors with g2v features: {len(author_to_g2v_feats)}")

        # Z-normalize each feature across authors; guard against zero variance.
        vector_std = np.std(list(author_to_g2v_feats.values()), axis=0)
        vector_mean = np.mean(list(author_to_g2v_feats.values()), axis=0)
        vector_std[vector_std == 0] = 1.0
        author_to_g2v_feats_z_normalized = {author_id: (vec - vector_mean) / vector_std
                                            for author_id, vec in author_to_g2v_feats.items()}

        print(f"Number of authors with normalized g2v features: {len(author_to_g2v_feats_z_normalized)}")
        print(f"Length of clustered authors df: {len(clustered_authors_df)}")

        # Store each author's vector as a {feature_name: value} dict.
        clustered_authors_df['g2v_vector'] = [{feat: val for val, feat in zip(vec, g2v_feats_df.columns.tolist())}
                                              for vec in author_to_g2v_feats_z_normalized.values()]

        with open(cache_path, "wb") as f:
            pickle.dump(clustered_authors_df, f)
        print(f"\n\n\n\033[1m\033[92m>>> Saved to {cache_path} <<<\033[0m\n")

    if task_authors_df is not None:
        task_authors_df = clustered_authors_df[clustered_authors_df.authorID.isin(task_authors_df.authorID.tolist())]
        clustered_authors_df = clustered_authors_df[~clustered_authors_df.authorID.isin(task_authors_df.authorID.tolist())]
        return clustered_authors_df['g2v_vector'].tolist(), task_authors_df['g2v_vector'].tolist()

    return clustered_authors_df['g2v_vector'].tolist(), None

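# Illustrative usage (a minimal sketch, not part of the pipeline; assumes a toy
# DataFrame with 'authorID' and list-valued 'fullText' columns):
#
#   bg_df = pd.DataFrame({'authorID': ['bg_1', 'bg_2'],
#                         'fullText': [['first doc', 'second doc'], ['another doc']]})
#   bg_vectors, task_vectors = compute_g2v_features(bg_df)
#   # bg_vectors[0] is a dict mapping gram2vec feature names to z-scored values;
#   # task_vectors is None because no task_authors_df was passed.
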
def get_task_authors_from_background_df(background_df):
    task_authors_df = background_df[background_df.authorID.isin(["Q_author", "a0_author", "a1_author", "a2_author"])]
    return task_authors_df

def instance_to_df(instance, predicted_author=None, ground_truth_author=None):
    def _is_author(candidate_idx, label):
        return int(label) == candidate_idx if label is not None else None

    task_authors_df = pd.DataFrame([
        {'authorID': 'Mystery author', 'fullText': instance['Q_fullText'], 'predicted': None, 'ground_truth': None},
        {'authorID': 'Candidate Author 1', 'fullText': instance['a0_fullText'], 'predicted': _is_author(0, predicted_author), 'ground_truth': _is_author(0, ground_truth_author)},
        {'authorID': 'Candidate Author 2', 'fullText': instance['a1_fullText'], 'predicted': _is_author(1, predicted_author), 'ground_truth': _is_author(1, ground_truth_author)},
        {'authorID': 'Candidate Author 3', 'fullText': instance['a2_fullText'], 'predicted': _is_author(2, predicted_author), 'ground_truth': _is_author(2, ground_truth_author)},
    ])
    return task_authors_df

def generate_style_embedding(background_corpus_df: pd.DataFrame, text_clm: str, model_name: str, dimensionality_reduction: bool = True, dimensions: int = 100):
    """
    Generates style embeddings for documents in a background corpus using a specified model.
    If a row in `text_clm` contains a list of strings, the final embedding for that row
    is the average of the embeddings of all strings in the list.

    Args:
        background_corpus_df (pd.DataFrame): DataFrame containing the corpus.
        text_clm (str): Name of the column containing the text data (either string or list of strings).
        model_name (str): Name of the model to use for generating embeddings.
        dimensionality_reduction (bool): If True, reduce the embeddings with PCA.
        dimensions (int): Target dimensionality for PCA.

    Returns:
        list: One embedding (numpy array) per row of the input DataFrame. If the model
        is not supported, the input DataFrame is returned unchanged.
    """
    from sentence_transformers import SentenceTransformer
    import torch

    if model_name not in [
        'gabrielloiseau/LUAR-MUD-sentence-transformers',
        'gabrielloiseau/LUAR-CRUD-sentence-transformers',
        'miladalsh/light-luar',
        'AnnaWegmann/Style-Embedding',
    ]:
        print('Model is not supported')
        return background_corpus_df

    print(f"Generating style embeddings using {model_name} on column '{text_clm}'...")

    model = SentenceTransformer(model_name)
    embedding_dim = model.get_sentence_embedding_dimension()

    # Detect whether the text column holds lists of documents or plain strings.
    is_list_column = False
    if not background_corpus_df.empty:
        series_no_na = background_corpus_df[text_clm].dropna()
        if not series_no_na.empty:
            first_valid_item = series_no_na.iloc[0]
            if isinstance(first_valid_item, list):
                is_list_column = True

    if is_list_column:
        # Flatten all documents into one batch, then average per row afterwards.
        texts_to_encode = []
        row_lengths = []
        for text_list in background_corpus_df[text_clm]:
            if isinstance(text_list, list) and text_list:
                texts_to_encode.extend(text_list)
                row_lengths.append(len(text_list))
            else:
                row_lengths.append(0)

        if texts_to_encode:
            all_embeddings = model.encode(texts_to_encode, convert_to_tensor=True, show_progress_bar=True)
        else:
            all_embeddings = torch.empty((0, embedding_dim), device=model.device)

        final_embeddings = []
        current_pos = 0
        for length in row_lengths:
            if length > 0:
                row_embeddings = all_embeddings[current_pos:current_pos + length]
                avg_embedding = torch.mean(row_embeddings, dim=0)
                final_embeddings.append(avg_embedding.cpu().numpy())
                current_pos += length
            else:
                final_embeddings.append(np.zeros(embedding_dim))
    else:
        texts = background_corpus_df[text_clm].fillna("").tolist()
        embeddings = model.encode(texts, show_progress_bar=True)
        final_embeddings = list(embeddings)

    if dimensionality_reduction:
        if len(final_embeddings) > 0 and len(final_embeddings[0]) > dimensions:
            pca = PCA(n_components=dimensions)
            final_embeddings = pca.fit_transform(final_embeddings)

    return list(final_embeddings)

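# Illustrative usage (a minimal sketch; the model is downloaded on first call):
#
#   df = pd.DataFrame({'fullText': [['doc A1', 'doc A2'], ['doc B1']]})
#   embs = generate_style_embedding(df, 'fullText', 'AnnaWegmann/Style-Embedding',
#                                   dimensionality_reduction=False)
#   # embs[0] is the mean embedding of the first row's two documents.
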
def cached_generate_style_embedding(background_corpus_df: pd.DataFrame,
                                    text_clm: str,
                                    model_name: str,
                                    task_authors_df: pd.DataFrame = None):
    """
    Wraps `generate_style_embedding`, caching its output in pickle files
    keyed by an MD5 of (model_name + column name + text list). If the cache
    exists, loads and returns it instead of recomputing.

    Returns (background_corpus_df, task_authors_df); the second element is None
    when no task_authors_df is given.
    """
    if task_authors_df is not None:
        print("Concatenating task authors and background corpus authors")
        print(f"Number of task authors: {len(task_authors_df)}")
        print(f"Task author IDs: {task_authors_df.authorID.tolist()}")
        print(f"Number of background corpus authors: {len(background_corpus_df)}")
        background_corpus_df = pd.concat([task_authors_df, background_corpus_df])
        print(f"Number of authors after concatenation: {len(background_corpus_df)}")

    texts = background_corpus_df[text_clm].fillna("").tolist()

    serialized = json.dumps({
        "model": model_name,
        "col": text_clm,
        "texts": texts
    }, sort_keys=True, ensure_ascii=False)
    digest = hashlib.md5(serialized.encode("utf-8")).hexdigest()
    cache_path = os.path.join(CACHE_DIR, f"{digest}.pkl")

    if os.path.exists(cache_path):
        print(f"\n\n\n\033[1m\033[92m>>> Cache hit for {cache_path} for {model_name} on column '{text_clm}' <<<\033[0m\n")
        with open(cache_path, "rb") as f:
            background_corpus_df = pickle.load(f)
    else:
        print(f"\n\n\n\033[1m\033[91m>>> Cache miss for {cache_path} for {model_name} on column '{text_clm}' <<<\033[0m\n")
        task_and_background_embeddings = generate_style_embedding(background_corpus_df, text_clm, model_name, dimensionality_reduction=False)

        col_name = f'{model_name.split("/")[-1]}_style_embedding'
        background_corpus_df[col_name] = task_and_background_embeddings

        with open(cache_path, "wb") as f:
            pickle.dump(background_corpus_df, f)
        print(f"\n\n\n\033[1m\033[92m>>> Cache saved for {cache_path} for {model_name} on column '{text_clm}' <<<\033[0m\n")

    if task_authors_df is not None:
        task_authors_df = background_corpus_df[background_corpus_df.authorID.isin(task_authors_df.authorID.tolist())]
        background_corpus_df = background_corpus_df[~background_corpus_df.authorID.isin(task_authors_df.authorID.tolist())]

    return background_corpus_df, task_authors_df

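# Illustrative usage (a minimal sketch; column names follow the conventions above):
#
#   bg_df, task_df = cached_generate_style_embedding(bg_df, 'fullText',
#                                                    'AnnaWegmann/Style-Embedding',
#                                                    task_authors_df=task_df)
#   # Adds a 'Style-Embedding_style_embedding' column and caches the whole frame
#   # under datasets/embeddings_cache/<md5>.pkl.
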
def generate_cache_key(author_names: List[str], max_num_feats: int) -> str:
    """Generate a unique cache key based on author names and max features."""
    sorted_authors = sorted(author_names)
    key_data = {
        "authors": sorted_authors,
        "max_num_feats": max_num_feats
    }
    key_string = json.dumps(key_data, sort_keys=True)
    return hashlib.md5(key_string.encode()).hexdigest()

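# The key is order-insensitive because the author list is sorted before hashing,
# e.g. (a minimal sketch):
#
#   generate_cache_key(['b', 'a'], 5) == generate_cache_key(['a', 'b'], 5)  # True
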
def identify_style_features(author_texts, author_names: list[str], max_num_feats: int = 5) -> list[str]:
    cache_key = None
    if author_names:
        cache_key = generate_cache_key(author_names, max_num_feats)

    if os.path.exists(ZOOM_CACHE):
        with open(ZOOM_CACHE, 'r') as f:
            cache = json.load(f)
    else:
        cache = {}

    if cache_key in cache:
        print(f"\nCache hit! Using cached features for authors: {author_names}")
        print(f"\n\n\n\033[1m\033[92m>>> Cache hit for {cache_key} in {ZOOM_CACHE} <<<\033[0m\n")
        return cache[cache_key]["features"]
    else:
        print(f"\n\n\n\033[1m\033[91m>>> Cache miss for {cache_key} in {ZOOM_CACHE}\nComputing features for authors: {author_names} <<<\033[0m\n")

    client = OpenAI(base_url=os.getenv("OPENAI_BASE_URL", None), api_key=os.getenv("OPENAI_API_KEY"))
    prompt = f"""Identify {max_num_feats} writing style features that are common between the authors' texts.
Author Texts:

{author_texts}
"""

    def _make_call():
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": "You are a forensic linguist who knows how to analyze linguistic and stylometric similarities between texts."},
                {"role": "user", "content": prompt}
            ],
            response_format={
                "type": "json_schema",
                "json_schema": {
                    "name": "FeatureIdentificationSchema",
                    "schema": to_strict_json_schema(FeatureIdentificationSchema)
                }
            }
        )
        return json.loads(response.choices[0].message.content)

    features = retry_call(_make_call, FeatureIdentificationSchema).features

    if cache_key and author_names:
        cache[cache_key] = {
            "features": features
        }
        with open(ZOOM_CACHE, 'w') as f:
            json.dump(cache, f, indent=2)
        print(f"\n\n\n\033[1m\033[92m>>> Cache saved for {cache_key} in {ZOOM_CACHE} <<<\033[0m\n")
        print(f"Cached features for authors: {author_names}")

    return features

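# Illustrative usage (a minimal sketch; requires OPENAI_API_KEY to be set):
#
#   feats = identify_style_features("Author 1:\n...\n\nAuthor 2:\n...",
#                                   ['auth_1', 'auth_2'], max_num_feats=3)
#   # e.g. ['frequent parenthetical asides', 'short declarative sentences', ...]
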
def retry_call(call_fn, schema_class, max_attempts=3, wait_sec=2):
    """Call `call_fn` up to `max_attempts` times, validating each result against `schema_class`."""
    for attempt in range(max_attempts):
        try:
            result = call_fn()
            validated = schema_class(**result)
            return validated
        except (ValidationError, KeyError, json.JSONDecodeError) as e:
            print(f"Attempt {attempt + 1} failed with error: {e}")
            time.sleep(wait_sec)
    raise RuntimeError("All retry attempts failed for OpenAI call.")

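# Illustrative usage (a minimal sketch with a fake call, no API involved):
#
#   result = retry_call(lambda: {"summary_paragraph": "Concise, formal prose."},
#                       StyleSummarySchema)
#   result.summary_paragraph  # -> "Concise, formal prose."
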
def extract_all_spans(authors_df: pd.DataFrame, features: list[str], cluster_label_clm_name: str = 'authorID') -> dict[str, dict[str, list[str]]]:
    """
    For each author, use `generate_feature_spans_cached` to get feature->span mappings.
    Returns a dict: {author_name: {feature: [spans]}}
    """
    client = OpenAI(base_url=os.getenv("OPENAI_BASE_URL", None), api_key=os.getenv("OPENAI_API_KEY"))

    spans_by_author = {}

    for _, row in authors_df.iterrows():
        author_name = str(row[cluster_label_clm_name])
        role = author_name
        full_text = row['fullText']
        spans = generate_feature_spans_cached(client, full_text, features, role)
        spans_by_author[author_name] = spans

    return spans_by_author

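# The returned structure nests spans under author, then feature, e.g. (illustrative):
#
#   {'Candidate Author 1': {'frequent exclamations': ['What a day!', 'Unreal!']},
#    'Mystery author':     {'frequent exclamations': []}}
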
def compute_clusters_style_representation_3(
    background_corpus_df: pd.DataFrame,
    cluster_ids: List[Any],
    cluster_label_clm_name: str = 'authorID',
    max_num_feats: int = 25,
    max_num_documents_per_author=10,
    max_num_authors=10,
    max_authors_for_span_extraction=4,
    top_k: int = 10,
    predicted_author=None,
    return_only_feats=False
):
    print(f"Computing style representation for visible clusters: {len(cluster_ids)}")

    background_corpus_df['fullText'] = background_corpus_df['fullText'].map(
        lambda x: '\n\n'.join(x[:max_num_documents_per_author]) if isinstance(x, list) else x)
    background_corpus_df_feat_id = background_corpus_df[background_corpus_df[cluster_label_clm_name].isin(cluster_ids)]

    author_texts = background_corpus_df_feat_id['fullText'].tolist()[:max_num_authors]
    author_texts = "\n\n".join(["Author {}:\n".format(i + 1) + text for i, text in enumerate(author_texts)])
    author_names = background_corpus_df_feat_id[cluster_label_clm_name].tolist()[:max_num_authors]
    print(f"Number of authors: {len(background_corpus_df_feat_id)}")

    features = identify_style_features(author_texts, author_names, max_num_feats=max_num_feats)

    if return_only_feats:
        return features

    span_df = background_corpus_df.iloc[:max_authors_for_span_extraction]
    author_names = span_df[cluster_label_clm_name].tolist()[:max_authors_for_span_extraction]
    print(f"Number of authors for span detection: {len(span_df)}")

    spans_by_author = extract_all_spans(span_df, features, cluster_label_clm_name)

    task_author_names = {'Mystery author', 'Candidate Author 1', 'Candidate Author 2', 'Candidate Author 3'}

    # Score each feature: spans found in selected task authors count positively,
    # spans found in any other author count negatively.
    feature_importance = {f: 0 for f in features}
    for author, feature_map in spans_by_author.items():
        if author in task_author_names.intersection(set(cluster_ids)):
            for feature, spans in feature_map.items():
                if spans:
                    feature_importance[feature] += len(spans)
        else:
            for feature, spans in feature_map.items():
                if spans:
                    feature_importance[feature] -= len(spans)

    print(f"Feature importance scores: {feature_importance}")
    selected_features_ranked = sorted(feature_importance, key=lambda f: -feature_importance[f])[:int(top_k)]

    return {
        "features": list(selected_features_ranked),
        "spans": spans_by_author
    }

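# Feature ranking sketch (illustrative): if a feature has 4 spans across the
# selected task authors and 1 span in a non-selected author, its score is
# 4 - 1 = 3, so it ranks above a feature that only appears outside the selection.
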
def summarize_style_features_to_paragraph(features: list[str]) -> str:
    """
    Takes a list of writing style features and uses an LLM to generate a
    coherent, descriptive paragraph summarizing the style.

    Args:
        features (list[str]): A list of style features.

    Returns:
        str: A single paragraph summarizing the writing style.
    """
    if not features:
        return "No style features were identified for this selection."

    feature_key = hashlib.md5(json.dumps(sorted(features)).encode()).hexdigest()

    os.makedirs(os.path.dirname(SUMMARY_CACHE), exist_ok=True)
    if os.path.exists(SUMMARY_CACHE):
        with open(SUMMARY_CACHE, 'r') as f:
            try:
                cache = json.load(f)
            except json.JSONDecodeError:
                cache = {}
    else:
        cache = {}

    if feature_key in cache:
        print(f"Cache hit for style summary. Key: {feature_key}")
        return cache[feature_key]

    print("Cache miss for style summary. Generating new summary...")
    client = OpenAI(base_url=os.getenv("OPENAI_BASE_URL", None), api_key=os.getenv("OPENAI_API_KEY"))

    feature_list_str = "\n".join([f"- {feat}" for feat in features])
    prompt = f"""You are a linguistic analyst. Your task is to synthesize the following list of writing style features into a single, coherent, and descriptive paragraph. The paragraph should flow naturally and explain the overall writing style of an author based on these features. Be concise and only mention the features without referring to example spans.

Style Features:
{feature_list_str}

Please provide the summary as a single paragraph.
"""

    def _make_call():
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": prompt}],
            response_format={"type": "json_schema", "json_schema": {"name": "StyleSummarySchema", "schema": to_strict_json_schema(StyleSummarySchema)}}
        )
        return json.loads(response.choices[0].message.content)

    summary_paragraph = retry_call(_make_call, StyleSummarySchema).summary_paragraph

    cache[feature_key] = summary_paragraph
    with open(SUMMARY_CACHE, 'w') as f:
        json.dump(cache, f, indent=2)

    return summary_paragraph

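# Illustrative usage (a minimal sketch; requires OPENAI_API_KEY):
#
#   summarize_style_features_to_paragraph(
#       ['frequent use of semicolons', 'long subordinate clauses'])
#   # -> one flowing paragraph describing the style; repeated calls with the same
#   #    feature list are served from datasets/summary_cache/summaries.json.
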
def find_closest_cluster_style(texts: list[str], interp_space, model_name: str):
    """
    Computes the average embedding for a list of texts and finds the most similar
    cluster from the interpretable space, returning its style description together
    with the input embedding.

    Args:
        texts (list[str]): A list of texts for which to find a style description.
        interp_space (dict): Mapping from cluster label to [cluster embedding, style description].
        model_name (str): The name of the sentence transformer model to use for embeddings.

    Returns:
        tuple: (style description of the most similar cluster, input embedding).
    """
    if not texts:
        return "No texts provided for analysis."

    temp_df = pd.DataFrame([{'fullText': texts}])
    input_embedding_list = generate_style_embedding(temp_df, 'fullText', model_name, dimensionality_reduction=False)

    if not input_embedding_list:
        return "Could not generate an embedding for the provided texts."

    input_embedding = np.array(input_embedding_list[0]).reshape(1, -1)

    cluster_embeddings = {int(k): np.array(v[0]) for k, v in interp_space.items()}

    best_cluster_label = -1
    max_similarity = -1
    for label, cluster_emb in cluster_embeddings.items():
        similarity = cosine_similarity(input_embedding, cluster_emb.reshape(1, -1))[0][0]
        if similarity > max_similarity:
            max_similarity = similarity
            best_cluster_label = label

    return interp_space.get(str(best_cluster_label), [None, "Could not find a matching style description."])[1], input_embedding[0]

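# Expected interp_space shape (illustrative): keys are stringified cluster labels,
# values are [cluster_embedding, description_paragraph], e.g.
#
#   interp_space = {'0': [[0.12, -0.03, ...], 'Casual, emoji-heavy style...'],
#                   '1': [[0.40,  0.22, ...], 'Formal academic register...']}
#   description, emb = find_closest_cluster_style(['some text'], interp_space,
#                                                 'AnnaWegmann/Style-Embedding')
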
def compute_clusters_g2v_representation(
    background_corpus_df: pd.DataFrame,
    author_ids: List[Any],
    other_author_ids: List[Any],
    features_clm_name: str,
    top_n: int = 15,
    max_candidates_for_span_sorting: int = 50,
    predicted_author: int = None
) -> List[tuple]:
    print(f"[INFO] Computing G2V representation with predicted_author: {predicted_author}")

    selected_mask = background_corpus_df['authorID'].isin(author_ids).to_numpy()
    if not selected_mask.any():
        return []

    all_feature_dicts = background_corpus_df[features_clm_name].tolist()
    if not all_feature_dicts:
        return []

    # Build a (num_authors x num_features) matrix from the per-author feature dicts.
    all_features = list(all_feature_dicts[0].keys())
    population_matrix = np.array(
        [[feat_dict.get(feat, 0.0) for feat in all_features] for feat_dict in all_feature_dicts],
        dtype=float
    )

    # Z-score each feature over the whole population; guard against zero variance.
    col_means = population_matrix.mean(axis=0)
    col_stds = population_matrix.std(axis=0)
    col_stds[col_stds == 0] = 1.0
    z_population = (population_matrix - col_means) / col_stds

    selected_mean = z_population[selected_mask].mean(axis=0)

    # Keep only features that are above average for the selected authors.
    feature_scores = [(feat, float(score)) for feat, score in zip(all_features, selected_mean) if score > 0]
    feature_scores.sort(key=lambda x: x[1], reverse=True)

    candidate_features = feature_scores[:top_n]

    task_author_names = {'Mystery author', 'Candidate Author 1', 'Candidate Author 2', 'Candidate Author 3'}
    task_authors_in_selection = task_author_names.intersection(set(author_ids))

    if not task_authors_in_selection:
        print("[INFO] No task authors in selection, returning z-score sorted features")
        return feature_scores[:top_n]

    task_authors_df = background_corpus_df[background_corpus_df['authorID'].isin(task_author_names)]

    print('len of task_authors_df ', len(task_authors_df))
    print('zoomed in authors {}'.format(task_authors_in_selection))

    # Re-rank candidate features by how often their spans occur in the selected
    # task authors versus the remaining task authors.
    feature_span_scores = {}
    for feat_shorthand, _ in candidate_features:
        span_score = 0
        for _, author_row in task_authors_df.iterrows():
            author_name = author_row['authorID']
            author_text = author_row['fullText']
            if isinstance(author_text, list):
                author_text = '\n\n'.join(author_text)

            try:
                spans = find_feature_spans(author_text, feat_shorthand)
                span_count = len(spans)
                if author_name in task_authors_in_selection:
                    span_score += span_count
                else:
                    span_score -= span_count
            except Exception:
                # Span extraction can fail for some feature shorthands; skip those.
                pass

        feature_span_scores[feat_shorthand] = span_score

    sorted_by_spans = sorted(
        candidate_features,
        key=lambda x: (-feature_span_scores.get(x[0], 0), -x[1])
    )

    print(f"[INFO] Top 5 gram2vec features by span score: {[(f, feature_span_scores.get(f, 0), z) for f, z in sorted_by_spans[:5]]}")

    return sorted_by_spans

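# Ranking sketch (illustrative): a feature with z-score 1.2 that yields 3 spans in
# the zoomed-in task authors and 1 span in the remaining task authors gets span
# score 3 - 1 = 2, and sorts ahead of a feature with span score 0 regardless of z-score.
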
def compute_predicted_author(task_authors_df: pd.DataFrame, col_name: str) -> int:
    """
    Computes the predicted author as the candidate whose embedding in `col_name`
    has the highest cosine similarity with the mystery author's embedding.
    """
    print("Computing predicted author using embeddings...")

    # Row 0 is the mystery author; rows 1-3 are the three candidates.
    mystery_embedding = np.array(task_authors_df.iloc[0][col_name]).reshape(1, -1)
    candidate_embeddings = np.array([
        task_authors_df.iloc[1][col_name],
        task_authors_df.iloc[2][col_name],
        task_authors_df.iloc[3][col_name]
    ])

    similarities = cosine_similarity(mystery_embedding, candidate_embeddings)[0]
    predicted_author = int(np.argmax(similarities))
    print(f"Predicted author is Candidate {predicted_author + 1}")

    return predicted_author

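# Illustrative usage (a minimal sketch with 2-D toy embeddings):
#
#   toy = pd.DataFrame({'emb': [[1.0, 0.0], [0.0, 1.0], [0.9, 0.1], [0.5, 0.5]]})
#   compute_predicted_author(toy, 'emb')  # -> 1 (Candidate 2 is closest to row 0)
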
def compute_precomputed_regions(bg_proj, bg_ids, q_proj, c_proj, pred_idx, model_name, n_neighbors=7):
    """
    Compute precomputed regions for the mystery author and candidates.

    Args:
        bg_proj: (N, 2) numpy array with 2D coordinates of background authors
        bg_ids: list of N author IDs for background authors
        q_proj: (1, 2) numpy array with mystery author coordinates
        c_proj: (3, 2) numpy array with candidate author coordinates
        pred_idx: index (0-2) of the predicted candidate
        model_name: name of the embedding model (used only in the cache key)
        n_neighbors: number of closest neighbors to include in each region

    Returns:
        str: JSON-encoded dict mapping region names to bounding boxes and author lists
    """
    print("Computing suggested regions for zoom...")
    key = hashlib.md5((model_name + str(q_proj.tolist()) + str(c_proj.tolist()) + str(n_neighbors)).encode()).hexdigest()

    if os.path.exists(REGION_CACHE):
        with open(REGION_CACHE, 'rb') as f:
            cache = pickle.load(f)
    else:
        cache = {}

    if key in cache:
        print(f"\n\n\n\033[1m\033[92m>>> Cache hit for {key} in {REGION_CACHE}: Using cached regions <<<\033[0m\n")
        return cache[key]
    else:
        print(f"\n\n\n\033[1m\033[91m>>> Cache miss for {key} in {REGION_CACHE}: Computing regions <<<\033[0m\n")

    regions = {}

    all_points = np.vstack([q_proj.reshape(1, -1), c_proj, bg_proj])
    all_ids = ['mystery'] + [f'candidate_{i}' for i in range(3)] + bg_ids

    def get_region_around_point(center_point, region_name, include_points=None):
        """Get the region around a specific point."""
        if center_point.ndim == 1:
            center_point = center_point.reshape(1, -1)

        # Find the background authors closest to the center point.
        distances = euclidean_distances(center_point, bg_proj)[0]
        closest_indices = np.argsort(distances)[:n_neighbors]
        closest_authors = [bg_ids[i] for i in closest_indices]
        closest_points = bg_proj[closest_indices]

        if include_points is not None:
            region_points = include_points.copy()
            region_points = np.vstack([region_points, center_point, closest_points])
        else:
            region_points = np.vstack([center_point, closest_points])

        x_min, x_max = region_points[:, 0].min(), region_points[:, 0].max()
        y_min, y_max = region_points[:, 1].min(), region_points[:, 1].max()

        # Pad the bounding box by 10% on each axis.
        x_padding = (x_max - x_min) * 0.1
        y_padding = (y_max - y_min) * 0.1

        bbox = {
            'xaxis': [x_min - x_padding, x_max + x_padding],
            'yaxis': [y_min - y_padding, y_max + y_padding]
        }

        return {
            'bbox': bbox,
            'authors': closest_authors,
            'center_point': center_point,
            'description': f"Region around {region_name} ({len(closest_authors)} closest authors)"
        }

    def get_region_between_points(point1, point2, name1, name2):
        """Get the region around the midpoint between two points."""
        midpoint = (point1 + point2) / 2
        region_name = f"{name1} & {name2}"
        include_points = np.vstack([point1.reshape(1, -1), point2.reshape(1, -1)])
        return get_region_around_point(midpoint, region_name, include_points=include_points)

    # One region per candidate neighborhood.
    for i in range(3):
        regions[f"Candidate {i+1} Neighborhood"] = get_region_around_point(
            c_proj[i], f"Candidate {i+1}"
        )

    # One region between the mystery author and the predicted candidate.
    for i in range(3):
        if i == pred_idx:
            region_name = f"Mystery & Candidate {i+1}"
            regions[region_name] = get_region_between_points(
                q_proj, c_proj[i], "Mystery", f"Candidate {i+1}"
            )

    def serialize_numpy_dtypes(obj):
        """Recursively convert numpy arrays and scalars into JSON-serializable types."""
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, (np.float32, np.float64)):
            return float(obj)
        elif isinstance(obj, (np.int32, np.int64)):
            return int(obj)
        elif isinstance(obj, dict):
            return {key: serialize_numpy_dtypes(value) for key, value in obj.items()}
        elif isinstance(obj, list):
            return [serialize_numpy_dtypes(item) for item in obj]
        else:
            return obj

    serializable_regions = serialize_numpy_dtypes(regions)
    response = json.dumps(serializable_regions, default=str)
    cache[key] = response
    with open(REGION_CACHE, 'wb') as f:
        print(f"\n\n\n\033[1m\033[92m>>> Cache saved for {key} in {REGION_CACHE} <<<\033[0m\n")
        pickle.dump(cache, f)

    return response

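# Returned JSON sketch (illustrative): the response decodes to something like
#
#   {"Candidate 1 Neighborhood": {"bbox": {"xaxis": [x0, x1], "yaxis": [y0, y1]},
#                                 "authors": ["bg_12", "bg_7", ...],
#                                 "center_point": [[cx, cy]],
#                                 "description": "Region around Candidate 1 (7 closest authors)"},
#    "Mystery & Candidate 2": {...}}
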