# https://atlas.nomic.ai/data/derek2/boru-subreddit-neural-search/map
import os

import numpy as np
import pandas as pd

import nomic
from nomic import atlas

# Authenticate with Nomic Atlas using an API key from the environment
NOMIC_KEY = os.getenv('NOMIC_KEY')
nomic.login(NOMIC_KEY)

def build_nomic(dataset):
    df = dataset['train'].to_pandas()
    non_embedding_columns = ['date_utc', 'title', 'flair', 'content', 'poster', 'permalink', 'id',
                             'content_length', 'score', 'percentile_ranges']
    # Calculate the 0th, 10th, 20th, ..., 90th percentiles for the 'score' column
    percentiles = df['score'].quantile([0, .1, .2, .3, .4, .5, .6, .7, .8, .9]).tolist()
    # Ensure the bin edges are unique and include the maximum score
    bins = sorted(set(percentiles + [df['score'].max()]))
    # Define the labels for the percentile ranges
    # The number of labels must be one less than the number of bin edges
    labels = [int(i * 10) for i in range(len(bins) - 1)]
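    # e.g. with 11 unique bin edges this yields labels [0, 10, 20, ..., 90],
    # one label per percentile bucket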
    # Add a 'percentile_ranges' column to the DataFrame (the name must match
    # the entry in non_embedding_columns above, or df[non_embedding_columns]
    # raises a KeyError); this assigns each score to its percentile range
    df['percentile_ranges'] = pd.cut(df['score'], bins=bins, labels=labels, include_lowest=True)
    # Create the Atlas map from the precomputed embeddings and row metadata
    project = atlas.map_data(embeddings=np.stack(df['embedding'].values),
                             data=df[non_embedding_columns].to_dict(orient='records'),
                             id_field='id',
                             identifier='BORU Subreddit Neural Search')
    return project
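
# Example usage — a minimal sketch, assuming the input is a Hugging Face
# DatasetDict whose 'train' split carries an 'embedding' column of per-row
# vectors alongside the metadata fields listed above. The repo id below is
# hypothetical; substitute the actual dataset.
if __name__ == '__main__':
    from datasets import load_dataset
    dataset = load_dataset('your-username/boru-with-embeddings')  # hypothetical repo id
    build_nomic(dataset)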