"""Universal Geometry Translator.

Embeds the same text with BERT and T5, projects both embeddings into a
shared latent space with (untrained) linear encoders, and reports the
cosine similarity between the two latent vectors via a Gradio UI.

NOTE(review): the UniversalEncoder projections are randomly initialized
and never trained, so the similarity score reflects random projections —
presumably a demo scaffold; confirm before relying on the number.
"""

import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer, T5EncoderModel, T5Tokenizer
import gradio as gr

# Load pretrained encoders and their tokenizers once at module import.
bert_model = BertModel.from_pretrained('bert-base-uncased')
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
t5_model = T5EncoderModel.from_pretrained('t5-small')
t5_tokenizer = T5Tokenizer.from_pretrained('t5-small')

# Inference only: make eval mode explicit (disables dropout etc.).
bert_model.eval()
t5_model.eval()


class UniversalEncoder(nn.Module):
    """Linear projection from a model-specific embedding space to a
    shared latent space of dimension ``latent_dim``."""

    def __init__(self, input_dim, latent_dim):
        super(UniversalEncoder, self).__init__()
        self.fc = nn.Linear(input_dim, latent_dim)

    def forward(self, x):
        """Project ``x`` (..., input_dim) to (..., latent_dim)."""
        return self.fc(x)


latent_dim = 512
encoder_bert = UniversalEncoder(768, latent_dim)  # BERT-base hidden size is 768
encoder_t5 = UniversalEncoder(512, latent_dim)    # T5-small hidden size is 512


def get_embedding(text, model, tokenizer):
    """Return the mean-pooled last-hidden-state embedding of ``text``.

    Shape: (1, hidden_size). Inputs are truncated to the model's maximum
    sequence length so over-long text cannot crash the forward pass.
    """
    inputs = tokenizer(text, return_tensors='pt', truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state.mean(dim=1)


def translate(text):
    """Gradio callback: cosine similarity between the BERT and T5 latent
    projections of ``text``, formatted as a display string."""
    bert_emb = get_embedding(text, bert_model, bert_tokenizer)
    t5_emb = get_embedding(text, t5_model, t5_tokenizer)
    # No gradients needed at serving time; avoid building an autograd graph.
    with torch.no_grad():
        z_bert = encoder_bert(bert_emb)
        z_t5 = encoder_t5(t5_emb)
    return f"Cosine Similarity: {torch.cosine_similarity(z_bert, z_t5).item():.4f}"


# Build Gradio UI
with gr.Blocks(title="Sheri Dee's Universal Geometry Translator") as demo:
    gr.Markdown("## 🌐 Sheri Dee's Universal Geometry Translator")
    gr.Markdown("*Find the hidden connections between worlds of language.*")
    with gr.Row():
        text_input = gr.Textbox(label="Enter your text here:")
        output = gr.Textbox(label="Cosine Similarity Score")
    translate_button = gr.Button("🔍 Analyze")
    translate_button.click(fn=translate, inputs=text_input, outputs=output)

# Launch only when run as a script, not when imported as a module.
if __name__ == "__main__":
    demo.launch()