| import gradio as gr |
| import os |
| os.environ["KERAS_BACKEND"] = "tensorflow" |
| import keras |
| import keras_nlp |
| import utils |
|
|
# Custom CSS injected into the Gradio page: a faint, fixed, full-viewport
# JWST background image (via body::before, kept behind the text with
# z-index -1) and a flex layout that vertically centers the app container.
css = """
html, body {
margin: 0;
padding: 0;
height: 100%;
overflow: hidden;
}

body::before {
content: '';
position: fixed;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
background-image: url('https://stsci-opo.org/STScI-01J5E849R5W27ZZ2C3QAE9ET75.png');
background-size: cover;
background-repeat: no-repeat;
opacity: 0.35; /* Faint background image */
background-position: center;
z-index: -1; /* Keep the background behind text */
}
.gradio-container {
display: flex;
justify-content: center;
align-items: center;
height: 100vh; /* Ensure the content is vertically centered */
}
"""
|
|
| |
# Load the fine-tuned Gemma 2B causal LM from the Hugging Face Hub.
# NOTE(review): from_preset with an "hf://" URI downloads weights on first
# run — network access and significant memory are required at import time.
gemma_lm = keras_nlp.models.CausalLM.from_preset("hf://sultan-hassan/CosmoGemma_2b_en")
# Stateful conversation wrapper around the model; assumed to expose
# send_message() and get_history() (both used by launch() below) — confirm
# against utils.ChatState.
chat = utils.ChatState(gemma_lm)
|
|
def launch(message):
    """Answer a user question and append the running conversation history.

    Sends ``message`` to the module-level chat session, then returns the
    model's reply followed by a ``history`` separator line and the full
    transcript accumulated so far.
    """
    reply = chat.send_message(message)
    transcript = chat.get_history()
    # Keep plain ``+`` concatenation (not an f-string) so non-str returns
    # from the chat wrapper fail loudly, exactly as before.
    return reply + "\nhistory\n" + transcript
|
|
# UI copy for the Gradio page (kept byte-identical to the deployed text).
_TITLE = "Hey I am CosmoGemma 👋 I can answer cosmology questions from astroph.CO research articles. Try me :)"
_DESCRIPTION = "Gemma_2b_en fine-tuned on QA pairs (~3.5k) generated from Cosmology and Nongalactic Astrophysics articles (arXiv astro-ph.CO) from 2018-2022 and tested on QA pairs (~1k) generated from 2023 articles, scoring over 75% accuracy."

# Build a simple text-in / text-out interface around launch() and start
# the local web server (blocks until the server is stopped).
iface = gr.Interface(
    fn=launch,
    inputs="text",
    outputs="text",
    css=css,
    title=_TITLE,
    description=_DESCRIPTION,
)

iface.launch()
|
|
|
|