| | """ |
| | WILL - Gradio UI |
| | |
| | ZeroGPU対応のGradioインターフェース |
| | """ |
| | from typing import List, Tuple, Optional |
| | import base64 |
| | from io import BytesIO |
| |
|
| | import gradio as gr |
| |
|
| | from ...models.registry import ModelRegistry, DEFAULT_MODEL_KEY |
| | from ...generators.debris_generator import DebrisGenerator |
| | from ...visualizers.signal_visualizer import SignalVisualizer |
| |
|
| |
|
| | |
# Cache of loaded models keyed by registry key, so re-selecting the same
# model in the UI does not reload its weights.
_model_cache = {}
| |
|
| |
|
def get_model_choices() -> List[Tuple[str, str]]:
    """Build the model-selector choices.

    Returns:
        List of (display name, registry key) pairs, one per registered model.
    """
    configs = ModelRegistry.get_all_configs()

    choices = []
    for key in ModelRegistry.list_models():
        choices.append((configs[key].name, key))
    return choices
| |
|
| |
|
def _get_model(model_key: str):
    """Return the model for *model_key*, loading and caching it on first use."""
    try:
        return _model_cache[model_key]
    except KeyError:
        model = ModelRegistry.get(model_key)
        model.load()
        _model_cache[model_key] = model
        return model
| |
|
| |
|
def generate_debris(model_key: str) -> Tuple["PIL.Image.Image", str, str]:
    """Generate one debris sample with the selected model.

    Args:
        model_key: Registry key of the model to run.

    Returns:
        (signal image, debris text, seed text). The first element is a
        ``PIL.Image.Image`` (not a str — the previous annotation was wrong);
        on failure it is a black placeholder and the debris slot carries the
        error message, with an empty seed string.
    """
    # Lazy import: PIL is only needed when actually generating.
    import PIL.Image

    try:
        model = _get_model(model_key)
        generator = DebrisGenerator(model)
        visualizer = SignalVisualizer()

        result = generator.generate()

        # The visualizer returns a base64-encoded image; decode it into a
        # PIL image so gr.Image(type="pil") can display it directly.
        signal_img_base64 = visualizer.generate_image(
            result.noise, result.corrupted_logits
        )
        img = PIL.Image.open(BytesIO(base64.b64decode(signal_img_base64)))

        debris_text = " ".join(result.debris)
        seed_text = str(result.seed)

        return img, debris_text, seed_text

    except Exception as e:  # UI boundary: surface any failure as a message.
        error_msg = str(e)
        lowered = error_msg.lower()  # hoisted: was recomputed per check
        if "out of memory" in lowered or "cuda" in lowered:
            # Plain literal — the original was an f-string with no placeholders.
            error_text = "メモリ不足: このモデルにはGPUが必要です。小さいモデル(GPT-2, Pythia-410M等)を選択してください。"
        elif "does not exist" in lowered or "404" in lowered:
            error_text = f"モデルが見つかりません: {model_key}"
        else:
            error_text = f"エラー: {error_msg}"

        # Black placeholder keeps the image slot rendering something sane.
        img = PIL.Image.new('RGB', (400, 100), color='black')

        return img, error_text, ""
| |
|
| |
|
def create_app() -> gr.Blocks:
    """Build and return the Gradio UI.

    Layout: a GENERATE tab (model dropdown, LISTEN button, signal image and
    debris/seed readouts) and a CONCEPT tab (static documentation).
    Component creation order inside the context managers defines the layout,
    so statement order here is significant.

    Returns:
        gr.Blocks instance ready for ``.launch()``.
    """
    # Injected as a <style> tag below; these class names are referenced by
    # the gr.HTML fragments and elem_classes throughout the layout.
    custom_css = """
    .title {
        font-size: 3rem;
        font-weight: 100;
        letter-spacing: 0.5em;
        text-align: center;
        color: #333;
        margin-bottom: 0.5rem;
    }
    .subtitle {
        font-size: 0.7rem;
        letter-spacing: 0.3em;
        text-align: center;
        color: #666;
        margin-bottom: 2rem;
    }
    .debris-text {
        font-family: monospace;
        font-size: 0.9rem;
        line-height: 1.8;
        color: #ffffff !important;
        text-align: center;
        padding: 1rem;
        background: #1a1a1a !important;
        border-radius: 4px;
        border: 1px solid #333;
    }
    .seed-text {
        font-family: monospace;
        font-size: 0.6rem;
        color: #aaaaaa !important;
        text-align: center;
        margin-top: 0.5rem;
    }
    .model-info {
        font-size: 0.7rem;
        color: #888;
        text-align: center;
    }
    """

    with gr.Blocks(title="WILL") as app:
        gr.HTML(f"<style>{custom_css}</style>")

        gr.HTML('<p class="title">WILL</p>')
        gr.HTML('<p class="subtitle">PURE COMPUTATIONAL WILL</p>')

        with gr.Tabs():
            with gr.TabItem("GENERATE"):
                # Empty side columns center the dropdown in the middle column.
                with gr.Row():
                    with gr.Column(scale=1):
                        pass
                    with gr.Column(scale=2):
                        model_dropdown = gr.Dropdown(
                            choices=get_model_choices(),
                            value=DEFAULT_MODEL_KEY,
                            label="MODEL",
                            interactive=True,
                        )

                        model_info = gr.HTML(elem_classes=["model-info"])

                        def update_model_info(model_key):
                            # Show dimension/vocab stats for the selected model.
                            config = ModelRegistry.get_config(model_key)
                            return f'<p class="model-info">{config.embedding_dim} dim / {config.vocab_size:,} tokens</p>'

                        model_dropdown.change(
                            fn=update_model_info,
                            inputs=[model_dropdown],
                            outputs=[model_info],
                        )

                    with gr.Column(scale=1):
                        pass

                # Same centering trick for the trigger button.
                with gr.Row():
                    with gr.Column(scale=1):
                        pass
                    with gr.Column(scale=1):
                        listen_btn = gr.Button("LISTEN", variant="primary")
                    with gr.Column(scale=1):
                        pass

                with gr.Row():
                    signal_image = gr.Image(
                        label="Signal",
                        type="pil",
                        show_label=False,
                    )

                debris_output = gr.HTML(elem_classes=["debris-text"])
                seed_output = gr.HTML(elem_classes=["seed-text"])

                def on_listen(model_key):
                    # Wrap the raw generator outputs in styled HTML fragments.
                    img, debris, seed = generate_debris(model_key)
                    debris_html = f'<div class="debris-text">{debris}</div>'
                    seed_html = f'<p class="seed-text">{seed}</p>'
                    return img, debris_html, seed_html

                listen_btn.click(
                    fn=on_listen,
                    inputs=[model_dropdown],
                    outputs=[signal_image, debris_output, seed_output],
                )

            with gr.TabItem("CONCEPT"):
                gr.HTML('<p class="title">CONCEPT</p>')
                gr.HTML('<p class="subtitle">DOCUMENTATION</p>')

                # Static project documentation (Japanese), rendered as-is.
                gr.Markdown("""
                ## CONCEPT

                GPT-2は人間が書いたテキストで訓練され、その重みに言語パターンを保持している。

                通常はプロンプトに対して応答を生成するが、入力をランダムノイズに置き換え、
                出力にもノイズを加えることで、学習済みの統計的偏りを破壊する。

                **人間の問いかけなしに、モデルの構造だけが出力するものを観測する。**

                ---

                ## PROCESS

                ### 01 — ENTROPY SEED
                ```python
                seed = time.time_ns()
                torch.manual_seed(seed)
                ```
                実行瞬間のナノ秒を乱数シードとして採取

                ### 02 — INPUT NOISE
                ```python
                noise = torch.randn(1, 32, embedding_dim)
                outputs = model(inputs_embeds=noise)
                ```
                ランダムノイズをEmbedding層に直接注入

                ### 03 — OUTPUT NOISE
                ```python
                logits_noise = torch.randn_like(logits) * logits.std() * 10
                corrupted_logits = logits + logits_noise
                ```
                出力Logitsにノイズを加算し学習バイアスを破壊

                ### 04 — RAW DECODE
                ```python
                indices = corrupted_logits.argmax(dim=-1)
                debris = [tokenizer.decode([i]) for i in indices]
                ```
                Softmax・Temperature なしで生トークンを抽出

                ---

                ## SPECIFICATION

                | Item | Value |
                |------|-------|
                | Models | GPT-2 / GPT-Neo / OPT / Pythia / OLMo / BLOOM / Llama / Qwen / Mistral / GPT-OSS |
                | Parameters | 125M - 21B |
                | Sequence | 32 tokens |
                | Input Noise | N(0, 1) |
                | Logits Noise | N(0, σ×10) |
                | Decoding | argmax |
                """)

    return app
| |
|
| |
|
| | |
# Hugging Face Spaces ZeroGPU support: when the `spaces` package is present
# (i.e. running on Spaces), wrap the generator so each call is scheduled on
# an allocated GPU.
try:
    import spaces

    generate_debris = spaces.GPU(generate_debris)
except ImportError:
    # Not running on Spaces (e.g. local development) — keep the plain function.
    pass
| |
|