Columns: text (string, 5 – 631k chars) · id (string, 14 – 178 chars) · metadata (dict) · __index_level_0__ (int64, 0 – 647)
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: list[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """
    Takes input text, generates output, and then using the reference calculates the BLEU scores.

    The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed.

    Args:
        verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout

    Returns:
        a tuple: ``(scores, params)``
        - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}``
        - ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}``
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn, google-t5/t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
transformers/examples/legacy/seq2seq/run_eval.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/run_eval.py", "repo_id": "transformers", "token_count": 2787 }
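The helper in the record above can also be driven directly from Python rather than through argparse. A minimal sketch, assuming you run it from the same directory as `run_eval.py`, that a `cnn_dm/test.source` file exists (placeholder path), and using the public `sshleifer/distilbart-cnn-12-6` checkpoint (any seq2seq model works):

```python
# Sketch only: call the helper directly; extra kwargs are forwarded to model.generate.
from run_eval import generate_summaries_or_translations

examples = [x.rstrip() for x in open("cnn_dm/test.source").readlines()]  # placeholder input file
metrics = generate_summaries_or_translations(
    examples,
    out_file="hyps.txt",                          # placeholder output path
    model_name="sshleifer/distilbart-cnn-12-6",
    batch_size=4,
    num_beams=4,                                  # goes into **generate_kwargs
    max_length=142,
)
print(metrics)  # {'n_obs': ..., 'runtime': ..., 'seconds_per_sample': ...}
```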
393
{ "annotations": { "list": [ { "builtIn": 1, "datasource": { "type": "grafana", "uid": "-- Grafana --" }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "target": { "limit": 100, "matchAny": false, "tags": [], "type": "dashboard" }, "type": "dashboard" } ] }, "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, "id": 2, "links": [], "panels": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "description": "Memory usage of the PagedAttentionCache", "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "max": 10737418240, "min": 0, "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "yellow", "value": 5368709120 }, { "color": "red", "value": 8589934592 } ] }, "unit": "bytes" }, "overrides": [] }, "gridPos": { "h": 8, "w": 6, "x": 0, "y": 0 }, "id": 2, "options": { "minVizHeight": 75, "minVizWidth": 75, "orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showThresholdLabels": false, "showThresholdMarkers": true, "sizing": "auto" }, "pluginVersion": "12.0.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "disableTextWrap": false, "editorMode": "builder", "expr": "kv_cache_memory_bytes", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "__auto", "range": true, "refId": "A", "useBackend": false } ], "title": "KV Cache Memory Usage", "transparent": true, "type": "gauge" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "dark-blue" } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 6, "x": 6, "y": 0 }, "id": 13, "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": false, "textMode": "auto", "wideLayout": true }, "pluginVersion": "12.0.0", "targets": [ { "disableTextWrap": false, "editorMode": "builder", "expr": "active_requests_count", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "__auto", "range": true, "refId": "A", "useBackend": false } ], "title": "Active Requests", "transparent": true, "type": "stat" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "dark-orange" } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 6, "x": 12, "y": 0 }, "id": 14, "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": false, "textMode": "auto", "wideLayout": true }, "pluginVersion": "12.0.0", "targets": [ { "disableTextWrap": false, "editorMode": "builder", "expr": "waiting_requests_count", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "__auto", "range": true, "refId": "A", "useBackend": false } ], "title": "Waiting Requests", "transparent": true, "type": "stat" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "description": "Ratio of decode tokens to prefill tokens in a batch", "fieldConfig": { 
"defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "blue" } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 6, "x": 18, "y": 0 }, "id": 6, "options": { "colorMode": "value", "graphMode": "none", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": false, "textMode": "auto", "wideLayout": true }, "pluginVersion": "12.0.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "disableTextWrap": false, "editorMode": "builder", "expr": "decode_prefill_ratio", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "__auto", "range": true, "refId": "A", "useBackend": false } ], "title": "Decode/Prefill Ratio", "transparent": true, "type": "stat" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 }, "id": 10, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, "pluginVersion": "12.0.0", "targets": [ { "editorMode": "code", "expr": "rate(decode_tokens_processed_total[$__rate_interval])", "legendFormat": "__auto", "range": true, "refId": "A" } ], "title": "Decode tokens throupught tok/s", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 }, "id": 11, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, "pluginVersion": "12.0.0", "targets": [ { "editorMode": "code", "expr": "rate(prefill_tokens_processed_total[$__rate_interval])", "legendFormat": "__auto", "range": true, "refId": "A" } ], "title": "Prefill rate tok/s", "type": "timeseries" 
}, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 16 }, "id": 9, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, "pluginVersion": "12.0.0", "targets": [ { "editorMode": "code", "expr": "histogram_quantile(0.95, sum by(le) (rate(batch_fill_percentage_percent_bucket[$__rate_interval])))", "legendFormat": "p95", "range": true, "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "editorMode": "code", "expr": "histogram_quantile(0.99, sum by(le) (rate(batch_fill_percentage_percent_bucket[$__rate_interval])))", "hide": false, "instant": false, "legendFormat": "p99", "range": true, "refId": "B" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "editorMode": "code", "expr": "histogram_quantile(0.5, sum by(le) (rate(batch_fill_percentage_percent_bucket[$__rate_interval])))", "hide": false, "instant": false, "legendFormat": "p50", "range": true, "refId": "C" } ], "title": "Batch fill percentage percentiles", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "description": "KV Cache Memory Usage Over Time", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 20, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 2, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] }, "unit": "bytes" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 16 }, "id": 4, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, "pluginVersion": "12.0.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "disableTextWrap": false, "editorMode": "builder", "expr": "kv_cache_memory_bytes", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "Used memory", "range": true, "refId": "A", "useBackend": false }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "disableTextWrap": false, "editorMode": 
"builder", "expr": "kv_cache_free_memory_bytes", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, "instant": false, "legendFormat": "free memory", "range": true, "refId": "B", "useBackend": false } ], "title": "KV Cache Memory Usage Over Time", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] }, "unit": "ms" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 24 }, "id": 8, "options": { "displayMode": "gradient", "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, "maxVizHeight": 300, "minVizHeight": 10, "minVizWidth": 0, "namePlacement": "auto", "orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showUnfilled": true, "sizing": "auto", "valueMode": "color" }, "pluginVersion": "12.0.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "disableTextWrap": false, "editorMode": "builder", "expr": "histogram_quantile(0.95, sum by(le) (rate(ttft_milliseconds_bucket[$__rate_interval])))", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "p95", "range": true, "refId": "A", "useBackend": false }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "disableTextWrap": false, "editorMode": "builder", "expr": "histogram_quantile(0.5, sum by(le) (rate(ttft_milliseconds_bucket[$__rate_interval])))", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, "legendFormat": "p50", "range": true, "refId": "B", "useBackend": false }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "disableTextWrap": false, "editorMode": "builder", "expr": "histogram_quantile(0.99, sum by(le) (rate(ttft_milliseconds_bucket[$__rate_interval])))", "fullMetaSearch": false, "hide": false, "includeNullMetadata": false, "instant": false, "legendFormat": "p99", "range": true, "refId": "C", "useBackend": false } ], "title": "Time to First Token (TTFT)", "type": "bargauge" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] }, "unit": "ms" }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 24 }, "id": 12, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, "pluginVersion": "12.0.0", "targets": [ { "editorMode": "code", "expr": "histogram_quantile(0.5, sum by(le) (rate(request_latency_milliseconds_bucket[$__rate_interval])))", "legendFormat": "p50", "range": true, "refId": "A" }, { 
"datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "editorMode": "code", "expr": "histogram_quantile(0.95, sum by(le) (rate(request_latency_milliseconds_bucket[$__rate_interval])))", "hide": false, "instant": false, "legendFormat": "p95", "range": true, "refId": "B" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "editorMode": "code", "expr": "histogram_quantile(0.99, sum by(le) (rate(request_latency_milliseconds_bucket[$__rate_interval])))", "hide": false, "instant": false, "legendFormat": "p99", "range": true, "refId": "C" } ], "title": "Request latency percentiles", "type": "timeseries" } ], "preload": false, "refresh": "5s", "schemaVersion": 41, "tags": [], "templating": { "list": [] }, "time": { "from": "now-15m", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Transformers Continuous Batching Metrics", "uid": "Lw6CTvVSz", "version": 5 }
transformers/examples/metrics-monitoring/continuous-batching-dashboard.json/0
{ "file_path": "transformers/examples/metrics-monitoring/continuous-batching-dashboard.json", "repo_id": "transformers", "token_count": 23850 }
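The dashboard above only consumes Prometheus series: it fixes the metric names (`kv_cache_memory_bytes`, `active_requests_count`, `decode_tokens_processed_total`, `ttft_milliseconds_bucket`, ...) but says nothing about how they are produced. A hypothetical exporter sketch with `prometheus_client` — not the actual transformers instrumentation — showing how a serving loop could publish series these panels can read; note that a `Histogram` named `ttft_milliseconds` automatically exposes the `_bucket` series used by the `histogram_quantile` queries:

```python
# Hypothetical metrics exporter; only the metric names are taken from the dashboard.
from prometheus_client import Counter, Gauge, Histogram, start_http_server

kv_cache_memory_bytes = Gauge("kv_cache_memory_bytes", "Memory used by the PagedAttentionCache")
active_requests = Gauge("active_requests_count", "Requests currently being decoded")
waiting_requests = Gauge("waiting_requests_count", "Requests queued for prefill")
decode_tokens = Counter("decode_tokens_processed_total", "Decode tokens processed")
ttft_ms = Histogram("ttft_milliseconds", "Time to first token", buckets=(50, 100, 250, 500, 1000, 2500))

start_http_server(9090)  # Prometheus scrapes this endpoint; Grafana reads from Prometheus
```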
394
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI checks enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from typing import Optional

import torch
from torch import nn

from ...utils.deprecation import deprecate_kwarg


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class TestAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention:
    Longformer and "Generating Long Sequences with Sparse Transformers".

    Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
    The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
    The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of
    the previous (mamba) layer (see fig. 2 in https://huggingface.co/papers/2405.16712).
    Additionally, replaced
    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
    with
    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
    """

    def __init__(self):
        pass

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(self) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        _ = apply_rotary_pos_emb(1, 1, 1, 1)
transformers/examples/modular-transformers/modeling_add_function.py/0
{ "file_path": "transformers/examples/modular-transformers/modeling_add_function.py", "repo_id": "transformers", "token_count": 1643 }
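A quick shape check helps when reading `apply_rotary_pos_emb`. The sketch below assumes `apply_rotary_pos_emb` from the file above is importable and builds `cos`/`sin` with the standard RoPE frequency recipe; that construction is an assumption for illustration and is not part of this file:

```python
# Illustrative only: q/k are [batch, heads, seq, head_dim], cos/sin are [batch, seq, head_dim];
# unsqueeze_dim=1 (the default) broadcasts cos/sin over the heads axis.
import torch

batch, heads, seq, head_dim = 2, 4, 16, 64
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)

# standard RoPE angle construction (assumed, not taken from this file)
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2) / head_dim))
angles = torch.outer(torch.arange(seq, dtype=torch.float), inv_freq)  # [seq, head_dim // 2]
emb = torch.cat((angles, angles), dim=-1)                             # [seq, head_dim]
cos = emb.cos()[None, :, :].expand(batch, -1, -1)
sin = emb.sin()[None, :, :].expand(batch, -1, -1)

q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
assert q_rot.shape == q.shape and k_rot.shape == k.shape
```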
395
""" Here, because clip is not consistent with the use of the "Text" and "Vision" prefixes, we cannot simply use ``` class Multimodal2VisionModel(CLIPVisionModel): pass ``` with the hope that all dependencies will be renamed as `Multimodal2VisionClass`. For this reason, if we want consistency and use the "Vision" part everywhere, we need to overwrite the intermediate classes and add the prefix everytime. This adds noise to the modular, but is unfortunately unavoidable. """ from torch import nn from transformers.models.clip.modeling_clip import ( CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPPreTrainedModel, CLIPVisionModel, CLIPVisionTransformer, ) from transformers.utils import add_start_docstrings class Multimodal2VisionAttention(CLIPAttention): pass class Multimodal2VisionMLP(CLIPMLP): pass class Multimodal2VisionEncoderLayer(CLIPEncoderLayer): def __init__(self, config): super().__init__() self.mlp = Multimodal2VisionMLP(config) class Multimodal2VisionEncoder(CLIPEncoder): def __init__(self, config): super().__init__(config) self.layers = nn.ModuleList([Multimodal2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) # Finally here the `Vision` part was correct in CLIP, but we still need to tell it that the encoder arg should use it as well class Multimodal2VisionTransformer(CLIPVisionTransformer): def __init__(self, config): super().__init__(config) self.encoder = Multimodal2VisionEncoder(config) class Multimodal2VisionPreTrainedModel(CLIPPreTrainedModel): def _init_weights(self, module): if isinstance(module, Multimodal2VisionMLP): pass MULTIMODAL2_VISION_START_DOCSTRING = "doc" # Here the only arg `self.vision_model = CLIPVisionTransformer(config)` in CLIPVisionModel already has the "Vision" part, so # no need to overwrite it, it will look for `Multimodal2VisionTransformer` which has already being redefined above # Note: we may want to redefine decorator as well for full consistency, as CLIP does not use "CLIP_VISION_START_DOCSTRING" but only # "CLIP_START_DOCSTRING" @add_start_docstrings("New doc", MULTIMODAL2_VISION_START_DOCSTRING) class Multimodal2VisionModel(CLIPVisionModel, Multimodal2VisionPreTrainedModel): _no_split_modules = ["Multimodal2VisionEncoderLayer"]
transformers/examples/modular-transformers/modular_multimodal2.py/0
{ "file_path": "transformers/examples/modular-transformers/modular_multimodal2.py", "repo_id": "transformers", "token_count": 825 }
396
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Semantic segmentation examples This directory contains 2 scripts that showcase how to fine-tune any model supported by the [`AutoModelForSemanticSegmentation` API](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModelForSemanticSegmentation) (such as [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer), [BEiT](https://huggingface.co/docs/transformers/main/en/model_doc/beit), [DPT](https://huggingface.co/docs/transformers/main/en/model_doc/dpt)) using PyTorch. ![segformer_inference_widget](https://user-images.githubusercontent.com/48327001/163667406-01f323a6-72ec-4e7e-bdeb-7d9da71b0697.gif) Content: * [Note on custom data](#note-on-custom-data) * [PyTorch version, Trainer](#pytorch-version-trainer) * [PyTorch version, no Trainer](#pytorch-version-no-trainer) * [Reload and perform inference](#reload-and-perform-inference) * [Important notes](#important-notes) ## Note on custom data In case you'd like to use the script with custom data, there are 2 things required: 1) creating a DatasetDict 2) creating an id2label mapping. Below, these are explained in more detail. ### Creating a `DatasetDict` The script assumes that you have a `DatasetDict` with 2 columns, "image" and "label", both of type [Image](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Image). This can be created as follows: ```python from datasets import Dataset, DatasetDict, Image # your images can of course have a different extension # semantic segmentation maps are typically stored in the png format image_paths_train = ["path/to/image_1.jpg/jpg", "path/to/image_2.jpg/jpg", ..., "path/to/image_n.jpg/jpg"] label_paths_train = ["path/to/annotation_1.png", "path/to/annotation_2.png", ..., "path/to/annotation_n.png"] # same for validation # image_paths_validation = [...] # label_paths_validation = [...] def create_dataset(image_paths, label_paths): dataset = Dataset.from_dict({"image": sorted(image_paths), "label": sorted(label_paths)}) dataset = dataset.cast_column("image", Image()) dataset = dataset.cast_column("label", Image()) return dataset # step 1: create Dataset objects train_dataset = create_dataset(image_paths_train, label_paths_train) validation_dataset = create_dataset(image_paths_validation, label_paths_validation) # step 2: create DatasetDict dataset = DatasetDict({ "train": train_dataset, "validation": validation_dataset, } ) # step 3: push to hub (assumes you have ran the hf auth login command in a terminal/notebook) dataset.push_to_hub("name of repo on the hub") # optionally, you can push to a private repo on the hub # dataset.push_to_hub("name of repo on the hub", private=True) ``` An example of such a dataset can be seen at [nielsr/ade20k-demo](https://huggingface.co/datasets/nielsr/ade20k-demo). 
### Creating an id2label mapping Besides that, the script also assumes the existence of an `id2label.json` file in the repo, containing a mapping from integers to actual class names. An example of that can be seen [here](https://huggingface.co/datasets/nielsr/ade20k-demo/blob/main/id2label.json). This can be created in Python as follows: ```python import json # simple example id2label = {0: 'cat', 1: 'dog'} with open('id2label.json', 'w') as fp: json.dump(id2label, fp) ``` You can easily upload this by clicking on "Add file" in the "Files and versions" tab of your repo on the hub. ## PyTorch version, Trainer Based on the script [`run_semantic_segmentation.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py). The script leverages the [🤗 Trainer API](https://huggingface.co/docs/transformers/main_classes/trainer) to automatically take care of the training for you, running on distributed environments right away. Here we show how to fine-tune a [SegFormer](https://huggingface.co/nvidia/mit-b0) model on the [segments/sidewalk-semantic](https://huggingface.co/datasets/segments/sidewalk-semantic) dataset: In order to use `segments/sidewalk-semantic`: - Log in to Hugging Face with `hf auth login` (token can be accessed [here](https://huggingface.co/settings/tokens)). - Accept terms of use for `sidewalk-semantic` on [dataset page](https://huggingface.co/datasets/segments/sidewalk-semantic). ```bash python run_semantic_segmentation.py \ --model_name_or_path nvidia/mit-b0 \ --dataset_name segments/sidewalk-semantic \ --output_dir ./segformer_outputs/ \ --remove_unused_columns False \ --do_train \ --do_eval \ --push_to_hub \ --push_to_hub_model_id segformer-finetuned-sidewalk-10k-steps \ --max_steps 10000 \ --learning_rate 0.00006 \ --lr_scheduler_type polynomial \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --logging_strategy steps \ --logging_steps 100 \ --eval_strategy epoch \ --save_strategy epoch \ --seed 1337 ``` The resulting model can be seen here: https://huggingface.co/nielsr/segformer-finetuned-sidewalk-10k-steps. The corresponding Weights and Biases report [here](https://wandb.ai/nielsrogge/huggingface/reports/SegFormer-fine-tuning--VmlldzoxODY5NTQ2). Note that it's always advised to check the original paper to know the details regarding training hyperparameters. E.g. from the SegFormer paper: > We trained the models using AdamW optimizer for 160K iterations on ADE20K, Cityscapes, and 80K iterations on COCO-Stuff. (...) We used a batch size of 16 for ADE20K and COCO-Stuff, and a batch size of 8 for Cityscapes. The learning rate was set to an initial value of 0.00006 and then used a “poly” LR schedule with factor 1.0 by default. Note that you can replace the model and dataset by simply setting the `model_name_or_path` and `dataset_name` arguments respectively, with any model or dataset from the [hub](https://huggingface.co/). For an overview of all possible arguments, we refer to the [docs](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) of the `TrainingArguments`, which can be passed as flags. ## PyTorch version, no Trainer Based on the script [`run_semantic_segmentation_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py). 
The script leverages [🤗 `Accelerate`](https://github.com/huggingface/accelerate), which allows to write your own training loop in PyTorch, but have it run instantly on any (distributed) environment, including CPU, multi-CPU, GPU, multi-GPU and TPU. It also supports mixed precision. First, run: ```bash accelerate config ``` and reply to the questions asked regarding the environment on which you'd like to train. Then ```bash accelerate test ``` that will check everything is ready for training. Finally, you can launch training with ```bash accelerate launch run_semantic_segmentation_no_trainer.py --output_dir segformer-finetuned-sidewalk --with_tracking --push_to_hub ``` and boom, you're training, possibly on multiple GPUs, logging everything to all trackers found in your environment (like Weights and Biases, Tensorboard) and regularly pushing your model to the hub (with the repo name being equal to `args.output_dir` at your HF username) 🤗 With the default settings, the script fine-tunes a [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer) model on the [segments/sidewalk-semantic](https://huggingface.co/datasets/segments/sidewalk-semantic) dataset. The resulting model can be seen here: https://huggingface.co/nielsr/segformer-finetuned-sidewalk. Note that the script usually requires quite a few epochs to achieve great results, e.g. the SegFormer authors fine-tuned their model for 160k steps (batches) on [`scene_parse_150`](https://huggingface.co/datasets/scene_parse_150). ## Reload and perform inference This means that after training, you can easily load your trained model as follows: ```python from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation model_name = "name_of_repo_on_the_hub_or_path_to_local_folder" image_processor = AutoImageProcessor.from_pretrained(model_name) model = AutoModelForSemanticSegmentation.from_pretrained(model_name) ``` and perform inference as follows: ```python from PIL import Image import requests import torch url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) # prepare image for the model inputs = image_processor(images=image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # rescale logits to original image size logits = nn.functional.interpolate(outputs.logits.detach().cpu(), size=image.size[::-1], # (height, width) mode='bilinear', align_corners=False) predicted = logits.argmax(1) ``` For visualization of the segmentation maps, we refer to the [example notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SegFormer/Segformer_inference_notebook.ipynb). ## Important notes Some datasets, like [`scene_parse_150`](https://huggingface.co/datasets/scene_parse_150), contain a "background" label that is not part of the classes. The Scene Parse 150 dataset for instance contains labels between 0 and 150, with 0 being the background class, and 1 to 150 being actual class names (like "tree", "person", etc.). For these kind of datasets, one replaces the background label (0) by 255, which is the `ignore_index` of the PyTorch model's loss function, and reduces all labels by 1. This way, the `labels` are PyTorch tensors containing values between 0 and 149, and 255 for all background/padding. In case you're training on such a dataset, make sure to set the ``do_reduce_labels`` flag, which will take care of this.
transformers/examples/pytorch/semantic-segmentation/README.md/0
{ "file_path": "transformers/examples/pytorch/semantic-segmentation/README.md", "repo_id": "transformers", "token_count": 3497 }
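One nit on the inference snippet in that README: it calls `nn.functional.interpolate` but never imports `nn`. A self-contained variant of the same post-processing with the missing import added; the checkpoint name below is only an example, substitute your own fine-tuned model:

```python
import requests
import torch
from torch import nn  # needed for nn.functional.interpolate, not imported in the README snippet
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation

model_name = "nvidia/segformer-b0-finetuned-ade-512-512"  # example checkpoint, replace with yours
image_processor = AutoImageProcessor.from_pretrained(model_name)
model = AutoModelForSemanticSegmentation.from_pretrained(model_name)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# upsample the logits to the original (height, width), then take the per-pixel argmax
logits = nn.functional.interpolate(logits, size=image.size[::-1], mode="bilinear", align_corners=False)
predicted = logits.argmax(dim=1)[0]  # [height, width] map of class ids
```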
397
# Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock from accelerate.utils import write_basic_config from transformers.testing_utils import ( TestCasePlus, backend_device_count, run_command, slow, torch_device, ) logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_setup_file(): parser = argparse.ArgumentParser() parser.add_argument("-f") args = parser.parse_args() return args.f def get_results(output_dir): results = {} path = os.path.join(output_dir, "all_results.json") if os.path.exists(path): with open(path) as f: results = json.load(f) else: raise ValueError(f"can't find {path}") return results stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class ExamplesTestsNoTrainer(TestCasePlus): @classmethod def setUpClass(cls): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU cls.tmpdir = tempfile.mkdtemp() cls.configPath = os.path.join(cls.tmpdir, "default_config.yml") write_basic_config(save_location=cls.configPath) cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdir) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_glue_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --num_warmup_steps=2 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer"))) @unittest.skip("Zach is working on this.") @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_clm_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilbert/distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if backend_device_count(torch_device) > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertLess(result["perplexity"], 100) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer"))) @unittest.skip("Zach is working on this.") @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_mlm_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilbert/distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertLess(result["perplexity"], 42) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer"))) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_ner_no_trainer(self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu epochs = 7 if backend_device_count(torch_device) > 1 else 2 tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) self.assertLess(result["train_loss"], 0.6) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer"))) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_squad_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path google-bert/bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"], 28) self.assertGreaterEqual(result["eval_exact"], 28) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer"))) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_swag_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.8) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer"))) @slow @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_summarization_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path google-t5/t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_rouge1"], 10) self.assertGreaterEqual(result["eval_rouge2"], 2) self.assertGreaterEqual(result["eval_rougeL"], 7) self.assertGreaterEqual(result["eval_rougeLsum"], 7) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer"))) @slow @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_translation_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_bleu"], 30) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer"))) @slow def test_run_semantic_segmentation_no_trainer(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 
--per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_image_classification_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 --label_column_name labels """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"], 0.4) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer"))) @slow @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_object_detection_no_trainer(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/object-detection/run_object_detection_no_trainer.py --model_name_or_path qubvel-hf/detr-resnet-50-finetuned-10k-cppe5 --dataset_name qubvel-hf/cppe-5-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=1e-6 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["test_map"], 0.10) @slow @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_instance_segmentation_no_trainer(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/instance-segmentation/run_instance_segmentation_no_trainer.py --model_name_or_path qubvel-hf/finetune-instance-segmentation-ade20k-mini-mask2former --output_dir {tmp_dir} --dataset_name qubvel-hf/ade20k-nano --do_reduce_labels --image_height 256 --image_width 256 --num_train_epochs 1 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --seed 1234 """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["test_map"], 0.1)
transformers/examples/pytorch/test_accelerate_examples.py/0
{ "file_path": "transformers/examples/pytorch/test_accelerate_examples.py", "repo_id": "transformers", "token_count": 7368 }
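The fixture pattern in `setUpClass` above also works outside of tests. A stand-alone sketch of the same launch flow, where the script path, model id, and output directory are placeholders and most task-specific flags are omitted:

```python
# Sketch: write a default Accelerate config, then launch an example script with it.
import os
import subprocess
import tempfile

from accelerate.utils import write_basic_config

tmpdir = tempfile.mkdtemp()
config_path = os.path.join(tmpdir, "default_config.yml")
write_basic_config(save_location=config_path)  # picks CPU / single-GPU / multi-GPU automatically

launch_args = ["accelerate", "launch", "--config_file", config_path]
script_args = [
    "examples/pytorch/text-classification/run_glue_no_trainer.py",   # placeholder script path
    "--model_name_or_path", "distilbert/distilbert-base-uncased",
    "--output_dir", os.path.join(tmpdir, "out"),
    # ... plus the usual --train_file/--validation_file and hyperparameter flags
]
subprocess.run(launch_args + script_args, check=True)
```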
398
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Image classification examples This directory contains 2 scripts that showcase how to fine-tune any model supported by the [`TFAutoModelForImageClassification` API](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.TFAutoModelForImageClassification) (such as [ViT](https://huggingface.co/docs/transformers/main/en/model_doc/vit), [ConvNeXT](https://huggingface.co/docs/transformers/main/en/model_doc/convnext), [ResNet](https://huggingface.co/docs/transformers/main/en/model_doc/resnet), [Swin Transformer](https://huggingface.co/docs/transformers/main/en/model_doc/swin)...) using TensorFlow. They can be used to fine-tune models on both [datasets from the hub](#using-datasets-from-hub) as well as on [your own custom data](#using-your-own-data). <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_classification_inference_widget.png" height="400" /> Try out the inference widget here: https://huggingface.co/google/vit-base-patch16-224 ## TensorFlow Based on the script [`run_image_classification.py`](https://github.com/huggingface/transformers/blob/main/examples/tensorflow/image-classification/run_image_classification.py). ### Using datasets from Hub Here we show how to fine-tune a Vision Transformer (`ViT`) on the [beans](https://huggingface.co/datasets/beans) dataset, to classify the disease type of bean leaves. The following will train a model and push it to the `amyeroberts/vit-base-beans` repo. ```bash python run_image_classification.py \ --dataset_name beans \ --output_dir ./beans_outputs/ \ --remove_unused_columns False \ --do_train \ --do_eval \ --push_to_hub \ --hub_model_id amyeroberts/vit-base-beans \ --learning_rate 2e-5 \ --num_train_epochs 5 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --logging_strategy steps \ --logging_steps 10 \ --eval_strategy epoch \ --save_strategy epoch \ --load_best_model_at_end True \ --save_total_limit 3 \ --seed 1337 ``` 👀 See the results here: [amyeroberts/vit-base-beans](https://huggingface.co/amyeroberts/vit-base-beans). Note that you can replace the model and dataset by simply setting the `model_name_or_path` and `dataset_name` arguments respectively, with any model or dataset from the [hub](https://huggingface.co/). For an overview of all possible arguments, we refer to the [docs](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) of the `TrainingArguments`, which can be passed as flags. > If your model classification head dimensions do not fit the number of labels in the dataset, you can specify `--ignore_mismatched_sizes` to adapt it. ### Using your own data To use your own dataset, there are 2 ways: - you can either provide your own folders as `--train_dir` and/or `--validation_dir` arguments - you can upload your dataset to the hub (possibly as a private repo, if you prefer so), and simply pass the `--dataset_name` argument. 
Below, we explain both in more detail. #### Provide them as folders If you provide your own folders with images, the script expects the following directory structure: ```bash root/dog/xxx.png root/dog/xxy.png root/dog/[...]/xxz.png root/cat/123.png root/cat/nsdf3.png root/cat/[...]/asd932_.png ``` In other words, you need to organize your images in subfolders, based on their class. You can then run the script like this: ```bash python run_image_classification.py \ --train_dir <path-to-train-root> \ --output_dir ./outputs/ \ --remove_unused_columns False \ --do_train \ --do_eval ``` Internally, the script will use the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature which will automatically turn the folders into 🤗 Dataset objects. ##### 💡 The above will split the train dir into training and evaluation sets - To control the split amount, use the `--train_val_split` flag. - To provide your own validation split in its own directory, you can pass the `--validation_dir <path-to-val-root>` flag. #### Upload your data to the hub, as a (possibly private) repo To upload your image dataset to the hub you can use the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature available in 🤗 Datasets. Simply do the following: ```python from datasets import load_dataset # example 1: local folder dataset = load_dataset("imagefolder", data_dir="path_to_your_folder") # example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="path_to_zip_file") # example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip") # example 4: providing several splits dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}) ``` `ImageFolder` will create a `label` column, and the label name is based on the directory name. Next, push it to the hub! ```python # assuming you have ran the hf auth login command in a terminal dataset.push_to_hub("name_of_your_dataset") # if you want to push to a private repo, simply pass private=True: dataset.push_to_hub("name_of_your_dataset", private=True) ``` and that's it! You can now train your model by simply setting the `--dataset_name` argument to the name of your dataset on the hub (as explained in [Using datasets from the 🤗 hub](#using-datasets-from-hub)). More on this can also be found in [this blog post](https://huggingface.co/blog/image-search-datasets). ### Sharing your model on 🤗 Hub 0. If you haven't already, [sign up](https://huggingface.co/join) for a 🤗 account 1. Make sure you have `git-lfs` installed and git set up. ```bash $ apt install git-lfs $ git config --global user.email "you@example.com" $ git config --global user.name "Your Name" ``` 2. Log in with your HuggingFace account credentials using `hf`: ```bash $ hf auth login # ...follow the prompts ``` 3. When running the script, pass the following arguments: ```bash python run_image_classification.py \ --push_to_hub \ --push_to_hub_model_id <name-your-model> \ ... ```
transformers/examples/tensorflow/image-classification/README.md/0
{ "file_path": "transformers/examples/tensorflow/image-classification/README.md", "repo_id": "transformers", "token_count": 2310 }
399
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from . import BaseTransformersCLICommand def download_command_factory(args): return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code) class DownloadCommand(BaseTransformersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): download_parser = parser.add_parser("download") download_parser.add_argument( "--cache-dir", type=str, default=None, help="Path to location to store the models" ) download_parser.add_argument( "--force", action="store_true", help="Force the model to be download even if already in cache-dir" ) download_parser.add_argument( "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", ) download_parser.add_argument("model", type=str, help="Name of the model to download") download_parser.set_defaults(func=download_command_factory) def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool): self._model = model self._cache = cache self._force = force self._trust_remote_code = trust_remote_code def run(self): from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
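

# --- Illustrative usage sketch (not part of the original module) ---
# The class above is registered as the `download` subcommand of the transformers CLI,
# but it can also be driven programmatically. "bert-base-uncased" and the cache path
# below are placeholder values chosen for the example.
def _example_programmatic_download():
    command = DownloadCommand(
        model="bert-base-uncased",   # the positional `model` argument of the CLI
        cache="/tmp/hf-cache",       # --cache-dir
        force=False,                 # --force
        trust_remote_code=False,     # --trust-remote-code
    )
    command.run()  # materializes the model weights and tokenizer in the cache directory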
transformers/src/transformers/commands/download.py/0
{ "file_path": "transformers/src/transformers/commands/download.py", "repo_id": "transformers", "token_count": 828 }
400
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pickle import random import time import warnings from typing import Optional import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) DEPRECATION_WARNING = ( "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. You can have a look at this example script for pointers: {0}" ) class TextDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ def __init__( self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, cache_dir: Optional[str] = None, ): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False) directory, filename = os.path.split(file_path) cached_features_file = os.path.join( cache_dir if cache_dir is not None else directory, f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}", ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: start = time.time() with open(cached_features_file, "rb") as handle: self.examples = pickle.load(handle) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) else: logger.info(f"Creating features from dataset file at {directory}") self.examples = [] with open(file_path, encoding="utf-8") as f: text = f.read() tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)) for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size self.examples.append( tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size]) ) # Note that we are losing the last truncated example here for the sake of simplicity (no padding) # If your dataset is small, first you should look for a bigger one :-) and second you # can change this behavior by adding (model specific) padding. start = time.time() with open(cached_features_file, "wb") as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__(self): return len(self.examples) def __getitem__(self, i) -> torch.Tensor: return torch.tensor(self.examples[i], dtype=torch.long) class LineByLineTextDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") # Here, we do not cache the features, operating under the assumption # that we will soon use fast multithreaded tokenizers from the # `tokenizers` repo everywhere =) logger.info(f"Creating features from dataset file at {file_path}") with open(file_path, encoding="utf-8") as f: lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size) self.examples = batch_encoding["input_ids"] self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples] def __len__(self): return len(self.examples) def __getitem__(self, i) -> dict[str, torch.tensor]: return self.examples[i] class LineByLineWithRefDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") if os.path.isfile(ref_path) is False: raise ValueError(f"Ref file path {file_path} not found") # Here, we do not cache the features, operating under the assumption # that we will soon use fast multithreaded tokenizers from the # `tokenizers` repo everywhere =) logger.info(f"Creating features from dataset file at {file_path}") logger.info(f"Use ref segment results at {ref_path}") with open(file_path, encoding="utf-8") as f: data = f.readlines() # use this method to avoid delimiter '\u2029' to split a line data = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # Get ref inf from file with open(ref_path, encoding="utf-8") as f: ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] if len(data) != len(ref): raise ValueError( f"Length of Input file should be equal to Ref file. 
But the length of {file_path} is {len(data)} " f"while length of {ref_path} is {len(ref)}" ) batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size) self.examples = batch_encoding["input_ids"] self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples] n = len(self.examples) for i in range(n): self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long) def __len__(self): return len(self.examples) def __getitem__(self, i) -> dict[str, torch.tensor]: return self.examples[i] class LineByLineWithSOPTextDataset(Dataset): """ Dataset for sentence order prediction task, prepare sentence pairs for SOP task """ def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isdir(file_dir) is False: raise ValueError(f"{file_dir} is not a directory") logger.info(f"Creating features from dataset file folder at {file_dir}") self.examples = [] # TODO: randomness could apply a random seed, ex. rng = random.Random(random_seed) # file path looks like ./dataset/wiki_1, ./dataset/wiki_2 for file_name in os.listdir(file_dir): file_path = os.path.join(file_dir, file_name) if os.path.isfile(file_path) is False: raise ValueError(f"{file_path} is not a file") article_open = False with open(file_path, encoding="utf-8") as f: original_lines = f.readlines() article_lines = [] for line in original_lines: if "<doc id=" in line: article_open = True elif "</doc>" in line: article_open = False document = [ tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line)) for line in article_lines[1:] if (len(line) > 0 and not line.isspace()) ] examples = self.create_examples_from_document(document, block_size, tokenizer) self.examples.extend(examples) article_lines = [] else: if article_open: article_lines.append(line) logger.info("Dataset parse finished.") def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1): """Creates examples for a single document.""" # Account for special tokens max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True) # We *usually* want to fill up the entire sequence since we are padding # to `block_size` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pretraining and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `block_size` is a hard limit. target_seq_length = max_num_tokens if random.random() < short_seq_prob: target_seq_length = random.randint(2, max_num_tokens) # We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. 
examples = [] current_chunk = [] # a buffer stored current working segments current_length = 0 i = 0 while i < len(document): segment = document[i] # get a segment if not segment: i += 1 continue current_chunk.append(segment) # add a segment to current chunk current_length += len(segment) # overall token length # if current length goes to the target length or reaches the end of file, start building token a and b if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence. a_end = 1 # if current chunk has more than 2 sentences, pick part of it `A` (first) sentence if len(current_chunk) >= 2: a_end = random.randint(1, len(current_chunk) - 1) # token a tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) # token b tokens_b = [] for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) if len(tokens_a) == 0 or len(tokens_b) == 0: continue # switch tokens_a and tokens_b randomly if random.random() < 0.5: is_next = False tokens_a, tokens_b = tokens_b, tokens_a else: is_next = True def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens): """Truncates a pair of sequences to a maximum sequence length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_num_tokens: break trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b if not (len(trunc_tokens) >= 1): raise ValueError("Sequence length to be truncated must be no less than one") # We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. if random.random() < 0.5: del trunc_tokens[0] else: trunc_tokens.pop() truncate_seq_pair(tokens_a, tokens_b, max_num_tokens) if not (len(tokens_a) >= 1): raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1") if not (len(tokens_b) >= 1): raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1") # add special tokens input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b) # add token type ids, 0 for sentence a, 1 for sentence b token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b) example = { "input_ids": torch.tensor(input_ids, dtype=torch.long), "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long), "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long), } examples.append(example) current_chunk = [] # clear current chunk current_length = 0 # reset current text length i += 1 # go to next line return examples def __len__(self): return len(self.examples) def __getitem__(self, i) -> dict[str, torch.tensor]: return self.examples[i] class TextDatasetForNextSentencePrediction(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" def __init__( self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, short_seq_probability=0.1, nsp_probability=0.5, ): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if not os.path.isfile(file_path): raise ValueError(f"Input file path {file_path} not found") self.short_seq_probability = short_seq_probability self.nsp_probability = nsp_probability directory, filename = os.path.split(file_path) cached_features_file = os.path.join( directory, f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}", ) self.tokenizer = tokenizer # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" # Input file format: # (1) One sentence per line. These should ideally be actual sentences, not # entire paragraphs or arbitrary spans of text. (Because we use the # sentence boundaries for the "next sentence prediction" task). # (2) Blank lines between documents. Document boundaries are needed so # that the "next sentence prediction" task doesn't span between documents. # # Example: # I am very happy. # Here is the second sentence. # # A new document. with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: start = time.time() with open(cached_features_file, "rb") as handle: self.examples = pickle.load(handle) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) else: logger.info(f"Creating features from dataset file at {directory}") self.documents = [[]] with open(file_path, encoding="utf-8") as f: while True: line = f.readline() if not line: break line = line.strip() # Empty lines are used as document delimiters if not line and len(self.documents[-1]) != 0: self.documents.append([]) tokens = tokenizer.tokenize(line) tokens = tokenizer.convert_tokens_to_ids(tokens) if tokens: self.documents[-1].append(tokens) logger.info(f"Creating examples from {len(self.documents)} documents.") self.examples = [] for doc_index, document in enumerate(self.documents): self.create_examples_from_document(document, doc_index, block_size) start = time.time() with open(cached_features_file, "wb") as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def create_examples_from_document(self, document: list[list[int]], doc_index: int, block_size: int): """Creates examples for a single document.""" max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True) # We *usually* want to fill up the entire sequence since we are padding # to `block_size` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pretraining and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `block_size` is a hard limit. 
target_seq_length = max_num_tokens if random.random() < self.short_seq_probability: target_seq_length = random.randint(2, max_num_tokens) current_chunk = [] # a buffer stored current working segments current_length = 0 i = 0 while i < len(document): segment = document[i] current_chunk.append(segment) current_length += len(segment) if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. a_end = 1 if len(current_chunk) >= 2: a_end = random.randint(1, len(current_chunk) - 1) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] if len(current_chunk) == 1 or random.random() < self.nsp_probability: is_random_next = True target_b_length = target_seq_length - len(tokens_a) # This should rarely go for more than one iteration for large # corpora. However, just to be careful, we try to make sure that # the random document is not the same as the document # we're processing. for _ in range(10): random_document_index = random.randint(0, len(self.documents) - 1) if random_document_index != doc_index: break random_document = self.documents[random_document_index] random_start = random.randint(0, len(random_document) - 1) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if len(tokens_b) >= target_b_length: break # We didn't actually use these segments so we "put them back" so # they don't go to waste. num_unused_segments = len(current_chunk) - a_end i -= num_unused_segments # Actual next else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) if not (len(tokens_a) >= 1): raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1") if not (len(tokens_b) >= 1): raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1") # add special tokens input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b) # add token type ids, 0 for sentence a, 1 for sentence b token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b) example = { "input_ids": torch.tensor(input_ids, dtype=torch.long), "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long), "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long), } self.examples.append(example) current_chunk = [] current_length = 0 i += 1 def __len__(self): return len(self.examples) def __getitem__(self, i): return self.examples[i]
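

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how LineByLineTextDataset is typically combined with a
# tokenizer and a masked-language-modeling collator. "train.txt" and "bert-base-uncased"
# are placeholder names; remember that these dataset classes are deprecated in favor of
# the 🤗 Datasets library, as the warnings above state.
def _example_line_by_line_mlm_batches():
    from torch.utils.data import DataLoader

    from transformers import AutoTokenizer, DataCollatorForLanguageModeling

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

    # one example per non-empty line of train.txt, truncated to block_size tokens
    dataset = LineByLineTextDataset(tokenizer=tokenizer, file_path="train.txt", block_size=128)

    # dynamic masking for MLM; each batch carries `input_ids`, `attention_mask` and `labels`
    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
    loader = DataLoader(dataset, batch_size=8, collate_fn=collator)

    return next(iter(loader))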
transformers/src/transformers/data/datasets/language_modeling.py/0
{ "file_path": "transformers/src/transformers/data/datasets/language_modeling.py", "repo_id": "transformers", "token_count": 11441 }
401
# Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Feature extraction saving/loading class for common feature extractors. """ import copy import json import os import warnings from collections import UserDict from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union import numpy as np from .dynamic_module_utils import custom_object_save from .utils import ( FEATURE_EXTRACTOR_NAME, PushToHubMixin, TensorType, cached_file, copy_func, download_url, is_flax_available, is_jax_tensor, is_numpy_array, is_offline_mode, is_remote_url, is_tf_available, is_torch_available, is_torch_device, is_torch_dtype, logging, requires_backends, ) if TYPE_CHECKING: if is_torch_available(): import torch # noqa logger = logging.get_logger(__name__) PreTrainedFeatureExtractor = Union["SequenceFeatureExtractor"] # noqa: F821 # type hinting: specifying the type of feature extractor class that inherits from FeatureExtractionMixin SpecificFeatureExtractorType = TypeVar("SpecificFeatureExtractorType", bound="FeatureExtractionMixin") class BatchFeature(UserDict): r""" Holds the output of the [`~SequenceFeatureExtractor.pad`] and feature extractor specific `__call__` methods. This class is derived from a python dictionary and can be used as a dictionary. Args: data (`dict`, *optional*): Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask', etc.). tensor_type (`Union[None, str, TensorType]`, *optional*): You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization. """ def __init__(self, data: Optional[dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None): super().__init__(data) self.convert_to_tensors(tensor_type=tensor_type) def __getitem__(self, item: str) -> Any: """ If the key is a string, returns the value of the dict associated to `key` ('input_values', 'attention_mask', etc.). """ if isinstance(item, str): return self.data[item] else: raise KeyError("Indexing with integers is not available when using Python based feature extractors") def __getattr__(self, item: str): try: return self.data[item] except KeyError: raise AttributeError def __getstate__(self): return {"data": self.data} def __setstate__(self, state): if "data" in state: self.data = state["data"] def _get_is_as_tensor_fns(self, tensor_type: Optional[Union[str, TensorType]] = None): if tensor_type is None: return None, None # Convert to TensorType if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: logger.warning_once( "TensorFlow and JAX classes are deprecated and will be removed in Transformers v5. We " "recommend migrating to PyTorch classes or pinning your version of Transformers." ) if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." 
) import tensorflow as tf as_tensor = tf.constant is_tensor = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") import torch # noqa def as_tensor(value): if isinstance(value, (list, tuple)) and len(value) > 0: if isinstance(value[0], np.ndarray): value = np.array(value) elif ( isinstance(value[0], (list, tuple)) and len(value[0]) > 0 and isinstance(value[0][0], np.ndarray) ): value = np.array(value) if isinstance(value, np.ndarray): return torch.from_numpy(value) else: return torch.tensor(value) is_tensor = torch.is_tensor elif tensor_type == TensorType.JAX: logger.warning_once( "TensorFlow and JAX classes are deprecated and will be removed in Transformers v5. We " "recommend migrating to PyTorch classes or pinning your version of Transformers." ) if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") import jax.numpy as jnp # noqa: F811 as_tensor = jnp.array is_tensor = is_jax_tensor else: def as_tensor(value, dtype=None): if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)): value_lens = [len(val) for val in value] if len(set(value_lens)) > 1 and dtype is None: # we have a ragged list so handle explicitly value = as_tensor([np.asarray(val) for val in value], dtype=object) return np.asarray(value, dtype=dtype) is_tensor = is_numpy_array return is_tensor, as_tensor def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None): """ Convert the inner content to tensors. Args: tensor_type (`str` or [`~utils.TensorType`], *optional*): The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If `None`, no modification is done. """ if tensor_type is None: return self is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type) # Do the tensor conversion in batch for key, value in self.items(): try: if not is_tensor(value): tensor = as_tensor(value) self[key] = tensor except: # noqa E722 if key == "overflowing_values": raise ValueError("Unable to create tensor returning overflowing values of different lengths. ") raise ValueError( "Unable to create tensor, you should probably activate padding " "with 'padding=True' to have batched tensors with the same length." ) return self def to(self, *args, **kwargs) -> "BatchFeature": """ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in different `dtypes` and sending the `BatchFeature` to a different `device`. Args: args (`Tuple`): Will be passed to the `to(...)` function of the tensors. kwargs (`Dict`, *optional*): Will be passed to the `to(...)` function of the tensors. To enable asynchronous data transfer, set the `non_blocking` flag in `kwargs` (defaults to `False`). Returns: [`BatchFeature`]: The same instance after modification. """ requires_backends(self, ["torch"]) import torch # noqa device = kwargs.get("device") non_blocking = kwargs.get("non_blocking", False) # Check if the args are a device or a dtype if device is None and len(args) > 0: # device should be always the first argument arg = args[0] if is_torch_dtype(arg): # The first argument is a dtype pass elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int): device = arg else: # it's something else raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. 
This is not supported.") # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor` def maybe_to(v): # check if v is a floating point if isinstance(v, torch.Tensor) and torch.is_floating_point(v): # cast and send to device return v.to(*args, **kwargs) elif isinstance(v, torch.Tensor) and device is not None: return v.to(device=device, non_blocking=non_blocking) else: return v self.data = {k: maybe_to(v) for k, v in self.items()} return self class FeatureExtractionMixin(PushToHubMixin): """ This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature extractors. """ _auto_class = None def __init__(self, **kwargs): """Set elements of `kwargs` as attributes.""" # Pop "processor_class" as it should be saved as private attribute self._processor_class = kwargs.pop("processor_class", None) # Additional attributes without default values for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err def _set_processor_class(self, processor_class: str): """Sets processor class as an attribute.""" self._processor_class = processor_class @classmethod def from_pretrained( cls: type[SpecificFeatureExtractorType], pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ) -> SpecificFeatureExtractorType: r""" Instantiate a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a feature extractor, *e.g.* a derived class of [`SequenceFeatureExtractor`]. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. - a path to a *directory* containing a feature extractor file saved using the [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved feature extractor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the feature extractor files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. 
</Tip> return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final feature extractor object. If `True`, then this functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored. kwargs (`dict[str, Any]`, *optional*): The values in kwargs of any keys which are feature extractor attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is controlled by the `return_unused_kwargs` keyword parameter. Returns: A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]. Examples: ```python # We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a # derived class: *Wav2Vec2FeatureExtractor* feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h" ) # Download feature_extraction_config from huggingface.co and cache. feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "./test/saved_model/" ) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')* feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json") feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False ) assert feature_extractor.return_attention_mask is False feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True ) assert feature_extractor.return_attention_mask is False assert unused_kwargs == {"foo": False} ```""" kwargs["cache_dir"] = cache_dir kwargs["force_download"] = force_download kwargs["local_files_only"] = local_files_only kwargs["revision"] = revision use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None: kwargs["token"] = token feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs) return cls.from_dict(feature_extractor_dict, **kwargs) def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the feature extractor JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). 
kwargs (`dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token") is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) kwargs["token"] = use_auth_token if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if self._auto_class is not None: custom_object_save(self, save_directory, config=self) # If we save using the predefined names, we can load using `from_pretrained` output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME) self.to_json_file(output_feature_extractor_file) logger.info(f"Feature extractor saved in {output_feature_extractor_file}") if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get("token"), ) return [output_feature_extractor_file] @classmethod def get_feature_extractor_dict( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> tuple[dict[str, Any], dict[str, Any]]: """ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. Returns: `tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object. """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) subfolder = kwargs.pop("subfolder", None) token = kwargs.pop("token", None) use_auth_token = kwargs.pop("use_auth_token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) token = use_auth_token from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME) if os.path.isfile(pretrained_model_name_or_path): resolved_feature_extractor_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): feature_extractor_file = pretrained_model_name_or_path resolved_feature_extractor_file = download_url(pretrained_model_name_or_path) else: feature_extractor_file = FEATURE_EXTRACTOR_NAME try: # Load from local folder or from cache or download from model Hub and cache resolved_feature_extractor_file = cached_file( pretrained_model_name_or_path, feature_extractor_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, subfolder=subfolder, token=token, user_agent=user_agent, revision=revision, ) except OSError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to # the original exception. raise except Exception: # For any other exception, we throw a generic error. raise OSError( f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load" " it from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a {FEATURE_EXTRACTOR_NAME} file" ) try: # Load feature_extractor dict with open(resolved_feature_extractor_file, encoding="utf-8") as reader: text = reader.read() feature_extractor_dict = json.loads(text) except json.JSONDecodeError: raise OSError( f"It looks like the config file at '{resolved_feature_extractor_file}' is not a valid JSON file." ) if is_local: logger.info(f"loading configuration file {resolved_feature_extractor_file}") else: logger.info( f"loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}" ) return feature_extractor_dict, kwargs @classmethod def from_dict(cls, feature_extractor_dict: dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor: """ Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of parameters. Args: feature_extractor_dict (`dict[str, Any]`): Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the [`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method. kwargs (`dict[str, Any]`): Additional parameters from which to initialize the feature extractor object. Returns: [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those parameters. 
""" return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) # Update feature_extractor with kwargs if needed to_remove = [] for key, value in kwargs.items(): if key in feature_extractor_dict: feature_extractor_dict[key] = value to_remove.append(key) for key in to_remove: kwargs.pop(key, None) feature_extractor = cls(**feature_extractor_dict) logger.info(f"Feature extractor {feature_extractor}") if return_unused_kwargs: return feature_extractor, kwargs else: return feature_extractor def to_dict(self) -> dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. """ output = copy.deepcopy(self.__dict__) output["feature_extractor_type"] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "window" in output: del output["window"] return output @classmethod def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor: """ Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to a JSON file of parameters. Args: json_file (`str` or `os.PathLike`): Path to the JSON file containing the parameters. Returns: A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor object instantiated from that JSON file. """ with open(json_file, encoding="utf-8") as reader: text = reader.read() feature_extractor_dict = json.loads(text) return cls(**feature_extractor_dict) def to_json_string(self) -> str: """ Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. """ dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name "_processor_class" is correctly # saved as "processor_class" _processor_class = dictionary.pop("_processor_class", None) if _processor_class is not None: dictionary["processor_class"] = _processor_class return json.dumps(dictionary, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this feature_extractor instance's parameters will be saved. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" @classmethod def register_for_auto_class(cls, auto_class="AutoFeatureExtractor"): """ Register this class with a given auto class. This should only be used for custom feature extractors as the ones in the library are already mapped with `AutoFeatureExtractor`. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`): The auto class to register this new feature extractor with. 
""" if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class FeatureExtractionMixin.push_to_hub = copy_func(FeatureExtractionMixin.push_to_hub) if FeatureExtractionMixin.push_to_hub.__doc__ is not None: FeatureExtractionMixin.push_to_hub.__doc__ = FeatureExtractionMixin.push_to_hub.__doc__.format( object="feature extractor", object_class="AutoFeatureExtractor", object_files="feature extractor file" )
transformers/src/transformers/feature_extraction_utils.py/0
{ "file_path": "transformers/src/transformers/feature_extraction_utils.py", "repo_id": "transformers", "token_count": 13038 }
402
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team and Google DeepMind. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from dataclasses import dataclass from functools import lru_cache from typing import Any, Optional, Union import numpy as np import torch from torch import nn from torch.nn import BCELoss from ..modeling_utils import PreTrainedModel from ..utils import ModelOutput, is_torch_available, logging from .configuration_utils import PretrainedConfig, WatermarkingConfig if is_torch_available(): import torch from .logits_process import SynthIDTextWatermarkLogitsProcessor, WatermarkLogitsProcessor logger = logging.get_logger(__name__) @dataclass class WatermarkDetectorOutput: """ Outputs of a watermark detector. Args: num_tokens_scored (np.array of shape (batch_size)): Array containing the number of tokens scored for each element in the batch. num_green_tokens (np.array of shape (batch_size)): Array containing the number of green tokens for each element in the batch. green_fraction (np.array of shape (batch_size)): Array containing the fraction of green tokens for each element in the batch. z_score (np.array of shape (batch_size)): Array containing the z-score for each element in the batch. Z-score here shows how many standard deviations away is the green token count in the input text from the expected green token count for machine-generated text. p_value (np.array of shape (batch_size)): Array containing the p-value for each batch obtained from z-scores. prediction (np.array of shape (batch_size)), *optional*: Array containing boolean predictions whether a text is machine-generated for each element in the batch. confidence (np.array of shape (batch_size)), *optional*: Array containing confidence scores of a text being machine-generated for each element in the batch. """ num_tokens_scored: Optional[np.array] = None num_green_tokens: Optional[np.array] = None green_fraction: Optional[np.array] = None z_score: Optional[np.array] = None p_value: Optional[np.array] = None prediction: Optional[np.array] = None confidence: Optional[np.array] = None class WatermarkDetector: r""" Detector for detection of watermark generated text. The detector needs to be given the exact same settings that were given during text generation to replicate the watermark greenlist generation and so detect the watermark. This includes the correct device that was used during text generation, the correct watermarking arguments and the correct tokenizer vocab size. The code was based on the [original repo](https://github.com/jwkirchenbauer/lm-watermarking/tree/main). See [the paper](https://huggingface.co/papers/2306.04634) for more information. Args: model_config (`PretrainedConfig`): The model config that will be used to get model specific arguments used when generating. device (`str`): The device which was used during watermarked text generation. watermarking_config (Union[`WatermarkingConfig`, `Dict`]): The exact same watermarking config and arguments used when generating text. 
ignore_repeated_ngrams (`bool`, *optional*, defaults to `False`): Whether to count every unique ngram only once or not. max_cache_size (`int`, *optional*, defaults to 128): The max size to be used for LRU caching of seeding/sampling algorithms called for every token. Examples: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, WatermarkDetector, WatermarkingConfig >>> model_id = "openai-community/gpt2" >>> model = AutoModelForCausalLM.from_pretrained(model_id) >>> tok = AutoTokenizer.from_pretrained(model_id) >>> tok.pad_token_id = tok.eos_token_id >>> tok.padding_side = "left" >>> inputs = tok(["This is the beginning of a long story", "Alice and Bob are"], padding=True, return_tensors="pt") >>> input_len = inputs["input_ids"].shape[-1] >>> # first generate text with watermark and without >>> watermarking_config = WatermarkingConfig(bias=2.5, seeding_scheme="selfhash") >>> out_watermarked = model.generate(**inputs, watermarking_config=watermarking_config, do_sample=False, max_length=20) >>> out = model.generate(**inputs, do_sample=False, max_length=20) >>> # now we can instantiate the detector and check the generated text >>> detector = WatermarkDetector(model_config=model.config, device="cpu", watermarking_config=watermarking_config) >>> detection_out_watermarked = detector(out_watermarked, return_dict=True) >>> detection_out = detector(out, return_dict=True) >>> detection_out_watermarked.prediction array([ True, True]) >>> detection_out.prediction array([False, False]) ``` """ def __init__( self, model_config: PretrainedConfig, device: str, watermarking_config: Union[WatermarkingConfig, dict], ignore_repeated_ngrams: bool = False, max_cache_size: int = 128, ): if isinstance(watermarking_config, WatermarkingConfig): watermarking_config = watermarking_config.to_dict() self.bos_token_id = ( model_config.bos_token_id if not model_config.is_encoder_decoder else model_config.decoder_start_token_id ) self.greenlist_ratio = watermarking_config["greenlist_ratio"] self.ignore_repeated_ngrams = ignore_repeated_ngrams self.processor = WatermarkLogitsProcessor( vocab_size=model_config.vocab_size, device=device, **watermarking_config ) # Expensive re-seeding and sampling is cached. self._get_ngram_score_cached = lru_cache(maxsize=max_cache_size)(self._get_ngram_score) def _get_ngram_score(self, prefix: torch.LongTensor, target: int): greenlist_ids = self.processor._get_greenlist_ids(prefix) return target in greenlist_ids def _score_ngrams_in_passage(self, input_ids: torch.LongTensor): batch_size, seq_length = input_ids.shape selfhash = int(self.processor.seeding_scheme == "selfhash") n = self.processor.context_width + 1 - selfhash indices = torch.arange(n).unsqueeze(0) + torch.arange(seq_length - n + 1).unsqueeze(1) ngram_tensors = input_ids[:, indices] num_tokens_scored_batch = np.zeros(batch_size) green_token_count_batch = np.zeros(batch_size) for batch_idx in range(ngram_tensors.shape[0]): frequencies_table = collections.Counter(ngram_tensors[batch_idx]) ngram_to_watermark_lookup = {} for ngram_example in frequencies_table: prefix = ngram_example if selfhash else ngram_example[:-1] target = ngram_example[-1] ngram_to_watermark_lookup[ngram_example] = self._get_ngram_score_cached(prefix, target) if self.ignore_repeated_ngrams: # counts a green/red hit once per unique ngram. # num total tokens scored becomes the number unique ngrams. 
num_tokens_scored_batch[batch_idx] = len(frequencies_table.keys()) green_token_count_batch[batch_idx] = sum(ngram_to_watermark_lookup.values()) else: num_tokens_scored_batch[batch_idx] = sum(frequencies_table.values()) green_token_count_batch[batch_idx] = sum( freq * outcome for freq, outcome in zip(frequencies_table.values(), ngram_to_watermark_lookup.values()) ) return num_tokens_scored_batch, green_token_count_batch def _compute_z_score(self, green_token_count: np.array, total_num_tokens: np.array) -> np.array: expected_count = self.greenlist_ratio numer = green_token_count - expected_count * total_num_tokens denom = np.sqrt(total_num_tokens * expected_count * (1 - expected_count)) z = numer / denom return z def _compute_pval(self, x, loc=0, scale=1): z = (x - loc) / scale return 1 - (0.5 * (1 + np.sign(z) * (1 - np.exp(-2 * z**2 / np.pi)))) def __call__( self, input_ids: torch.LongTensor, z_threshold: float = 3.0, return_dict: bool = False, ) -> Union[WatermarkDetectorOutput, np.array]: """ Args: input_ids (`torch.LongTensor`): The watermark generated text. It is advised to remove the prompt, which can affect the detection. z_threshold (`Dict`, *optional*, defaults to `3.0`): Changing this threshold will change the sensitivity of the detector. Higher z threshold gives less sensitivity and vice versa for lower z threshold. return_dict (`bool`, *optional*, defaults to `False`): Whether to return `~generation.WatermarkDetectorOutput` or not. If not it will return boolean predictions, ma Return: [`~generation.WatermarkDetectorOutput`] or `np.array`: A [`~generation.WatermarkDetectorOutput`] if `return_dict=True` otherwise a `np.array`. """ # Let's assume that if one batch start with `bos`, all batched also do if input_ids[0, 0] == self.bos_token_id: input_ids = input_ids[:, 1:] if input_ids.shape[-1] - self.processor.context_width < 1: raise ValueError( f"Must have at least `1` token to score after the first " f"min_prefix_len={self.processor.context_width} tokens required by the seeding scheme." ) num_tokens_scored, green_token_count = self._score_ngrams_in_passage(input_ids) z_score = self._compute_z_score(green_token_count, num_tokens_scored) prediction = z_score > z_threshold if return_dict: p_value = self._compute_pval(z_score) confidence = 1 - p_value return WatermarkDetectorOutput( num_tokens_scored=num_tokens_scored, num_green_tokens=green_token_count, green_fraction=green_token_count / num_tokens_scored, z_score=z_score, p_value=p_value, prediction=prediction, confidence=confidence, ) return prediction class BayesianDetectorConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`BayesianDetectorModel`]. It is used to instantiate a Bayesian Detector model according to the specified arguments. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: watermarking_depth (`int`, *optional*): The number of tournament layers. base_rate (`float1`, *optional*, defaults to 0.5): Prior probability P(w) that a text is watermarked. """ def __init__(self, watermarking_depth: Optional[int] = None, base_rate: float = 0.5, **kwargs): self.watermarking_depth = watermarking_depth self.base_rate = base_rate # These can be set later to store information about this detector. 
self.model_name = None self.watermarking_config = None super().__init__(**kwargs) def set_detector_information(self, model_name, watermarking_config): self.model_name = model_name self.watermarking_config = watermarking_config @dataclass class BayesianWatermarkDetectorModelOutput(ModelOutput): """ Base class for outputs of models predicting if the text is watermarked. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. posterior_probabilities (`torch.FloatTensor` of shape `(1,)`): Multiple choice classification loss. """ loss: Optional[torch.FloatTensor] = None posterior_probabilities: Optional[torch.FloatTensor] = None class BayesianDetectorWatermarkedLikelihood(nn.Module): """Watermarked likelihood model for binary-valued g-values. This takes in g-values and returns p(g_values|watermarked). """ def __init__(self, watermarking_depth: int): """Initializes the model parameters.""" super().__init__() self.watermarking_depth = watermarking_depth self.beta = torch.nn.Parameter(-2.5 + 0.001 * torch.randn(1, 1, watermarking_depth)) self.delta = torch.nn.Parameter(0.001 * torch.randn(1, 1, self.watermarking_depth, watermarking_depth)) def _compute_latents(self, g_values: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: """Computes the unique token probability distribution given g-values. Args: g_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth)`): PRF values. Returns: p_one_unique_token and p_two_unique_tokens, both of shape [batch_size, seq_len, watermarking_depth]. p_one_unique_token[i,t,l] gives the probability of there being one unique token in a tournament match on layer l, on timestep t, for batch item i. p_one_unique_token[i,t,l] + p_two_unique_token[i,t,l] = 1. """ # Tile g-values to produce feature vectors for predicting the latents # for each layer in the tournament; our model for the latents psi is a # logistic regression model psi = sigmoid(delta * x + beta). # [batch_size, seq_len, watermarking_depth, watermarking_depth] x = torch.repeat_interleave(torch.unsqueeze(g_values, dim=-2), self.watermarking_depth, axis=-2) # mask all elements above -1 diagonal for autoregressive factorization x = torch.tril(x, diagonal=-1) # [batch_size, seq_len, watermarking_depth] # (i, j, k, l) x (i, j, k, l) -> (i, j, k) einsum equivalent logits = (self.delta[..., None, :] @ x.type(self.delta.dtype)[..., None]).squeeze() + self.beta p_two_unique_tokens = torch.sigmoid(logits) p_one_unique_token = 1 - p_two_unique_tokens return p_one_unique_token, p_two_unique_tokens def forward(self, g_values: torch.Tensor) -> torch.Tensor: """Computes the likelihoods P(g_values|watermarked). Args: g_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth)`): g-values (values 0 or 1) Returns: p(g_values|watermarked) of shape [batch_size, seq_len, watermarking_depth]. """ p_one_unique_token, p_two_unique_tokens = self._compute_latents(g_values) # P(g_tl | watermarked) is equal to # 0.5 * [ (g_tl+0.5) * p_two_unique_tokens + p_one_unique_token]. return 0.5 * ((g_values + 0.5) * p_two_unique_tokens + p_one_unique_token) class BayesianDetectorModel(PreTrainedModel): r""" Bayesian classifier for watermark detection. This detector uses Bayes' rule to compute a watermarking score, which is the sigmoid of the log of ratio of the posterior probabilities P(watermarked|g_values) and P(unwatermarked|g_values). Please see the section on BayesianScore in the paper for further details. 
Paper URL: https://www.nature.com/articles/s41586-024-08025-4 Note that this detector only works with non-distortionary Tournament-based watermarking using the Bernoulli(0.5) g-value distribution. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BayesianDetectorConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ config: BayesianDetectorConfig base_model_prefix = "model" def __init__(self, config): super().__init__(config) self.watermarking_depth = config.watermarking_depth self.base_rate = config.base_rate self.likelihood_model_watermarked = BayesianDetectorWatermarkedLikelihood( watermarking_depth=self.watermarking_depth ) self.prior = torch.nn.Parameter(torch.tensor([self.base_rate])) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Parameter): module.weight.data.normal_(mean=0.0, std=0.02) def _compute_posterior( self, likelihoods_watermarked: torch.Tensor, likelihoods_unwatermarked: torch.Tensor, mask: torch.Tensor, prior: float, ) -> torch.Tensor: """ Compute posterior P(w|g) given likelihoods, mask and prior. Args: likelihoods_watermarked (`torch.Tensor` of shape `(batch, length, depth)`): Likelihoods P(g_values|watermarked) of g-values under watermarked model. likelihoods_unwatermarked (`torch.Tensor` of shape `(batch, length, depth)`): Likelihoods P(g_values|unwatermarked) of g-values under unwatermarked model. mask (`torch.Tensor` of shape `(batch, length)`): A binary array indicating which g-values should be used. g-values with mask value 0 are discarded. prior (`float`): the prior probability P(w) that the text is watermarked. Returns: Posterior probability P(watermarked|g_values), shape [batch]. """ mask = torch.unsqueeze(mask, dim=-1) prior = torch.clamp(prior, min=1e-5, max=1 - 1e-5) log_likelihoods_watermarked = torch.log(torch.clamp(likelihoods_watermarked, min=1e-30, max=float("inf"))) log_likelihoods_unwatermarked = torch.log(torch.clamp(likelihoods_unwatermarked, min=1e-30, max=float("inf"))) log_odds = log_likelihoods_watermarked - log_likelihoods_unwatermarked # Sum relative surprisals (log odds) across all token positions and layers. relative_surprisal_likelihood = torch.einsum("i...->i", log_odds * mask) # Compute the relative surprisal prior relative_surprisal_prior = torch.log(prior) - torch.log(1 - prior) # Combine prior and likelihood. # [batch_size] relative_surprisal = relative_surprisal_prior + relative_surprisal_likelihood # Compute the posterior probability P(w|g) = sigmoid(relative_surprisal). return torch.sigmoid(relative_surprisal) def forward( self, g_values: torch.Tensor, mask: torch.Tensor, labels: Optional[torch.Tensor] = None, loss_batch_weight=1, return_dict=False, ) -> BayesianWatermarkDetectorModelOutput: """ Computes the watermarked posterior P(watermarked|g_values). 
        Args:
            g_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth, ...)`):
                g-values (with values 0 or 1)
            mask (`torch.Tensor` of shape `(batch_size, seq_len)`):
                A binary array indicating which g-values should be used. g-values with mask value 0 are discarded.

        Returns:
            p(watermarked | g_values), of shape [batch_size].
        """

        likelihoods_watermarked = self.likelihood_model_watermarked(g_values)
        likelihoods_unwatermarked = 0.5 * torch.ones_like(g_values)
        out = self._compute_posterior(
            likelihoods_watermarked=likelihoods_watermarked,
            likelihoods_unwatermarked=likelihoods_unwatermarked,
            mask=mask,
            prior=self.prior,
        )

        loss = None
        if labels is not None:
            loss_fct = BCELoss()
            loss_unwweight = torch.sum(self.likelihood_model_watermarked.delta**2)
            loss_weight = loss_unwweight * loss_batch_weight
            loss = loss_fct(torch.clamp(out, 1e-5, 1 - 1e-5), labels) + loss_weight

        if not return_dict:
            return (out,) if loss is None else (out, loss)

        return BayesianWatermarkDetectorModelOutput(loss=loss, posterior_probabilities=out)


class SynthIDTextWatermarkDetector:
    r"""
    SynthID text watermark detector class.

    This class has to be initialized with the trained Bayesian detector module. See the script in
    examples/synthid_text/detector_training.py for an example of training/saving/loading this detector module. The
    folder also showcases an example use case of this detector.

    Parameters:
        detector_module ([`BayesianDetectorModel`]):
            Bayesian detector module object initialized with parameters.
            Check https://github.com/huggingface/transformers-research-projects/tree/main/synthid_text for usage.
        logits_processor (`SynthIDTextWatermarkLogitsProcessor`):
            The logits processor used for watermarking.
        tokenizer (`Any`):
            The tokenizer used for the model.

    Examples:
    ```python
    >>> from transformers import (
    ...     AutoTokenizer, BayesianDetectorModel, SynthIDTextWatermarkLogitsProcessor, SynthIDTextWatermarkDetector
    ... )

    >>> # Load the detector. See https://github.com/huggingface/transformers-research-projects/tree/main/synthid_text for training a detector.
    >>> detector_model = BayesianDetectorModel.from_pretrained("joaogante/dummy_synthid_detector")
    >>> logits_processor = SynthIDTextWatermarkLogitsProcessor(
    ...     **detector_model.config.watermarking_config, device="cpu"
    ...
) >>> tokenizer = AutoTokenizer.from_pretrained(detector_model.config.model_name) >>> detector = SynthIDTextWatermarkDetector(detector_model, logits_processor, tokenizer) >>> # Test whether a certain string is watermarked >>> test_input = tokenizer(["This is a test input"], return_tensors="pt") >>> is_watermarked = detector(test_input.input_ids) ``` """ def __init__( self, detector_module: BayesianDetectorModel, logits_processor: SynthIDTextWatermarkLogitsProcessor, tokenizer: Any, ): self.detector_module = detector_module self.logits_processor = logits_processor self.tokenizer = tokenizer def __call__(self, tokenized_outputs: torch.Tensor): # eos mask is computed, skip first ngram_len - 1 tokens # eos_mask will be of shape [batch_size, output_len] eos_token_mask = self.logits_processor.compute_eos_token_mask( input_ids=tokenized_outputs, eos_token_id=self.tokenizer.eos_token_id, )[:, self.logits_processor.ngram_len - 1 :] # context repetition mask is computed context_repetition_mask = self.logits_processor.compute_context_repetition_mask( input_ids=tokenized_outputs, ) # context repetition mask shape [batch_size, output_len - (ngram_len - 1)] combined_mask = context_repetition_mask * eos_token_mask g_values = self.logits_processor.compute_g_values( input_ids=tokenized_outputs, ) # g values shape [batch_size, output_len - (ngram_len - 1), depth] return self.detector_module(g_values, combined_mask)
transformers/src/transformers/generation/watermarking.py/0
{ "file_path": "transformers/src/transformers/generation/watermarking.py", "repo_id": "transformers", "token_count": 9729 }
403
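The z-score and p-value computations in `WatermarkDetector` above (`_compute_z_score`, `_compute_pval`) can be sanity-checked in isolation. Below is a minimal NumPy sketch using illustrative counts rather than real detector output:

```python
import numpy as np

# Illustrative numbers: two sequences, 128 scored tokens each, with the
# observed number of "green" tokens; greenlist_ratio plays the role of
# self.greenlist_ratio in the detector.
greenlist_ratio = 0.25
num_tokens_scored = np.array([128, 128])
green_token_count = np.array([35, 90])

# z-score: standardized distance of the observed green count from the count
# expected under unwatermarked text (binomial mean and standard deviation).
expected_count = greenlist_ratio * num_tokens_scored
denom = np.sqrt(num_tokens_scored * greenlist_ratio * (1 - greenlist_ratio))
z_score = (green_token_count - expected_count) / denom

# One-sided p-value via the same error-function approximation as _compute_pval.
p_value = 1 - (0.5 * (1 + np.sign(z_score) * (1 - np.exp(-2 * z_score**2 / np.pi))))

print(z_score)  # the second sequence sits many standard deviations above chance
print(p_value)  # and gets a near-zero p-value, i.e. it is predicted as watermarked
```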
# coding=utf-8
# Copyright 2024 NetEase, Inc. and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import is_accelerate_available, is_eetq_available, logging


if is_eetq_available():
    import eetq
    import torch.nn as nn

if is_accelerate_available():
    from accelerate import init_empty_weights

logger = logging.get_logger(__name__)


def _replace_with_eetq_linear(
    model,
    modules_to_not_convert=None,
    current_key_name=None,
    quantization_config=None,
    has_been_replaced=False,
    pre_quantized=False,
):
    """
    Private method that wraps the recursion for module replacement.

    Returns the converted model and a boolean that indicates if the conversion has been successful or not.
    """
    if current_key_name is None:
        current_key_name = []

    for name, module in model.named_children():
        current_key_name.append(name)

        if (isinstance(module, nn.Linear)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            if not any(
                (key + "." in current_key_name_str) or (key == current_key_name_str) for key in modules_to_not_convert
            ):
                with init_empty_weights():
                    in_features = module.in_features
                    out_features = module.out_features
                    model._modules[name] = eetq.EetqLinear(
                        in_features, out_features, module.bias is not None, module.weight.device
                    )
                    if pre_quantized:
                        model._modules[name].register_scale(module.weight.device)
                    has_been_replaced = True

                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_eetq_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
                pre_quantized=pre_quantized,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_eetq_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, pre_quantized=False
):
    """
    A helper function to replace all `torch.nn.Linear` modules with `eetq.EetqLinear` modules from the `eetq`
    library. This enables running your models with the high-performance int8 weight-only GEMM kernels from
    FasterTransformer and TensorRT-LLM. Make sure that `eetq`, compiled against the CUDA version matching your
    hardware, is installed before running this function. EETQ should be installed from source:
    'https://github.com/NetEase-FuXi/EETQ'

    The function is run recursively and replaces all `torch.nn.Linear` modules except for the `lm_head`, which should
    be kept as a `torch.nn.Linear` module. The replacement is done under the `init_empty_weights` context manager, so
    no CPU/GPU memory is required to run this function. Each weight is quantized per channel.

    Parameters:
        model (`torch.nn.Module`):
            Input model or `torch.nn.Module` as the function is run recursively.
        modules_to_not_convert (`list[str]`, *optional*, defaults to `["lm_head"]`):
            Names of the modules to not convert to `EetqLinear`.
            In practice, we keep the `lm_head` in full precision for numerical stability reasons.
        current_key_name (`list[str]`, *optional*):
            An array to track the current key of the recursion. This is used to check whether the current key (or
            part of it) is in the list of modules to not convert (for instance, modules that are offloaded to `cpu`
            or `disk`).
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert

    if quantization_config.modules_to_not_convert is not None:
        modules_to_not_convert.extend(quantization_config.modules_to_not_convert)
    modules_to_not_convert = list(set(modules_to_not_convert))
    model, has_been_replaced = _replace_with_eetq_linear(
        model, modules_to_not_convert, current_key_name, quantization_config, pre_quantized=pre_quantized
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model using eetq but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
transformers/src/transformers/integrations/eetq.py/0
{ "file_path": "transformers/src/transformers/integrations/eetq.py", "repo_id": "transformers", "token_count": 2105 }
404
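The recursion pattern in `_replace_with_eetq_linear` above generalizes to any module swap. The sketch below mirrors it on a toy model but substitutes a hypothetical `DummyQuantLinear` for `eetq.EetqLinear`, so it runs without `eetq` installed; it illustrates the traversal and exclusion-list logic only, not EETQ itself.

```python
import torch.nn as nn


class DummyQuantLinear(nn.Linear):
    """Hypothetical stand-in for eetq.EetqLinear, used only to show the swap."""


def replace_linears(model, modules_to_not_convert=("lm_head",)):
    # Walk direct children; swap plain Linear layers, recurse into containers.
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            model._modules[name] = DummyQuantLinear(
                module.in_features, module.out_features, module.bias is not None
            )
        elif len(list(module.children())) > 0:
            replace_linears(module, modules_to_not_convert)
    return model


toy = nn.ModuleDict({"encoder": nn.Sequential(nn.Linear(8, 8)), "lm_head": nn.Linear(8, 2)})
print(replace_linears(toy))  # encoder.0 becomes DummyQuantLinear, lm_head is kept
```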
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import os import torch from ..utils.import_utils import is_torch_npu_available if is_torch_npu_available(): from torch_npu import npu_fusion_attention # FlashAttention2 is supported on Ascend NPU with down-right aligned causal mask by default. # Set environment variable `NPU_FA2_SPARSE_MODE` to 2 when using top-left aligned causal mask. TOP_LEFT_ALIGNED_CAUSAL_MASK_MODE = 2 DOWN_RIGHT_ALIGNED_CAUSAL_MASK_MODE = 3 SPARSE_MODE = int(os.getenv("NPU_FA2_SPARSE_MODE", default=DOWN_RIGHT_ALIGNED_CAUSAL_MASK_MODE)) if SPARSE_MODE not in [TOP_LEFT_ALIGNED_CAUSAL_MASK_MODE, DOWN_RIGHT_ALIGNED_CAUSAL_MASK_MODE]: raise ValueError( "Environment variable `NPU_FA2_SPARSE_MODE` can only be set as 2 (top-left aligned causal mask) " "or 3 (down-right aligned causal mask)." ) ATTN_MASK_NPU_CACHE = {} def get_attn_mask_npu(device): """Get or create attention mask for the specified device.""" if device not in ATTN_MASK_NPU_CACHE: ATTN_MASK_NPU_CACHE[device] = torch.triu(torch.ones([2048, 2048], device=device), diagonal=1).bool() return ATTN_MASK_NPU_CACHE[device] def is_npu_fa2_top_left_aligned_causal_mask(): return SPARSE_MODE == TOP_LEFT_ALIGNED_CAUSAL_MASK_MODE if is_torch_npu_available() else False def npu_flash_attn_func( q, k, v, dropout_p=0.0, softmax_scale=None, causal=False, **kwargs, ): keep_prob = 1.0 - dropout_p if softmax_scale is None: softmax_scale = 1.0 / math.sqrt(q.shape[-1]) if not causal: head_num = q.shape[2] output = npu_fusion_attention(q, k, v, head_num, "BSND", keep_prob=keep_prob, scale=softmax_scale)[0] else: attn_mask_npu = get_attn_mask_npu(q.device) head_num = q.shape[2] output = npu_fusion_attention( q, k, v, head_num, "BSND", keep_prob=keep_prob, scale=softmax_scale, atten_mask=attn_mask_npu, sparse_mode=SPARSE_MODE, )[0] return output def npu_flash_attn_varlen_func( q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q=None, # defined for aligning params order with corresponding function in `flash-attn` max_seqlen_k=None, # defined for aligning params order with corresponding function in `flash-attn` dropout_p=0.0, softmax_scale=None, causal=False, **kwargs, ): keep_prob = 1.0 - dropout_p if softmax_scale is None: softmax_scale = 1.0 / math.sqrt(q.shape[-1]) if not causal: head_num = q.shape[1] output = npu_fusion_attention( q, k, v, head_num, pse=None, atten_mask=None, scale=softmax_scale, keep_prob=keep_prob, input_layout="TND", actual_seq_qlen=tuple(cu_seqlens_q[1:].cpu().numpy().tolist()), actual_seq_kvlen=tuple(cu_seqlens_k[1:].cpu().numpy().tolist()), )[0] else: attn_mask_npu = get_attn_mask_npu(q.device) head_num = q.shape[1] output = npu_fusion_attention( q, k, v, head_num, pse=None, padding_mask=None, atten_mask=attn_mask_npu, scale=softmax_scale, keep_prob=keep_prob, input_layout="TND", actual_seq_qlen=tuple(cu_seqlens_q[1:].cpu().numpy().tolist()), actual_seq_kvlen=tuple(cu_seqlens_k[1:].cpu().numpy().tolist()), sparse_mode=SPARSE_MODE, )[0] return output
transformers/src/transformers/integrations/npu_flash_attention.py/0
{ "file_path": "transformers/src/transformers/integrations/npu_flash_attention.py", "repo_id": "transformers", "token_count": 2011 }
405
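The varlen wrapper above receives `cu_seqlens_q`/`cu_seqlens_k`, i.e. cumulative sequence lengths for a packed ("TND") batch, and forwards their per-sequence end offsets to `npu_fusion_attention`. A small sketch of how such offsets are commonly built from per-sample lengths (the lengths here are made up):

```python
import torch
import torch.nn.functional as F

# Three packed sequences of different lengths.
seq_lens = torch.tensor([5, 3, 7], dtype=torch.int32)

# Cumulative offsets with a leading zero: tensor([ 0,  5,  8, 15]).
cu_seqlens = F.pad(torch.cumsum(seq_lens, dim=0, dtype=torch.int32), (1, 0))
print(cu_seqlens)

# The NPU kernel consumes the end offsets only, which is why the wrapper
# slices off the leading zero: cu_seqlens[1:] -> (5, 8, 15).
print(tuple(cu_seqlens[1:].tolist()))
```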
#include "common.h" template<typename T> __device__ int set_insert(T *set, int set_size, T value) { int slot = value % set_size; int start_slot = slot; while (true) { T prev = atomicCAS(&set[slot], EMPTY_VALUE, value); if (prev == EMPTY_VALUE || prev == value) { return slot; } slot = (slot + 1) % set_size; if (slot == start_slot) { return -1; } } return -1; } template<typename T> __device__ int set_lookup(T *set, int set_size, T value) { int slot = value % set_size; int start_slot = slot; while (true) { if (set[slot] == value) { return slot; } slot = (slot + 1) % set_size; if (slot == start_slot) { return -1; } } return -1; } template<typename T> __device__ void init_buffer(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) { __syncthreads(); for (int i = 0; i < buffer_size; i = i + num_threads) { int offset_idx = i + thread_id; if (offset_idx < buffer_size) { buffer[offset_idx] = init_value; } } __syncthreads(); } template<typename T> __device__ void copy_data(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) { __syncthreads(); for (int i = 0; i < data_length; i = i + num_threads) { int offset_idx = i + thread_id; if (offset_idx < data_length) { dist_pt[offset_idx] = src_pt[offset_idx]; } } __syncthreads(); } template<typename T> __device__ void init_buffer_nonblocking(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) { for (int i = 0; i < buffer_size; i = i + num_threads) { int offset_idx = i + thread_id; if (offset_idx < buffer_size) { buffer[offset_idx] = init_value; } } } template<typename T> __device__ void copy_data_nonblocking(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) { for (int i = 0; i < data_length; i = i + num_threads) { int offset_idx = i + thread_id; if (offset_idx < data_length) { dist_pt[offset_idx] = src_pt[offset_idx]; } } }
transformers/src/transformers/kernels/yoso/common_cuda_device.h/0
{ "file_path": "transformers/src/transformers/kernels/yoso/common_cuda_device.h", "repo_id": "transformers", "token_count": 892 }
406
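The `set_insert`/`set_lookup` device helpers above implement a fixed-size open-addressing hash set with linear probing: `atomicCAS` claims an empty slot, and probing wraps around until the value (or an empty slot) is found or the table is exhausted. A host-side Python sketch of the same probing logic, with the atomics removed and an assumed `EMPTY_VALUE` sentinel (the real one is defined in common.h):

```python
EMPTY_VALUE = -1  # assumed sentinel for an empty slot


def set_insert(table, value):
    slot = value % len(table)
    start_slot = slot
    while True:
        if table[slot] in (EMPTY_VALUE, value):  # claim an empty slot or hit a duplicate
            table[slot] = value
            return slot
        slot = (slot + 1) % len(table)  # linear probing with wrap-around
        if slot == start_slot:
            return -1  # table is full


def set_lookup(table, value):
    slot = value % len(table)
    start_slot = slot
    while True:
        if table[slot] == value:
            return slot
        slot = (slot + 1) % len(table)
        if slot == start_slot:
            return -1  # not present


table = [EMPTY_VALUE] * 8
set_insert(table, 11)          # 11 % 8 == 3, lands in slot 3
set_insert(table, 19)          # also hashes to 3, probes forward to slot 4
print(set_lookup(table, 19))   # -> 4
```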
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ IMPORTANT NOTICE: Every class and function in this file is deprecated in favor of using the much more general `masking_utils.py` primitives. New code should not rely on it, it is only kept for backward compatibility for now, and will be removed in the future. """ from dataclasses import dataclass from typing import Optional, Union import torch from .utils.import_utils import is_torchdynamo_compiling @dataclass class AttentionMaskConverter: """ A utility attention mask class that allows one to: - Create a causal 4d mask - Create a causal 4d mask with slided window - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, key_value_length) that can be multiplied with attention scores Examples: ```python >>> import torch >>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter >>> converter = AttentionMaskConverter(True) >>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32) tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38], [-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]]) ``` Parameters: is_causal (`bool`): Whether the attention mask should be a uni-directional (causal) or bi-directional mask. sliding_window (`int`, *optional*): Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. """ is_causal: bool sliding_window: int def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal self.sliding_window = sliding_window if self.sliding_window is not None and self.sliding_window <= 0: raise ValueError( f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`" ) def to_causal_4d( self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: Union[torch.device, "str"] = "cpu", ) -> Optional[torch.Tensor]: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative bias to upper right hand triangular matrix (causal mask). 
""" if not self.is_causal: raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") # If shape is not cached, create a new causal mask and cache it input_shape = (batch_size, query_length) past_key_values_length = key_value_length - query_length # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None if input_shape[-1] > 1 or self.sliding_window is not None: causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=device, past_key_values_length=past_key_values_length, sliding_window=self.sliding_window, ) return causal_4d_mask def to_4d( self, attention_mask_2d: torch.Tensor, query_length: int, dtype: torch.dtype, key_value_length: Optional[int] = None, ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is causal, a causal mask will be added. """ input_shape = (attention_mask_2d.shape[0], query_length) # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: if key_value_length is None: raise ValueError( "This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask." ) past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=attention_mask_2d.device, past_key_values_length=past_key_values_length, sliding_window=self.sliding_window, ) elif self.sliding_window is not None: raise NotImplementedError("Sliding window is currently only implemented for causal masking") # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( attention_mask_2d.device ) if causal_4d_mask is not None: expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min) # expanded_attn_mask + causal_4d_mask can cause some overflow expanded_4d_mask = expanded_attn_mask return expanded_4d_mask @staticmethod def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None, ): """ Make causal mask used for bi-directional self-attention. 
""" bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) # add lower triangular sliding window mask if necessary if sliding_window is not None: diagonal = past_key_values_length - sliding_window - 1 context_mask = torch.tril(torch.ones_like(mask, dtype=torch.bool), diagonal=diagonal) # Recent changes in PyTorch prevent mutations on tensors converted with aten::_to_copy # See https://github.com/pytorch/pytorch/issues/127571 if is_torchdynamo_compiling(): mask = mask.clone() mask.masked_fill_(context_mask, torch.finfo(dtype).min) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) @staticmethod def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = torch.tensor(1.0, dtype=dtype) - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) @staticmethod def _unmask_unattended( expanded_mask: torch.FloatTensor, min_dtype: float, ): # fmt: off """ Attend to all tokens in masked rows from the expanded attention mask, for example the relevant first rows when using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. Details: https://github.com/pytorch/pytorch/issues/110213 `expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len]. `attention_mask` is [bsz, src_seq_len]. The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias. For example, if `expanded_mask` is (e.g. here left-padding case) ``` [[[[0, 0, 0], [0, 0, 0], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]]] ``` then the modified `expanded_mask` will be ``` [[[[1, 1, 1], <-- modified [1, 1, 1], <-- modified [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[1, 1, 1], <-- modified [0, 1, 0], [0, 1, 1]]]] ``` """ # fmt: on if expanded_mask.dtype == torch.bool: raise ValueError( "AttentionMaskConverter._unmask_unattended expects a float `expanded_mask`, got a BoolTensor." ) return expanded_mask.mul(~torch.all(expanded_mask == min_dtype, dim=-1, keepdim=True)) @staticmethod def _ignore_causal_mask_sdpa( attention_mask: Optional[torch.Tensor], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int] = None, is_training: bool = False, ) -> bool: """ Detects whether the optional user-specified attention_mask & the automatically created causal mask can be ignored in case PyTorch's SDPA is used, rather relying on SDPA's `is_causal` argument. In case no token is masked in the `attention_mask` argument, if `query_length == 1` or `key_value_length == query_length`, we rather rely on SDPA `is_causal` argument to use causal/non-causal masks, allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed). 
""" _, query_length = inputs_embeds.shape[0], inputs_embeds.shape[1] key_value_length = query_length + past_key_values_length is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy) or is_torchdynamo_compiling() ignore_causal_mask = False if attention_mask is None: # TODO: When tracing with TorchDynamo with fullgraph=True, the model is recompiled depending on the input # shape, thus SDPA's `is_causal` argument is rightfully updated # (see https://gist.github.com/fxmarty/1313f39037fc1c112508989628c57363). However, when using # `torch.export` or `torch.onnx.dynamo_export`, we must pass an example input, and `is_causal` behavior is # hard-coded. If a user exports a model with q_len > 1, the exported model will hard-code `is_causal=True` # which is in general wrong (see https://github.com/pytorch/pytorch/issues/108108). # Thus, we only set `ignore_causal_mask = True` if the model is set to training. # # Besides, jit.trace can not handle the `q_len > 1` condition for `is_causal` # ("TypeError: scaled_dot_product_attention(): argument 'is_causal' must be bool, not Tensor"). if ( (is_training or not is_tracing) and (query_length == 1 or key_value_length == query_length) and (sliding_window is None or key_value_length < sliding_window) ): ignore_causal_mask = True elif sliding_window is None or key_value_length < sliding_window: if len(attention_mask.shape) == 4: return False elif not is_tracing and torch.all(attention_mask == 1): if query_length == 1 or key_value_length == query_length: # For query_length == 1, causal attention and bi-directional attention are the same. ignore_causal_mask = True # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally ignore # the attention mask, as SDPA causal mask generation may be wrong. We will set `is_causal=False` in # SDPA and rely on Transformers attention_mask instead, hence not setting it to None here. # Reference: https://github.com/pytorch/pytorch/issues/108108 # TODO: maybe revisit this with https://github.com/pytorch/pytorch/pull/114823 in PyTorch 2.3. return ignore_causal_mask def _prepare_4d_causal_attention_mask( attention_mask: Optional[torch.Tensor], input_shape: Union[torch.Size, tuple, list], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int] = None, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)` Args: attention_mask (`torch.Tensor` or `None`): A 2D attention mask of shape `(batch_size, key_value_length)` input_shape (`tuple(int)` or `list(int)` or `torch.Size`): The input shape should be a tuple that defines `(batch_size, query_length)`. inputs_embeds (`torch.Tensor`): The embedded inputs as a torch Tensor. past_key_values_length (`int`): The length of the key value cache. sliding_window (`int`, *optional*): If the model uses windowed attention, a sliding window should be passed. 
""" attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) key_value_length = input_shape[-1] + past_key_values_length # 4d mask is passed through the layers if attention_mask is not None and len(attention_mask.shape) == 2: attention_mask = attn_mask_converter.to_4d( attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype ) elif attention_mask is not None and len(attention_mask.shape) == 4: expected_shape = (input_shape[0], 1, input_shape[1], key_value_length) if tuple(attention_mask.shape) != expected_shape: raise ValueError( f"Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}." ) else: # if the 4D mask has correct shape - invert it and fill with negative infinity inverted_mask = 1.0 - attention_mask attention_mask = inverted_mask.masked_fill( inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min ) else: attention_mask = attn_mask_converter.to_causal_4d( input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device ) return attention_mask # Adapted from _prepare_4d_causal_attention_mask def _prepare_4d_causal_attention_mask_for_sdpa( attention_mask: Optional[torch.Tensor], input_shape: Union[torch.Size, tuple, list], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int] = None, ): """ Prepares the correct `attn_mask` argument to be used by `torch.nn.functional.scaled_dot_product_attention`. In case no token is masked in the `attention_mask` argument, we simply set it to `None` for the cases `query_length == 1` and `key_value_length == query_length`, and rely instead on SDPA `is_causal` argument to use causal/non-causal masks, allowing to dispatch to the flash attention kernel (that can otherwise not be used if a custom `attn_mask` is passed). """ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) key_value_length = input_shape[-1] + past_key_values_length # torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1` # used as an SDPA argument. We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing. # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400). is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy) or is_torchdynamo_compiling() ignore_causal_mask = AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask=attention_mask, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, sliding_window=sliding_window, ) if ignore_causal_mask: expanded_4d_mask = None elif attention_mask is None: expanded_4d_mask = attn_mask_converter.to_causal_4d( input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device ) else: if attention_mask.dim() == 4: expanded_4d_mask = attention_mask else: expanded_4d_mask = attn_mask_converter.to_4d( attention_mask, input_shape[-1], dtype=inputs_embeds.dtype, key_value_length=key_value_length, ) # Attend to all tokens in masked rows from the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
# Details: https://github.com/pytorch/pytorch/issues/110213 if not is_tracing and expanded_4d_mask.device.type == "cuda": expanded_4d_mask = AttentionMaskConverter._unmask_unattended( expanded_4d_mask, min_dtype=torch.finfo(inputs_embeds.dtype).min ) return expanded_4d_mask def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)` Args: mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` dtype (`torch.dtype`): The torch dtype the created mask shall have. tgt_len (`int`): The target length or query length the created mask shall have. """ return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) def _prepare_4d_attention_mask_for_sdpa(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)` Args: mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` dtype (`torch.dtype`): The torch dtype the created mask shall have. tgt_len (`int`): The target length or query length the created mask shall have. """ _, key_value_length = mask.shape tgt_len = tgt_len if tgt_len is not None else key_value_length is_tracing = torch.jit.is_tracing() or isinstance(mask, torch.fx.Proxy) or is_torchdynamo_compiling() # torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture data-dependent controlflows. if not is_tracing and torch.all(mask == 1): return None else: return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) def _create_4d_causal_attention_mask( input_shape: Union[torch.Size, tuple, list], dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None, ) -> Optional[torch.Tensor]: """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` Args: input_shape (`tuple(int)` or `list(int)` or `torch.Size`): The input shape should be a tuple that defines `(batch_size, query_length)`. dtype (`torch.dtype`): The torch dtype the created mask shall have. device (`int`): The torch device the created mask shall have. sliding_window (`int`, *optional*): If the model uses windowed attention, a sliding window should be passed. """ attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) key_value_length = past_key_values_length + input_shape[-1] attention_mask = attn_mask_converter.to_causal_4d( input_shape[0], input_shape[-1], key_value_length, dtype=dtype, device=device ) return attention_mask
transformers/src/transformers/modeling_attn_mask_utils.py/0
{ "file_path": "transformers/src/transformers/modeling_attn_mask_utils.py", "repo_id": "transformers", "token_count": 9240 }
407
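For a concrete picture of what the helpers above produce, here is a small usage sketch of `_prepare_4d_causal_attention_mask` (shapes and values are illustrative): a 2D padding mask over the full key/value length is combined with the causal triangle into a `(batch, 1, query_length, key_value_length)` float mask of zeros and dtype-minimum values.

```python
import torch
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask

batch, query_len, past_len, hidden = 1, 3, 2, 8
inputs_embeds = torch.zeros(batch, query_len, hidden)

# 2D padding mask over past + current tokens; position 0 is padding.
attention_mask_2d = torch.tensor([[0, 1, 1, 1, 1]])

mask_4d = _prepare_4d_causal_attention_mask(
    attention_mask_2d, (batch, query_len), inputs_embeds, past_len
)
print(mask_4d.shape)  # torch.Size([1, 1, 3, 5])
# Entries are 0.0 where attention is allowed and torch.finfo(dtype).min where
# the position is masked, either by the causal triangle or by the padding mask.
```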
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import os import re from typing import Optional import torch from huggingface_hub import snapshot_download from safetensors import safe_open from transformers import ( Aimv2Config, Aimv2Model, Aimv2VisionConfig, Aimv2VisionModel, AutoImageProcessor, AutoProcessor, ) ORIGINAL_TO_CONVERTED_KEY_MAPPING_VISION_MODEL = { # Embeddings r"preprocessor.patchifier.proj": r"embeddings.patch_embed", r"preprocessor.pos_embed": r"embeddings.position_embedding.weight", r"preprocessor.patchifier.norm.weight": r"embeddings.rms_norm.weight", # Encoder Layers r"trunk.blocks.(\d+).attn.qkv": r"encoder.layers.\1.attention.qkv", r"trunk.blocks.(\d+).attn.proj": r"encoder.layers.\1.attention.out_proj", r"trunk.blocks.(\d+).mlp.fc1": r"encoder.layers.\1.ffn.gate_proj", r"trunk.blocks.(\d+).mlp.fc2": r"encoder.layers.\1.ffn.down_proj", r"trunk.blocks.(\d+).mlp.fc3": r"encoder.layers.\1.ffn.up_proj", # Normalization Layers r"trunk.blocks.(\d+).norm_1": r"encoder.layers.\1.rms_norm1", r"trunk.blocks.(\d+).norm_2": r"encoder.layers.\1.rms_norm2", # Final Norm r"trunk.post_trunk_norm": r"rms_norm", } ORIGINAL_TO_CONVERTED_KEY_MAPPING = { # Vision Embeddings r"image_encoder.preprocessor.patchifier.proj": r"vision_model.embeddings.patch_embed", r"image_encoder.preprocessor.pos_embed": r"vision_model.embeddings.position_embedding.weight", r"image_encoder.preprocessor.patchifier.norm.weight": r"vision_model.embeddings.rms_norm.weight", # Vision Encoder Layers r"image_encoder.trunk.blocks.(\d+).attn.qkv": r"vision_model.encoder.layers.\1.attention.qkv", r"image_encoder.trunk.blocks.(\d+).attn.proj": r"vision_model.encoder.layers.\1.attention.out_proj", r"image_encoder.trunk.blocks.(\d+).mlp.fc1": r"vision_model.encoder.layers.\1.ffn.gate_proj", r"image_encoder.trunk.blocks.(\d+).mlp.fc2": r"vision_model.encoder.layers.\1.ffn.down_proj", r"image_encoder.trunk.blocks.(\d+).mlp.fc3": r"vision_model.encoder.layers.\1.ffn.up_proj", # Normalization Layers r"image_encoder.trunk.blocks.(\d+).norm_1": r"vision_model.encoder.layers.\1.rms_norm1", r"image_encoder.trunk.blocks.(\d+).norm_2": r"vision_model.encoder.layers.\1.rms_norm2", r"image_encoder.trunk.post_trunk_norm": r"vision_model.rms_norm", r"image_projector": r"visual_projection", # Vision Head r"image_encoder.head.cls_token": r"vision_model.head.cls_token", r"image_encoder.head.k": r"vision_model.head.k_proj", r"image_encoder.head.v": r"vision_model.head.v_proj", r"image_encoder.head.linear": r"vision_model.head.output_proj", # Text Embeddings r"text_encoder.preprocessor.text_embedding.weight": r"text_model.embeddings.token_embedding.weight", r"text_encoder.preprocessor.positional_embedding": r"text_model.embeddings.position_embedding.weight", # Text Encoder Layers r"text_encoder.trunk.blocks.(\d+).attn.qkv": r"text_model.encoder.layers.\1.attention.qkv", r"text_encoder.trunk.blocks.(\d+).attn.proj": 
r"text_model.encoder.layers.\1.attention.out_proj", r"text_encoder.trunk.blocks.(\d+).mlp.fc1": r"text_model.encoder.layers.\1.ffn.gate_proj", r"text_encoder.trunk.blocks.(\d+).mlp.fc2": r"text_model.encoder.layers.\1.ffn.down_proj", r"text_encoder.trunk.blocks.(\d+).mlp.fc3": r"text_model.encoder.layers.\1.ffn.up_proj", # Text Normalization Layers r"text_encoder.trunk.blocks.(\d+).norm_1": r"text_model.encoder.layers.\1.rms_norm1", r"text_encoder.trunk.blocks.(\d+).norm_2": r"text_model.encoder.layers.\1.rms_norm2", r"text_encoder.trunk.post_trunk_norm": r"text_model.rms_norm", r"text_projector": r"text_projection", r"log_logit_scale": r"logit_scale", } def load_original_state_dict(model_id: str, revision: Optional[str] = None) -> dict[str, torch.Tensor]: # Download only the model.safetensors file directory_path = snapshot_download( repo_id=model_id, revision=revision, allow_patterns=["model.safetensors"], ) original_state_dict = {} safetensor_path = f"{directory_path}/model.safetensors" with safe_open(safetensor_path, framework="pt", device="cpu") as f: for key in f.keys(): original_state_dict[key] = f.get_tensor(key) return original_state_dict def convert_old_keys_to_new_keys(state_dict_keys: dict, ORIGINAL_TO_CONVERTED_KEY_MAPPING: dict): """Converts state dict keys from the old format to the new format.""" output_dict = {} if state_dict_keys is not None: old_text = "\n".join(state_dict_keys) new_text = old_text for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): if replacement is None: new_text = re.sub(pattern, "", new_text) # an empty line continue new_text = re.sub(pattern, replacement, new_text) output_dict = dict(zip(old_text.split("\n"), new_text.split("\n"))) return output_dict def split_qkv_tensor(key, tensor): """Splits a qkv tensor into separate q, k, v tensors and updates the key accordingly.""" new_keys = ["q_proj", "k_proj", "v_proj"] split_size = tensor.shape[0] // 3 split_tensors = torch.split(tensor, split_size, dim=0) return {key.replace("qkv", new_key): split_tensors[i] for i, new_key in enumerate(new_keys)} def get_model_config_mapping(model_id: str): """Determines the correct model, config, and key mappings based on the checkpoint name.""" if model_id == "apple/aimv2-large-patch14-224-lit": return Aimv2Model, Aimv2Config, ORIGINAL_TO_CONVERTED_KEY_MAPPING else: return Aimv2VisionModel, Aimv2VisionConfig, ORIGINAL_TO_CONVERTED_KEY_MAPPING_VISION_MODEL def write_model( hf_repo_id: str, output_dir: str, safe_serialization: bool = True, ): """ Converts a model checkpoint to Hugging Face format and saves it. Args: hf_repo_id (str): The Hugging Face repo ID to load from. output_dir (str): The directory to save the converted model. safe_serialization (bool): Whether to use safe serialization. Returns: model: The reloaded Hugging Face model. """ os.makedirs(output_dir, exist_ok=True) # Get the appropriate model, config, and key mapping model_class, config_class, key_mapping = get_model_config_mapping(hf_repo_id) # Load config and original state dict config = config_class.from_pretrained(hf_repo_id) # Checkpoint `apple/aimv2-large-patch14-224-lit` uses AttentionPoolingHead hence set the required attr in config. 
if hf_repo_id != "apple/aimv2-large-patch14-224-lit": config.use_head = False if hf_repo_id == "apple/aimv2-large-patch14-native": config.is_native = True original_state_dict = load_original_state_dict(hf_repo_id) print("Converting model...") state_dict = {} result = convert_old_keys_to_new_keys(original_state_dict, key_mapping) all_keys = list(original_state_dict.keys()) for key in all_keys: value = original_state_dict[key] new_key = result.pop(key) if "qkv" in new_key: qkv_state_dict = split_qkv_tensor(new_key, value) state_dict.update(qkv_state_dict) else: state_dict[new_key] = value # Check if position embeddings exist before squeezing if new_key.endswith("position_embedding.weight"): state_dict[new_key] = value.squeeze(0) print(f"Loading the checkpoint in a {model_class.__name__}.") model = model_class(config) model.load_state_dict(state_dict, strict=True, assign=True) print("Checkpoint loaded successfully.") print("Saving the model.") model.save_pretrained(output_dir, safe_serialization=safe_serialization) del state_dict, model gc.collect() print("Reloading the model to check if it's saved correctly.") model = model_class.from_pretrained(output_dir, device_map="auto") print("Model reloaded successfully.") return model def write_image_processor(hf_repo_id: str, output_dir: str): if hf_repo_id == "apple/aimv2-large-patch14-224-lit": image_processor = AutoProcessor.from_pretrained(hf_repo_id, use_fast=True) else: image_processor = AutoImageProcessor.from_pretrained(hf_repo_id, use_fast=True) image_processor.save_pretrained(output_dir) return image_processor def main(): parser = argparse.ArgumentParser() parser.add_argument( "--hf_repo_id", default="apple/aimv2-large-patch14-224", help="Location of official weights from apple on HF", ) parser.add_argument( "--output_dir", default="aimv2_model", help="Location to write the converted model and processor", ) parser.add_argument( "--safe_serialization", default=True, type=bool, help="Whether or not to save using `safetensors`." ) parser.add_argument( "--push_to_hub", action=argparse.BooleanOptionalAction, help="Whether or not to push the converted model to the huggingface hub.", ) parser.add_argument( "--hub_repo_id", default=None, help="Huggingface hub repo to write the converted model and processor", ) args = parser.parse_args() model = write_model( hf_repo_id=args.hf_repo_id, output_dir=args.output_dir, safe_serialization=args.safe_serialization, ) image_processor = write_image_processor( hf_repo_id=args.hf_repo_id, output_dir=args.output_dir, ) if args.push_to_hub: print("Pushing to hub...") model.push_to_hub(args.hub_repo_id) image_processor.push_to_hub(args.hub_repo_id) if __name__ == "__main__": main()
transformers/src/transformers/models/aimv2/convert_aimv2_original_pytorch_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/aimv2/convert_aimv2_original_pytorch_to_hf.py", "repo_id": "transformers", "token_count": 4382 }
408
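The `split_qkv_tensor` helper above turns one fused projection into three per-projection entries. A quick sketch on a dummy tensor (the layer name in the keys is hypothetical) shows the resulting keys and shapes:

```python
import torch

hidden = 4  # illustrative hidden size
fused_qkv = torch.randn(3 * hidden, hidden)  # fused (q|k|v) projection weight

new_keys = ["q_proj", "k_proj", "v_proj"]
split_size = fused_qkv.shape[0] // 3
parts = torch.split(fused_qkv, split_size, dim=0)

converted = {
    f"vision_model.encoder.layers.0.attention.{k}.weight": t
    for k, t in zip(new_keys, parts)
}
for key, tensor in converted.items():
    print(key, tuple(tensor.shape))  # each projection weight is (hidden, hidden)
```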
# coding=utf-8 # Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Audio Spectogram Transformer (AST) model configuration""" from typing import Any from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ASTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the AST [MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. frequency_stride (`int`, *optional*, defaults to 10): Frequency stride to use when patchifying the spectrograms. time_stride (`int`, *optional*, defaults to 10): Temporal stride to use when patchifying the spectrograms. max_length (`int`, *optional*, defaults to 1024): Temporal dimension of the spectrograms. num_mel_bins (`int`, *optional*, defaults to 128): Frequency dimension of the spectrograms (number of Mel-frequency bins). 
Example: ```python >>> from transformers import ASTConfig, ASTModel >>> # Initializing a AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration >>> configuration = ASTConfig() >>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration >>> model = ASTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "audio-spectrogram-transformer" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.patch_size = patch_size self.qkv_bias = qkv_bias self.frequency_stride = frequency_stride self.time_stride = time_stride self.max_length = max_length self.num_mel_bins = num_mel_bins # Overwritten from the parent class: AST is not compatible with `generate`, but has a config parameter sharing the # same name (`max_length`). Sharing the same name triggers checks regarding the config -> generation_config # generative parameters deprecation cycle, overwriting this function prevents this from happening. def _get_non_default_generation_parameters(self) -> dict[str, Any]: return {} __all__ = ["ASTConfig"]
transformers/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py/0
{ "file_path": "transformers/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py", "repo_id": "transformers", "token_count": 2147 }
409
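The spectrogram-related defaults above (`patch_size=16`, `frequency_stride=10`, `time_stride=10`, `num_mel_bins=128`, `max_length=1024`) determine how many patches the encoder sees. The sketch below recomputes the patch grid with the usual strided-patch formula; it mirrors how the AST patch embedding is expected to behave, but the numbers are derived here rather than quoted from the modeling code.

```python
patch_size, frequency_stride, time_stride = 16, 10, 10
num_mel_bins, max_length = 128, 1024

frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 12
time_out = (max_length - patch_size) // time_stride + 1              # 101
num_patches = frequency_out * time_out                               # 1212

print(frequency_out, time_out, num_patches)
```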
# coding=utf-8 # Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BARK model.""" import math import warnings from typing import Optional, Union import numpy as np import torch from torch import nn from torch.nn import functional as F from ...cache_utils import DynamicCache from ...generation import GenerationMixin from ...generation.logits_process import ( AlternatingCodebooksLogitsProcessor, BarkEosPrioritizerLogitsProcessor, SuppressTokensLogitsProcessor, ) from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import CausalLMOutputWithPast, MaskedLMOutput from ...modeling_utils import PreTrainedModel, get_parameter_device from ...utils import ( auto_docstring, is_accelerate_available, is_torch_accelerator_available, logging, ) from ..auto import AutoModel from .configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, BarkSubModelConfig, ) from .generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkSemanticGenerationConfig, ) if is_flash_attn_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) class BarkSelfAttention(nn.Module): # adapted from GPTNeoSelfAttention and Bark code # BarkSelfAttention can have two attention type, i.e full attention or causal attention def __init__(self, config, is_causal=False, layer_idx=None): super().__init__() # regularization self.dropout = config.dropout self.attn_dropout = nn.Dropout(config.dropout) self.resid_dropout = nn.Dropout(config.dropout) self.embed_dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.embed_dim // self.num_heads if config.hidden_size % config.num_heads != 0: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) # key, query, value projections for all heads, but in a batch self.att_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.bias) # output projection self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.bias) self.is_causal = is_causal self.layer_idx = layer_idx if is_causal: block_size = config.block_size bias = torch.tril(torch.ones((block_size, block_size), dtype=bool)).view(1, 1, block_size, block_size) self.register_buffer("bias", bias) # Copied from transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoSelfAttention._split_heads def _split_heads(self, tensor, num_heads, attn_head_size): """ Splits hidden_size dim into attn_head_size and num_heads """ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) def _merge_heads(self, tensor, num_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden_size """ # re-assemble all head outputs side by side # (batch, num_heads, seq_len, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) tensor = tensor.transpose(1, 2).contiguous() tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): # unlike GPTNeo's SelfAttention, divide by the square root of the dimension of the query and the key attn_weights = torch.matmul(query, key.transpose(-1, -2)) * (1.0 / math.sqrt(self.head_dim)) if self.is_causal: query_length, key_length = query.size(-2), key.size(-2) # fill the upper left part of the attention weights with inf attn_weights = attn_weights.masked_fill( self.bias[:, :, key_length - query_length : key_length, :key_length] == 0, torch.finfo(attn_weights.dtype).min, ) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.to(value.dtype) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask # (batch, num_heads, seq_len, seq_len) x (batch, num_heads, seq_len, attn_head_size) # -> (batch, num_heads, seq_len, attn_head_size) attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def forward( self, hidden_states, attention_mask=None, past_key_values=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None, ): # calculate query, key, values for all heads in batch and move head forward to be the batch dim query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if past_key_values is not None: key, value = past_key_values.update(key, value, self.layer_idx, {"cache_position": cache_position}) attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) return attn_output, attn_weights class BarkSelfFlashAttention2(BarkSelfAttention): """ Bark flash attention module. This module inherits from `BarkSelfAttention` as the weights of the module stays untouched. 
The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() def _split_heads(self, tensor, num_heads, attn_head_size): """ Splits hidden_size dim into attn_head_size and num_heads """ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim - (batch, seq_length, head, head_features) return tensor def _merge_heads(self, tensor, num_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden_size """ # re-assemble all head outputs side by side # (batch, seq_len, num_heads, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) return tensor def forward( self, hidden_states, attention_mask=None, past_key_values=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None, ): batch_size, query_len, _ = hidden_states.size() # calculate query, key, values for all heads in batch and move head forward to be the batch dim query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if past_key_values is not None: key, value = past_key_values.update(key, value, self.layer_idx, {"cache_position": cache_position}) attn_output = _flash_attention_forward( query, key, value, attention_mask, query_len, dropout=self.dropout if self.training else 0.0, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal, ) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) return attn_output, None BARK_ATTENTION_CLASSES = { "eager": BarkSelfAttention, "flash_attention_2": BarkSelfFlashAttention2, } class BarkMLP(nn.Module): def __init__(self, config): super().__init__() self.in_proj = nn.Linear(config.hidden_size, 4 * config.hidden_size, bias=config.bias) self.out_proj = nn.Linear(4 * config.hidden_size, config.hidden_size, bias=config.bias) self.dropout = nn.Dropout(config.dropout) self.gelu = nn.GELU() def forward(self, hidden_states): hidden_states = self.in_proj(hidden_states) hidden_states = self.gelu(hidden_states) hidden_states = self.out_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class BarkBlock(GradientCheckpointingLayer): def __init__(self, config, is_causal=False, layer_idx=None): super().__init__() if is_causal: # if causal, the layerNorm bias is optional to stick with Bark choice of leaving optional bias # in 
AutoRegressive models (corresponding to the "Text" and the "Coarse" modules) self.layernorm_1 = nn.LayerNorm(config.hidden_size, bias=config.bias) self.layernorm_2 = nn.LayerNorm(config.hidden_size, bias=config.bias) else: self.layernorm_1 = nn.LayerNorm(config.hidden_size) self.layernorm_2 = nn.LayerNorm(config.hidden_size) self.attn = BARK_ATTENTION_CLASSES[config._attn_implementation]( config, is_causal=is_causal, layer_idx=layer_idx ) self.mlp = BarkMLP(config) def forward( self, hidden_states, past_key_values=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None, ): intermediary_hidden_states = self.layernorm_1(hidden_states) attn_outputs = self.attn( intermediary_hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) attn_output = attn_outputs[0] # output_attn: output, present_key_values, (attn_weights) outputs = attn_outputs[1:] intermediary_hidden_states = hidden_states + attn_output intermediary_hidden_states = intermediary_hidden_states + self.mlp( self.layernorm_2(intermediary_hidden_states) ) return (intermediary_hidden_states,) + outputs @auto_docstring class BarkPreTrainedModel(PreTrainedModel): config: BarkConfig supports_gradient_checkpointing = False _supports_flash_attn = True def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear,)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). 
""" # if has _hf_hook, has been offloaded so the device has to be found in the hook if not hasattr(self, "_hf_hook"): return get_parameter_device(self) for module in self.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return get_parameter_device(self) # GPT2-like autoregressive model class BarkCausalModel(BarkPreTrainedModel, GenerationMixin): config: BarkSubModelConfig def __init__(self, config): super().__init__(config) self.config = config # initialize as an autoregressive GPT-like model self.input_embeds_layer = nn.Embedding(config.input_vocab_size, config.hidden_size) self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) self.drop = nn.Dropout(config.dropout) self.layers = nn.ModuleList([BarkBlock(config, is_causal=True, layer_idx=i) for i in range(config.num_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_final = nn.LayerNorm(config.hidden_size, bias=config.bias) self.lm_head = nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): # NOTE: get_output_embeddings() must return None to prevent accidental weight tying. # See e.g. https://github.com/huggingface/transformers/pull/39339#discussion_r2219126400 return None def get_input_embeddings(self): return self.input_embeds_layer def set_input_embeddings(self, new_embeddings): self.input_embeds_layer = new_embeddings def prepare_inputs_for_generation( self, input_ids, attention_mask=None, input_embeds=None, past_key_values=None, position_ids=None, use_cache=None, cache_position=None, **kwargs, ): # Overwritten -- bark uses `input_embeds` not `inputS_embeds` model_inputs = super().prepare_inputs_for_generation( input_ids, attention_mask=attention_mask, inputs_embeds=input_embeds, past_key_values=past_key_values, position_ids=position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs, ) model_inputs["input_embeds"] = model_inputs.pop("inputs_embeds", None) return model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[torch.FloatTensor]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, input_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple[torch.Tensor], CausalLMOutputWithPast]: r""" input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. Here, due to `Bark` particularities, if `past_key_values` is used, `input_embeds` will be ignored and you have to use `input_ids`. If `past_key_values` is not used and `use_cache` is set to `True`, `input_embeds` is used in priority instead of `input_ids`. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict loss = None if labels is not None: raise NotImplementedError( "Training is not implemented yet for Bark - ensure you do not pass `labels` to the model." ) # Verify if input_embeds already exists # then compute embeddings. if input_ids is not None and input_embeds is not None: raise ValueError("You cannot specify both input_ids and input_embeds at the same time") elif input_embeds is not None and past_key_values is None: # we want to return the input_embeds in priority so that it is in line with a weird hack # of Bark which concatenate two bits of the input_embeds on the first forward pass of the semantic model pass elif input_ids is not None: input_embeds = self.input_embeds_layer(input_ids) # token embeddings of shape (b, t, n_embd) elif input_embeds is not None: pass else: raise ValueError("You have to specify either input_ids or input_embeds") input_shape = input_embeds.size()[:-1] batch_size = input_embeds.shape[0] seq_length = input_shape[-1] device = input_ids.device if input_ids is not None else input_embeds.device if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if use_cache and past_key_values is None: past_key_values = DynamicCache() if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `DynamicCache` instead, e.g. " "`past_key_values=DynamicCache.from_legacy_cache(past_key_values)`." ) past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if position_ids is None: position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) # Attention mask. 
if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: attention_mask = attention_mask.view(batch_size, -1) # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length] # from_seq_length is 1 to easily broadcast attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x num_heads x N x N # head_mask has shape num_layers x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.num_layers) hidden_states = self.drop(input_embeds + position_embeds) output_shape = input_shape + (hidden_states.size(-1),) all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) hidden_states = self.layernorm_final(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.lm_head(hidden_states) if not return_dict: return tuple( v for v in [None, logits, past_key_values, all_hidden_states, all_self_attentions] if v is not None ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring( custom_intro=""" Bark semantic (or text) model. It shares the same architecture as the coarse model. It is a GPT-2 like autoregressive model with a language modeling head on top. """ ) class BarkSemanticModel(BarkCausalModel): base_model_prefix = "semantic" config: BarkSemanticConfig def generate( self, input_ids: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, history_prompt: Optional[dict[str, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> torch.LongTensor: """ Generates text semantic tokens from an input prompt and an additional optional `Bark` speaker prompt. Args: input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): Input ids, i.e tokenized input sentences. Will be truncated up to semantic_generation_config.max_input_semantic_length tokens. Note that the output audios will be as long as the longest generation among the batch. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. history_prompt (`Optional[dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. attention_mask (`Optional[torch.Tensor]`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Returns: torch.LongTensor: Output semantic tokens. 
""" if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") batch_size = input_ids.shape[0] max_input_semantic_length = semantic_generation_config.max_input_semantic_length input_ids = input_ids + semantic_generation_config.text_encoding_offset if attention_mask is not None: input_ids = input_ids.masked_fill((1 - attention_mask).bool(), semantic_generation_config.text_pad_token) if history_prompt is not None: semantic_history = history_prompt["semantic_prompt"][-max_input_semantic_length:] semantic_history = nn.functional.pad( semantic_history, (0, max_input_semantic_length - len(semantic_history)), value=semantic_generation_config.semantic_pad_token, mode="constant", ) else: semantic_history = torch.tensor( [semantic_generation_config.semantic_pad_token] * max_input_semantic_length, dtype=torch.int ).to(self.device) semantic_history = torch.repeat_interleave(semantic_history[None], batch_size, dim=0) infer_array = torch.tensor( [[semantic_generation_config.semantic_infer_token]] * batch_size, dtype=torch.int ).to(self.device) input_embeds = torch.cat( [ self.input_embeds_layer(input_ids[:, :max_input_semantic_length]) + self.input_embeds_layer(semantic_history[:, : max_input_semantic_length + 1]), self.input_embeds_layer(infer_array), ], dim=1, ) tokens_to_suppress = list( range(semantic_generation_config.semantic_vocab_size, semantic_generation_config.semantic_pad_token) ) tokens_to_suppress.extend( list(range(semantic_generation_config.semantic_pad_token + 1, self.config.output_vocab_size)) ) suppress_tokens_logits_processor = SuppressTokensLogitsProcessor(tokens_to_suppress, device=input_ids.device) min_eos_p = kwargs.get("min_eos_p", semantic_generation_config.min_eos_p) early_stopping_logits_processor = BarkEosPrioritizerLogitsProcessor( eos_token_id=semantic_generation_config.eos_token_id, min_eos_p=min_eos_p, device=input_ids.device ) # pass input_ids in order to stay consistent with the transformers generate method even though it is not used # (except to get the input seq_len - that's why we keep the first 257 tokens) semantic_output = super().generate( torch.ones((batch_size, max_input_semantic_length + 1), dtype=torch.int, device=self.device), input_embeds=input_embeds, logits_processor=[suppress_tokens_logits_processor, early_stopping_logits_processor], generation_config=semantic_generation_config, **kwargs, ) # size: 10048 # take the generated semantic tokens semantic_output = semantic_output[:, max_input_semantic_length + 1 :] return semantic_output @auto_docstring( custom_intro=""" Bark coarse acoustics model. It shares the same architecture as the semantic (or text) model. It is a GPT-2 like autoregressive model with a language modeling head on top. """ ) class BarkCoarseModel(BarkCausalModel): base_model_prefix = "coarse_acoustics" config: BarkCoarseConfig def preprocess_histories( self, max_coarse_history: int, semantic_to_coarse_ratio: int, batch_size: int, semantic_generation_config: int, codebook_size: int, history_prompt: Optional[dict[str, torch.Tensor]] = None, ): """ Preprocess the optional `Bark` speaker prompts before `self.generate`. Args: max_coarse_history (`int`): Maximum size of coarse tokens used. semantic_to_coarse_ratio (`int`): Ratio of semantic to coarse frequency batch_size (`int`): Batch size, i.e the number of samples. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. codebook_size (`int`): Codebook channel size, i.e. 
the size of the output vocabulary per codebook channel. history_prompt (`Optional[dict[str,torch.Tensor]]`): Optional `Bark` speaker prompt. Returns: Returns: `tuple(torch.FloatTensor)`: - **x_semantic_history** (`torch.FloatTensor` -- Processed semantic speaker prompt. - **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt. """ if history_prompt is not None: x_semantic_history = torch.repeat_interleave(history_prompt["semantic_prompt"][None], batch_size, dim=0) # clone to avoid modifying history_prompt.coarse_prompt x_coarse_history = history_prompt["coarse_prompt"].clone() # offset x_coarse_history if codebook_size is not None: for n in range(1, x_coarse_history.shape[0]): # offset x_coarse_history[n, :] += codebook_size * n # flatten x_coarse_history x_coarse_history = torch.transpose(x_coarse_history, 0, 1).reshape(-1) x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0) # e.g: after SEMANTIC_VOCAB_SIZE (10000), 1024 tokens dedicated to first codebook, 1024 next tokens # dedicated to second codebook. max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) # trim histories correctly n_semantic_hist_provided = min( [ max_semantic_history, x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio)), ] ) n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int() x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int() # bit of a hack for time alignment (sounds better) - from Bark original implementation x_coarse_history = x_coarse_history[:, :-2] else: # shape: (batch_size, 0) x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int, device=self.device) x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int, device=self.device) return x_semantic_history, x_coarse_history def generate( self, semantic_output: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, coarse_generation_config: BarkCoarseGenerationConfig = None, codebook_size: int = 1024, history_prompt: Optional[dict[str, torch.Tensor]] = None, return_output_lengths: Optional[bool] = None, **kwargs, ) -> Union[torch.LongTensor, tuple[torch.LongTensor, torch.LongTensor]]: """ Generates coarse acoustics tokens from input text semantic tokens and an additional optional `Bark` speaker prompt. Args: semantic_output (`torch.Tensor` of shape (batch_size, seq_len), *optional*): Input text semantic ids, i.e the output of `BarkSemanticModel.generate`. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. coarse_generation_config (`BarkCoarseGenerationConfig`): Generation config indicating how to generate the coarse tokens. codebook_size (`int`, *optional*, defaults to 1024): Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. return_output_lengths (`bool`, *optional*): Whether or not to return the output lengths. Useful when batching. Returns: By default: torch.LongTensor: Output coarse acoustics tokens. 
If `return_output_lengths=True`: `Tuple(torch.Tensor, torch.Tensor): The output coarse acoustics tokens, and the length of each sample of the batch. """ if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") if coarse_generation_config is None: raise ValueError("`coarse_generation_config` has to be provided") max_coarse_input_length = coarse_generation_config.max_coarse_input_length max_coarse_history = coarse_generation_config.max_coarse_history sliding_window_len = coarse_generation_config.sliding_window_len # replace semantic_pad_token (eos_tok and pad_tok here) with coarse_semantic_pad_token i.e the pad_token # used in the next model semantic_output.masked_fill_( semantic_output == semantic_generation_config.semantic_pad_token, coarse_generation_config.coarse_semantic_pad_token, ) semantic_to_coarse_ratio = ( coarse_generation_config.coarse_rate_hz / semantic_generation_config.semantic_rate_hz * coarse_generation_config.n_coarse_codebooks ) max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) output_lengths = (semantic_output != coarse_generation_config.coarse_semantic_pad_token).sum(1) output_lengths = torch.floor( output_lengths * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks ) output_lengths = torch.round(output_lengths * coarse_generation_config.n_coarse_codebooks).int() max_generated_len = torch.max(output_lengths).item() batch_size = semantic_output.shape[0] x_semantic_history, x_coarse = self.preprocess_histories( history_prompt=history_prompt, max_coarse_history=max_coarse_history, semantic_to_coarse_ratio=semantic_to_coarse_ratio, batch_size=batch_size, semantic_generation_config=semantic_generation_config, codebook_size=codebook_size, ) base_semantic_idx = x_semantic_history.shape[1] semantic_output = torch.hstack([x_semantic_history, semantic_output]) n_window_steps = int(np.ceil(max_generated_len / sliding_window_len)) total_generated_len = 0 len_coarse_history = x_coarse.shape[1] for _ in range(n_window_steps): semantic_idx = base_semantic_idx + int(round(total_generated_len / semantic_to_coarse_ratio)) # pad from right side input_coarse = semantic_output[:, np.max([0, semantic_idx - max_semantic_history]) :] input_coarse = input_coarse[:, :max_coarse_input_length] input_coarse = F.pad( input_coarse, (0, max_coarse_input_length - input_coarse.shape[-1]), "constant", coarse_generation_config.coarse_semantic_pad_token, ) input_coarse = torch.hstack( [ input_coarse, torch.tensor([[coarse_generation_config.coarse_infer_token]] * batch_size, device=self.device), x_coarse[:, -max_coarse_history:], ] ) alternatingLogitsProcessor = AlternatingCodebooksLogitsProcessor( input_coarse.shape[1], semantic_generation_config.semantic_vocab_size, codebook_size, ) output_coarse = super().generate( input_coarse, logits_processor=[alternatingLogitsProcessor], max_new_tokens=min(sliding_window_len, max_generated_len - total_generated_len), generation_config=coarse_generation_config, **kwargs, ) input_coarse_len = input_coarse.shape[1] x_coarse = torch.hstack([x_coarse, output_coarse[:, input_coarse_len:]]) total_generated_len = x_coarse.shape[1] - len_coarse_history del output_coarse coarse_output = x_coarse[:, len_coarse_history:] if return_output_lengths: return coarse_output, output_lengths return coarse_output @auto_docstring( custom_intro=""" Bark fine acoustics model. 
It is a non-causal GPT-like model with `config.n_codes_total` embedding layers and language modeling heads, one for each codebook. """ ) class BarkFineModel(BarkPreTrainedModel): base_model_prefix = "fine_acoustics" config: BarkFineConfig main_input_name = "codebook_idx" def __init__(self, config): # non-causal gpt-like model with one embedding layer and one lm_head for each codebook of Encodec super().__init__(config) self.config = config # initialize a modified non causal GPT-like model # note that for there is one embedding layer and one lm_head for each codebook of Encodec self.input_embeds_layers = nn.ModuleList( [nn.Embedding(config.input_vocab_size, config.hidden_size) for _ in range(config.n_codes_total)] ) self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) self.drop = nn.Dropout(config.dropout) self.layers = nn.ModuleList( [BarkBlock(config, is_causal=False, layer_idx=i) for i in range(config.num_layers)] ) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.layernorm_final = nn.LayerNorm(config.hidden_size) self.lm_heads = nn.ModuleList( [ nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) for _ in range(config.n_codes_given, config.n_codes_total) ] ) self.gradient_checkpointing = False self.n_codes_total = config.n_codes_total # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): # one embedding layers for each codebook return self.input_embeds_layers def set_input_embeddings(self, new_embeddings): # one embedding layers for each codebook self.input_embeds_layers = new_embeddings def get_output_embeddings(self): # one lm_head for each codebook return self.lm_heads def set_output_embeddings(self, new_output_embeddings): # one lm_head for each codebook self.lm_heads = new_output_embeddings def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, mean_resizing=True): old_embeddings_list = self.get_input_embeddings() new_embeddings_list = nn.ModuleList( [ self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of, mean_resizing) for old_embeddings in old_embeddings_list ] ) self.set_input_embeddings(new_embeddings_list) new_num_tokens = new_embeddings_list[0].weight.shape[0] # if word embeddings are not tied, make sure that lm head is resized as well if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: old_lm_head_list = self.get_output_embeddings() new_lm_head_list = nn.ModuleList( [self._get_resized_lm_head(old_lm_head, new_num_tokens) for old_lm_head in old_lm_head_list] ) self.set_output_embeddings(new_lm_head_list) return self.get_input_embeddings() def resize_token_embeddings( self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True, ) -> nn.Embedding: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. pad_to_multiple_of (`int`, *optional*): If set will pad the embedding matrix to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc mean_resizing (`bool`): Whether to initialize the added embeddings from a multivariate normal distribution that has old embeddings' mean and covariance or to initialize them with a normal distribution that has a mean of zero and std equals `config.initializer_range`. Setting `mean_resizing` to `True` is useful when increasing the size of the embeddings of causal language models, where the generated tokens' probabilities won't be affected by the added embeddings because initializing the new embeddings with the old embeddings' mean will reduce the kl-divergence between the next token probability before and after adding the new embeddings. Refer to this article for more information: https://nlp.stanford.edu/~johnhew/vocab-expansion.html Return: `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. """ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) if new_num_tokens is None and pad_to_multiple_of is None: return model_embeds # Update base model and current model config self.config.output_vocab_size = model_embeds[0].weight.shape[0] self.config.vocab_size = model_embeds[0].weight.shape[0] self.output_vocab_size = model_embeds[0].weight.shape[0] self.vocab_size = model_embeds[0].weight.shape[0] # Tie weights again if needed self.tie_weights() return model_embeds def _tie_weights(self): if getattr(self.config, "tie_word_embeddings", True): self._tied_weights_keys = [] output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() for i in range(self.config.n_codes_total - self.config.n_codes_given): # self.input_embeds_layers[i + 1].weight = self.lm_heads[i].weight self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1]) self._tied_weights_keys.append(f"lm_heads.{i}.weight") def tie_weights(self): """ Tie the weights between the input embeddings list and the output embeddings list. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ for module in self.modules(): if hasattr(module, "_tie_weights"): module._tie_weights() @auto_docstring def forward( self, codebook_idx: int, # an additional idx corresponding to the id of the codebook that will be predicted input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, input_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], MaskedLMOutput]: r""" codebook_idx (`int`): Index of the codebook that will be predicted. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): NOT IMPLEMENTED YET. input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
If `past_key_values` is used, optionally only the last `input_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict loss = None if labels is not None: raise NotImplementedError("Training is not implemented yet") if codebook_idx == 0: raise ValueError("Cannot predict 0th codebook - 0th codebook should be predicted by the coarse model") if input_ids is not None and input_embeds is not None: raise ValueError("You cannot specify both input_ids and input_embeds at the same time") if input_ids is None and input_embeds is None: raise ValueError("You have to specify either input_ids or input_embeds") if input_ids is not None: # the input_embeddings are the sum of the j previous codebooks embeddings before # the current codebook_idx codebook # forward the GPT model itself input_embeds = [ input_embeds_layer(input_ids[:, :, i]).unsqueeze(-1) for i, input_embeds_layer in enumerate(self.input_embeds_layers) ] # token embeddings of shape (b, t, n_embd) input_embeds = torch.cat(input_embeds, dim=-1) input_embeds = input_embeds[:, :, :, : codebook_idx + 1].sum(dim=-1) input_shape = input_embeds.size()[:-1] batch_size = input_embeds.shape[0] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else input_embeds.device if position_ids is None: position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) # Attention mask. 
if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length] # from_seq_length is 1 to easily broadcast attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) head_mask = self.get_head_mask(head_mask, self.config.num_layers) hidden_states = self.drop(input_embeds + position_embeds) output_shape = input_shape + (hidden_states.size(-1),) all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], output_attentions=output_attentions, ) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) hidden_states = self.layernorm_final(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.lm_heads[codebook_idx - self.config.n_codes_given](hidden_states) if not return_dict: return tuple(v for v in [None, logits, all_hidden_states, all_self_attentions] if v is not None) return MaskedLMOutput( loss=loss, logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @torch.no_grad() def generate( self, coarse_output: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig = None, coarse_generation_config: BarkCoarseGenerationConfig = None, fine_generation_config: BarkFineGenerationConfig = None, codebook_size: int = 1024, history_prompt: Optional[dict[str, torch.Tensor]] = None, **kwargs, ) -> torch.LongTensor: """ Generates fine acoustics tokens from input coarse acoustics tokens and an additional optional `Bark` speaker prompt. Args: coarse_output (`torch.Tensor` of shape (batch_size, seq_len)): Input coarse acoustics ids, i.e the output of `BarkCoarseModel.generate`. semantic_generation_config (`BarkSemanticGenerationConfig`): Generation config indicating how to generate the semantic tokens. coarse_generation_config (`BarkCoarseGenerationConfig`): Generation config indicating how to generate the coarse tokens. fine_generation_config (`BarkFineGenerationConfig`): Generation config indicating how to generate the fine tokens. codebook_size (`int`, *optional*, defaults to 1024): Codebook channel size, i.e. the size of the output vocabulary per codebook channel. history_prompt (`Optional[dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. Returns: torch.LongTensor: Output fine acoustics tokens. 
""" if semantic_generation_config is None: raise ValueError("`semantic_generation_config` has to be provided") if coarse_generation_config is None: raise ValueError("`coarse_generation_config` has to be provided") if fine_generation_config is None: raise ValueError("`fine_generation_config` has to be provided") # since we don't really use GenerationConfig through the fine model (autoencoder) # and since only temperature is used from the classic GenerationConfig parameters # manually impose the kwargs priority over the generation config temperature = kwargs.get("temperature", fine_generation_config.temperature) max_fine_history_length = fine_generation_config.max_fine_history_length max_fine_input_length = fine_generation_config.max_fine_input_length # shape: (batch, n_coarse_codebooks * seq_len) # new_shape: (batch, seq_len, n_coarse_codebooks) coarse_output = coarse_output.view(coarse_output.shape[0], -1, coarse_generation_config.n_coarse_codebooks) # brings ids into the range [0, codebook_size -1] coarse_output = torch.remainder(coarse_output - semantic_generation_config.semantic_vocab_size, codebook_size) batch_size = coarse_output.shape[0] if history_prompt is not None: x_fine_history = torch.repeat_interleave(history_prompt["fine_prompt"].T[None], batch_size, dim=0) # transpose to get to shape (seq_len, n_fine_codebooks) else: x_fine_history = None n_coarse = coarse_generation_config.n_coarse_codebooks # pad the last 6th codebooks fine_input = F.pad( coarse_output, (0, fine_generation_config.n_fine_codebooks - n_coarse), "constant", codebook_size, ) # prepend history if available (max max_fine_history_length) if x_fine_history is not None: fine_input = torch.cat([x_fine_history[:, -max_fine_history_length:, :], fine_input], dim=1) # len of the fine_history that has been added to fine_input n_history = x_fine_history[:, -max_fine_history_length:, :].shape[1] else: n_history = 0 n_remove_from_end = 0 # need to pad if too short (since non-causal model) if fine_input.shape[1] < max_fine_input_length: n_remove_from_end = max_fine_input_length - fine_input.shape[1] fine_input = F.pad(fine_input, (0, 0, 0, n_remove_from_end), mode="constant", value=codebook_size) # we can be lazy about fractional loop and just keep overwriting codebooks. # seems that coarse_output.shape[1] - (max_fine_input_length - n_history) is equal to minus n_remove_from_end # So if we needed to pad because too short, n_loops is always 1 (because n_remove_from_end > 0) # If not, we loop over at least twice. 
n_loops = (coarse_output.shape[1] - (max_fine_input_length - n_history)) / max_fine_history_length n_loops = int(np.ceil(n_loops)) n_loops = max(0, n_loops) + 1 for n_outer in range(n_loops): start_idx = min([n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_input_length]) start_fill_idx = min( [n_history + n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_history_length] ) rel_start_fill_idx = start_fill_idx - start_idx input_buffer = fine_input[:, start_idx : start_idx + max_fine_input_length, :] for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): logits = self.forward(n_inner, input_buffer).logits if temperature is None or temperature == 1.0: relevant_logits = logits[:, rel_start_fill_idx:, :codebook_size] codebook_preds = torch.argmax(relevant_logits, -1) else: relevant_logits = logits[:, :, :codebook_size] / temperature # apply softmax probs = F.softmax(relevant_logits, dim=-1)[:, rel_start_fill_idx:max_fine_input_length] # reshape to 2D: (batch_size, seq_len, codebook_size) -> (batch_size*seq_len, codebook_size) probs = probs.reshape((-1, codebook_size)) # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len) codebook_preds = torch.multinomial(probs, num_samples=1).view(batch_size, -1) codebook_preds = codebook_preds.to(torch.int32) input_buffer[:, rel_start_fill_idx:, n_inner] = codebook_preds del logits, codebook_preds # transfer into fine_input for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): fine_input[ :, start_fill_idx : start_fill_idx + (max_fine_input_length - rel_start_fill_idx), n_inner ] = input_buffer[:, rel_start_fill_idx:, n_inner] del input_buffer fine_input = fine_input.transpose(1, 2)[:, :, n_history:] if n_remove_from_end > 0: fine_input = fine_input[:, :, :-n_remove_from_end] if fine_input.shape[-1] != coarse_output.shape[-2]: raise ValueError("input and output should have the same seq_len") return fine_input @auto_docstring( custom_intro=""" The full Bark model, a text-to-speech model composed of 4 sub-models: - [`BarkSemanticModel`] (also referred to as the 'text' model): a causal auto-regressive transformer model that takes as input tokenized text, and predicts semantic text tokens that capture the meaning of the text. - [`BarkCoarseModel`] (also referred to as the 'coarse acoustics' model), also a causal autoregressive transformer, that takes into input the results of the last model. It aims at regressing the first two audio codebooks necessary to `encodec`. - [`BarkFineModel`] (the 'fine acoustics' model), this time a non-causal autoencoder transformer, which iteratively predicts the last codebooks based on the sum of the previous codebooks embeddings. - having predicted all the codebook channels from the [`EncodecModel`], Bark uses it to decode the output audio array. It should be noted that each of the first three modules can support conditional speaker embeddings to condition the output sound according to specific predefined voice. 
""" ) class BarkModel(BarkPreTrainedModel): config: BarkConfig def __init__(self, config): super().__init__(config) self.semantic = BarkSemanticModel(config.semantic_config) self.coarse_acoustics = BarkCoarseModel(config.coarse_acoustics_config) self.fine_acoustics = BarkFineModel(config.fine_acoustics_config) self.codec_model = AutoModel.from_config(config.codec_config) self.config = config @classmethod def can_generate(cls) -> bool: # Bark has a unique model structure, where the external class (`BarkModel`) doesn't need to inherit from # `GenerationMixin` (it has a non-standard generation method), but one of the internal models do # (`BarkSemanticModel`). This means that the base `can_generate()` will return `False`, but we need to # override it so as to do `GenerationConfig` handling in multiple parts of the codebase. return True @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ # for bark_model, device must be verified on its sub-models # if has _hf_hook, has been offloaded so the device has to be found in the hook if not hasattr(self.semantic, "_hf_hook"): return get_parameter_device(self) for module in self.semantic.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) def enable_cpu_offload( self, accelerator_id: Optional[int] = 0, **kwargs, ): r""" Offloads all sub-models to CPU using accelerate, reducing memory usage with a low impact on performance. This method moves one whole sub-model at a time to the accelerator when it is used, and the sub-model remains in accelerator until the next sub-model runs. Args: accelerator_id (`int`, *optional*, defaults to 0): accelerator id on which the sub-models will be loaded and offloaded. This argument is deprecated. kwargs (`dict`, *optional*): additional keyword arguments: `gpu_id`: accelerator id on which the sub-models will be loaded and offloaded. """ if is_accelerate_available(): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate`.") gpu_id = kwargs.get("gpu_id", 0) if gpu_id != 0: warnings.warn( "The argument `gpu_id` is deprecated and will be removed in version 4.54.0 of Transformers. Please use `accelerator_id` instead.", FutureWarning, ) accelerator_id = gpu_id device_type = "cuda" if is_torch_accelerator_available(): device_type = torch.accelerator.current_accelerator().type device = torch.device(f"{device_type}:{accelerator_id}") torch_accelerator_module = getattr(torch, device_type) if self.device.type != "cpu": self.to("cpu") torch_accelerator_module.empty_cache() # otherwise we don't see the memory savings (but they probably exist) # this layer is used outside the first forward pass of semantic so need to be loaded before semantic self.semantic.input_embeds_layer, _ = cpu_offload_with_hook(self.semantic.input_embeds_layer, device) hook = None for cpu_offloaded_model in [ self.semantic, self.coarse_acoustics, self.fine_acoustics, ]: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) self.fine_acoustics_hook = hook _, hook = cpu_offload_with_hook(self.codec_model, device, prev_module_hook=hook) # We'll offload the last model manually. 
self.codec_model_hook = hook def codec_decode(self, fine_output, output_lengths=None): """Turn quantized audio codes into audio array using encodec.""" fine_output = fine_output.transpose(0, 1) emb = self.codec_model.quantizer.decode(fine_output) if output_lengths is not None: # encodec uses LSTMs which behaves differently with appended padding # decoding with encodec takes around 0.1% of the total generation time # to keep generation quality, we break batching out = [sample[:, :l].unsqueeze(0) for (sample, l) in zip(emb, output_lengths)] audio_arr = [self.codec_model.decoder(sample).squeeze() for sample in out] else: out = self.codec_model.decoder(emb) audio_arr = out.squeeze(1) # squeeze the codebook dimension return audio_arr @torch.no_grad() def generate( self, input_ids: Optional[torch.Tensor] = None, history_prompt: Optional[dict[str, torch.Tensor]] = None, return_output_lengths: Optional[bool] = None, **kwargs, ) -> torch.LongTensor: """ Generates audio from an input prompt and an additional optional `Bark` speaker prompt. Args: input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): Input ids. Will be truncated up to 256 tokens. Note that the output audios will be as long as the longest generation among the batch. history_prompt (`Optional[dict[str,torch.Tensor]]`, *optional*): Optional `Bark` speaker prompt. Note that for now, this model takes only one speaker prompt per batch. kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments are of two types: - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. - With a *semantic_*, *coarse_*, *fine_* prefix, they will be input for the `generate` method of the semantic, coarse and fine respectively. It has the priority over the keywords without a prefix. This means you can, for example, specify a generation strategy for all sub-models except one. return_output_lengths (`bool`, *optional*): Whether or not to return the waveform lengths. Useful when batching. Returns: By default: - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. When `return_output_lengths=True`: Returns a tuple made of: - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. 
- **output_lengths** (`torch.Tensor` of shape (batch_size)): The length of each waveform in the batch Example: ```python >>> from transformers import AutoProcessor, BarkModel >>> processor = AutoProcessor.from_pretrained("suno/bark-small") >>> model = BarkModel.from_pretrained("suno/bark-small") >>> # To add a voice preset, you can pass `voice_preset` to `BarkProcessor.__call__(...)` >>> voice_preset = "v2/en_speaker_6" >>> inputs = processor("Hello, my dog is cute, I need him in my life", voice_preset=voice_preset) >>> audio_array = model.generate(**inputs, semantic_max_new_tokens=100) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` """ # TODO (joao):workaround until nested generation config is compatible with PreTrained Model # todo: dict semantic_generation_config = BarkSemanticGenerationConfig(**self.generation_config.semantic_config) coarse_generation_config = BarkCoarseGenerationConfig(**self.generation_config.coarse_acoustics_config) fine_generation_config = BarkFineGenerationConfig(**self.generation_config.fine_acoustics_config) kwargs_semantic = { # if "attention_mask" is set, it should not be passed to CoarseModel and FineModel "attention_mask": kwargs.pop("attention_mask", None), "min_eos_p": kwargs.pop("min_eos_p", None), } kwargs_coarse = {} kwargs_fine = {} for key, value in kwargs.items(): if key.startswith("semantic_"): key = key[len("semantic_") :] kwargs_semantic[key] = value elif key.startswith("coarse_"): key = key[len("coarse_") :] kwargs_coarse[key] = value elif key.startswith("fine_"): key = key[len("fine_") :] kwargs_fine[key] = value else: # If the key is already in a specific config, then it's been set with a # submodules specific value and we don't override if key not in kwargs_semantic: kwargs_semantic[key] = value if key not in kwargs_coarse: kwargs_coarse[key] = value if key not in kwargs_fine: kwargs_fine[key] = value # 1. Generate from the semantic model if "generation_config" in kwargs_semantic: kwargs_semantic.pop("generation_config") semantic_output = self.semantic.generate( input_ids, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, **kwargs_semantic, ) # 2. Generate from the coarse model if "generation_config" in kwargs_coarse: kwargs_coarse.pop("generation_config") coarse_output = self.coarse_acoustics.generate( semantic_output, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, codebook_size=self.generation_config.codebook_size, return_output_lengths=return_output_lengths, **kwargs_coarse, ) output_lengths = None if return_output_lengths: coarse_output, output_lengths = coarse_output # (batch_size, seq_len*coarse_codebooks) -> (batch_size, seq_len) output_lengths = output_lengths // coarse_generation_config.n_coarse_codebooks # 3. "generate" from the fine model if "generation_config" in kwargs_fine: kwargs_fine.pop("generation_config") output = self.fine_acoustics.generate( coarse_output, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=self.generation_config.codebook_size, **kwargs_fine, ) if getattr(self, "fine_acoustics_hook", None) is not None: # Manually offload fine_acoustics to CPU # and load codec_model to GPU # since bark doesn't use codec_model forward pass self.fine_acoustics_hook.offload() self.codec_model = self.codec_model.to(self.device) # 4. 
Decode the output and generate audio array audio = self.codec_decode(output, output_lengths) if getattr(self, "codec_model_hook", None) is not None: # Offload codec_model to CPU self.codec_model_hook.offload() if return_output_lengths: output_lengths = [len(sample) for sample in audio] audio = nn.utils.rnn.pad_sequence(audio, batch_first=True, padding_value=0) return audio, output_lengths return audio def tie_weights(self): """ Tie the weights between the input embeddings list and the output embeddings list. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ for module in self.modules(): if hasattr(module, "_tie_weights"): module._tie_weights() __all__ = [ "BarkFineModel", "BarkSemanticModel", "BarkCoarseModel", "BarkModel", "BarkPreTrainedModel", "BarkCausalModel", ]
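As a companion to the `BarkModel.generate` docstring above, here is a minimal usage sketch (not part of the library module itself). The checkpoint and voice preset are the ones referenced in the docstring; the prefixed sampling temperatures are illustrative values.

```python
from transformers import AutoProcessor, BarkModel

processor = AutoProcessor.from_pretrained("suno/bark-small")
model = BarkModel.from_pretrained("suno/bark-small")
# model.enable_cpu_offload()  # optional: offload sub-models between stages (requires accelerate)

inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")

# Kwargs with a `semantic_`, `coarse_` or `fine_` prefix are routed to the matching sub-model,
# so each stage of the text -> semantic -> coarse -> fine pipeline can be tuned independently.
audio = model.generate(
    **inputs,
    semantic_max_new_tokens=100,  # cap the number of generated semantic tokens
    coarse_temperature=0.7,       # illustrative sampling temperature for the coarse model
    fine_temperature=0.5,         # illustrative sampling temperature for the fine model
)

audio = audio.cpu().numpy().squeeze()
sample_rate = model.generation_config.sample_rate  # 24 kHz for the released Bark checkpoints
```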
transformers/src/transformers/models/bark/modeling_bark.py/0
{ "file_path": "transformers/src/transformers/models/bark/modeling_bark.py", "repo_id": "transformers", "token_count": 31038 }
410
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BEiT model configuration""" import warnings from collections import OrderedDict from collections.abc import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices class BeitConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate an BEiT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BEiT [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture. Args: vocab_size (`int`, *optional*, defaults to 8192): Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during pre-training. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. use_mask_token (`bool`, *optional*, defaults to `False`): Whether to use a mask token for masked image modeling. use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`): Whether to use BERT-style absolute position embeddings. use_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use T5-style relative position embeddings in the self-attention layers. 
use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`): Whether to use the same relative position embeddings across all self-attention layers of the Transformer. layer_scale_init_value (`float`, *optional*, defaults to 0.1): Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate per sample (when applied in the main path of residual layers). use_mean_pooling (`bool`, *optional*, defaults to `True`): Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the CLS token, before applying the classification head. pool_scales (`tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. out_features (`list[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. add_fpn (`bool`, *optional*, defaults to `False`): Whether to add a FPN as part of the backbone. Only relevant for [`BeitBackbone`]. reshape_hidden_states (`bool`, *optional*, defaults to `True`): Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size, seq_len, hidden_size)`. Only relevant for [`BeitBackbone`]. 
Example: ```python >>> from transformers import BeitConfig, BeitModel >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration >>> configuration = BeitConfig() >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration >>> model = BeitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "beit" def __init__( self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, out_features=None, out_indices=None, add_fpn=False, reshape_hidden_states=True, **kwargs, ): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.use_mask_token = use_mask_token self.use_absolute_position_embeddings = use_absolute_position_embeddings self.use_relative_position_bias = use_relative_position_bias self.use_shared_relative_position_bias = use_shared_relative_position_bias self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.use_mean_pooling = use_mean_pooling # decode head attributes (semantic segmentation) self.pool_scales = pool_scales # auxiliary head attributes (semantic segmentation) self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.semantic_loss_ignore_index = semantic_loss_ignore_index # handle backwards compatibility if "segmentation_indices" in kwargs: warnings.warn( "The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.", FutureWarning, ) out_indices = kwargs.pop("segmentation_indices") # backbone attributes self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) self.add_fpn = add_fpn self.reshape_hidden_states = reshape_hidden_states # Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig class BeitOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 
1e-4 __all__ = ["BeitConfig", "BeitOnnxConfig"]
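# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The class docstring above describes how `out_features` / `out_indices` behave when
# BeitConfig is used through `BackboneConfigMixin`. The snippet below is a minimal,
# hedged example of that alignment; it assumes only the public `BeitConfig` class
# defined in this file.

from transformers import BeitConfig

backbone_config = BeitConfig(out_features=["stage3", "stage6", "stage9", "stage12"])
# `get_aligned_output_features_output_indices` fills in the matching indices automatically,
# using the positions of the requested stages in `config.stage_names`.
print(backbone_config.out_features)  # ['stage3', 'stage6', 'stage9', 'stage12']
print(backbone_config.out_indices)   # the aligned indices into stage_names, e.g. [3, 6, 9, 12]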
transformers/src/transformers/models/beit/configuration_beit.py/0
{ "file_path": "transformers/src/transformers/models/beit/configuration_beit.py", "repo_id": "transformers", "token_count": 4438 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BLIP-2 model configuration""" from typing import Optional from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class Blip2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Blip2VisionModel`]. It is used to instantiate a BLIP-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of the BLIP-2 [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1408): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 6144): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 39): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries and values in the self-attention layers. 
Example: ```python >>> from transformers import Blip2VisionConfig, Blip2VisionModel >>> # Initializing a Blip2VisionConfig with Salesforce/blip2-opt-2.7b style configuration >>> configuration = Blip2VisionConfig() >>> # Initializing a Blip2VisionModel (with random weights) from the Salesforce/blip2-opt-2.7b style configuration >>> model = Blip2VisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "blip_2_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.qkv_bias = qkv_bias class Blip2QFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Blip2QFormerModel`]. It is used to instantiate a BLIP-2 Querying Transformer (Q-Former) model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-2 [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Note that [`Blip2QFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling the model. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. pad_token_id (`int`, *optional*, defaults to 0): Index to be used for padding token. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). cross_attention_frequency (`int`, *optional*, defaults to 2): The frequency of adding cross-attention to the Transformer layers. encoder_hidden_size (`int`, *optional*, defaults to 1408): The hidden size of the hidden states for cross-attention. use_qformer_text_input (`bool`, *optional*, defaults to `False`): Whether to use BERT-style embeddings. Examples: ```python >>> from transformers import Blip2QFormerConfig, Blip2QFormerModel >>> # Initializing a BLIP-2 Salesforce/blip2-opt-2.7b style configuration >>> configuration = Blip2QFormerConfig() >>> # Initializing a model (with random weights) from the Salesforce/blip2-opt-2.7b style configuration >>> model = Blip2QFormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "blip_2_qformer" base_config_key = "qformer_config" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, use_qformer_text_input=False, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.cross_attention_frequency = cross_attention_frequency self.encoder_hidden_size = encoder_hidden_size self.use_qformer_text_input = use_qformer_text_input class Blip2Config(PretrainedConfig): r""" [`Blip2Config`] is the configuration class to store the configuration of a [`Blip2ForConditionalGeneration`]. It is used to instantiate a BLIP-2 model according to the specified arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-2 [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. 
Args: vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Blip2VisionConfig`]. qformer_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Blip2QFormerConfig`]. text_config (`dict`, *optional*): Dictionary of configuration options used to initialize any [`PretrainedConfig`]. num_query_tokens (`int`, *optional*, defaults to 32): The number of query tokens passed through the Transformer. image_text_hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden state of the image-text fusion layer. image_token_index (`int`, *optional*): Token index of special image token. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ( ... Blip2VisionConfig, ... Blip2QFormerConfig, ... OPTConfig, ... Blip2Config, ... Blip2ForConditionalGeneration, ... ) >>> # Initializing a Blip2Config with Salesforce/blip2-opt-2.7b style configuration >>> configuration = Blip2Config() >>> # Initializing a Blip2ForConditionalGeneration (with random weights) from the Salesforce/blip2-opt-2.7b style configuration >>> model = Blip2ForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a Blip2Config from a Blip2VisionConfig, Blip2QFormerConfig and any PretrainedConfig >>> # Initializing BLIP-2 vision, BLIP-2 Q-Former and language model configurations >>> vision_config = Blip2VisionConfig() >>> qformer_config = Blip2QFormerConfig() >>> text_config = OPTConfig() >>> config = Blip2Config.from_text_vision_configs(vision_config, qformer_config, text_config) ```""" model_type = "blip-2" attribute_map = { "image_token_id": "image_token_index", } sub_configs = {"text_config": AutoConfig, "qformer_config": Blip2QFormerConfig, "vision_config": Blip2VisionConfig} def __init__( self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, image_text_hidden_size=256, image_token_index=None, **kwargs, ): super().__init__(**kwargs) if vision_config is None: vision_config = {} logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.") if qformer_config is None: qformer_config = {} logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.") if text_config is None: text_config = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).") self.vision_config = Blip2VisionConfig(**vision_config) self.qformer_config = Blip2QFormerConfig(**qformer_config) text_model_type = text_config.get("model_type", "opt") self.text_config = CONFIG_MAPPING[text_model_type](**text_config) self.num_query_tokens = num_query_tokens self.image_text_hidden_size = image_text_hidden_size self.image_token_index = image_token_index self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES self.initializer_factor = 1.0 self.initializer_range = 0.02 @classmethod def from_vision_qformer_text_configs( cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: Optional[PretrainedConfig] = None, **kwargs, ): r""" Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model configurations. Args: vision_config (`dict`): Dictionary of configuration options used to initialize [`Blip2VisionConfig`]. 
qformer_config (`dict`): Dictionary of configuration options used to initialize [`Blip2QFormerConfig`]. text_config (`dict`, *optional*): Dictionary of configuration options used to initialize any [`PretrainedConfig`]. Returns: [`Blip2Config`]: An instance of a configuration object """ return cls( vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict() if text_config is not None else None, **kwargs, ) __all__ = ["Blip2Config", "Blip2QFormerConfig", "Blip2VisionConfig"]
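# --- Illustrative composition sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of building a Blip2Config from explicit sub-configs via the
# `from_vision_qformer_text_configs` classmethod defined above. It assumes only the classes
# exported by this file plus `OPTConfig`, the default text backbone used when no text config
# is provided.

from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

vision_config = Blip2VisionConfig()
qformer_config = Blip2QFormerConfig()
text_config = OPTConfig()

config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)

# Blip2Config.__init__ ties the Q-Former's cross-attention width to the vision encoder's hidden size.
assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size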
transformers/src/transformers/models/blip_2/configuration_blip_2.py/0
{ "file_path": "transformers/src/transformers/models/blip_2/configuration_blip_2.py", "repo_id": "transformers", "token_count": 6114 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert CANINE checkpoint.""" import argparse from transformers import CanineConfig, CanineModel, CanineTokenizer, load_tf_weights_in_canine from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path): # Initialize PyTorch model config = CanineConfig() model = CanineModel(config) model.eval() print(f"Building PyTorch model from configuration: {config}") # Load weights from tf checkpoint load_tf_weights_in_canine(model, config, tf_checkpoint_path) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) # Save tokenizer files tokenizer = CanineTokenizer() print(f"Save tokenizer files to {pytorch_dump_path}") tokenizer.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint. Should end with model.ckpt", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to a folder where the PyTorch model will be placed.", ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.pytorch_dump_path)
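# Example invocation (editor's addition; the paths below are hypothetical placeholders,
# only the two required arguments defined by the parser above are used):
#
#   python convert_canine_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/canine/model.ckpt \
#       --pytorch_dump_path /path/to/output_dir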
transformers/src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 742 }
# coding=utf-8 # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Chinese-CLIP model.""" from dataclasses import dataclass from typing import Any, Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig logger = logging.get_logger(__name__) # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html # Copied from transformers.models.clip.modeling_clip.contrastive_loss def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass @auto_docstring class ChineseCLIPOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPVisionModel`]. text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`): The output of the [`ChineseCLIPTextModel`]. vision_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`): The output of the [`ChineseCLIPVisionModel`]. 
""" loss: Optional[torch.FloatTensor] = None logits_per_image: Optional[torch.FloatTensor] = None logits_per_text: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.models.align.modeling_align.AlignTextEmbeddings with Align->ChineseCLIP class ChineseCLIPTextEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP class ChineseCLIPVisionEmbeddings(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size 
self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." 
) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.align.modeling_align.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, head_mask: Optional[torch.Tensor] = None, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) if head_mask is not None: attn_weights = attn_weights * head_mask.view(1, -1, 1, 1) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.align.modeling_align.AlignTextSelfAttention with Align->ChineseCLIP class ChineseCLIPTextSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.attention_dropout = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.attention_head_size) query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, head_mask=head_mask, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() outputs = (attn_output, attn_weights) if 
output_attentions else (attn_output,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText class ChineseCLIPTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.align.modeling_align.AlignTextAttention with Align->ChineseCLIP class ChineseCLIPTextAttention(nn.Module): def __init__(self, config): super().__init__() self.self = ChineseCLIPTextSelfAttention(config) self.output = ChineseCLIPTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class ChineseCLIPVisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, **kwargs ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) * self.scale key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, None, dropout=0.0 if not self.training else self.dropout, scaling=1.0, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText class ChineseCLIPTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText class ChineseCLIPTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision class ChineseCLIPVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.align.modeling_align.AlignTextLayer with Align->ChineseCLIP class ChineseCLIPTextLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ChineseCLIPTextAttention(config) self.intermediate = 
ChineseCLIPTextIntermediate(config) self.output = ChineseCLIPTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[torch.Tensor]: self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class ChineseCLIPVisionLayer(GradientCheckpointingLayer): def __init__(self, config: ChineseCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = ChineseCLIPVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = ChineseCLIPVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText class ChineseCLIPTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
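        # (The "first token" here is the [CLS] token; its hidden state is then passed through
        # a dense layer and a tanh activation to form the pooled output.)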
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @auto_docstring class ChineseCLIPPreTrainedModel(PreTrainedModel): config: ChineseCLIPConfig base_model_prefix = "chinese_clip" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, ChineseCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, ChineseCLIPTextEmbeddings): nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: if embedding.padding_idx is not None: embedding.weight.data[embedding.padding_idx].zero_() elif isinstance(module, ChineseCLIPVisionAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, ChineseCLIPVisionMLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, ChineseCLIPModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() # Copied from transformers.models.align.modeling_align.AlignTextEncoder with Align->ChineseCLIP class ChineseCLIPTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, **kwargs, ) -> Union[tuple[torch.Tensor], BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: 
all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class ChineseCLIPVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`ChineseCLIPVisionEncoderLayer`]. Args: config: ChineseCLIPConfig """ def __init__(self, config: ChineseCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward( self, inputs_embeds, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class ChineseCLIPVisionTransformer(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = ChineseCLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = ChineseCLIPVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The text model from CHINESE_CLIP without any head or projection on top. """ ) class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. 
To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ config: ChineseCLIPTextConfig _no_split_modules = ["ChineseCLIPTextEmbeddings"] def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = ChineseCLIPTextEmbeddings(config) self.encoder = ChineseCLIPTextEncoder(config) self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
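        # get_extended_attention_mask (inherited from the PreTrainedModel base utilities) converts
        # the [batch_size, seq_length] 0/1 mask into an additive mask broadcastable over all heads
        # and query positions: 0.0 where attention is allowed, a large negative value on masked positions.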
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The vision model from CHINESE_CLIP without any head or projection on top. """ ) class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): config: ChineseCLIPVisionConfig main_input_name = "pixel_values" _no_split_modules = ["ChineseCLIPVisionEmbeddings", "ChineseCLIPVisionAttention"] def __init__(self, config: ChineseCLIPVisionConfig): super().__init__(config) self.vision_model = ChineseCLIPVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: r""" Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) @auto_docstring class ChineseCLIPModel(ChineseCLIPPreTrainedModel): config: ChineseCLIPConfig def __init__(self, config: ChineseCLIPConfig): super().__init__(config) if not isinstance(config.text_config, ChineseCLIPTextConfig): raise TypeError( "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, ChineseCLIPVisionConfig): raise TypeError( "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type" f" {type(config.vision_config)}." 
) text_config = config.text_config vision_config = config.vision_config # The module using it is not a PreTrainedModel subclass so we need this vision_config._attn_implementation = config._attn_implementation self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) self.vision_model = ChineseCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @auto_docstring def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the final [CLS] hidden state of Text-Transformer. Examples: ```python >>> from transformers import AutoTokenizer, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[0][:, 0, :] text_features = self.text_projection(pooled_output) return text_features @auto_docstring def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the final [CLS] hidden state of Vision-Transformer. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, ChineseCLIPOutput]: r""" return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
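        # (Arguments passed explicitly to this call take precedence; the config attributes only act as defaults.)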
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[0][:, 0, :] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = chinese_clip_loss(logits_per_text) return ChineseCLIPOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) __all__ = ["ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel"]
transformers/src/transformers/models/chinese_clip/modeling_chinese_clip.py/0
{ "file_path": "transformers/src/transformers/models/chinese_clip/modeling_chinese_clip.py", "repo_id": "transformers", "token_count": 22402 }
414
# coding=utf-8 # Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 CLIP model.""" from __future__ import annotations import math from dataclasses import dataclass from typing import Any import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling # Public API from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32" LARGE_NEGATIVE = -1e8 # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: int | None = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: return tf.math.reduce_mean( keras.metrics.sparse_categorical_crossentropy( y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True ) ) def clip_loss(similarity: tf.Tensor) -> tf.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(tf.transpose(similarity)) return (caption_loss + image_loss) / 2.0 @dataclass class TFCLIPOutput(ModelOutput): """ Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image:(`tf.Tensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text:(`tf.Tensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds(`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPTextModel`]. image_embeds(`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPVisionModel`]. 
text_model_output([`~modeling_tf_utils.TFBaseModelOutputWithPooling`]): The output of the [`TFCLIPTextModel`]. vision_model_output([`~modeling_tf_utils.TFBaseModelOutputWithPooling`]): The output of the [`TFCLIPVisionModel`]. """ loss: tf.Tensor | None = None logits_per_image: tf.Tensor | None = None logits_per_text: tf.Tensor | None = None text_embeds: tf.Tensor | None = None image_embeds: tf.Tensor | None = None text_model_output: TFBaseModelOutputWithPooling = None vision_model_output: TFBaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class TFCLIPVisionEmbeddings(keras.layers.Layer): def __init__(self, config: CLIPVisionConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.config = config self.patch_embedding = keras.layers.Conv2D( filters=self.embed_dim, kernel_size=self.patch_size, strides=self.patch_size, padding="valid", data_format="channels_last", use_bias=False, kernel_initializer=get_initializer(self.config.initializer_range * self.config.initializer_factor), name="patch_embedding", ) def build(self, input_shape: tf.TensorShape = None): factor = self.config.initializer_factor self.class_embedding = self.add_weight( shape=(self.embed_dim,), initializer=get_initializer(self.embed_dim**-0.5 * factor), trainable=True, name="class_embedding", ) with tf.name_scope("position_embedding"): self.position_embedding = self.add_weight( shape=(self.num_positions, self.embed_dim), initializer=get_initializer(self.config.initializer_range * factor), trainable=True, name="embeddings", ) if self.built: return self.built = True if getattr(self, "patch_embedding", None) is not None: with tf.name_scope(self.patch_embedding.name): self.patch_embedding.build([None, None, None, self.config.num_channels]) def call(self, pixel_values: tf.Tensor) -> tf.Tensor: """`pixel_values` is expected to be of NCHW format.""" batch_size, num_channels, height, width = shape_list(pixel_values) # When running on CPU, `tf.nn.conv2d` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) patch_embeds = self.patch_embedding(pixel_values) # Change the 2D spatial dimensions to a single temporal dimension. 
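        # For example, with the ViT-B/32 configuration (image_size=224, patch_size=32) the convolution produces a
        # 7 x 7 grid, i.e. num_patches = 49, and one [CLS] embedding is prepended below for 50 positions in total.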
# shape = (batch_size, num_patches, out_channels=embed_dim) patch_embeds = tf.reshape(tensor=patch_embeds, shape=(batch_size, self.num_patches, -1)) # add the [CLS] token to the embedded patch tokens class_embeds = tf.broadcast_to(self.class_embedding, shape=(batch_size, 1, self.embed_dim)) embeddings = tf.concat((class_embeds, patch_embeds), axis=1) embeddings = embeddings + self.position_embedding return embeddings class TFCLIPTextEmbeddings(keras.layers.Layer): def __init__(self, config: CLIPTextConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.config = config def build(self, input_shape: tf.TensorShape = None): with tf.name_scope("token_embedding"): self.weight = self.add_weight( shape=(self.config.vocab_size, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name="weight", ) with tf.name_scope("position_embedding"): self.position_embedding = self.add_weight( shape=(self.config.max_position_embeddings, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name="embeddings", ) super().build(input_shape) def call( self, input_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ if input_ids is None and inputs_embeds is None: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embedding, indices=position_ids) position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) final_embeddings = inputs_embeds + position_embeds return final_embeddings class TFCLIPAttention(keras.layers.Layer): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.num_attention_heads = config.num_attention_heads self.attention_head_size = self.embed_dim // self.num_attention_heads if self.attention_head_size * self.num_attention_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_attention_heads})." 
) factor = config.initializer_factor in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor out_proj_std = (self.embed_dim**-0.5) * factor self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.q_proj = keras.layers.Dense( units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj" ) self.k_proj = keras.layers.Dense( units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj" ) self.v_proj = keras.layers.Dense( units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj" ) self.dropout = keras.layers.Dropout(rate=config.attention_dropout) self.out_proj = keras.layers.Dense( units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj" ) # copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> tuple[tf.Tensor]: """Input shape: Batch x Time x Channel""" batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.q_proj(inputs=hidden_states) mixed_key_layer = self.k_proj(inputs=hidden_states) mixed_value_layer = self.v_proj(inputs=hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) # apply the causal_attention_mask first if causal_attention_mask is not None: # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function) attention_scores = tf.add(attention_scores, causal_attention_mask) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. _attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(inputs=_attention_probs, training=training) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, embed_dim) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim)) attention_output = self.out_proj(attention_output, training=training) # In TFBert, attention weights are returned after dropout. 
# However, in CLIP, they are returned before dropout. outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFCLIPMLP(keras.layers.Layer): def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) self.activation_fn = get_tf_activation(config.hidden_act) factor = config.initializer_factor in_proj_std = (config.hidden_size**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * config.hidden_size) ** -0.5 * factor self.fc1 = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name="fc1" ) self.fc2 = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name="fc2" ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.fc1(inputs=hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.config.hidden_size]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.intermediate_size]) class TFCLIPEncoderLayer(keras.layers.Layer): def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.self_attn = TFCLIPAttention(config, name="self_attn") self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") self.mlp = TFCLIPMLP(config, name="mlp") self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> tuple[tf.Tensor]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. causal_attention_mask (`tf.Tensor`): causal attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`): Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(inputs=hidden_states) attention_outputs = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = attention_outputs[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(inputs=hidden_states) hidden_states = self.mlp(hidden_states=hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, self.embed_dim]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, self.embed_dim]) class TFCLIPEncoder(keras.layers.Layer): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`TFCLIPEncoderLayer`]. Args: config: CLIPConfig """ def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) self.layers = [TFCLIPEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> TFBaseModelOutput | tuple[tf.Tensor]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFCLIPTextTransformer(keras.layers.Layer): def __init__(self, config: CLIPTextConfig, **kwargs): super().__init__(**kwargs) self.embeddings = TFCLIPTextEmbeddings(config, name="embeddings") self.encoder = TFCLIPEncoder(config, name="encoder") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm") # For `pooled_output` computation self.eos_token_id = config.eos_token_id self.embed_dim = config.hidden_size def call( self, input_ids: TFModelInputType, attention_mask: tf.Tensor, position_ids: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, 
training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: input_shape = shape_list(input_ids) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids) batch_size, seq_length = input_shape # CLIP's text model uses causal mask, prepare it here. # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype) # check attention mask and invert # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask) encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] sequence_output = self.final_layer_norm(inputs=sequence_output) if self.eos_token_id == 2: # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here. # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added # ------------------------------------------------------------ # text_embeds.shape = [batch_size, n_ctx, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) pooled_output = tf.gather_nd( params=sequence_output, indices=tf.stack( values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1 ), ) else: # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible) pooled_output = tf.gather_nd( params=sequence_output, indices=tf.stack( values=( tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1), ), axis=1, ), ) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32): # It is possible with an unspecified sequence length for seq_length to be # a runtime value, which is unsupported by tf.constant. Per the TensorFlow # docs, tf.fill can handle runtime dynamic shapes: # https://www.tensorflow.org/api_docs/python/tf/fill diag = tf.cast(tf.fill((seq_length,), 0.0), dtype) # set an additive 2D attention mask with all places being masked to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype) # set diagonal & lower triangular parts to 0 (i.e. 
the places not to be masked) # TIP: think the 2D matrix as the space of (query_seq, key_seq) to_mask = tf.linalg.band_part(to_mask, 0, -1) # to_mask = tf.linalg.band_part(to_mask, -1, 0) to_mask = tf.linalg.set_diag(to_mask, diagonal=diag) return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) @keras_serializable class TFCLIPTextMainLayer(keras.layers.Layer): config_class = CLIPTextConfig def __init__(self, config: CLIPTextConfig, **kwargs): super().__init__(**kwargs) self.config = config self.text_model = TFCLIPTextTransformer(config, name="text_model") def get_input_embeddings(self) -> keras.layers.Layer: return self.text_model.embeddings def set_input_embeddings(self, value: tf.Variable): self.text_model.embeddings.weight = value self.text_model.embeddings.vocab_size = shape_list(value)[0] @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: if input_ids is None: raise ValueError("You have to specify input_ids") input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) text_model_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return text_model_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "text_model", None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) class TFCLIPVisionTransformer(keras.layers.Layer): def __init__(self, config: CLIPVisionConfig, **kwargs): super().__init__(**kwargs) self.embeddings = TFCLIPVisionEmbeddings(config, name="embeddings") self.pre_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="pre_layrnorm") self.encoder = TFCLIPEncoder(config, name="encoder") self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm") self.embed_dim = config.hidden_size def call( self, pixel_values: TFModelInputType, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: embedding_output = self.embeddings(pixel_values=pixel_values) embedding_output = self.pre_layernorm(inputs=embedding_output) encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] pooled_output = self.post_layernorm(inputs=pooled_output) if not return_dict: return 
(sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "pre_layernorm", None) is not None: with tf.name_scope(self.pre_layernorm.name): self.pre_layernorm.build([None, None, self.embed_dim]) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "post_layernorm", None) is not None: with tf.name_scope(self.post_layernorm.name): self.post_layernorm.build([None, self.embed_dim]) @keras_serializable class TFCLIPVisionMainLayer(keras.layers.Layer): config_class = CLIPVisionConfig def __init__(self, config: CLIPVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.vision_model = TFCLIPVisionTransformer(config, name="vision_model") def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings @unpack_inputs def call( self, pixel_values: TFModelInputType | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: if pixel_values is None: raise ValueError("You have to specify pixel_values") vision_model_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return vision_model_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vision_model", None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) @keras_serializable class TFCLIPMainLayer(keras.layers.Layer): config_class = CLIPConfig def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) if not isinstance(config.text_config, CLIPTextConfig): raise TypeError( "config.text_config is expected to be of type CLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, CLIPVisionConfig): raise TypeError( "config.vision_config is expected to be of type CLIPVisionConfig but is of type" f" {type(config.vision_config)}." 
) self.config = config text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_model = TFCLIPTextTransformer(text_config, name="text_model") self.vision_model = TFCLIPVisionTransformer(vision_config, name="vision_model") self.visual_projection = keras.layers.Dense( units=self.projection_dim, kernel_initializer=get_initializer(vision_config.hidden_size**-0.5 * self.config.initializer_factor), use_bias=False, name="visual_projection", ) self.text_projection = keras.layers.Dense( units=self.projection_dim, kernel_initializer=get_initializer(text_config.hidden_size**-0.5 * self.config.initializer_factor), use_bias=False, name="text_projection", ) self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size def build(self, input_shape: tf.TensorShape = None): self.logit_scale = self.add_weight( shape=(1,), initializer=keras.initializers.Constant(self.config.logit_scale_init_value), trainable=True, name="logit_scale", ) if self.built: return self.built = True if getattr(self, "text_model", None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) if getattr(self, "vision_model", None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, "visual_projection", None) is not None: with tf.name_scope(self.visual_projection.name): self.visual_projection.build([None, None, self.vision_embed_dim]) if getattr(self, "text_projection", None) is not None: with tf.name_scope(self.text_projection.name): self.text_projection.build([None, None, self.text_embed_dim]) @unpack_inputs def get_text_features( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tf.Tensor: if input_ids is None: raise ValueError("You have to specify either input_ids") input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = text_outputs[1] text_features = self.text_projection(inputs=pooled_output) return text_features @unpack_inputs def get_image_features( self, pixel_values: TFModelInputType | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tf.Tensor: if pixel_values is None: raise ValueError("You have to specify pixel_values") vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(inputs=pooled_output) return image_features @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, pixel_values: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, return_loss: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None 
= None, training: bool = False, ) -> TFCLIPOutput | tuple[tf.Tensor]: if input_ids is None: raise ValueError("You have to specify either input_ids") if pixel_values is None: raise ValueError("You have to specify pixel_values") input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(inputs=image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(inputs=text_embeds) # normalized features image_embeds = image_embeds / tf.norm(tensor=image_embeds, ord="euclidean", axis=-1, keepdims=True) text_embeds = text_embeds / tf.norm(tensor=text_embeds, ord="euclidean", axis=-1, keepdims=True) # cosine similarity as logits logit_scale = tf.math.exp(self.logit_scale) logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale logits_per_image = tf.transpose(logits_per_text) loss = None if return_loss: loss = clip_loss(logits_per_text) loss = tf.reshape(loss, (1,)) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return TFCLIPOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) class TFCLIPPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = CLIPConfig base_model_prefix = "clip" _keys_to_ignore_on_load_missing = [r"position_ids"] _keys_to_ignore_on_load_unexpected = [r"position_ids"] CLIP_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`CLIPConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ CLIP_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` ``dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ CLIP_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` ``dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See [`CLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ CLIP_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` ``dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) pixel_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` `dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" class TFCLIPTextModel(TFCLIPPreTrainedModel): config_class = CLIPTextConfig def __init__(self, config: CLIPTextConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.clip = TFCLIPTextMainLayer(config, name="clip") @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPTextConfig) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, TFCLIPTextModel >>> model = TFCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" outputs = self.clip( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "clip", None) is not None: with tf.name_scope(self.clip.name): self.clip.build(None) class TFCLIPVisionModel(TFCLIPPreTrainedModel): config_class = CLIPVisionConfig main_input_name = "pixel_values" def __init__(self, config: CLIPVisionConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.clip = TFCLIPVisionMainLayer(config, name="clip") @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPVisionConfig) def call( self, pixel_values: TFModelInputType | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFCLIPVisionModel >>> model = TFCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" outputs = self.clip( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "clip", None) is not None: with tf.name_scope(self.clip.name): self.clip.build(None) @add_start_docstrings(CLIP_START_DOCSTRING) class TFCLIPModel(TFCLIPPreTrainedModel): 
config_class = CLIPConfig def __init__(self, config: CLIPConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.clip = TFCLIPMainLayer(config, name="clip") @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def get_text_features( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tf.Tensor: r""" Returns: text_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPTextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, TFCLIPModel >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") >>> text_features = model.get_text_features(**inputs) ```""" text_features = self.clip.get_text_features( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return text_features @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: TFModelInputType | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tf.Tensor: r""" Returns: image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPVisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFCLIPModel >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="tf") >>> image_features = model.get_image_features(**inputs) ```""" image_features = self.clip.get_image_features( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return image_features @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFCLIPOutput, config_class=CLIPConfig) def call( self, input_ids: TFModelInputType | None = None, pixel_values: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, return_loss: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFCLIPOutput | tuple[tf.Tensor]: r""" Returns: Examples: ```python >>> import tensorflow as tf >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, TFCLIPModel >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities ```""" outputs = self.clip( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, return_loss=return_loss, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return outputs def serving_output(self, output: TFCLIPOutput) -> TFCLIPOutput: # TODO: As is this currently fails with saved_model=True, because # TensorFlow cannot trace through nested dataclasses. Reference: # https://github.com/huggingface/transformers/pull/16886 return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "clip", None) is not None: with tf.name_scope(self.clip.name): self.clip.build(None) __all__ = ["TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel"]
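# A minimal, illustrative zero-shot classification sketch using the classes defined above. It assumes the
# publicly available "openai/clip-vit-base-patch32" checkpoint and network access, and mirrors the docstring
# example: `logits_per_image` has shape (num_images, num_texts), so a softmax over the text axis yields label
# probabilities. It is guarded by `__main__` so it only runs when the module is executed directly.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoProcessor

    checkpoint = "openai/clip-vit-base-patch32"
    processor = AutoProcessor.from_pretrained(checkpoint)
    model = TFCLIPModel.from_pretrained(checkpoint)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    candidate_labels = ["a photo of a cat", "a photo of a dog"]

    inputs = processor(text=candidate_labels, images=image, return_tensors="tf", padding=True)
    outputs = model(**inputs)

    probs = tf.nn.softmax(outputs.logits_per_image, axis=-1)
    for label, prob in zip(candidate_labels, probs[0].numpy().tolist()):
        print(f"{label}: {prob:.3f}")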
transformers/src/transformers/models/clip/modeling_tf_clip.py/0
{ "file_path": "transformers/src/transformers/models/clip/modeling_tf_clip.py", "repo_id": "transformers", "token_count": 25874 }
415
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert ColQwen2 weights from the original repository to the HF model format. Don't forget to manually upload the processor-related files to the HF model repository after running this script. Original repository: https://github.com/illuin-tech/colqwen2. NOTE: This script was originally run using `torch==2.5.1` and with: ```bash python src/transformers/models/colqwen2/convert_colqwen2_weights_to_hf.py \ --model_id vidore/colqwen2-v1.0-merged \ --revision eeccbae1d44bdcb0c83b1788127a2b2cad7d718e \ --original_vlm_name_or_path Qwen/Qwen2-VL-2B-Instruct \ --output_dir vidore/colqwen2-v1.0-hf-internal \ --push_to_hub ``` """ import argparse import glob from pathlib import Path from typing import Any, Optional import torch from huggingface_hub import snapshot_download from safetensors import safe_open from transformers import AutoConfig from transformers.models.colqwen2 import ColQwen2ForRetrieval from transformers.models.colqwen2.configuration_colqwen2 import ColQwen2Config from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) ORIGINAL_DTYPE = torch.bfloat16 def load_original_state_dict(model_id: str, revision: Optional[str] = None) -> dict[str, torch.Tensor]: directory_path = snapshot_download( repo_id=model_id, revision=revision, allow_patterns=["*.safetensors"], ) original_state_dict = {} for path in glob.glob(f"{directory_path}/*"): if path.endswith(".safetensors"): with safe_open(path, framework="pt", device="cpu") as f: for key in f.keys(): original_state_dict[key] = f.get_tensor(key) # Some weights are tied, so `lm.head`` is not saved. Let's clone to load state dict. if "lm_head.weight" not in original_state_dict: original_state_dict["lm_head.weight"] = original_state_dict["model.embed_tokens.weight"].clone() return original_state_dict def rename_state_dict_keys(state_dict: dict[str, Any]) -> dict[str, Any]: new_state_dict: dict[str, Any] = {} for key, value in state_dict.items(): if key.startswith("custom_text_proj"): new_key = key.replace("custom_text_proj", "embedding_proj_layer") else: # The original ColQwen2 inherits from Qwen2VL, so we simply need to add the `vlm.` prefix # to all remaining keys. if key.startswith("model."): key = key.replace("model.", "model.language_model.") if key.startswith("visual."): key = key.replace("visual.", "model.visual.") new_key = "vlm." 
+ key new_state_dict[new_key] = value return new_state_dict @torch.no_grad() def convert_colqwen2_weights_to_hf( model_id: str, output_dir: str, push_to_hub: bool, revision: Optional[str] = None, original_vlm_name_or_path: Optional[str] = None, ): # Load the original model data original_config = AutoConfig.from_pretrained( model_id, revision=revision, ) if original_vlm_name_or_path is not None: original_config._name_or_path = original_vlm_name_or_path if hasattr(original_config, "architectures"): delattr(original_config, "architectures") original_state_dict = load_original_state_dict(model_id, revision=revision) # Format the state_dict keys original_state_dict = rename_state_dict_keys(original_state_dict) # Create the new config config = ColQwen2Config( vlm_config=original_config, embedding_dim=128, # hardcoded in the original model ) config.model_type = "colqwen2" config.is_composition = False # Load the untrained model model = ColQwen2ForRetrieval(config=config).to("cpu").eval() print("Created model with new config and randomly initialized weights") # NOTE: The new model was initialized with float32 weights. We need to convert it to the desired precision. # There are two ways to set the model's dtype: # - Using `model.from_pretrained(..., dtype=dtype_precision)` doesn't convert the hyperparameters to the desired precision. # - Using `model.to(dtype_precision)` converts all values - including the hyperparameters - to the desired precision. # The following snippet allows a fine-grained control over the model's dtype, making sure that all # the new weights' dtypes match the original model. for param in model.parameters(): param.data = param.data.to(ORIGINAL_DTYPE) print(f"Converted the new model weights to `{ORIGINAL_DTYPE}`") # Load the original weights model.load_state_dict(original_state_dict) print("Loaded original model weights") # # Sanity check: ensure all keys are the same state_dict_keys_old = set(original_state_dict.keys()) state_dict_keys_new = set(model.state_dict().keys()) disjoint_keys = state_dict_keys_old.symmetric_difference(state_dict_keys_new) if disjoint_keys: raise ValueError(f"Incompatible keys: {disjoint_keys}") # Save the model if push_to_hub: model.push_to_hub(output_dir, private=True) print(f"Model pushed to the hub at `{output_dir}`") else: Path(output_dir).mkdir(exist_ok=True, parents=True) model.save_pretrained(output_dir) print(f"Model saved to `{output_dir}`") if __name__ == "__main__": parser = argparse.ArgumentParser( description=""" This script converts the original ColQwen2 model to the HF model format. Don't forget to manually upload the processor-related files to the HF model repository after running this script. 
Example usage: ```bash python src/transformers/models/colqwen2/convert_colqwen2_weights_to_hf.py \ --model_id vidore/colqwen2-v1.0-merged \ --revision eeccbae1d44bdcb0c83b1788127a2b2cad7d718e \ --original_vlm_name_or_path Qwen/Qwen2-VL-2B-Instruct \ --output_dir vidore/colqwen2-v1.0-hf-internal \ --push_to_hub ``` """ ) parser.add_argument( "--model_id", help="Model ID of the original model to convert", ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", ) parser.add_argument( "--push_to_hub", help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally", action="store_true", default=False, ) parser.add_argument( "--revision", help="Revision of the model to download", default=None, ) parser.add_argument( "--original_vlm_name_or_path", help="Name or path of the original VLM backbone model", default=None, ) args = parser.parse_args() convert_colqwen2_weights_to_hf( model_id=args.model_id, output_dir=args.output_dir, push_to_hub=args.push_to_hub, revision=args.revision, original_vlm_name_or_path=args.original_vlm_name_or_path, )
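As a quick editor-added illustration of the renaming step implemented by `rename_state_dict_keys` above, the snippet below applies it to a few made-up keys shaped like the original ColQwen2 checkpoint layout (the keys and tensor values are hypothetical, chosen only to show the mapping):

```python
import torch

dummy_state_dict = {
    "model.layers.0.self_attn.q_proj.weight": torch.zeros(1),  # language-model weights
    "visual.blocks.0.attn.qkv.weight": torch.zeros(1),         # vision-tower weights
    "custom_text_proj.weight": torch.zeros(1),                 # ColQwen2's projection head
}

renamed = rename_state_dict_keys(dummy_state_dict)
print(sorted(renamed))
# Expected mapping under the rules above:
#   model.*            -> vlm.model.language_model.*
#   visual.*           -> vlm.model.visual.*
#   custom_text_proj.* -> embedding_proj_layer.*
```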
transformers/src/transformers/models/colqwen2/convert_colqwen2_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/colqwen2/convert_colqwen2_weights_to_hf.py", "repo_id": "transformers", "token_count": 3034 }
416
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 ConvBERT model.""" from __future__ import annotations import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_convbert import ConvBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base" _CONFIG_FOR_DOC = "ConvBertConfig" # Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert class TFConvBertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config: ConvBertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.embedding_size], initializer=get_initializer(self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call def call( self, input_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, past_key_values_length=0, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. 
Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ if input_ids is None and inputs_embeds is None: raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims( tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0 ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFConvBertSelfAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) new_num_attention_heads = int(config.num_attention_heads / config.head_ratio) if new_num_attention_heads < 1: self.head_ratio = config.num_attention_heads num_attention_heads = 1 else: num_attention_heads = new_num_attention_heads self.head_ratio = config.head_ratio self.num_attention_heads = num_attention_heads self.conv_kernel_size = config.conv_kernel_size if config.hidden_size % self.num_attention_heads != 0: raise ValueError("hidden_size should be divisible by num_attention_heads") self.attention_head_size = config.hidden_size // config.num_attention_heads self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.key_conv_attn_layer = keras.layers.SeparableConv1D( self.all_head_size, self.conv_kernel_size, padding="same", activation=None, depthwise_initializer=get_initializer(1 / self.conv_kernel_size), pointwise_initializer=get_initializer(config.initializer_range), name="key_conv_attn_layer", ) self.conv_kernel_layer = keras.layers.Dense( self.num_attention_heads * self.conv_kernel_size, activation=None, name="conv_kernel_layer", kernel_initializer=get_initializer(config.initializer_range), ) self.conv_out_layer = keras.layers.Dense( self.all_head_size, activation=None, name="conv_out_layer", kernel_initializer=get_initializer(config.initializer_range), ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.config = config def transpose_for_scores(self, x, batch_size): # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): batch_size = 
shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer) conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer) conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1]) conv_kernel_layer = stable_softmax(conv_kernel_layer, axis=1) paddings = tf.constant( [ [ 0, 0, ], [int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)], [0, 0], ] ) conv_out_layer = self.conv_out_layer(hidden_states) conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size]) conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT") unfold_conv_out_layer = tf.stack( [ tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size]) for i in range(self.conv_kernel_size) ], axis=-1, ) conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size]) conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer) conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size]) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask value_layer = tf.reshape( mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size] ) value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]) context_layer = tf.concat([context_layer, conv_out], 2) context_layer = tf.reshape( context_layer, (batch_size, -1, self.head_ratio * self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, "key_conv_attn_layer", None) is not None: with tf.name_scope(self.key_conv_attn_layer.name): self.key_conv_attn_layer.build([None, None, self.config.hidden_size]) if getattr(self, "conv_kernel_layer", None) is not None: with tf.name_scope(self.conv_kernel_layer.name): self.conv_kernel_layer.build([None, None, self.all_head_size]) if getattr(self, "conv_out_layer", None) is not None: with tf.name_scope(self.conv_out_layer.name): self.conv_out_layer.build([None, None, self.config.hidden_size]) class TFConvBertSelfOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFConvBertAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self_attention = TFConvBertSelfAttention(config, name="self") self.dense_output = TFConvBertSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False): self_outputs = self.self_attention( input_tensor, attention_mask, head_mask, output_attentions, training=training ) attention_output = self.dense_output(self_outputs[0], input_tensor, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions 
if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class GroupedLinearLayer(keras.layers.Layer): def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs): super().__init__(**kwargs) self.input_size = input_size self.output_size = output_size self.num_groups = num_groups self.kernel_initializer = kernel_initializer self.group_in_dim = self.input_size // self.num_groups self.group_out_dim = self.output_size // self.num_groups def build(self, input_shape=None): self.kernel = self.add_weight( "kernel", shape=[self.group_out_dim, self.group_in_dim, self.num_groups], initializer=self.kernel_initializer, trainable=True, ) self.bias = self.add_weight( "bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True ) super().build(input_shape) def call(self, hidden_states): batch_size = shape_list(hidden_states)[0] x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2]) x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0])) x = tf.transpose(x, [1, 0, 2]) x = tf.reshape(x, [batch_size, -1, self.output_size]) x = tf.nn.bias_add(value=x, bias=self.bias) return x class TFConvBertIntermediate(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.num_groups == 1: self.dense = keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) else: self.dense = GroupedLinearLayer( config.hidden_size, config.intermediate_size, num_groups=config.num_groups, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFConvBertOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.num_groups == 1: self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) else: self.dense = GroupedLinearLayer( config.intermediate_size, config.hidden_size, num_groups=config.num_groups, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with 
tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFConvBertLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFConvBertAttention(config, name="attention") self.intermediate = TFConvBertIntermediate(config, name="intermediate") self.bert_output = TFConvBertOutput(config, name="output") def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions, training=training ) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.bert_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) class TFConvBertEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, training=training ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFConvBertPredictionHeadTransform(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, 
input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) @keras_serializable class TFConvBertMainLayer(keras.layers.Layer): config_class = ConvBertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.embeddings = TFConvBertEmbeddings(config, name="embeddings") if config.embedding_size != config.hidden_size: self.embeddings_project = keras.layers.Dense(config.hidden_size, name="embeddings_project") self.encoder = TFConvBertEncoder(config, name="encoder") self.config = config def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = value.shape[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def get_extended_attention_mask(self, attention_mask, input_shape, dtype): if attention_mask is None: attention_mask = tf.fill(input_shape, 1) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = tf.cast(extended_attention_mask, dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def get_head_mask(self, head_mask): if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers return head_mask @unpack_inputs def call( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) hidden_states = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype) head_mask = self.get_head_mask(head_mask) if hasattr(self, "embeddings_project"): hidden_states = self.embeddings_project(hidden_states, training=training) hidden_states = self.encoder( hidden_states, extended_attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training, ) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "embeddings_project", None) is not None: with tf.name_scope(self.embeddings_project.name): self.embeddings_project.build([None, None, self.config.embedding_size]) class TFConvBertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ConvBertConfig base_model_prefix = "convbert" CONVBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CONVBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.", CONVBERT_START_DOCSTRING, ) class TFConvBertModel(TFConvBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.convbert = TFConvBertMainLayer(config, name="convbert") @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.array | tf.Tensor | None = None, token_type_ids: np.array | tf.Tensor | None = None, position_ids: np.array | tf.Tensor | None = None, head_mask: np.array | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutput | tuple[tf.Tensor]: outputs = self.convbert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) class TFConvBertMaskedLMHead(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states class TFConvBertGeneratorPredictions(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.LayerNorm = 
keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dense = keras.layers.Dense(config.embedding_size, name="dense") self.config = config def call(self, generator_hidden_states, training=False): hidden_states = self.dense(generator_hidden_states) hidden_states = get_tf_activation("gelu")(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) @add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING) class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, **kwargs) self.config = config self.convbert = TFConvBertMainLayer(config, name="convbert") self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions") if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head") def get_lm_head(self): return self.generator_lm_head def get_prefix_bias_name(self): return self.name + "/" + self.generator_lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFMaskedLMOutput: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ generator_hidden_states = self.convbert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) generator_sequence_output = generator_hidden_states[0] prediction_scores = self.generator_predictions(generator_sequence_output, training=training) prediction_scores = self.generator_lm_head(prediction_scores, training=training) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + generator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "generator_predictions", None) is not None: with tf.name_scope(self.generator_predictions.name): self.generator_predictions.build(None) if getattr(self, "generator_lm_head", None) is not None: with tf.name_scope(self.generator_lm_head.name): self.generator_lm_head.build(None) class TFConvBertClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" ) self.config = config def call(self, hidden_states, **kwargs): x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = get_tf_activation(self.config.hidden_act)(x) x = self.dropout(x) x = self.out_proj(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks. 
""", CONVBERT_START_DOCSTRING, ) class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convbert = TFConvBertMainLayer(config, name="convbert") self.classifier = TFConvBertClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFSequenceClassifierOutput: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.convbert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) logits = self.classifier(outputs[0], training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", CONVBERT_START_DOCSTRING, ) class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.convbert = TFConvBertMainLayer(config, name="convbert") self.sequence_summary = TFSequenceSummary( config, initializer_range=config.initializer_range, name="sequence_summary" ) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward( CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFMultipleChoiceModelOutput: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.convbert( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) logits = self.sequence_summary(outputs[0], training=training) logits = self.classifier(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "sequence_summary", None) is not None: with tf.name_scope(self.sequence_summary.name): self.sequence_summary.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) 
@add_start_docstrings( """ ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, CONVBERT_START_DOCSTRING, ) class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convbert = TFConvBertMainLayer(config, name="convbert") classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFTokenClassifierOutput: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.convbert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", CONVBERT_START_DOCSTRING, ) class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convbert = TFConvBertMainLayer(config, name="convbert") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, start_positions: tf.Tensor | None = None, end_positions: tf.Tensor | None = None, training: bool | None = False, ) -> tuple | TFQuestionAnsweringModelOutput: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.convbert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) __all__ = [ "TFConvBertForMaskedLM", "TFConvBertForMultipleChoice", "TFConvBertForQuestionAnswering", "TFConvBertForSequenceClassification", "TFConvBertForTokenClassification", "TFConvBertLayer", "TFConvBertModel", "TFConvBertPreTrainedModel", ]
transformers/src/transformers/models/convbert/modeling_tf_convbert.py
# coding=utf-8 # Copyright 2018 Salesforce and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch CTRL model.""" from typing import Optional, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...cache_utils import DynamicCache from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( auto_docstring, logging, ) from .configuration_ctrl import CTRLConfig logger = logging.get_logger(__name__) def angle_defn(pos, i, d_model_size): angle_rates = 1 / torch.pow(10000, (2 * (i // 2)) / d_model_size) return pos * angle_rates def positional_encoding(position, d_model_size, dtype): # create the sinusoidal pattern for the positional encoding angle_rads = angle_defn( torch.arange(position, dtype=torch.int64).to(dtype).unsqueeze(1), torch.arange(d_model_size, dtype=torch.int64).to(dtype).unsqueeze(0), d_model_size, ) sines = torch.sin(angle_rads[:, 0::2]) cosines = torch.cos(angle_rads[:, 1::2]) pos_encoding = torch.cat([sines, cosines], dim=-1) return pos_encoding def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None): # calculate attention matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2)) dk = k.shape[-1] scaled_attention_logits = matmul_qk / np.sqrt(dk) if mask is not None: nd, ns = scaled_attention_logits.size(-2), scaled_attention_logits.size(-1) scaled_attention_logits += mask[ns - nd : ns, :ns] * -1e4 if attention_mask is not None: # Apply the attention mask scaled_attention_logits = scaled_attention_logits + attention_mask attention_weights = torch.softmax(scaled_attention_logits, dim=-1) # Mask heads if we want to if head_mask is not None: attention_weights = attention_weights * head_mask output = torch.matmul(attention_weights, v) return output, attention_weights class MultiHeadAttention(nn.Module): def __init__(self, d_model_size, num_heads, layer_idx=None): super().__init__() self.num_heads = num_heads self.d_model_size = d_model_size self.layer_idx = layer_idx self.depth = int(d_model_size / self.num_heads) self.Wq = nn.Linear(d_model_size, d_model_size) self.Wk = nn.Linear(d_model_size, d_model_size) self.Wv = nn.Linear(d_model_size, d_model_size) self.dense = nn.Linear(d_model_size, d_model_size) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.d_model_size // self.num_heads if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads) # Prune linear layers self.Wq = prune_linear_layer(self.Wq, index) self.Wk = prune_linear_layer(self.Wk, index) self.Wv = prune_linear_layer(self.Wv, index) self.dense = prune_linear_layer(self.dense, index, dim=1) # 
Update hyper params self.num_heads = self.num_heads - len(heads) self.d_model_size = attention_head_size * self.num_heads self.pruned_heads = self.pruned_heads.union(heads) def split_into_heads(self, x, batch_size): x = x.reshape(batch_size, -1, self.num_heads, self.depth) return x.permute([0, 2, 1, 3]) def forward( self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None, ): batch_size = q.shape[0] q = self.Wq(q) k = self.Wk(k) v = self.Wv(v) q = self.split_into_heads(q, batch_size) k = self.split_into_heads(k, batch_size) v = self.split_into_heads(v, batch_size) if layer_past is not None: k, v = layer_past.update(k, v, self.layer_idx, {"cache_position": cache_position}) output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask) scaled_attention = output[0].permute([0, 2, 1, 3]) attn = output[1] original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size) output = self.dense(original_size_attention) return output, attn def point_wise_feed_forward_network(d_model_size, dff): return nn.Sequential(nn.Linear(d_model_size, dff), nn.ReLU(), nn.Linear(dff, d_model_size)) class EncoderLayer(nn.Module): def __init__(self, d_model_size, num_heads, dff, rate=0.1, layer_idx=None): super().__init__() self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads, layer_idx=layer_idx) self.ffn = point_wise_feed_forward_network(d_model_size, dff) self.layernorm1 = nn.LayerNorm(d_model_size, eps=1e-6) self.layernorm2 = nn.LayerNorm(d_model_size, eps=1e-6) self.dropout1 = nn.Dropout(rate) self.dropout2 = nn.Dropout(rate) def forward( self, x, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None, ): normed = self.layernorm1(x) attn_outputs = self.multi_head_attention( normed, normed, normed, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) attn_output = attn_outputs[0] attn_output = self.dropout1(attn_output) out1 = x + attn_output out2 = self.layernorm2(out1) ffn_output = self.ffn(out2) ffn_output = self.dropout2(ffn_output) out2 = out1 + ffn_output outputs = (out2,) + attn_outputs[1:] return outputs @auto_docstring class CTRLPreTrainedModel(PreTrainedModel): config: CTRLConfig base_model_prefix = "transformer" def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class CTRLModel(CTRLPreTrainedModel): def __init__(self, config): super().__init__(config) self.d_model_size = config.n_embd self.num_layers = config.n_layer self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float) self.w = nn.Embedding(config.vocab_size, config.n_embd) self.dropout = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList( [ EncoderLayer(config.n_embd, config.n_head, 
config.dff, config.resid_pdrop, layer_idx=i) for i in range(config.n_layer) ] ) self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.w def set_input_embeddings(self, new_embeddings): self.w = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].multi_head_attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs, # NOOP kwargs, for now ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. 
[What are input IDs?](../glossary#input-ids) Example: ```python >>> from transformers import AutoTokenizer, CTRLModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl") >>> model = CTRLModel.from_pretrained("Salesforce/ctrl") >>> # CTRL was trained with control codes as the first token >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt") >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values() >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 5, 1280] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions use_cache = use_cache if use_cache is not None else self.config.use_cache output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if use_cache and past_key_values is None: past_key_values = DynamicCache() if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `DynamicCache` instead, e.g. " "`past_key_values=DynamicCache.from_legacy_cache(past_key_values)`." ) past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # Attention mask. if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = attention_mask.view(batch_size, -1) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
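            # For example, in float32 a "keep" entry of 1.0 maps to (1.0 - 1.0) * finfo.min == 0.0, while a
            # masked entry of 0.0 maps to roughly -3.4e38, so its attention weight collapses to ~0 after softmax.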
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.n_layer) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) token_type_embeds = self.w(token_type_ids) token_type_embeds *= np.sqrt(self.d_model_size) else: token_type_embeds = 0 if inputs_embeds is None: inputs_embeds = self.w(input_ids) # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded seq_len = input_shape[-1] mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(device) inputs_embeds *= np.sqrt(self.d_model_size) # `self.pos_encoding` won't be sent to the correct device along the model, so we do it manually. self.pos_encoding = self.pos_encoding.to(device) pos_embeds = self.pos_encoding[position_ids, :] hidden_states = inputs_embeds + pos_embeds + token_type_embeds hidden_states = self.dropout(hidden_states) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, h in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = h( hidden_states, mask, layer_past=past_key_values, attention_mask=attention_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = outputs[0] if output_attentions: all_attentions += (outputs[1],) hidden_states = self.layernorm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, ) @auto_docstring( custom_intro=""" The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """ ) class CTRLLMHeadModel(CTRLPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = CTRLModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple[torch.Tensor], CausalLMOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. 
If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Example: ```python >>> import torch >>> from transformers import AutoTokenizer, CTRLLMHeadModel >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl") >>> model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl") >>> # CTRL was trained with control codes as the first token >>> inputs = tokenizer("Wikipedia The llama is", return_tensors="pt") >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values() >>> sequence_ids = model.generate(inputs["input_ids"]) >>> sequences = tokenizer.batch_decode(sequence_ids) >>> sequences ['Wikipedia The llama is a member of the family Bovidae. It is native to the Andes of Peru,'] >>> outputs = model(**inputs, labels=inputs["input_ids"]) >>> round(outputs.loss.item(), 2) 9.21 >>> list(outputs.logits.shape) [1, 5, 246534] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = self.loss_function( lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, **kwargs): # Overwritten -- inputs_embeds not working properly # only last tokens for inputs_ids if past is defined in kwargs if past_key_values is not None: past_length = past_key_values.get_seq_length() # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {"input_ids": input_ids, "past_key_values": past_key_values, "use_cache": use_cache} @auto_docstring( custom_intro=""" The CTRL Model transformer with a sequence classification head on top (linear layer). [`CTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. 
If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """ ) class CTRLForSequenceClassification(CTRLPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = CTRLModel(config) self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Example of single-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, CTRLForSequenceClassification >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl") >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl") >>> # CTRL was trained with control codes as the first token >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt") >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values() >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] 'LABEL_0' ``` ```python >>> import torch >>> torch.manual_seed(42) # doctest: +IGNORE_RESULT >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels) >>> labels = torch.tensor(1) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) 0.93 ``` Example of multi-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, CTRLForSequenceClassification >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl") >>> model = CTRLForSequenceClassification.from_pretrained( ... "Salesforce/ctrl", problem_type="multi_label_classification" ... ) >>> # CTRL was trained with control codes as the first token >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt") >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values() >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] 'LABEL_0' ``` ```python >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels) >>> num_labels = len(model.config.id2label) >>> labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to( ... torch.float ... ) >>> loss = model(**inputs, labels=labels).loss >>> loss.backward() # doctest: +IGNORE_RESULT ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.classifier(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=pooled_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = ["CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel"]
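# A standalone sketch of the pooling trick used by `CTRLForSequenceClassification` above:
# the classification logit is taken at the rightmost non-padding token of each row.
# The pad_token_id of 0 and the toy input_ids below are illustrative assumptions only.
import torch

pad_token_id = 0
input_ids = torch.tensor(
    [
        [5, 7, 9, 0, 0],  # three real tokens, right-padded -> last real token at index 2
        [3, 0, 4, 6, 0],  # pad in the middle -> rightmost real token at index 3
    ]
)
non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1])
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
print(last_non_pad_token.tolist())  # [2, 3]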
transformers/src/transformers/models/ctrl/modeling_ctrl.py
# coding=utf-8 # Copyright 2024 IDEA Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch DAB-DETR model.""" import math from dataclasses import dataclass from typing import Optional, Union import torch from torch import Tensor, nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, auto_docstring, logging, ) from ...utils.backbone_utils import load_backbone from .configuration_dab_detr import DabDetrConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Base class for outputs of the Conditional DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. """ ) # Copied from transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrDecoderOutput with ConditionalDetr->DabDetr,Conditional DETR->DAB-DETR,2 (anchor points)->4 (anchor points) class DabDetrDecoderOutput(BaseModelOutputWithCrossAttentions): r""" cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`): Reference points (reference points of each layer of the decoder). """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[tuple[torch.FloatTensor]] = None @dataclass @auto_docstring( custom_intro=""" Base class for outputs of the Conditional DETR encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. 
""" ) # Copied from transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrModelOutput with ConditionalDetr->DabDetr,Conditional DETR->DAB-DETR,2 (anchor points)->4 (anchor points) class DabDetrModelOutput(Seq2SeqModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`): Reference points (reference points of each layer of the decoder). """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[tuple[torch.FloatTensor]] = None @dataclass @auto_docstring( custom_intro=""" Output type of [`DabDetrForObjectDetection`]. """ ) # Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->DabDetr class DabDetrObjectDetectionOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~DabDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. 
""" loss: Optional[torch.FloatTensor] = None loss_dict: Optional[dict] = None logits: Optional[torch.FloatTensor] = None pred_boxes: Optional[torch.FloatTensor] = None auxiliary_outputs: Optional[list[dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None encoder_attentions: Optional[tuple[torch.FloatTensor]] = None # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->DabDetr class DabDetrFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DabDetr def replace_batch_norm(model): r""" Recursively replace all `torch.nn.BatchNorm2d` with `DabDetrFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model """ for name, module in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = DabDetrFrozenBatchNorm2d(module.num_features) if module.weight.device != torch.device("meta"): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module) # Modified from transformers.models.detr.modeling_detr.DetrConvEncoder with Detr->DabDetr class DabDetrConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by DabDetrFrozenBatchNorm2d as defined above. 
""" def __init__(self, config: DabDetrConfig): super().__init__() self.config = config backbone = load_backbone(config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = self.model.channels def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out # Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->DabDetr class DabDetrConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: # position encoding pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return out, pos # Modified from transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrSinePositionEmbedding with ConditionalDetr->DabDetr class DabDetrSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__(self, config: DabDetrConfig): super().__init__() self.config = config self.embedding_dim = config.hidden_size / 2 self.temperature_height = config.temperature_height self.temperature_width = config.temperature_width scale = config.sine_position_embedding_scale if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale # We use float32 to ensure reproducibility of the original implementation dim_tx = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) # Modifying dim_tx in place to avoid extra memory allocation -> dim_tx = self.temperature_width ** (2 * (dim_tx // 2) / self.embedding_dim) dim_tx //= 2 dim_tx.mul_(2 / self.embedding_dim) dim_tx.copy_(self.temperature_width**dim_tx) pos_x = x_embed[:, :, :, None] / dim_tx # We use float32 to ensure reproducibility of the original implementation dim_ty = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) # Modifying dim_ty in place to avoid extra memory allocation -> dim_ty = self.temperature_height ** (2 * (dim_ty // 2) / self.embedding_dim) dim_ty //= 2 dim_ty.mul_(2 / self.embedding_dim) dim_ty.copy_(self.temperature_height**dim_ty) pos_y = y_embed[:, :, :, None] / dim_ty pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # function to generate sine positional embedding for 4d coordinates def gen_sine_position_embeddings(pos_tensor, hidden_size=256): """ This function computes position embeddings using sine and cosine functions from the input positional tensor, which has a shape of (batch_size, num_queries, 4). The last dimension of `pos_tensor` represents the following coordinates: - 0: x-coord - 1: y-coord - 2: width - 3: height The output shape is (batch_size, num_queries, 512), where final dim (hidden_size*2 = 512) is the total embedding dimension achieved by concatenating the sine and cosine values for each coordinate. 
""" scale = 2 * math.pi dim = hidden_size // 2 dim_t = torch.arange(dim, dtype=torch.float32, device=pos_tensor.device) dim_t = 10000 ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / dim) x_embed = pos_tensor[:, :, 0] * scale y_embed = pos_tensor[:, :, 1] * scale pos_x = x_embed[:, :, None] / dim_t pos_y = y_embed[:, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) if pos_tensor.size(-1) == 4: w_embed = pos_tensor[:, :, 2] * scale pos_w = w_embed[:, :, None] / dim_t pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2) h_embed = pos_tensor[:, :, 3] * scale pos_h = h_embed[:, :, None] / dim_t pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2) pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) else: raise ValueError(f"Unknown pos_tensor shape(-1):{pos_tensor.size(-1)}") return pos.to(pos_tensor.dtype) def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) # Modified from transformers.models.detr.modeling_detr.DetrAttention class DetrAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). """ def __init__( self, config: DabDetrConfig, bias: bool = True, ): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.encoder_attention_heads self.attention_dropout = config.attention_dropout self.head_dim = self.hidden_size // self.num_heads if self.head_dim * self.num_heads != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:" f" {self.num_heads})." 
) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=bias) self.v_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=bias) self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=bias) self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=bias) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, q_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if object_queries is not None: hidden_states_original = hidden_states hidden_states = hidden_states + object_queries query_states = self.q_proj(hidden_states) * self.scaling key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states_original) query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_len, embed_dim) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights # Modified from transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrAttention with ConditionalDetr->DABDETR,Conditional DETR->DabDetr class DabDetrAttention(nn.Module): """ Cross-Attention used in DAB-DETR 'DAB-DETR for Fast Training Convergence' paper. The key q_proj, k_proj, v_proj are defined outside the attention. This attention allows the dim of q, k to be different to v. """ def __init__(self, config: DabDetrConfig, bias: bool = True, is_cross: bool = False): super().__init__() self.config = config self.embed_dim = config.hidden_size * 2 if is_cross else config.hidden_size self.output_dim = config.hidden_size self.attention_heads = config.decoder_attention_heads self.attention_dropout = config.attention_dropout self.attention_head_dim = self.embed_dim // self.attention_heads if self.attention_head_dim * self.attention_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `attention_heads`:" f" {self.attention_heads})." 
) # head dimension of values self.values_head_dim = self.output_dim // self.attention_heads if self.values_head_dim * self.attention_heads != self.output_dim: raise ValueError( f"output_dim must be divisible by attention_heads (got `output_dim`: {self.output_dim} and `attention_heads`: {self.attention_heads})." ) self.scaling = self.attention_head_dim**-0.5 self.output_proj = nn.Linear(self.output_dim, self.output_dim, bias=bias) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, key_states: Optional[torch.Tensor] = None, value_states: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, q_len, _ = hidden_states.size() # scaling query and refactor key-, value states query_states = hidden_states * self.scaling query_states = query_states.view(batch_size, -1, self.attention_heads, self.attention_head_dim).transpose(1, 2) key_states = key_states.view(batch_size, -1, self.attention_heads, self.attention_head_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.attention_heads, self.values_head_dim).transpose(1, 2) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_probs = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_probs, value_states) if attn_output.size() != (batch_size, self.attention_heads, q_len, self.values_head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.attention_heads, q_len, self.values_head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_len, self.output_dim) attn_output = self.output_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights class DabDetrDecoderLayerSelfAttention(nn.Module): def __init__(self, config: DabDetrConfig): super().__init__() self.dropout = config.dropout self.self_attn_query_content_proj = nn.Linear(config.hidden_size, config.hidden_size) self.self_attn_query_pos_proj = nn.Linear(config.hidden_size, config.hidden_size) self.self_attn_key_content_proj = nn.Linear(config.hidden_size, config.hidden_size) self.self_attn_key_pos_proj = nn.Linear(config.hidden_size, config.hidden_size) self.self_attn_value_proj = nn.Linear(config.hidden_size, config.hidden_size) self.self_attn = DabDetrAttention(config) self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size) def forward( self, hidden_states: torch.Tensor, query_position_embeddings: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, ): residual = hidden_states query_content = self.self_attn_query_content_proj(hidden_states) query_pos = self.self_attn_query_pos_proj(query_position_embeddings) key_content = self.self_attn_key_content_proj(hidden_states) key_pos = self.self_attn_key_pos_proj(query_position_embeddings) value = self.self_attn_value_proj(hidden_states) query = query_content + query_pos key = key_content + key_pos hidden_states, attn_weights = self.self_attn( hidden_states=query, attention_mask=attention_mask, key_states=key, value_states=value, output_attentions=True, ) 
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) return hidden_states, attn_weights class DabDetrDecoderLayerCrossAttention(nn.Module): def __init__(self, config: DabDetrConfig, is_first: bool = False): super().__init__() hidden_size = config.hidden_size self.cross_attn_query_content_proj = nn.Linear(hidden_size, hidden_size) self.cross_attn_query_pos_proj = nn.Linear(hidden_size, hidden_size) self.cross_attn_key_content_proj = nn.Linear(hidden_size, hidden_size) self.cross_attn_key_pos_proj = nn.Linear(hidden_size, hidden_size) self.cross_attn_value_proj = nn.Linear(hidden_size, hidden_size) self.cross_attn_query_pos_sine_proj = nn.Linear(hidden_size, hidden_size) self.decoder_attention_heads = config.decoder_attention_heads self.cross_attn_layer_norm = nn.LayerNorm(hidden_size) self.cross_attn = DabDetrAttention(config, is_cross=True) self.keep_query_pos = config.keep_query_pos if not self.keep_query_pos and not is_first: self.cross_attn_query_pos_proj = None self.is_first = is_first self.dropout = config.dropout def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, query_position_embeddings: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, query_sine_embed: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, ): query_content = self.cross_attn_query_content_proj(hidden_states) key_content = self.cross_attn_key_content_proj(encoder_hidden_states) value = self.cross_attn_value_proj(encoder_hidden_states) batch_size, num_queries, n_model = query_content.shape _, height_width, _ = key_content.shape key_pos = self.cross_attn_key_pos_proj(object_queries) # For the first decoder layer, we add the positional embedding predicted from # the object query (the positional embedding) into the original query (key) in DETR. 
if self.is_first or self.keep_query_pos: query_pos = self.cross_attn_query_pos_proj(query_position_embeddings) query = query_content + query_pos key = key_content + key_pos else: query = query_content key = key_content query = query.view( batch_size, num_queries, self.decoder_attention_heads, n_model // self.decoder_attention_heads ) query_sine_embed = self.cross_attn_query_pos_sine_proj(query_sine_embed) query_sine_embed = query_sine_embed.view( batch_size, num_queries, self.decoder_attention_heads, n_model // self.decoder_attention_heads ) query = torch.cat([query, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2) key = key.view(batch_size, height_width, self.decoder_attention_heads, n_model // self.decoder_attention_heads) key_pos = key_pos.view( batch_size, height_width, self.decoder_attention_heads, n_model // self.decoder_attention_heads ) key = torch.cat([key, key_pos], dim=3).view(batch_size, height_width, n_model * 2) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.cross_attn( hidden_states=query, attention_mask=encoder_attention_mask, key_states=key, value_states=value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.cross_attn_layer_norm(hidden_states) return hidden_states, cross_attn_weights class DabDetrDecoderLayerFFN(nn.Module): def __init__(self, config: DabDetrConfig): super().__init__() hidden_size = config.hidden_size self.final_layer_norm = nn.LayerNorm(hidden_size) self.fc1 = nn.Linear(hidden_size, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, hidden_size) self.activation_fn = ACT2FN[config.activation_function] self.dropout = config.dropout self.activation_dropout = config.activation_dropout self.keep_query_pos = config.keep_query_pos def forward(self, hidden_states: torch.Tensor): residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return hidden_states # Modified from transformers.models.detr.modeling_detr.DetrEncoderLayer with DetrEncoderLayer->DabDetrEncoderLayer,DetrConfig->DabDetrConfig class DabDetrEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: DabDetrConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = DetrAttention(config) self.self_attn_layer_norm = nn.LayerNorm(self.hidden_size) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.fc1 = nn.Linear(self.hidden_size, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.hidden_size) self.final_layer_norm = nn.LayerNorm(self.hidden_size) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor, output_attentions: Optional[bool] = None, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, source_len)` where padding elements are indicated by very large negative values. 
object_queries (`torch.FloatTensor`, *optional*): Object queries (also called content embeddings), to be added to the hidden states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Modified from transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrDecoderLayer with ConditionalDetr->DabDetr class DabDetrDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: DabDetrConfig, is_first: bool = False): super().__init__() self.self_attn = DabDetrDecoderLayerSelfAttention(config) self.cross_attn = DabDetrDecoderLayerCrossAttention(config, is_first) self.mlp = DabDetrDecoderLayerFFN(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, query_position_embeddings: Optional[torch.Tensor] = None, query_sine_embed: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): object_queries that are added to the queries and keys in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*): object_queries that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, query_position_embeddings=query_position_embeddings, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states, cross_attn_weights = self.cross_attn( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, query_position_embeddings=query_position_embeddings, object_queries=object_queries, encoder_attention_mask=encoder_attention_mask, query_sine_embed=query_sine_embed, output_attentions=output_attentions, ) hidden_states = self.mlp(hidden_states=hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Modified from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with DetrMLPPredictionHead->DabDetrMLP class DabDetrMLP(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, input_tensor): for i, layer in enumerate(self.layers): input_tensor = nn.functional.relu(layer(input_tensor)) if i < self.num_layers - 1 else layer(input_tensor) return input_tensor # Modified from transformers.models.detr.modeling_detr.DetrPreTrainedModel with Detr->DabDetr @auto_docstring class DabDetrPreTrainedModel(PreTrainedModel): config: DabDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" _no_split_modules = [r"DabDetrConvEncoder", r"DabDetrEncoderLayer", r"DabDetrDecoderLayer"] def _init_weights(self, module): std = self.config.init_std xavier_std = self.config.init_xavier_std if isinstance(module, DabDetrMHAttentionMap): nn.init.zeros_(module.k_linear.bias) nn.init.zeros_(module.q_linear.bias) nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std) nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std) if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, DabDetrForObjectDetection): nn.init.constant_(module.bbox_predictor.layers[-1].weight.data, 0) nn.init.constant_(module.bbox_predictor.layers[-1].bias.data, 0) # init prior_prob setting for focal loss prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1) bias_value = -math.log((1 - prior_prob) / prior_prob) module.class_embed.bias.data.fill_(bias_value) elif isinstance(module, nn.PReLU): module.reset_parameters() # Modified from transformers.models.detr.modeling_detr.DetrEncoder with Detr->DabDetr,DETR->ConditionalDETR class DabDetrEncoder(DabDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`DabDetrEncoderLayer`]. 
The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for DAB-DETR: - object_queries are added to the forward pass. Args: config: DabDetrConfig """ def __init__(self, config: DabDetrConfig): super().__init__(config) self.dropout = config.dropout self.query_scale = DabDetrMLP(config.hidden_size, config.hidden_size, config.hidden_size, 2) self.layers = nn.ModuleList([DabDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) self.norm = nn.LayerNorm(config.hidden_size) if config.normalize_before else None self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds, attention_mask, object_queries, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) object_queries (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`): Object queries that are added to the queries in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # pos scaler pos_scales = self.query_scale(hidden_states) # we add object_queries * pos_scaler as extra input to the encoder_layer scaled_object_queries = object_queries * pos_scales layer_outputs = encoder_layer( hidden_states, attention_mask=attention_mask, object_queries=scaled_object_queries, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.norm: hidden_states = self.norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Modified from transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrDecoder with ConditionalDetr->DabDetr,Conditional DETR->DAB-DETR class DabDetrDecoder(DabDetrPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DabDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for DAB-DETR: - object_queries and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. 
Args: config: DabDetrConfig """ def __init__(self, config: DabDetrConfig): super().__init__(config) self.config = config self.dropout = config.dropout self.num_layers = config.decoder_layers self.gradient_checkpointing = False self.layers = nn.ModuleList( [DabDetrDecoderLayer(config, is_first=(layer_id == 0)) for layer_id in range(config.decoder_layers)] ) # in DAB-DETR, the decoder uses layernorm after the last decoder layer output self.hidden_size = config.hidden_size self.layernorm = nn.LayerNorm(self.hidden_size) # Default cond-elewise self.query_scale = DabDetrMLP(self.hidden_size, self.hidden_size, self.hidden_size, 2) self.ref_point_head = DabDetrMLP( config.query_dim // 2 * self.hidden_size, self.hidden_size, self.hidden_size, 2 ) self.bbox_embed = None # Default decoder_modulate_hw_attn is True self.ref_anchor_head = DabDetrMLP(self.hidden_size, self.hidden_size, 2, 2) # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds, encoder_hidden_states, memory_key_padding_mask, object_queries, query_position_embeddings, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`): The query embeddings that are passed into the decoder. encoder_hidden_states (`torch.FloatTensor` of shape `(encoder_sequence_length, batch_size, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. memory_key_padding_mask (`torch.Tensor.bool` of shape `(batch_size, sequence_length)`): The memory_key_padding_mask indicates which positions in the memory (encoder outputs) should be ignored during the attention computation, ensuring padding tokens do not influence the attention mechanism. object_queries (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(num_queries, batch_size, number_of_anchor_points)`): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None intermediate = [] reference_points = query_position_embeddings.sigmoid() ref_points = [reference_points] # expand encoder attention mask if encoder_hidden_states is not None and memory_key_padding_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] memory_key_padding_mask = _prepare_4d_attention_mask( memory_key_padding_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) for layer_id, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) obj_center = reference_points[..., : self.config.query_dim] query_sine_embed = gen_sine_position_embeddings(obj_center, self.hidden_size) query_pos = self.ref_point_head(query_sine_embed) # For the first decoder layer, we do not apply transformation over p_s pos_transformation = 1 if layer_id == 0 else self.query_scale(hidden_states) # apply transformation query_sine_embed = query_sine_embed[..., : self.hidden_size] * pos_transformation # modulated Height Width attentions reference_anchor_size = self.ref_anchor_head(hidden_states).sigmoid() # nq, bs, 2 query_sine_embed[..., self.hidden_size // 2 :] *= ( reference_anchor_size[..., 0] / obj_center[..., 2] ).unsqueeze(-1) query_sine_embed[..., : self.hidden_size // 2] *= ( reference_anchor_size[..., 1] / obj_center[..., 3] ).unsqueeze(-1) layer_outputs = decoder_layer( hidden_states, None, # attention_mask object_queries, query_pos, query_sine_embed, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=memory_key_padding_mask, output_attentions=output_attentions, ) # iter update hidden_states = layer_outputs[0] if self.bbox_embed is not None: new_reference_points = self.bbox_embed(hidden_states) new_reference_points[..., : self.config.query_dim] += inverse_sigmoid(reference_points) new_reference_points = new_reference_points[..., : self.config.query_dim].sigmoid() if layer_id != self.num_layers - 1: ref_points.append(new_reference_points) reference_points = new_reference_points.detach() intermediate.append(self.layernorm(hidden_states)) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # Layer normalization on hidden states hidden_states = self.layernorm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) output_intermediate_hidden_states = torch.stack(intermediate) output_reference_points = torch.stack(ref_points) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, output_intermediate_hidden_states, output_reference_points, ] if v is not None ) return DabDetrDecoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=output_intermediate_hidden_states, 
reference_points=output_reference_points, ) @auto_docstring( custom_intro=""" The bare DAB-DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states, intermediate hidden states, reference points, output coordinates without any specific head on top. """ ) class DabDetrModel(DabDetrPreTrainedModel): def __init__(self, config: DabDetrConfig): super().__init__(config) self.auxiliary_loss = config.auxiliary_loss # Create backbone + positional encoding self.backbone = DabDetrConvEncoder(config) object_queries = DabDetrSinePositionEmbedding(config) self.query_refpoint_embeddings = nn.Embedding(config.num_queries, config.query_dim) self.random_refpoints_xy = config.random_refpoints_xy if self.random_refpoints_xy: self.query_refpoint_embeddings.weight.data[:, :2].uniform_(0, 1) self.query_refpoint_embeddings.weight.data[:, :2] = inverse_sigmoid( self.query_refpoint_embeddings.weight.data[:, :2] ) self.query_refpoint_embeddings.weight.data[:, :2].requires_grad = False # Create projection layer self.input_projection = nn.Conv2d( self.backbone.intermediate_channel_sizes[-1], config.hidden_size, kernel_size=1 ) self.backbone = DabDetrConvModel(self.backbone, object_queries) self.encoder = DabDetrEncoder(config) self.decoder = DabDetrDecoder(config) # decoder related variables self.hidden_size = config.hidden_size self.num_queries = config.num_queries self.num_patterns = config.num_patterns if not isinstance(self.num_patterns, int): logger.warning(f"num_patterns should be int but {type(self.num_patterns)}") self.num_patterns = 0 if self.num_patterns > 0: self.patterns = nn.Embedding(self.num_patterns, self.hidden_size) self.aux_loss = config.auxiliary_loss # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) @auto_docstring def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.FloatTensor], DabDetrModelOutput]: r""" decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. 
Examples: ```python >>> from transformers import AutoImageProcessor, AutoModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("IDEA-Research/dab_detr-base") >>> model = AutoModel.from_pretrained("IDEA-Research/dab_detr-base") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # the last hidden states are the final query embeddings of the Transformer decoder >>> # these are of shape (batch_size, num_queries, hidden_size) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, _, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), device=device) # First, sent pixel_values + pixel_mask through Backbone to obtain the features # pixel_values should be of shape (batch_size, num_channels, height, width) # pixel_mask should be of shape (batch_size, height, width) features, object_queries_list = self.backbone(pixel_values, pixel_mask) # get final feature map and downsampled mask feature_map, mask = features[-1] if mask is None: raise ValueError("Backbone does not return downsampled pixel mask") flattened_mask = mask.flatten(1) # Second, apply 1x1 convolution to reduce the channel dimension to hidden_size (256 by default) projected_feature_map = self.input_projection(feature_map) # Third, flatten the feature map + object_queries of shape NxCxHxW to HWxNxC, and permute it to NxHWxC # In other words, turn their shape into ( sequence_length, batch_size, hidden_size) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) reference_position_embeddings = self.query_refpoint_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) # Fourth, sent flattened_features + flattened_mask + object_queries through encoder # flattened_features is a Tensor of shape (height*width, batch_size, hidden_size) # flattened_mask is a Tensor of shape (batch_size, height*width) if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, sent query embeddings + object_queries through the decoder (which is conditioned on the encoder output) num_queries = reference_position_embeddings.shape[1] if self.num_patterns == 0: queries = torch.zeros(batch_size, num_queries, self.hidden_size, device=device) else: queries = ( 
self.patterns.weight[:, None, None, :] .repeat(1, self.num_queries, batch_size, 1) .flatten(0, 1) .permute(1, 0, 2) ) # bs, n_q*n_pat, hidden_size reference_position_embeddings = reference_position_embeddings.repeat( 1, self.num_patterns, 1 ) # bs, n_q*n_pat, hidden_size # decoder outputs consists of (dec_features, dec_hidden, dec_attn) decoder_outputs = self.decoder( inputs_embeds=queries, query_position_embeddings=reference_position_embeddings, object_queries=object_queries, encoder_hidden_states=encoder_outputs[0], memory_key_padding_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: # last_hidden_state output = (decoder_outputs[0],) reference_points = decoder_outputs[-1] intermediate_hidden_states = decoder_outputs[-2] # it has to follow the order of DABDETRModelOutput that is based on ModelOutput # If we only use one of the variables then the indexing will change. # E.g: if we return everything then 'decoder_attentions' is decoder_outputs[2], if we only use output_attentions then its decoder_outputs[1] if output_hidden_states and output_attentions: output += ( decoder_outputs[1], decoder_outputs[2], decoder_outputs[3], encoder_outputs[0], encoder_outputs[1], encoder_outputs[2], ) elif output_hidden_states: # decoder_hidden_states, encoder_last_hidden_state, encoder_hidden_states output += ( decoder_outputs[1], encoder_outputs[0], encoder_outputs[1], ) elif output_attentions: # decoder_self_attention, decoder_cross_attention, encoder_attentions output += ( decoder_outputs[1], decoder_outputs[2], encoder_outputs[1], ) output += (intermediate_hidden_states, reference_points) return output reference_points = decoder_outputs.reference_points intermediate_hidden_states = decoder_outputs.intermediate_hidden_states return DabDetrModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states if output_hidden_states else None, decoder_attentions=decoder_outputs.attentions if output_attentions else None, cross_attentions=decoder_outputs.cross_attentions if output_attentions else None, encoder_last_hidden_state=encoder_outputs.last_hidden_state if output_hidden_states else None, encoder_hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, encoder_attentions=encoder_outputs.attentions if output_attentions else None, intermediate_hidden_states=intermediate_hidden_states, reference_points=reference_points, ) # Copied from transformers.models.detr.modeling_detr.DetrMHAttentionMap with Detr->DabDetr class DabDetrMHAttentionMap(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 def forward(self, q, k, mask: Optional[Tensor] = None): q = self.q_linear(q) k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) weights = 
torch.einsum("bqnc,bnchw->bqnhw", queries_per_head * self.normalize_fact, keys_per_head) if mask is not None: weights = weights.masked_fill(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min) weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size()) weights = self.dropout(weights) return weights @auto_docstring( custom_intro=""" DAB_DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection. """ ) class DabDetrForObjectDetection(DabDetrPreTrainedModel): # When using clones, all layers > 0 will be clones, but layer 0 *is* required _tied_weights_keys = [ r"bbox_predictor\.layers\.\d+\.(weight|bias)", r"model\.decoder\.bbox_embed\.layers\.\d+\.(weight|bias)", ] def __init__(self, config: DabDetrConfig): super().__init__(config) self.config = config self.auxiliary_loss = config.auxiliary_loss self.query_dim = config.query_dim # DAB-DETR encoder-decoder model self.model = DabDetrModel(config) _bbox_embed = DabDetrMLP(config.hidden_size, config.hidden_size, 4, 3) # Object detection heads self.class_embed = nn.Linear(config.hidden_size, config.num_labels) # Default bbox_embed_diff_each_layer is False self.bbox_predictor = _bbox_embed # Default iter_update is True self.model.decoder.bbox_embed = self.bbox_predictor # Initialize weights and apply final processing self.post_init() # taken from https://github.com/Atten4Vis/conditionalDETR/blob/master/models/dab_detr.py @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @auto_docstring def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[list[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.FloatTensor], DabDetrObjectDetectionOutput]: r""" decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). 
The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("IDEA-Research/dab-detr-resnet-50") >>> model = AutoModelForObjectDetection.from_pretrained("IDEA-Research/dab-detr-resnet-50") >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([(image.height, image.width)]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, sent images through DAB_DETR base model to obtain encoder + decoder outputs model_outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) reference_points = model_outputs.reference_points if return_dict else model_outputs[-1] intermediate_hidden_states = model_outputs.intermediate_hidden_states if return_dict else model_outputs[-2] # class logits + predicted bounding boxes logits = self.class_embed(intermediate_hidden_states[-1]) reference_before_sigmoid = inverse_sigmoid(reference_points) bbox_with_refinement = self.bbox_predictor(intermediate_hidden_states) bbox_with_refinement[..., : self.query_dim] += reference_before_sigmoid outputs_coord = bbox_with_refinement.sigmoid() pred_boxes = outputs_coord[-1] loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: outputs_class = None if self.config.auxiliary_loss: outputs_class = self.class_embed(intermediate_hidden_states) loss, loss_dict, auxiliary_outputs = self.loss_function( logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord ) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + model_outputs else: output = (logits, pred_boxes) + model_outputs # Since 
DabDetrObjectDetectionOutput doesn't have reference points + intermediate_hidden_states, we cut them off.
            return ((loss, loss_dict) + output) if loss is not None else output[:-2]

        return DabDetrObjectDetectionOutput(
            loss=loss,
            loss_dict=loss_dict,
            logits=logits,
            pred_boxes=pred_boxes,
            auxiliary_outputs=auxiliary_outputs,
            last_hidden_state=model_outputs.last_hidden_state,
            decoder_hidden_states=model_outputs.decoder_hidden_states if output_hidden_states else None,
            decoder_attentions=model_outputs.decoder_attentions if output_attentions else None,
            cross_attentions=model_outputs.cross_attentions if output_attentions else None,
            encoder_last_hidden_state=model_outputs.encoder_last_hidden_state if output_hidden_states else None,
            encoder_hidden_states=model_outputs.encoder_hidden_states if output_hidden_states else None,
            encoder_attentions=model_outputs.encoder_attentions if output_attentions else None,
        )


__all__ = [
    "DabDetrForObjectDetection",
    "DabDetrModel",
    "DabDetrPreTrainedModel",
]
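# ---------------------------------------------------------------------------
# A minimal, standalone sketch of the iterative anchor refinement performed in
# `DabDetrDecoder.forward` above: each decoder layer predicts a box delta,
# adds it to the current reference points in inverse-sigmoid space, squashes
# the result back to [0, 1], and detaches it so the next layer refines the
# updated anchors. The helper names below (`inverse_sigmoid_example`,
# `refine_reference_points`) are illustrative stand-ins, not library APIs, and
# `query_dim=4` assumes the default (cx, cy, w, h) anchor format.
import torch


def inverse_sigmoid_example(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # numerically stable logit: maps [0, 1] anchors to unbounded space
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))


def refine_reference_points(reference_points, hidden_states, bbox_embed, query_dim: int = 4):
    # predict a delta for (cx, cy, w, h) from this layer's decoder output
    delta = bbox_embed(hidden_states)[..., :query_dim]
    # update the anchors in unbounded space, then map back to [0, 1]
    new_reference_points = (delta + inverse_sigmoid_example(reference_points)).sigmoid()
    # detach so the anchor update is not back-propagated across decoder layers
    return new_reference_points.detach()
# ---------------------------------------------------------------------------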
transformers/src/transformers/models/dab_detr/modeling_dab_detr.py/0
# coding=utf-8 # Copyright 2022 Meta Platforms and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 Data2Vec Vision model.""" from __future__ import annotations import collections.abc import math from dataclasses import dataclass import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSemanticSegmenterOutput, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_data2vec_vision import Data2VecVisionConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "Data2VecVisionConfig" # Base docstring _CHECKPOINT_FOR_DOC = "facebook/data2vec-vision-base" _EXPECTED_OUTPUT_SHAPE = [1, 197, 768] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "facebook/data2vec-vision-base-ft1k" _IMAGE_CLASS_EXPECTED_OUTPUT = "remote control, remote" @dataclass class TFData2VecVisionModelOutputWithPooling(TFBaseModelOutputWithPooling): """ Class for outputs of [`TFData2VecVisionModel`]. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token will be returned. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor | None = None pooler_output: tf.Tensor | None = None hidden_states: tuple[tf.Tensor] | None = None attentions: tuple[tf.Tensor] | None = None class TFData2VecVisionDropPath(keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
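    During training, each example's residual branch is zeroed out with probability `drop_path` and the
    surviving branches are rescaled by `1 / keep_prob`; at inference time the layer is an identity.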
References: (1) github.com:rwightman/pytorch-image-models """ def __init__(self, drop_path, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path def call(self, x, training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) random_tensor = tf.floor(random_tensor) return (x / keep_prob) * random_tensor return x class TFData2VecVisionEmbeddings(keras.layers.Layer): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. """ def __init__(self, config: Data2VecVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.patch_embeddings = TFData2VecVisionPatchEmbeddings(config, name="patch_embeddings") self.num_patches = self.patch_embeddings.num_patches self.config = config self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) def build(self, input_shape=None): self.cls_token = self.add_weight( shape=(1, 1, self.config.hidden_size), initializer=tf.random_normal_initializer(stddev=self.config.initializer_range), trainable=True, name="cls_token", ) if self.config.use_mask_token: self.mask_token = self.add_weight( shape=(1, 1, self.config.hidden_size), initializer=tf.random_normal_initializer(stddev=self.config.initializer_range), trainable=True, name="mask_token", ) else: self.mask_token = None if self.config.use_absolute_position_embeddings: self.position_embeddings = self.add_weight( shape=(1, self.num_patches + 1, self.config.hidden_size), initializer=tf.random_normal_initializer(stddev=self.config.initializer_range), trainable=True, name="position_embeddings", ) else: self.position_embeddings = None if self.built: return self.built = True if getattr(self, "patch_embeddings", None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build(None) def call(self, pixel_values: tf.Tensor, bool_masked_pos: tf.Tensor | None = None) -> tf.Tensor: embeddings = self.patch_embeddings(pixel_values) batch_size, seq_len, projection_dim = shape_list(embeddings) cls_tokens = tf.tile(self.cls_token, (batch_size, 1, 1)) if bool_masked_pos is not None: mask_tokens = tf.broadcast_to(self.mask_token, (batch_size, seq_len, projection_dim)) # replace the masked visual tokens by mask_tokens w = bool_masked_pos[..., None] w = tf.cast(w, mask_tokens.dtype) # since TF doesn't support eager tensor assignment embeddings = embeddings * (1 - w) + mask_tokens * w embeddings = tf.concat([cls_tokens, embeddings], axis=1) if self.position_embeddings is not None: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class TFData2VecVisionPatchEmbeddings(keras.layers.Layer): """ Image to Patch Embedding. 
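    The image is split into non-overlapping patches by a strided `Conv2D` (kernel size = stride = patch size),
    and each patch is projected to `hidden_size`, yielding a sequence of shape `(batch_size, num_patches, hidden_size)`.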
""" def __init__(self, config: Data2VecVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.patch_shape = patch_shape self.num_channels = num_channels self.projection = keras.layers.Conv2D( filters=hidden_size, kernel_size=patch_size, strides=patch_size, padding="valid", data_format="channels_last", kernel_initializer="glorot_uniform", # following torch.nn.Linear bias_initializer="zeros", name="projection", ) def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor: batch_size, num_channels, height, width = shape_list(pixel_values) if tf.executing_eagerly(): if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the" " configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) projection = self.projection(pixel_values) # Change the 2D spatial dimensions to a single temporal dimension. 
# shape = (batch_size, num_patches, out_channels=embed_dim) num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0]) return tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) class TFData2VecVisionSelfAttention(keras.layers.Layer): def __init__(self, config: Data2VecVisionConfig, window_size: tuple | None = None, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", use_bias=False, ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) if window_size: self.relative_position_bias = TFData2VecVisionRelativePositionBias( config, window_size=window_size, name="relative_position_bias" ) else: self.relative_position_bias = None self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, relative_position_bias: TFData2VecVisionRelativePositionBias | None = None, training: bool = False, ) -> tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) mixed_key_layer = self.key(inputs=hidden_states) mixed_value_layer = self.value(inputs=hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) attention_scores = attention_scores / self.sqrt_att_head_size # Add relative position bias if present. if self.relative_position_bias is not None: # Passing `0.0` to the `relative_position_bias()` layer because otherwise Keras # might complain about `Layer.call()` not being invoked properly. In this case this input # i.e., 0.0 is not going to be used in any calculations so we're safe. 
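        # `self.relative_position_bias(0.0)` returns a tensor of shape (num_heads, seq_len, seq_len);
        # `[None, ...]` adds a batch axis so it broadcasts over the batch dimension of `attention_scores`.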
attention_scores = attention_scores + self.relative_position_bias(0.0)[None, ...] # Add shared relative position bias if provided. if relative_position_bias is not None: attention_scores = attention_scores + relative_position_bias # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, "relative_position_bias", None) is not None: with tf.name_scope(self.relative_position_bias.name): self.relative_position_bias.build(None) class TFData2VecVisionSelfOutput(keras.layers.Layer): """ The residual connection is defined in TFData2VecVisionLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: Data2VecVisionConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, gamma=None, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFData2VecVisionAttention(keras.layers.Layer): def __init__(self, config: Data2VecVisionConfig, window_size: tuple | None = None, **kwargs): super().__init__(**kwargs) self.attention = TFData2VecVisionSelfAttention(config, window_size=window_size, name="attention") self.dense_output = TFData2VecVisionSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, relative_position_bias: TFData2VecVisionRelativePositionBias | None = None, training: bool = False, ) -> tuple[tf.Tensor]: self_outputs = self.attention( hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) # Copied from transformers.models.vit.modeling_tf_vit.TFViTIntermediate with ViT->Data2VecVision class TFData2VecVisionIntermediate(keras.layers.Layer): def __init__(self, config: Data2VecVisionConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFData2VecVisionOutput(keras.layers.Layer): def __init__(self, config: Data2VecVisionConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) 
hidden_states = self.dropout(inputs=hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFData2VecVisionLayer(keras.layers.Layer): """This corresponds to the Block class in the timm implementation.""" def __init__( self, config: Data2VecVisionConfig, window_size: tuple | None = None, drop_path_rate: float = 0.0, **kwargs ): super().__init__(**kwargs) self.config = config self.attention = TFData2VecVisionAttention(config, window_size=window_size, name="attention") self.intermediate = TFData2VecVisionIntermediate(config, name="intermediate") self.data2vec_output = TFData2VecVisionOutput(config, name="output") self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before") self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after") # Using `layers.Activation` instead of `tf.identity` to better control `training` # behaviour. self.drop_path = ( TFData2VecVisionDropPath(drop_path_rate, name="drop_path") if drop_path_rate > 0.0 else keras.layers.Activation("linear", name="drop_path") ) self.init_values = config.layer_scale_init_value def build(self, input_shape: tf.TensorShape = None): if self.init_values > 0: self.lambda_1 = self.add_weight( shape=(self.config.hidden_size), initializer="ones", trainable=True, name="lambda_1", ) self.lambda_2 = self.add_weight( shape=(self.config.hidden_size), initializer="ones", trainable=True, name="lambda_2", ) self.lambda_1.assign(self.init_values * tf.ones(self.config.hidden_size)) self.lambda_2.assign(self.init_values * tf.ones(self.config.hidden_size)) else: self.lambda_1, self.lambda_2 = None, None if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "data2vec_output", None) is not None: with tf.name_scope(self.data2vec_output.name): self.data2vec_output.build(None) if getattr(self, "layernorm_before", None) is not None: with tf.name_scope(self.layernorm_before.name): self.layernorm_before.build([None, None, self.config.hidden_size]) if getattr(self, "layernorm_after", None) is not None: with tf.name_scope(self.layernorm_after.name): self.layernorm_after.build([None, None, self.config.hidden_size]) if getattr(self, "drop_path", None) is not None: with tf.name_scope(self.drop_path.name): self.drop_path.build(None) def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, relative_position_bias: TFData2VecVisionRelativePositionBias | None = None, training: bool = False, ) -> tuple[tf.Tensor]: self_attention_outputs = self.attention( # in Data2VecVision, layernorm is applied before self-attention input_tensor=self.layernorm_before(inputs=hidden_states), head_mask=head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, training=training, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # apply lambda_1 if present if self.lambda_1 is not None: attention_output = self.lambda_1 * attention_output # first residual connection hidden_states = 
self.drop_path(attention_output) + hidden_states # in Data2VecVision, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.data2vec_output(layer_output) if self.lambda_2 is not None: layer_output = self.lambda_2 * layer_output # second residual connection layer_output = self.drop_path(layer_output) + hidden_states outputs = (layer_output,) + outputs return outputs # Taken and modified from here: # https://github.com/leondgarse/keras_cv_attention_models/blob/main/keras_cv_attention_models/beit/beit.py#L28 class TFData2VecVisionRelativePositionBias(keras.layers.Layer): def __init__(self, config: Data2VecVisionConfig, window_size: tuple, **kwargs) -> None: super().__init__(**kwargs) self.config = config self.window_size = window_size # +3 for cls_token_pos_len # window_size can be something like (14, 14) self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_index = self.get_position_index() def build(self, input_shape): self.relative_position_bias_table = self.add_weight( shape=(self.num_relative_distance, self.config.num_attention_heads), initializer="zeros", trainable=True, name="relative_position_bias_table", ) # [2*Wh-1 * 2*Ww-1, nH] # cls to token & token 2 cls & cls to cls super().build(input_shape) def get_position_index(self): # get pair-wise relative position index for each token inside the window xx, yy = tf.meshgrid(range(self.window_size[0]), range(self.window_size[1])) coords = tf.stack([yy, xx], axis=0) # [2, Wh, Ww] coords_flatten = tf.reshape(coords, [2, -1]) # [2, Wh*Ww] relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # [2, Wh*Ww, Wh*Ww] relative_coords = tf.transpose(relative_coords, perm=[1, 2, 0]) # [Wh*Ww, Wh*Ww, 2] xx = (relative_coords[:, :, 0] + self.window_size[0] - 1) * (2 * self.window_size[1] - 1) yy = relative_coords[:, :, 1] + self.window_size[1] - 1 relative_coords = tf.stack([xx, yy], axis=-1) relative_position_index = tf.reduce_sum(relative_coords, axis=-1) # [Wh*Ww, Wh*Ww] top = tf.ones((1, relative_position_index.shape[1]), dtype=relative_position_index.dtype) * ( self.num_relative_distance - 3 ) left = tf.ones((relative_position_index.shape[0], 1), dtype=relative_position_index.dtype) * ( self.num_relative_distance - 2 ) corner = tf.ones((1, 1), dtype=relative_position_index.dtype) * (self.num_relative_distance - 1) left_corner = tf.concat([corner, left], axis=0) relative_position_index = tf.concat([top, relative_position_index], axis=0) relative_position_index = tf.concat([left_corner, relative_position_index], axis=1) # [Wh*Ww + 1, Wh*Ww + 1] return relative_position_index def call(self, inputs=None) -> tf.Tensor: relative_position_bias = tf.gather(self.relative_position_bias_table, self.relative_position_index, axis=0) return tf.transpose(relative_position_bias, [2, 0, 1]) class TFData2VecVisionEncoder(keras.layers.Layer): def __init__(self, config: Data2VecVisionConfig, window_size: tuple | None = None, **kwargs): super().__init__(**kwargs) self.config = config if config.use_shared_relative_position_bias: self.relative_position_bias = TFData2VecVisionRelativePositionBias( config, window_size=window_size, name="relative_position_bias" ) else: self.relative_position_bias = None # stochastic depth decay rule dpr = list(tf.linspace(0.0, config.drop_path_rate, config.num_hidden_layers)) self.layer = [ TFData2VecVisionLayer( config, window_size=window_size if 
config.use_relative_position_bias else None, drop_path_rate=dpr[i], name=f"layer_._{i}", ) for i in range(config.num_hidden_layers) ] def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor | None = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> tuple | TFBaseModelOutput: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None # Passing `0.0` to the `relative_position_bias()` layer because otherwise Keras # might complain about `Layer.call()` not being invoked properly. In this case this input # i.e., 0.0 is not going to be used in any calculations so we're safe. relative_position_bias = ( self.relative_position_bias(0.0) if self.relative_position_bias is not None else None ) layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "relative_position_bias", None) is not None: with tf.name_scope(self.relative_position_bias.name): self.relative_position_bias.build(None) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFData2VecVisionMainLayer(keras.layers.Layer): config_class = Data2VecVisionConfig def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.add_pooling_layer = add_pooling_layer self.embeddings = TFData2VecVisionEmbeddings(config, name="embeddings") self.encoder = TFData2VecVisionEncoder( config, window_size=self.embeddings.patch_embeddings.patch_shape, name="encoder" ) self.layernorm = ( tf.identity if config.use_mean_pooling else keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") ) # We are setting the `data_format` like so because from here on we will revert to the # NCHW output format self.pooler = TFData2VecVisionPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, pixel_values: tf.Tensor | None = None, bool_masked_pos: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple | TFData2VecVisionModelOutputWithPooling: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings(pixel_values, bool_masked_pos, training=training) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] return TFData2VecVisionModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "layernorm", None) is not None: if hasattr(self.layernorm, "name"): with tf.name_scope(self.layernorm.name): self.layernorm.build((None, self.config.hidden_size)) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFData2VecVisionPooler(keras.layers.Layer): def __init__(self, config: Data2VecVisionConfig, **kwargs): super().__init__(**kwargs) self.layernorm = ( keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") if config.use_mean_pooling else None ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: if self.layernorm is not None: # Mean pool the final hidden states of the patch tokens patch_tokens = hidden_states[:, 1:, :] pooled_output = self.layernorm(tf.reduce_mean(patch_tokens, axis=1)) else: # Pool by simply taking the final hidden state of the [CLS] token pooled_output = hidden_states[:, 0] return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layernorm", None) is not None: if hasattr(self.layernorm, "name"): with 
tf.name_scope(self.layernorm.name): self.layernorm.build((None, self.config.hidden_size)) class TFData2VecVisionPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Data2VecVisionConfig base_model_prefix = "data2vec_vision" main_input_name = "pixel_values" _keys_to_ignore_on_load_unexpected = [r"relative_position_index"] DATA2VEC_VISION_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.). This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ DATA2VEC_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` `dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BeitImageProcessor.__call__`] for details. head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.", DATA2VEC_VISION_START_DOCSTRING, ) class TFData2VecVisionModel(TFData2VecVisionPreTrainedModel): def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = False, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.data2vec_vision = TFData2VecVisionMainLayer( config, add_pooling_layer=add_pooling_layer, name="data2vec_vision" ) def get_input_embeddings(self): return self.data2vec_vision.get_input_embeddings() @unpack_inputs @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFData2VecVisionModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: TFModelInputType | None = None, bool_masked_pos: tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple | TFData2VecVisionModelOutputWithPooling: r""" bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ outputs = self.data2vec_vision( pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "data2vec_vision", None) is not None: with tf.name_scope(self.data2vec_vision.name): self.data2vec_vision.build(None) @add_start_docstrings( """ Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of the final hidden states of the patch tokens) e.g. for ImageNet. 
""", DATA2VEC_VISION_START_DOCSTRING, ) class TFData2VecVisionForImageClassification(TFData2VecVisionPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=True, name="data2vec_vision") # Classifier head self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> TFSequenceClassifierOutput | tuple: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.data2vec_vision( pixel_values=pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "data2vec_vision", None) is not None: with tf.name_scope(self.data2vec_vision.name): self.data2vec_vision.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) class TFData2VecVisionConvModule(keras.layers.Layer): """ A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation. 
""" def __init__( self, in_channels: int, out_channels: int, kernel_size: int | tuple[int, int], padding: str = "valid", bias: bool = False, dilation: int | tuple[int, int] = 1, **kwargs, ) -> None: super().__init__(**kwargs) self.conv = keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, padding=padding, use_bias=bias, dilation_rate=dilation, name="conv", ) self.bn = keras.layers.BatchNormalization(name="bn", momentum=0.9, epsilon=1e-5) self.activation = tf.nn.relu self.in_channels = in_channels self.out_channels = out_channels def call(self, input: tf.Tensor) -> tf.Tensor: output = self.conv(input) output = self.bn(output) output = self.activation(output) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, None, self.in_channels]) if getattr(self, "bn", None) is not None: with tf.name_scope(self.bn.name): self.bn.build((None, None, None, self.out_channels)) class TFAdaptiveAvgPool2D(keras.layers.Layer): def __init__(self, output_dims: tuple[int, int], input_ordering: str = "NHWC", **kwargs): super().__init__(**kwargs) self.output_dims = output_dims self.input_ordering = input_ordering if input_ordering not in ("NCHW", "NHWC"): raise ValueError("Unrecognized input_ordering, should be 'NCHW' or 'NHWC'!") self.h_axis = input_ordering.index("H") self.w_axis = input_ordering.index("W") def pseudo_1d_pool(self, inputs: tf.Tensor, h_pooling: bool): # Figure out which axis we're pooling on if h_pooling: axis = self.h_axis output_dim = self.output_dims[0] else: axis = self.w_axis output_dim = self.output_dims[1] input_dim = inputs.shape[axis] # Figure out the potential pooling windows # This is the key idea - the torch op always uses only two # consecutive pooling window sizes, like 3 and 4. Therefore, # if we pool with both possible sizes, we simply need to gather # the 'correct' pool at each position to reimplement the torch op. 
small_window = math.ceil(input_dim / output_dim) big_window = small_window + 1 if h_pooling: output_dim = self.output_dims[0] small_window_shape = (small_window, 1) big_window_shape = (big_window, 1) else: output_dim = self.output_dims[1] small_window_shape = (1, small_window) big_window_shape = (1, big_window) # For resizes to 1, or integer resizes, we can take quick shortcuts if output_dim == input_dim: return inputs elif output_dim == 1: return tf.reduce_mean(inputs, axis=axis, keepdims=True) elif input_dim % output_dim == 0: return tf.nn.avg_pool2d( inputs, ksize=small_window_shape, strides=small_window_shape, padding="VALID", data_format=self.input_ordering, ) # When upscaling by an integer factor we can also take a quick shortcut elif output_dim > input_dim and output_dim % input_dim == 0: return tf.repeat(inputs, repeats=output_dim // input_dim, axis=axis) # For non-integer resizes, we pool with both possible window sizes and concatenate them if output_dim < input_dim: small_pool = tf.nn.avg_pool2d( inputs, ksize=small_window_shape, strides=1, padding="VALID", data_format=self.input_ordering ) big_pool = tf.nn.avg_pool2d( inputs, ksize=big_window_shape, strides=1, padding="VALID", data_format=self.input_ordering ) both_pool = tf.concat([small_pool, big_pool], axis=axis) else: # When we're actually upscaling instead, then we build the pools a bit differently small_pool = inputs big_pool = tf.nn.avg_pool2d( inputs, ksize=big_window_shape, strides=1, padding="VALID", data_format=self.input_ordering ) both_pool = tf.concat([small_pool, big_pool], axis=axis) # We compute vectors of the start and end positions for each pooling window # Each (start, end) pair here corresponds to a single output position window_starts = tf.math.floor((tf.range(output_dim, dtype=tf.float32) * input_dim) / output_dim) window_starts = tf.cast(window_starts, tf.int64) window_ends = tf.math.ceil((tf.range(1, output_dim + 1, dtype=tf.float32) * input_dim) / output_dim) window_ends = tf.cast(window_ends, tf.int64) # pool_selector is a boolean array of shape (output_dim,) where 1 indicates that output position # has a big receptive field and 0 indicates that that output position has a small receptive field pool_selector = tf.cast(window_ends - window_starts - small_window, tf.bool) # Since we concatenated the small and big pools, we need to do a bit of # pointer arithmetic to get the indices of the big pools small_indices = window_starts big_indices = window_starts + small_pool.shape[axis] # Finally, we use the pool_selector to generate a list of indices, one per output position gather_indices = tf.where(pool_selector, big_indices, small_indices) # Gathering from those indices yields the final, correct pooling return tf.gather(both_pool, gather_indices, axis=axis) def call(self, inputs: tf.Tensor): if self.input_ordering == "NHWC": input_shape = inputs.shape[1:3] else: input_shape = inputs.shape[2:] # We break the task down into each possible case # Firstly, if we're resizing down to 1, it's just tf.reduce_mean if self.output_dims[0] == self.output_dims[1] == 1: if self.input_ordering == "NHWC": reduce_dims = [1, 2] else: reduce_dims = [2, 3] return tf.reduce_mean(inputs, axis=reduce_dims, keepdims=True) # Secondly, if we're resizing by an integer factor on both dimensions, we can take a quick shortcut elif input_shape[0] % self.output_dims[0] == 0 and input_shape[1] % self.output_dims[1] == 0: h_resize = int(input_shape[0] // self.output_dims[0]) w_resize = int(input_shape[1] // self.output_dims[1]) return 
tf.nn.avg_pool2d( inputs, ksize=(h_resize, w_resize), strides=(h_resize, w_resize), padding="VALID", data_format=self.input_ordering, ) else: # Finally, if we can't take the shortcut, we do a 1D pool on each axis. pseudo_1d_pool will take a shortcut # for dimensions where an integer resize is possible. It can also handle upscaling. h_pooled = self.pseudo_1d_pool(inputs, h_pooling=True) return self.pseudo_1d_pool(h_pooled, h_pooling=False) class TFData2VecVisionPyramidPoolingModule(keras.layers.Layer): """ Pyramid Pooling Module (PPM) used in PSPNet. Args: pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid Module. channels (int): Channels after modules, before conv_seg. Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation. """ def __init__(self, pool_scales: tuple[int, ...], in_channels: int, out_channels: int, **kwargs) -> None: super().__init__(**kwargs) self.pool_scales = pool_scales self.in_channels = in_channels self.out_channels = out_channels self.layer_list = [] for idx, pool_scale in enumerate(pool_scales): pool_scale = pool_scale if isinstance(pool_scale, collections.abc.Iterable) else (pool_scale, pool_scale) self.layer_list.append( [ TFAdaptiveAvgPool2D(output_dims=pool_scale), TFData2VecVisionConvModule( in_channels=in_channels, out_channels=self.out_channels, kernel_size=1, name=f"{idx}.1" ), ] ) def call(self, x: tf.Tensor) -> list[tf.Tensor]: ppm_outs = [] inputs = x for ppm in self.layer_list: for layer_module in ppm: ppm_out = layer_module(x) x = ppm_out upsampled_ppm_out = tf.image.resize(ppm_out, size=shape_list(inputs)[1:-1], method="bilinear") ppm_outs.append(upsampled_ppm_out) return ppm_outs def build(self, input_shape=None): for layer in self.layer_list: for layer_module in layer: with tf.name_scope(layer_module.name): layer_module.build(None) class TFData2VecVisionUperHead(keras.layers.Layer): """ Unified Perceptual Parsing for Scene Understanding. This head is the implementation of [UPerNet](https://huggingface.co/papers/1807.10221). Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation. """ def __init__(self, config: Data2VecVisionConfig, **kwargs) -> None: super().__init__(**kwargs) self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6) self.in_channels = [config.hidden_size] * 4 # e.g. 
[768, 768, 768, 768] self.channels = config.hidden_size self.classifier = keras.layers.Conv2D(config.num_labels, kernel_size=1, name="classifier") # PSP Module self.psp_modules = TFData2VecVisionPyramidPoolingModule( self.pool_scales, self.in_channels[-1], self.channels, name="psp_modules" ) self.bottleneck = TFData2VecVisionConvModule( self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding="same", name="bottleneck", ) # FPN Module self.lateral_convs = [] self.fpn_convs = [] for idx, in_channels in enumerate(self.in_channels[:-1]): # skip the top layer l_conv = TFData2VecVisionConvModule( in_channels, out_channels=self.channels, kernel_size=1, name=f"lateral_convs.{idx}" ) fpn_conv = TFData2VecVisionConvModule( in_channels=self.channels, out_channels=self.channels, kernel_size=3, padding="same", name=f"fpn_convs.{idx}", ) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) self.fpn_bottleneck = TFData2VecVisionConvModule( in_channels=len(self.in_channels) * self.channels, out_channels=self.channels, kernel_size=3, padding="same", name="fpn_bottleneck", ) def psp_forward(self, inputs): x = inputs[-1] psp_outs = [x] psp_outs.extend(self.psp_modules(x)) psp_outs = tf.concat(psp_outs, axis=-1) output = self.bottleneck(psp_outs) return output def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor: # build laterals laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)] laterals.append(self.psp_forward(encoder_hidden_states)) # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): prev_shape = shape_list(laterals[i - 1])[1:-1] laterals[i - 1] = laterals[i - 1] + tf.image.resize(laterals[i], size=prev_shape, method="bilinear") # build outputs fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)] # append psp feature fpn_outs.append(laterals[-1]) for i in range(used_backbone_levels - 1, 0, -1): fpn_outs[i] = tf.image.resize(fpn_outs[i], size=shape_list(fpn_outs[0])[1:-1], method="bilinear") fpn_outs = tf.concat(fpn_outs, axis=-1) output = self.fpn_bottleneck(fpn_outs) output = self.classifier(output) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, None, self.channels]) if getattr(self, "psp_modules", None) is not None: with tf.name_scope(self.psp_modules.name): self.psp_modules.build(None) if getattr(self, "bottleneck", None) is not None: with tf.name_scope(self.bottleneck.name): self.bottleneck.build(None) if getattr(self, "fpn_bottleneck", None) is not None: with tf.name_scope(self.fpn_bottleneck.name): self.fpn_bottleneck.build(None) for layer in self.lateral_convs: with tf.name_scope(layer.name): layer.build(None) for layer in self.fpn_convs: with tf.name_scope(layer.name): layer.build(None) class TFData2VecVisionFCNHead(keras.layers.Layer): """ Fully Convolution Networks for Semantic Segmentation. This head is implemented from [FCNNet](https://huggingface.co/papers/1411.4038). Args: config (Data2VecVisionConfig): Configuration. kernel_size (int): The kernel size for convs in the head. Default: 3. dilation (int): The dilation rate for convs in the head. Default: 1. Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation. 
""" def __init__( self, config: Data2VecVisionConfig, in_index: int = 2, kernel_size: int = 3, dilation: int | tuple[int, int] = 1, **kwargs, ) -> None: super().__init__(**kwargs) self.in_channels = config.hidden_size self.channels = config.auxiliary_channels self.num_convs = config.auxiliary_num_convs self.concat_input = config.auxiliary_concat_input self.in_index = in_index convs = [] convs.append( TFData2VecVisionConvModule( in_channels=self.in_channels, out_channels=self.channels, kernel_size=kernel_size, padding="same", dilation=dilation, name="convs.0", ) ) for i in range(self.num_convs - 1): convs.append( TFData2VecVisionConvModule( in_channels=self.channels, out_channels=self.channels, kernel_size=kernel_size, padding="same", dilation=dilation, name=f"conv_module_{i + 2}", ) ) if self.num_convs == 0: self.convs = [tf.identity] else: self.convs = convs if self.concat_input: self.conv_cat = TFData2VecVisionConvModule( self.in_channels + self.channels, out_channels=self.channels, kernel_size=kernel_size, padding="same", name="conv_cat", ) self.classifier = keras.layers.Conv2D(config.num_labels, kernel_size=1, name="classifier") def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor: # just take the relevant feature maps hidden_states = encoder_hidden_states[self.in_index] output = hidden_states for layer_module in self.convs: output = layer_module(output) if self.concat_input: output = self.conv_cat(tf.concat([hidden_states, output], axis=-1)) output = self.classifier(output) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, None, self.channels]) if getattr(self, "conv_cat", None) is not None: with tf.name_scope(self.conv_cat.name): self.conv_cat.build(None) @add_start_docstrings( """ Data2VecVision Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes. 
""", DATA2VEC_VISION_START_DOCSTRING, ) class TFData2VecVisionForSemanticSegmentation(TFData2VecVisionPreTrainedModel): def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=False, name="data2vec_vision") # FPNs self.fpn1 = [ keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.0"), keras.layers.BatchNormalization(name="fpn1.1", momentum=0.9, epsilon=1e-5), keras.layers.Activation("gelu"), keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.3"), ] self.fpn2 = [keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn2.0")] self.fpn3 = tf.identity self.fpn4 = keras.layers.MaxPool2D(pool_size=2, strides=2) # Semantic segmentation head(s) self.decode_head = TFData2VecVisionUperHead(config, name="decode_head") self.auxiliary_head = ( TFData2VecVisionFCNHead(config, name="auxiliary_head") if config.use_auxiliary_head else None ) def compute_loss(self, logits, auxiliary_logits, labels): # upsample logits to the images' original size if len(shape_list(labels)) > 3: label_interp_shape = shape_list(labels)[1:-1] else: label_interp_shape = shape_list(labels)[-2:] upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear") if auxiliary_logits is not None: upsampled_auxiliary_logits = tf.image.resize(auxiliary_logits, size=label_interp_shape, method="bilinear") # compute weighted loss loss_fct = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none") # Copied from https://www.tensorflow.org/text/tutorials/transformer#loss_and_metrics. # Utility to mask the index to ignore during computing the loss. def masked_loss(real, pred): mask = tf.math.logical_not(tf.math.equal(real, self.config.semantic_loss_ignore_index)) loss_ = loss_fct(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask reduced_masked_loss = tf.reduce_sum(loss_) / tf.reduce_sum(mask) return tf.reshape(reduced_masked_loss, (1,)) main_loss = masked_loss(labels, upsampled_logits) auxiliary_loss = masked_loss(labels, upsampled_auxiliary_logits) loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss return loss @unpack_inputs @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def call( self, pixel_values: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, labels: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, ) -> tuple | TFSemanticSegmenterOutput: r""" labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). 
Returns: Examples: ```python >>> from transformers import AutoImageProcessor, TFData2VecVisionForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base") >>> model = TFData2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # logits are of shape (batch_size, num_labels, height, width) >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if labels is not None and self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") outputs = self.data2vec_vision( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] # only keep certain features, and reshape # note that we do +1 as the encoder_hidden_states also includes the initial embeddings features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices] patch_resolution = self.config.image_size // self.config.patch_size def reshape_features(x): # We do it this way so TF can always infer the non-batch dims at compile time x = tf.reshape(x, (-1, patch_resolution, patch_resolution, self.config.hidden_size)) return x features = [reshape_features(x[:, 1:, :]) for x in features] # apply FPNs ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for module in ops[0]: features[0] = module(features[0]) features[1] = ops[1][0](features[1]) for i in range(len(features[2:])): features[i + 2] = ops[i + 2](features[i + 2]) logits = self.decode_head(features) # Transpose the logits to maintain consistency in the output formats. 
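        # (The Keras convolution stack above produces channels-last (NHWC) tensors; the transpose
        # below returns NCHW logits, consistent with the PyTorch implementation of this model.)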
transposed_logits = tf.transpose(logits, perm=[0, 3, 1, 2]) auxiliary_logits = None if self.auxiliary_head is not None: auxiliary_logits = self.auxiliary_head(features) loss = None if labels is not None: loss = self.compute_loss(logits, auxiliary_logits, labels) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSemanticSegmenterOutput( loss=loss, logits=transposed_logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "data2vec_vision", None) is not None: with tf.name_scope(self.data2vec_vision.name): self.data2vec_vision.build(None) if getattr(self, "decode_head", None) is not None: with tf.name_scope(self.decode_head.name): self.decode_head.build(None) if getattr(self, "auxiliary_head", None) is not None: with tf.name_scope(self.auxiliary_head.name): self.auxiliary_head.build(None) if getattr(self, "fpn1", None) is not None: with tf.name_scope(self.fpn1[0].name): self.fpn1[0].build([None, None, None, self.config.hidden_size]) with tf.name_scope(self.fpn1[1].name): self.fpn1[1].build((None, None, None, self.config.hidden_size)) with tf.name_scope(self.fpn1[3].name): self.fpn1[3].build([None, None, None, self.config.hidden_size]) if getattr(self, "fpn2", None) is not None: with tf.name_scope(self.fpn2[0].name): self.fpn2[0].build([None, None, None, self.config.hidden_size]) __all__ = [ "TFData2VecVisionForImageClassification", "TFData2VecVisionForSemanticSegmentation", "TFData2VecVisionModel", "TFData2VecVisionPreTrainedModel", ]
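# Minimal usage sketch for the TF classification model defined above. This block is illustrative
# only and not part of the original module: the checkpoint name and image URL are assumptions,
# and running it downloads weights from the Hub.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoImageProcessor

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # assumed sample image
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k")
    model = TFData2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k")
    inputs = processor(images=image, return_tensors="tf")  # TF tensors for the TF model
    logits = model(**inputs).logits
    print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])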
# coding=utf-8 # Copyright 2020 Microsoft and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization class for model DeBERTa.""" import os from shutil import copyfile from typing import Optional from ...file_utils import is_sentencepiece_available from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if is_sentencepiece_available(): from .tokenization_deberta_v2 import DebertaV2Tokenizer else: DebertaV2Tokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"} class DebertaV2TokenizerFast(PreTrainedTokenizerFast): r""" Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. bos_token (`string`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. eos_token (`string`, *optional*, defaults to `"[SEP]"`): The end of sequence token. When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. 
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = DebertaV2Tokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=False, split_by_punct=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ) -> None: super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, **kwargs, ) self.do_lower_case = do_lower_case self.split_by_punct = split_by_punct self.vocab_file = vocab_file def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format: - single sequence: [CLS] X [SEP] - pair of sequences: [CLS] A [SEP] B [SEP] Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." 
) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) __all__ = ["DebertaV2TokenizerFast"]
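# Illustrative sketch of the special-token layout produced by `build_inputs_with_special_tokens`
# above ([CLS] X [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair). Not part of the
# original file; the checkpoint name is an assumption, and any DeBERTa-v2/v3 checkpoint shipping
# a SentencePiece vocabulary should behave the same way.
if __name__ == "__main__":
    tokenizer = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v3-base")
    single = tokenizer("Hello world")["input_ids"]
    pair = tokenizer("Hello world", "How are you?")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(single))  # ['[CLS]', ..., '[SEP]']
    print(tokenizer.convert_ids_to_tokens(pair))  # ['[CLS]', ..., '[SEP]', ..., '[SEP]']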
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/deepseek_vl/modular_deepseek_vl.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_deepseek_vl.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch.nn.functional as F from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( BaseImageProcessorFast, DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images, ) from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling, SizeDict from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, is_torch_available, ) if is_torch_available(): import torch class DeepseekVLFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): r""" min_size (`int`, *optional*, defaults to 14): The minimum allowed size for the resized image. Ensures that neither the height nor width falls below this value after resizing. """ min_size: int @auto_docstring class DeepseekVLImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"height": 384, "width": 384} min_size = 14 do_resize = True do_rescale = True do_normalize = True valid_kwargs = DeepseekVLFastImageProcessorKwargs def __init__(self, **kwargs: Unpack[DeepseekVLFastImageProcessorKwargs]): super().__init__(**kwargs) if kwargs.get("image_mean") is None: background_color = (127, 127, 127) else: background_color = tuple(int(x * 255) for x in kwargs.get("image_mean")) self.background_color = tuple(background_color) def resize( self, image: "torch.Tensor", size: SizeDict, min_size: int, interpolation: "F.InterpolationMode" = None, antialias: bool = True, **kwargs, ) -> "torch.Tensor": if size.height is None or size.width is None or size.height != size.width: raise ValueError( f"Output height and width must be the same. Got height={size['height']} and width={size['width']}" ) size = size.height height, width = image.shape[-2:] max_size = max(height, width) delta = size / max_size # Largest side becomes `size` and the other side is scaled according to the aspect ratio. output_size_nonpadded = SizeDict( height=max(int(height * delta), min_size), width=max(int(width * delta), min_size), ) return super().resize(image, size=output_size_nonpadded, interpolation=interpolation, antialias=antialias) def pad_to_square( self, images: "torch.Tensor", background_color: Union[int, tuple[int, int, int]] = 0, ) -> "torch.Tensor": """ Pads an image to a square based on the longest edge. Args: images (`torch.Tensor`): The images to pad. 
background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. If passed as integer in mutli-channel mode, it will default to `0` in subsequent channels. Returns: `torch.Tensor`: The padded images. """ height, width = images.shape[-2:] num_channels = images.shape[1] batch_size = images.shape[0] if height == width: return images max_dim = max(height, width) # Ensure background_color is the correct shape if isinstance(background_color, int): background_color = [background_color] elif len(background_color) != num_channels: raise ValueError( f"background_color must have no more than {num_channels} elements to match the number of channels" ) padded_images = torch.zeros( (batch_size, num_channels, max_dim, max_dim), dtype=images.dtype, device=images.device ) for i, color in enumerate(background_color): padded_images[:, i, :, :] = color if width > height: start = (max_dim - height) // 2 padded_images[:, :, start : start + height, :] = images else: start = (max_dim - width) // 2 padded_images[:, :, :, start : start + width] = images return padded_images def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, min_size: int, interpolation: Optional["F.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], do_pad: bool = True, **kwargs, ) -> BatchFeature: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize( image=stacked_images, size=size, min_size=min_size, interpolation=interpolation ) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_pad: stacked_images = self.pad_to_square(stacked_images, background_color=self.background_color) # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["DeepseekVLImageProcessorFast"]
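# Rough usage sketch for the fast image processor defined above (illustrative only; not part of
# the original file). A random non-square image is used so nothing has to be downloaded: the
# longer side is resized to 384, the shorter side is scaled to keep the aspect ratio, and the
# result is padded with the background color into a 384x384 square.
if __name__ == "__main__":
    import numpy as np

    processor = DeepseekVLImageProcessorFast()
    dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    outputs = processor(images=dummy_image, return_tensors="pt")
    print(outputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 384, 384])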
# coding=utf-8 # Copyright 2023, HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GPTSAN-japanese model configuration""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class GPTSanJapaneseConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese [Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Arguments: vocab_size (`int`, *optional*, defaults to 36000): Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPTSanJapaneseModel`]. max_position_embeddings (`int`, *optional*, defaults to 1280): The maximum sequence length that this model might ever be used with. Defaults set this to 1280. d_model (`int`, *optional*, defaults to 1024): Size of the encoder layers and the pooler layer. d_ff (`int`, *optional*, defaults to 8192): Size of the intermediate feed forward layer in each `SwitchTransformersBlock`. d_ext (`int`, *optional*, defaults to 4096): Size of the intermediate feed forward layer in each Extra-layers. d_spout (`int`, *optional*, defaults to 128): Size of the `spout` vector. num_switch_layers (`int`, *optional*, defaults to 10): Number of layers in the Switch Transformer layer. num_ext_layers (`int`, *optional*, defaults to 0): Number of layers in the Extra-layers. num_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_experts (`int`, *optional*, defaults to 16): Number of experts for each SwitchTransformer layer. expert_capacity (`int`, *optional*, defaults to 128): Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular Transformer. dropout_rate (`float`, *optional*, defaults to 0.0): The ratio for all dropout layers. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. router_bias (`bool`, *optional*, defaults to `False`): Whether to add a bias to the router. router_jitter_noise (`float`, *optional*, defaults to 0.0): Amount of noise to add to the router. Set it to 0.0 during prediction or set small value (usually 1e-2) during training. router_dtype (`str`, *optional*, default to `"float32"`): The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the *selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961). 
router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): Whether to ignore padding tokens when routing. output_hidden_states (`bool`, *optional*, default to `False`): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. initializer_factor (`float`, *optional*, defaults to 0.002): A factor for initializing all weight matrices. output_router_logits (`bool`, *optional*, default to `False`): Whether or not to return the router logits of all experts. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) """ model_type = "gptsan-japanese" keys_to_ignore_at_inference = [ "past_key_values", ] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.d_ff = d_ff self.d_ext = d_ext self.d_spout = d_spout self.num_switch_layers = num_switch_layers self.num_ext_layers = num_ext_layers self.num_layers = num_switch_layers + num_ext_layers self.num_heads = num_heads self.num_experts = num_experts self.expert_capacity = expert_capacity self.dropout_rate = dropout_rate self.layer_norm_epsilon = layer_norm_epsilon self.router_bias = router_bias self.router_jitter_noise = router_jitter_noise self.router_dtype = router_dtype self.router_ignore_padding_tokens = router_ignore_padding_tokens self.output_hidden_states = output_hidden_states self.output_attentions = output_attentions self.initializer_factor = initializer_factor self.output_router_logits = output_router_logits self.use_cache = use_cache super().__init__( separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, ) __all__ = ["GPTSanJapaneseConfig"]
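# Small usage sketch for the configuration class above (illustrative only; not part of the
# original file). The override values below are arbitrary and only show that `num_layers` is
# derived from `num_switch_layers + num_ext_layers`.
if __name__ == "__main__":
    default_config = GPTSanJapaneseConfig()
    small_config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=4, num_ext_layers=2, num_experts=8)
    print(default_config.num_layers)  # 10 (= 10 switch layers + 0 extra layers)
    print(small_config.num_layers)  # 6 (= 4 switch layers + 2 extra layers)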
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Feature extractor class for M-CTC-T """ from typing import Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging logger = logging.get_logger(__name__) class MCTCTFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a M-CTC-T feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This code has been adapted from Flashlight's C++ code. For more information about the implementation, one can refer to this [notebook](https://colab.research.google.com/drive/1GLtINkkhzms-IsdcGy_-tVCkv0qNF-Gt#scrollTo=pMCRGMmUC_an) that takes the user step-by-step in the implementation. Args: feature_size (`int`, defaults to 80): The feature dimension of the extracted features. This is the number of mel_frequency sampling_rate (`int`, defaults to 16000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). padding_value (`float`, defaults to 0.0): The value that is used to fill the padding values. hop_length (`int`, defaults to 10): Number of audio samples between windows. Otherwise referred to as "shift" in many papers. win_length (`int`, defaults to 25): Number of ms per window win_function (`str`, defaults to `"hamming_window"`): Name for the window function used for windowing, must be accessible via `torch.{win_function}` frame_signal_scale (`float`, defaults to 32768.0): Constant multiplied in creating the frames before applying DFT. preemphasis_coeff (`float`, defaults to 0.97): Constant multiplied in applying Pre-emphasis before DFT. mel_floor (`float` defaults to 1.0): Minimum value of mel frequency banks. normalize_means (`bool`, *optional*, defaults to `True`): Whether or not to zero-mean normalize the extracted features. normalize_vars (`bool`, *optional*, defaults to `True`): Whether or not to unit-variance normalize the extracted features. 
""" model_input_names = ["input_features", "attention_mask"] def __init__( self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.feature_size = feature_size self.sampling_rate = sampling_rate self.padding_value = padding_value self.hop_length = hop_length self.win_length = win_length self.frame_signal_scale = frame_signal_scale self.preemphasis_coeff = preemphasis_coeff self.mel_floor = mel_floor self.normalize_means = normalize_means self.normalize_vars = normalize_vars self.win_function = win_function self.return_attention_mask = return_attention_mask self.sample_size = win_length * sampling_rate // 1000 self.sample_stride = hop_length * sampling_rate // 1000 self.n_fft = optimal_fft_length(self.sample_size) self.n_freqs = (self.n_fft // 2) + 1 def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray: """ Extracts MFSC Features for one waveform vector (unbatched). Adapted from Flashlight's C++ MFSC code. """ if self.win_function == "hamming_window": window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False) else: window = window_function(window_length=self.sample_size, name=self.win_function) fbanks = mel_filter_bank( num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, ) msfc_features = spectrogram( one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", ) return msfc_features.T def _normalize_one(self, x, input_length, padding_value): # make sure we normalize float32 arrays if self.normalize_means: mean = x[:input_length].mean(axis=0) x = np.subtract(x, mean) if self.normalize_vars: std = x[:input_length].std(axis=0) x = np.divide(x, std) if input_length < x.shape[0]: x[input_length:] = padding_value # make sure array is in float32 x = x.astype(np.float32) return x def normalize( self, input_features: list[np.ndarray], attention_mask: Optional[np.ndarray] = None ) -> list[np.ndarray]: lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)] def __call__( self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). sequences. It returns the log-mel spectrogram of the input audio, as implemented in the original Flashlight MFSC feature extraction code. Args: raw_speech (`torch.Tensor`, `np.ndarray`, `list[float]`, `list[torch.Tensor]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. 
Each sequence can be a tensor, a numpy array, a list of float values, a list of tensors, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*): If set, will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. padding_value (`float`, defaults to 0.0): The value that is used to fill the padding values / padding vectors. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug."
) is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) # always return batch if not is_batched: raw_speech = [raw_speech] # extract fbank features features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech] # convert into correct format for padding encoded_inputs = BatchFeature({"input_features": features}) padded_inputs = self.pad( encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, ) # make sure list is in array format input_features = padded_inputs.get("input_features") if isinstance(input_features[0], list): padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features] attention_mask = padded_inputs.get("attention_mask") if attention_mask is not None: padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask] if self.normalize_means or self.normalize_vars: attention_mask = ( np.array(attention_mask, dtype=np.int32) if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD and padding else None ) padded_inputs["input_features"] = self.normalize( padded_inputs["input_features"], attention_mask=attention_mask ) if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs __all__ = ["MCTCTFeatureExtractor"]
transformers/src/transformers/models/deprecated/mctct/feature_extraction_mctct.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/mctct/feature_extraction_mctct.py", "repo_id": "transformers", "token_count": 5589 }
424
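A minimal usage sketch for the extractor defined in the record above (assuming the deprecated M-CTC-T module is still importable from the path shown): featurizing one second of mono audio at 16 kHz yields roughly 100 frames of 80 log-mel features, since hop_length is 10 ms.

import numpy as np
from transformers.models.deprecated.mctct.feature_extraction_mctct import MCTCTFeatureExtractor

extractor = MCTCTFeatureExtractor()  # 80 mel bins, 16 kHz defaults
waveform = (0.1 * np.random.randn(16000)).astype(np.float32)  # 1 s of synthetic mono audio

inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
# (1, n_frames, 80): about 98 frames for one second with a 25 ms window and 10 ms hop
print(inputs["input_features"].shape)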
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A TF 2.0 Adaptive Softmax for Transformer XL model. """ import tensorflow as tf from ....modeling_tf_utils import keras from ....tf_utils import shape_list class TFAdaptiveSoftmaxMask(keras.layers.Layer): def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [vocab_size] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters self.keep_order = keep_order self.out_layers = [] self.out_projs = [] def build(self, input_shape): if self.n_clusters > 0: self.cluster_weight = self.add_weight( shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight" ) self.cluster_bias = self.add_weight( shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias" ) if self.div_val == 1: for i in range(len(self.cutoffs)): if self.d_proj != self.d_embed: weight = self.add_weight( shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}", ) self.out_projs.append(weight) else: self.out_projs.append(None) weight = self.add_weight( shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", ) bias = self.add_weight( shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", ) self.out_layers.append((weight, bias)) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = self.d_embed // (self.div_val**i) weight = self.add_weight( shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}" ) self.out_projs.append(weight) weight = self.add_weight( shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", ) bias = self.add_weight( shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", ) self.out_layers.append((weight, bias)) super().build(input_shape) @staticmethod def _logit(x, W, b, proj=None): y = x if proj is not None: y = tf.einsum("ibd,ed->ibe", y, proj) return tf.einsum("ibd,nd->ibn", y, W) + b @staticmethod def _gather_logprob(logprob, target): lp_size = shape_list(logprob) r = tf.range(lp_size[0], dtype=target.dtype) idx = tf.stack([r, target], 1) return tf.gather_nd(logprob, idx) def call(self, hidden, target, return_mean=True, training=False): head_logprob = 0 if self.n_clusters == 0: output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0]) if target is not None: loss = 
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output) out = tf.nn.log_softmax(output, axis=-1) else: hidden_sizes = shape_list(hidden) out = [] loss = tf.zeros(hidden_sizes[:2]) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: mask = (target >= l_idx) & (target < r_idx) mask_idx = tf.where(mask) cur_target = tf.boolean_mask(target, mask) - l_idx if self.div_val == 1: cur_W = self.out_layers[0][0][l_idx:r_idx] cur_b = self.out_layers[0][1][l_idx:r_idx] else: cur_W = self.out_layers[i][0] cur_b = self.out_layers[i][1] if i == 0: cur_W = tf.concat([cur_W, self.cluster_weight], 0) cur_b = tf.concat([cur_b, self.cluster_bias], 0) head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0]) head_logprob = tf.nn.log_softmax(head_logit) out.append(head_logprob[..., : self.cutoffs[0]]) if target is not None: cur_head_logprob = tf.boolean_mask(head_logprob, mask) cur_logprob = self._gather_logprob(cur_head_logprob, cur_target) else: tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i]) tail_logprob = tf.nn.log_softmax(tail_logit) cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(logprob_i) if target is not None: cur_head_logprob = tf.boolean_mask(head_logprob, mask) cur_tail_logprob = tf.boolean_mask(tail_logprob, mask) cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss)) out = tf.concat(out, axis=-1) if target is not None: if return_mean: loss = tf.reduce_mean(loss) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(loss) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "") return out
transformers/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py", "repo_id": "transformers", "token_count": 4106 }
425
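The `_gather_logprob` helper in the file above uses `tf.gather_nd` to pull out, for each position, the log-probability assigned to its target id. A self-contained sketch of that indexing trick (illustrative only, not part of the file):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0],
                      [0.1, 0.2, 3.0]])
target = tf.constant([0, 2])

logprob = tf.nn.log_softmax(logits, axis=-1)
rows = tf.range(tf.shape(target)[0], dtype=target.dtype)
idx = tf.stack([rows, target], axis=1)   # [[0, 0], [1, 2]]
picked = tf.gather_nd(logprob, idx)      # log p(target) for each row
loss = -picked                           # per-token negative log-likelihood
print(loss.numpy())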
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ViT hybrid checkpoints from the timm library.""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, base_model=False): rename_keys = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token")) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings")) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias")) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight")) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight")) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias")) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias")) 
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias")) # transformer encoder for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias")) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, base_model=False): for i in range(config.num_hidden_layers): if base_model: prefix = "" else: prefix = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def remove_classification_head_(state_dict): ignore_keys = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our ViT structure. """ # define default ViT hybrid configuration backbone_config = BitConfig( global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True, ) config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000) base_model = False # load original model from timm timm_model = timm.create_model(vit_name, pretrained=True) timm_model.eval() # load state_dict of original model, remove and rename some keys state_dict = timm_model.state_dict() if base_model: remove_classification_head_(state_dict) rename_keys = create_rename_keys(config, base_model) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, base_model) repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # load HuggingFace model if vit_name[-5:] == "in21k": model = ViTHybridModel(config).eval() else: model = ViTHybridForImageClassification(config).eval() model.load_state_dict(state_dict) # create image processor transform = create_transform(**resolve_data_config({}, model=timm_model)) timm_transforms = transform.transforms pillow_resamplings = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } processor = ViTHybridImageProcessor( do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), ) image = prepare_img() timm_pixel_values = transform(image).unsqueeze(0) pixel_values = 
processor(image, return_tensors="pt").pixel_values # verify pixel values assert torch.allclose(timm_pixel_values, pixel_values) # verify logits with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print("Predicted class:", logits.argmax(-1).item()) if base_model: timm_pooled_output = timm_model.forward_features(pixel_values) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3) else: timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {vit_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor to the hub {vit_name}") model.push_to_hub(f"ybelkada/{vit_name}") processor.push_to_hub(f"ybelkada/{vit_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_r50_s16_384", type=str, help="Name of the hybrid ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) args = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/deprecated/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py", "repo_id": "transformers", "token_count": 5670 }
426
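The conversion script above mostly renames keys one-to-one; the only structural change happens in `read_in_q_k_v`, which slices timm's fused qkv projection into the three separate matrices that the HF attention layers expect. A toy illustration of that slicing with a hypothetical hidden size of 4 (stand-in tensors, not real checkpoint weights):

import torch

hidden_size = 4
fused_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)
fused_bias = torch.arange(3 * hidden_size, dtype=torch.float32)

query_w = fused_weight[:hidden_size, :]
key_w = fused_weight[hidden_size : 2 * hidden_size, :]
value_w = fused_weight[-hidden_size:, :]
query_b = fused_bias[:hidden_size]
key_b = fused_bias[hidden_size : 2 * hidden_size]
value_b = fused_bias[-hidden_size:]

# Re-stacking the slices recovers the original fused parameters.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), fused_weight)
assert torch.equal(torch.cat([query_b, key_b, value_b], dim=0), fused_bias)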
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for DepthPro.""" from typing import TYPE_CHECKING, Optional, Union from ...image_processing_base import BatchFeature from ...image_processing_utils_fast import BaseImageProcessorFast, group_images_by_shape, reorder_images from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling, SizeDict from ...utils import ( TensorType, auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available, logging, requires_backends, ) from ...utils.import_utils import requires if TYPE_CHECKING: from .modeling_depth_pro import DepthProDepthEstimatorOutput logger = logging.get_logger(__name__) if is_torch_available(): import torch if is_torchvision_available(): from ...image_utils import pil_torch_interpolation_mapping if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F @auto_docstring @requires(backends=("torchvision", "torch")) class DepthProImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"height": 1536, "width": 1536} do_resize = True do_rescale = True do_normalize = True # DepthPro resizes image after rescaling and normalizing, # which makes it different from BaseImageProcessorFast._preprocess def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], ) -> BatchFeature: # Group images by size for batched scaling grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) if do_resize: stacked_images = self.resize( image=stacked_images, size=size, interpolation=interpolation, antialias=False, ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) # Copied from transformers.models.depth_pro.image_processing_depth_pro.DepthProImageProcessor.post_process_depth_estimation def post_process_depth_estimation( self, outputs: "DepthProDepthEstimatorOutput", target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = 
None, ) -> list[dict[str, TensorType]]: """ Post-processes the raw depth predictions from the model to generate final depth predictions which are calibrated using the field of view if provided and resized to the specified target sizes if provided. Args: outputs ([`DepthProDepthEstimatorOutput`]): Raw outputs of the model. target_sizes (`Optional[Union[TensorType, list[tuple[int, int]], None]]`, *optional*, defaults to `None`): Target sizes to resize the depth predictions. Can be a tensor of shape `(batch_size, 2)` or a list of tuples `(height, width)` for each image in the batch. If `None`, no resizing is performed. Returns: `list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth predictions, and field of view (degrees) and focal length (pixels) if `field_of_view` is given in `outputs`. Raises: `ValueError`: If the lengths of `predicted_depths`, `fovs`, or `target_sizes` are mismatched. """ requires_backends(self, "torch") predicted_depth = outputs.predicted_depth fov = outputs.field_of_view batch_size = len(predicted_depth) if target_sizes is not None and batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth" ) results = [] fov = [None] * batch_size if fov is None else fov target_sizes = [None] * batch_size if target_sizes is None else target_sizes for depth, fov_value, target_size in zip(predicted_depth, fov, target_sizes): focal_length = None if target_size is not None: # scale image w.r.t fov if fov_value is not None: width = target_size[1] focal_length = 0.5 * width / torch.tan(0.5 * torch.deg2rad(fov_value)) depth = depth * width / focal_length # interpolate depth = torch.nn.functional.interpolate( # input should be (B, C, H, W) input=depth.unsqueeze(0).unsqueeze(1), size=target_size, mode=pil_torch_interpolation_mapping[self.resample].value, ).squeeze() # invert the depth depth = 1.0 / torch.clamp(depth, min=1e-4, max=1e4) results.append( { "predicted_depth": depth, "field_of_view": fov_value, "focal_length": focal_length, } ) return results __all__ = ["DepthProImageProcessorFast"]
transformers/src/transformers/models/depth_pro/image_processing_depth_pro_fast.py/0
{ "file_path": "transformers/src/transformers/models/depth_pro/image_processing_depth_pro_fast.py", "repo_id": "transformers", "token_count": 2917 }
427
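In the post-processing above, the focal length in pixels is recovered from the predicted field of view before the depth map is rescaled, via f = 0.5 * W / tan(fov / 2). A small numeric check of that formula with hypothetical values:

import torch

width = torch.tensor(1536.0)      # target image width in pixels
fov_degrees = torch.tensor(60.0)  # hypothetical predicted field of view

focal_length = 0.5 * width / torch.tan(0.5 * torch.deg2rad(fov_degrees))
print(focal_length)               # ~1330 px; the depth map is then scaled by width / focal_length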
# coding=utf-8 # Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flax DINOv2 model.""" import collections.abc import math from typing import Optional import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling, FlaxSequenceClassifierOutput from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward from .configuration_dinov2 import Dinov2Config DINOV2_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`Dinov2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ DINOV2_INPUTS_DOCSTRING = r""" Args: pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`Dinov2ImageProcessor.__call__`] for details. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxDinov2PatchEmbeddings(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): image_size = self.config.image_size patch_size = self.config.patch_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.num_patches = num_patches self.num_channels = self.config.num_channels self.projection = nn.Conv( self.config.hidden_size, kernel_size=patch_size, strides=patch_size, padding="VALID", dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), ) # Copied from transformers.models.vit.modeling_flax_vit.FlaxViTPatchEmbeddings.__call__ def __call__(self, pixel_values): num_channels = pixel_values.shape[-1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.projection(pixel_values) batch_size, _, _, channels = embeddings.shape return jnp.reshape(embeddings, (batch_size, -1, channels)) class FlaxDinov2Embeddings(nn.Module): """Construct the CLS token, position and patch embeddings.""" config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.cls_token = self.param( "cls_token", jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"), (1, 1, self.config.hidden_size), ) if self.config.use_mask_token: self.mask_token = self.param( "mask_token", jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"), (1, self.config.hidden_size), ) self.patch_embeddings = FlaxDinov2PatchEmbeddings(self.config, dtype=self.dtype) num_patches = self.patch_embeddings.num_patches self.position_embeddings = self.param( "position_embeddings", jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"), (1, num_patches + 1, self.config.hidden_size), ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def interpolate_pos_encoding(self, config, hidden_states, height, width, position_embeddings): num_patches = hidden_states.shape[1] - 1 num_positions = position_embeddings.shape[1] - 1 if num_patches == num_positions and height == width: return position_embeddings class_pos_embed = position_embeddings[:, 0] patch_pos_embed = position_embeddings[:, 1:] dim = hidden_states.shape[-1] h = height // config.patch_size w = width // config.patch_size height, width = h + 0.1, w + 0.1 patch_pos_embed = patch_pos_embed.reshape( (1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) ) patch_pos_embed = jnp.transpose(patch_pos_embed, (0, 3, 1, 2)) target_dtype = patch_pos_embed.dtype new_height_ratio = jnp.float32(height / math.sqrt(num_positions)) new_width_ratio = jnp.float32(width / 
math.sqrt(num_positions)) scale = jnp.array([new_height_ratio, new_width_ratio], dtype=jnp.float32) translation = jnp.array([0.0, 0.0], dtype=jnp.float32) patch_pos_embed = jax.image.scale_and_translate( patch_pos_embed.astype(jnp.float32), shape=(patch_pos_embed.shape[0], patch_pos_embed.shape[1], h, w), spatial_dims=(2, 3), scale=scale, translation=translation, method="bicubic", antialias=False, ) patch_pos_embed = patch_pos_embed.astype(target_dtype) patch_pos_embed = jnp.transpose(patch_pos_embed, (0, 2, 3, 1)).reshape((position_embeddings.shape[0], -1, dim)) patch_pos_embed_expanded = jnp.tile(patch_pos_embed, (hidden_states.shape[0], 1, 1)) class_pos_embed_expanded = jnp.tile(class_pos_embed, (hidden_states.shape[0], 1, 1)) return jnp.concatenate((class_pos_embed_expanded, patch_pos_embed_expanded), axis=1) def __call__(self, pixel_values, deterministic=True): batch_size = pixel_values.shape[0] target_dtype = self.patch_embeddings.projection.dtype height, width = pixel_values.shape[1], pixel_values.shape[2] embeddings = self.patch_embeddings(pixel_values.astype(target_dtype)) cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size)) embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1) embeddings = embeddings + self.interpolate_pos_encoding( self.config, embeddings, height, width, self.position_embeddings ) embeddings = self.dropout(embeddings, deterministic=deterministic) return embeddings # Copied from transformers.models.vit.modeling_flax_vit.FlaxViTSelfAttention with ViT->Dinov2 class FlaxDinov2SelfAttention(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`:" " {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal" ), use_bias=self.config.qkv_bias, ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal" ), use_bias=self.config.qkv_bias, ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal" ), use_bias=self.config.qkv_bias, ) def __call__(self, hidden_states, deterministic: bool = True, output_attentions: bool = False): head_dim = self.config.hidden_size // self.config.num_attention_heads query_states = self.query(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) value_states = self.value(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) key_states = self.key(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = 
jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs # Copied from transformers.models.vit.modeling_flax_vit.FlaxViTSelfOutput with ViT->Dinov2 class FlaxDinov2SelfOutput(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states # Copied from transformers.models.vit.modeling_flax_vit.FlaxViTAttention with ViT->Dinov2 class FlaxDinov2Attention(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxDinov2SelfAttention(self.config, dtype=self.dtype) self.output = FlaxDinov2SelfOutput(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True, output_attentions: bool = False): attn_outputs = self.attention(hidden_states, deterministic=deterministic, output_attentions=output_attentions) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs def ones_with_scale(key, shape, scale, dtype=jnp.float32): return jnp.ones(shape, dtype) * scale class FlaxDinov2LayerScale(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.lambda1 = self.config.layerscale_value * self.param( "lambda1", jax.nn.initializers.ones, (self.config.hidden_size,), ) self.lambda1 = self.lambda1 * self.config.layerscale_value def __call__(self, hidden_states): return self.lambda1 * hidden_states # Copied from transformers.models.beit.modeling_flax_beit.FlaxBeitDropPath with Beit -> Dinov2 class FlaxDinov2DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" rate: float @nn.module.compact def __call__(self, inputs, deterministic: Optional[bool] = True): if self.rate == 0.0: return inputs keep_prob = 1.0 - self.rate if deterministic: return inputs else: shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1) # work with diff dim tensors, not just 2D ConvNets rng = self.make_rng("droppath") random_tensor = keep_prob + jax.random.uniform(rng, shape=shape, dtype=inputs.dtype) binary_tensor = jnp.floor(random_tensor) output = inputs / keep_prob * binary_tensor return output class FlaxDinov2MLP(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.fc1 = nn.Dense( self.config.hidden_size * self.config.mlp_ratio, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) self.fc2 = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) if isinstance(self.config.hidden_act, str): self.act = ACT2FN[self.config.hidden_act] else: self.act = self.config.hidden_act def 
__call__(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class FlaxDinov2SwiGLUFFN(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): hidden_features = int(self.config.hidden_size * self.config.mlp_ratio) hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 self.weights_in = nn.Dense( 2 * hidden_features, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) self.weights_out = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), dtype=self.dtype, ) def __call__(self, hidden_states): hidden_states = self.weights_in(hidden_states) x1, x2 = jnp.split(hidden_states, 2, axis=-1) hidden = nn.silu(x1) * x2 return self.weights_out(hidden) class FlaxDinov2Layer(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.norm1 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.attention = FlaxDinov2Attention(self.config, dtype=self.dtype) self.layer_scale1 = FlaxDinov2LayerScale(self.config, dtype=self.dtype) self.drop_path = FlaxDinov2DropPath(self.config.drop_path_rate) self.norm2 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) if self.config.use_swiglu_ffn: self.mlp = FlaxDinov2SwiGLUFFN(self.config, dtype=self.dtype) else: self.mlp = FlaxDinov2MLP(self.config, dtype=self.dtype) self.layer_scale2 = FlaxDinov2LayerScale(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic: bool = True, output_attentions: bool = False): self_attention_outputs = self.attention( self.norm1(hidden_states), # in Dinov2, layernorm is applied before self-attention deterministic=deterministic, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] attention_output = self.layer_scale1(attention_output) outputs = self_attention_outputs[1:] # first residual connection hidden_states = self.drop_path(attention_output) + hidden_states # in Dinov2, layernorm is also applied after self-attention layer_output = self.norm2(hidden_states) layer_output = self.mlp(layer_output) layer_output = self.layer_scale2(layer_output) # second residual connection layer_output = self.drop_path(layer_output) + hidden_states outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_flax_vit.FlaxViTLayerCollection with ViT->Dinov2 class FlaxDinov2LayerCollection(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxDinov2Layer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer(hidden_states, deterministic=deterministic, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += 
(hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) # Copied from transformers.models.vit.modeling_flax_vit.FlaxViTEncoder with ViT->Dinov2 class FlaxDinov2Encoder(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layer = FlaxDinov2LayerCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class FlaxDinov2PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Dinov2Config base_model_prefix = "dinov2" main_input_name = "pixel_values" module_class: nn.Module = None def __init__( self, config: Dinov2Config, input_shape=None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) if input_shape is None: input_shape = (1, config.image_size, config.image_size, config.num_channels) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors pixel_values = jnp.zeros(input_shape, dtype=self.dtype) params_rng, dropout_rng = jax.random.split(rng) dropout_rng, droppath_rng = jax.random.split(dropout_rng) rngs = {"params": params_rng, "dropout": dropout_rng, "droppath": droppath_rng} random_params = self.module.init(rngs, pixel_values, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, pixel_values, params: Optional[dict] = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: dropout_rng, droppath_rng = jax.random.split(dropout_rng) rngs["dropout"] = dropout_rng rngs["droppath"] = droppath_rng return self.module.apply( {"params": params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, ) class FlaxDinov2Module(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): 
self.embeddings = FlaxDinov2Embeddings(self.config, dtype=self.dtype) self.encoder = FlaxDinov2Encoder(self.config, dtype=self.dtype) self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__( self, pixel_values, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): hidden_states = self.embeddings(pixel_values, deterministic=deterministic) encoder_outputs = self.encoder( hidden_states, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = sequence_output[:, 0, :] if not return_dict: head_outputs = (sequence_output, pooled_output) return head_outputs + encoder_outputs[1:] return FlaxBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The bare Dinov2 Model transformer outputting raw hidden-states without any specific head on top.", DINOV2_START_DOCSTRING, ) class FlaxDinov2Model(FlaxDinov2PreTrainedModel): module_class = FlaxDinov2Module FLAX_VISION_MODEL_DOCSTRING = """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, FlaxDinov2Model >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base") >>> model = FlaxDinov2Model.from_pretrained("facebook/dinov2-base") >>> inputs = image_processor(images=image, return_tensors="np") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ overwrite_call_docstring(FlaxDinov2Model, FLAX_VISION_MODEL_DOCSTRING) append_replace_return_docstrings( FlaxDinov2Model, output_type=FlaxBaseModelOutputWithPooling, config_class=Dinov2Config ) class FlaxDinov2ForImageClassificationModule(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.dinov2 = FlaxDinov2Module(config=self.config, dtype=self.dtype) self.classifier = nn.Dense( self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling( self.config.initializer_range**2, "fan_in", "truncated_normal" ), ) def __call__( self, pixel_values=None, deterministic: bool = True, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.dinov2( pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] cls_token = hidden_states[:, 0] patch_tokens = hidden_states[:, 1:] linear_input = jnp.concatenate([cls_token, patch_tokens.mean(axis=1)], axis=-1) logits = self.classifier(linear_input) if not return_dict: output = (logits,) + outputs[2:] return output return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. 
""", DINOV2_START_DOCSTRING, ) class FlaxDinov2ForImageClassification(FlaxDinov2PreTrainedModel): module_class = FlaxDinov2ForImageClassificationModule FLAX_VISION_CLASSIFICATION_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoImageProcessor, FlaxDinov2ForImageClassification >>> from PIL import Image >>> import jax >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base-imagenet1k-1-layer") >>> model = FlaxDinov2ForImageClassification.from_pretrained("facebook/dinov2-base-imagenet1k-1-layer", from_pt=True) >>> inputs = image_processor(images=image, return_tensors="np") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1) >>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()]) ``` """ overwrite_call_docstring(FlaxDinov2ForImageClassification, FLAX_VISION_CLASSIFICATION_DOCSTRING) append_replace_return_docstrings( FlaxDinov2ForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=Dinov2Config ) __all__ = ["FlaxDinov2ForImageClassification", "FlaxDinov2Model", "FlaxDinov2PreTrainedModel"]
transformers/src/transformers/models/dinov2/modeling_flax_dinov2.py/0
{ "file_path": "transformers/src/transformers/models/dinov2/modeling_flax_dinov2.py", "repo_id": "transformers", "token_count": 13548 }
428
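To make the pooling in `FlaxDinov2ForImageClassificationModule` above easier to follow: the classifier input is the CLS token concatenated with the mean of the patch tokens, so it has twice the hidden size. The sketch below reproduces just that step on a dummy tensor; the shapes are illustrative and not taken from a real checkpoint.

```python
# Minimal sketch of the pooling used by FlaxDinov2ForImageClassificationModule above.
# Shapes are illustrative; no pretrained weights are involved.
import jax.numpy as jnp

batch_size, num_patches, hidden_size = 2, 256, 768
hidden_states = jnp.ones((batch_size, 1 + num_patches, hidden_size))  # dummy encoder output

cls_token = hidden_states[:, 0]        # (batch_size, hidden_size)
patch_tokens = hidden_states[:, 1:]    # (batch_size, num_patches, hidden_size)
linear_input = jnp.concatenate([cls_token, patch_tokens.mean(axis=1)], axis=-1)

print(linear_input.shape)  # (2, 1536) -> this is what the nn.Dense classifier consumes
```

This doubled feature size is why the classification head is fed `2 * hidden_size` features rather than the plain pooled output.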
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def get_configs(model): original_config = model.config encoder_config = DonutSwinConfig( image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, ) decoder_config = MBartConfig( is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len( model.decoder.tokenizer ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json) scale_embedding=True, add_final_layer_norm=True, ) return encoder_config, decoder_config def rename_key(name): if "encoder.model" in name: name = name.replace("encoder.model", "encoder") if "decoder.model" in name: name = name.replace("decoder.model", "decoder") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "embeddings.norm") if name.startswith("encoder"): if "layers" in name: name = "encoder." 
+ name if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name and "mask" not in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if name == "encoder.norm.weight": name = "encoder.layernorm.weight" if name == "encoder.norm.bias": name = "encoder.layernorm.bias" return name def convert_state_dict(orig_state_dict, model): for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[3]) block_num = int(key_split[5]) dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight" ] = val[:dim, :] orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = ( val[dim : dim * 2, :] ) orig_state_dict[ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight" ] = val[-dim:, :] else: orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = ( val[:dim] ) orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = ( val[dim : dim * 2] ) orig_state_dict[f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = ( val[-dim:] ) elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: orig_state_dict[rename_key(key)] = val return orig_state_dict def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): # load original model original_model = DonutModel.from_pretrained(model_name).eval() # load HuggingFace model encoder_config, decoder_config = get_configs(original_model) encoder = DonutSwinModel(encoder_config) decoder = MBartForCausalLM(decoder_config) model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) model.eval() state_dict = original_model.state_dict() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # verify results on scanned document dataset = load_dataset("hf-internal-testing/example-documents") # no-script image = dataset["test"][0]["image"].convert("RGB") tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True) image_processor = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] ) processor = DonutProcessor(image_processor, tokenizer) pixel_values = processor(image, return_tensors="pt").pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" question = "When is the coffee break?" 
task_prompt = task_prompt.replace("{user_input}", question) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": task_prompt = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: task_prompt = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": task_prompt = "s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": task_prompt = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt task_prompt = "hello world" else: raise ValueError("Model name not supported") prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[ "input_ids" ] original_patch_embed = original_model.encoder.model.patch_embed(pixel_values) patch_embeddings, _ = model.encoder.embeddings(pixel_values) assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3) # verify encoder hidden states original_last_hidden_state = original_model.encoder(pixel_values) last_hidden_state = model.encoder(pixel_values).last_hidden_state assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2) # verify decoder hidden states original_logits = original_model(pixel_values, prompt_tensors, None).logits logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits assert torch.allclose(original_logits, logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="naver-clova-ix/donut-base-finetuned-docvqa", required=False, type=str, help="Name of the original model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, required=False, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub.", ) args = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
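As a quick illustration of the qkv surgery performed by `convert_state_dict` above, the toy sketch below applies the same `[:dim]` / `[dim : dim * 2]` / `[-dim:]` slicing to a made-up fused weight; the sizes are arbitrary and only meant to show that the three slices tile the fused matrix.

```python
# Toy illustration of the fused-qkv split used in convert_state_dict (sizes are made up).
import torch

dim, in_features = 4, 6  # `dim` plays the role of all_head_size in the real model
fused_qkv_weight = torch.randn(3 * dim, in_features)

query_weight = fused_qkv_weight[:dim, :]
key_weight = fused_qkv_weight[dim : dim * 2, :]
value_weight = fused_qkv_weight[-dim:, :]

# The three slices exactly reassemble the fused projection.
assert torch.equal(torch.cat([query_weight, key_weight, value_weight], dim=0), fused_qkv_weight)
```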
transformers/src/transformers/models/donut/convert_donut_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/donut/convert_donut_to_pytorch.py", "repo_id": "transformers", "token_count": 4049 }
429
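Once the conversion script above has been run with `--pytorch_dump_folder_path` pointing at a local directory (`./donut-docvqa` is a hypothetical example), the converted checkpoint behaves like any other `VisionEncoderDecoderModel`. The following is a rough usage sketch, not the script's own verification code; the image path and generation settings are placeholders.

```python
# Rough usage sketch for a converted DocVQA checkpoint saved to ./donut-docvqa (hypothetical path).
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("./donut-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("./donut-docvqa")

image = Image.open("document.png").convert("RGB")  # placeholder document image
pixel_values = processor(image, return_tensors="pt").pixel_values

prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids

with torch.no_grad():
    generated = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)
print(processor.batch_decode(generated, skip_special_tokens=True)[0])
```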
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Constants used in AlphaFold.""" import collections import copy import functools from collections.abc import Mapping, Sequence from importlib import resources import numpy as np # Internal import (35fd). # Distance from one CA to next CA [trans configuration: omega = 180]. ca_ca = 3.80209737096 # Format: The list for each AA type contains chi1, chi2, chi3, chi4 in # this order (or a relevant subset from chi1 onwards). ALA and GLY don't have # chi angles so their chi angle lists are empty. chi_angles_atoms: dict[str, list[list[str]]] = { "ALA": [], # Chi5 in arginine is always 0 +- 5 degrees, so ignore it. "ARG": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "NE"], ["CG", "CD", "NE", "CZ"]], "ASN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]], "ASP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]], "CYS": [["N", "CA", "CB", "SG"]], "GLN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "OE1"]], "GLU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "OE1"]], "GLY": [], "HIS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "ND1"]], "ILE": [["N", "CA", "CB", "CG1"], ["CA", "CB", "CG1", "CD1"]], "LEU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]], "LYS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "CE"], ["CG", "CD", "CE", "NZ"]], "MET": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "SD"], ["CB", "CG", "SD", "CE"]], "PHE": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]], "PRO": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"]], "SER": [["N", "CA", "CB", "OG"]], "THR": [["N", "CA", "CB", "OG1"]], "TRP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]], "TYR": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]], "VAL": [["N", "CA", "CB", "CG1"]], } # If chi angles given in fixed-length array, this matrix determines how to mask # them for each AA type. The order is as per restype_order (see below). chi_angles_mask: list[list[float]] = [ [0.0, 0.0, 0.0, 0.0], # ALA [1.0, 1.0, 1.0, 1.0], # ARG [1.0, 1.0, 0.0, 0.0], # ASN [1.0, 1.0, 0.0, 0.0], # ASP [1.0, 0.0, 0.0, 0.0], # CYS [1.0, 1.0, 1.0, 0.0], # GLN [1.0, 1.0, 1.0, 0.0], # GLU [0.0, 0.0, 0.0, 0.0], # GLY [1.0, 1.0, 0.0, 0.0], # HIS [1.0, 1.0, 0.0, 0.0], # ILE [1.0, 1.0, 0.0, 0.0], # LEU [1.0, 1.0, 1.0, 1.0], # LYS [1.0, 1.0, 1.0, 0.0], # MET [1.0, 1.0, 0.0, 0.0], # PHE [1.0, 1.0, 0.0, 0.0], # PRO [1.0, 0.0, 0.0, 0.0], # SER [1.0, 0.0, 0.0, 0.0], # THR [1.0, 1.0, 0.0, 0.0], # TRP [1.0, 1.0, 0.0, 0.0], # TYR [1.0, 0.0, 0.0, 0.0], # VAL ] # The following chi angles are pi periodic: they can be rotated by a multiple # of pi without affecting the structure. 
chi_pi_periodic: list[list[float]] = [ [0.0, 0.0, 0.0, 0.0], # ALA [0.0, 0.0, 0.0, 0.0], # ARG [0.0, 0.0, 0.0, 0.0], # ASN [0.0, 1.0, 0.0, 0.0], # ASP [0.0, 0.0, 0.0, 0.0], # CYS [0.0, 0.0, 0.0, 0.0], # GLN [0.0, 0.0, 1.0, 0.0], # GLU [0.0, 0.0, 0.0, 0.0], # GLY [0.0, 0.0, 0.0, 0.0], # HIS [0.0, 0.0, 0.0, 0.0], # ILE [0.0, 0.0, 0.0, 0.0], # LEU [0.0, 0.0, 0.0, 0.0], # LYS [0.0, 0.0, 0.0, 0.0], # MET [0.0, 1.0, 0.0, 0.0], # PHE [0.0, 0.0, 0.0, 0.0], # PRO [0.0, 0.0, 0.0, 0.0], # SER [0.0, 0.0, 0.0, 0.0], # THR [0.0, 0.0, 0.0, 0.0], # TRP [0.0, 1.0, 0.0, 0.0], # TYR [0.0, 0.0, 0.0, 0.0], # VAL [0.0, 0.0, 0.0, 0.0], # UNK ] # Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi, # psi and chi angles: # 0: 'backbone group', # 1: 'pre-omega-group', (empty) # 2: 'phi-group', (currently empty, because it defines only hydrogens) # 3: 'psi-group', # 4,5,6,7: 'chi1,2,3,4-group' # The atom positions are relative to the axis-end-atom of the corresponding # rotation axis. The x-axis is in direction of the rotation axis, and the y-axis # is defined such that the dihedral-angle-definiting atom (the last entry in # chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate). # format: [atomname, group_idx, rel_position] rigid_group_atom_positions: dict[str, list[tuple[str, int, tuple[float, float, float]]]] = { "ALA": [ ("N", 0, (-0.525, 1.363, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.526, -0.000, -0.000)), ("CB", 0, (-0.529, -0.774, -1.205)), ("O", 3, (0.627, 1.062, 0.000)), ], "ARG": [ ("N", 0, (-0.524, 1.362, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.525, -0.000, -0.000)), ("CB", 0, (-0.524, -0.778, -1.209)), ("O", 3, (0.626, 1.062, 0.000)), ("CG", 4, (0.616, 1.390, -0.000)), ("CD", 5, (0.564, 1.414, 0.000)), ("NE", 6, (0.539, 1.357, -0.000)), ("NH1", 7, (0.206, 2.301, 0.000)), ("NH2", 7, (2.078, 0.978, -0.000)), ("CZ", 7, (0.758, 1.093, -0.000)), ], "ASN": [ ("N", 0, (-0.536, 1.357, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.526, -0.000, -0.000)), ("CB", 0, (-0.531, -0.787, -1.200)), ("O", 3, (0.625, 1.062, 0.000)), ("CG", 4, (0.584, 1.399, 0.000)), ("ND2", 5, (0.593, -1.188, 0.001)), ("OD1", 5, (0.633, 1.059, 0.000)), ], "ASP": [ ("N", 0, (-0.525, 1.362, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.527, 0.000, -0.000)), ("CB", 0, (-0.526, -0.778, -1.208)), ("O", 3, (0.626, 1.062, -0.000)), ("CG", 4, (0.593, 1.398, -0.000)), ("OD1", 5, (0.610, 1.091, 0.000)), ("OD2", 5, (0.592, -1.101, -0.003)), ], "CYS": [ ("N", 0, (-0.522, 1.362, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.524, 0.000, 0.000)), ("CB", 0, (-0.519, -0.773, -1.212)), ("O", 3, (0.625, 1.062, -0.000)), ("SG", 4, (0.728, 1.653, 0.000)), ], "GLN": [ ("N", 0, (-0.526, 1.361, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.526, 0.000, 0.000)), ("CB", 0, (-0.525, -0.779, -1.207)), ("O", 3, (0.626, 1.062, -0.000)), ("CG", 4, (0.615, 1.393, 0.000)), ("CD", 5, (0.587, 1.399, -0.000)), ("NE2", 6, (0.593, -1.189, -0.001)), ("OE1", 6, (0.634, 1.060, 0.000)), ], "GLU": [ ("N", 0, (-0.528, 1.361, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.526, -0.000, -0.000)), ("CB", 0, (-0.526, -0.781, -1.207)), ("O", 3, (0.626, 1.062, 0.000)), ("CG", 4, (0.615, 1.392, 0.000)), ("CD", 5, (0.600, 1.397, 0.000)), ("OE1", 6, (0.607, 1.095, -0.000)), ("OE2", 6, (0.589, -1.104, -0.001)), ], "GLY": [ ("N", 0, (-0.572, 1.337, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.517, -0.000, -0.000)), ("O", 3, (0.626, 1.062, -0.000)), ], "HIS": [ ("N", 
0, (-0.527, 1.360, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.525, 0.000, 0.000)), ("CB", 0, (-0.525, -0.778, -1.208)), ("O", 3, (0.625, 1.063, 0.000)), ("CG", 4, (0.600, 1.370, -0.000)), ("CD2", 5, (0.889, -1.021, 0.003)), ("ND1", 5, (0.744, 1.160, -0.000)), ("CE1", 5, (2.030, 0.851, 0.002)), ("NE2", 5, (2.145, -0.466, 0.004)), ], "ILE": [ ("N", 0, (-0.493, 1.373, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.527, -0.000, -0.000)), ("CB", 0, (-0.536, -0.793, -1.213)), ("O", 3, (0.627, 1.062, -0.000)), ("CG1", 4, (0.534, 1.437, -0.000)), ("CG2", 4, (0.540, -0.785, -1.199)), ("CD1", 5, (0.619, 1.391, 0.000)), ], "LEU": [ ("N", 0, (-0.520, 1.363, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.525, -0.000, -0.000)), ("CB", 0, (-0.522, -0.773, -1.214)), ("O", 3, (0.625, 1.063, -0.000)), ("CG", 4, (0.678, 1.371, 0.000)), ("CD1", 5, (0.530, 1.430, -0.000)), ("CD2", 5, (0.535, -0.774, 1.200)), ], "LYS": [ ("N", 0, (-0.526, 1.362, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.526, 0.000, 0.000)), ("CB", 0, (-0.524, -0.778, -1.208)), ("O", 3, (0.626, 1.062, -0.000)), ("CG", 4, (0.619, 1.390, 0.000)), ("CD", 5, (0.559, 1.417, 0.000)), ("CE", 6, (0.560, 1.416, 0.000)), ("NZ", 7, (0.554, 1.387, 0.000)), ], "MET": [ ("N", 0, (-0.521, 1.364, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.525, 0.000, 0.000)), ("CB", 0, (-0.523, -0.776, -1.210)), ("O", 3, (0.625, 1.062, -0.000)), ("CG", 4, (0.613, 1.391, -0.000)), ("SD", 5, (0.703, 1.695, 0.000)), ("CE", 6, (0.320, 1.786, -0.000)), ], "PHE": [ ("N", 0, (-0.518, 1.363, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.524, 0.000, -0.000)), ("CB", 0, (-0.525, -0.776, -1.212)), ("O", 3, (0.626, 1.062, -0.000)), ("CG", 4, (0.607, 1.377, 0.000)), ("CD1", 5, (0.709, 1.195, -0.000)), ("CD2", 5, (0.706, -1.196, 0.000)), ("CE1", 5, (2.102, 1.198, -0.000)), ("CE2", 5, (2.098, -1.201, -0.000)), ("CZ", 5, (2.794, -0.003, -0.001)), ], "PRO": [ ("N", 0, (-0.566, 1.351, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.527, -0.000, 0.000)), ("CB", 0, (-0.546, -0.611, -1.293)), ("O", 3, (0.621, 1.066, 0.000)), ("CG", 4, (0.382, 1.445, 0.0)), # ('CD', 5, (0.427, 1.440, 0.0)), ("CD", 5, (0.477, 1.424, 0.0)), # manually made angle 2 degrees larger ], "SER": [ ("N", 0, (-0.529, 1.360, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.525, -0.000, -0.000)), ("CB", 0, (-0.518, -0.777, -1.211)), ("O", 3, (0.626, 1.062, -0.000)), ("OG", 4, (0.503, 1.325, 0.000)), ], "THR": [ ("N", 0, (-0.517, 1.364, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.526, 0.000, -0.000)), ("CB", 0, (-0.516, -0.793, -1.215)), ("O", 3, (0.626, 1.062, 0.000)), ("CG2", 4, (0.550, -0.718, -1.228)), ("OG1", 4, (0.472, 1.353, 0.000)), ], "TRP": [ ("N", 0, (-0.521, 1.363, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.525, -0.000, 0.000)), ("CB", 0, (-0.523, -0.776, -1.212)), ("O", 3, (0.627, 1.062, 0.000)), ("CG", 4, (0.609, 1.370, -0.000)), ("CD1", 5, (0.824, 1.091, 0.000)), ("CD2", 5, (0.854, -1.148, -0.005)), ("CE2", 5, (2.186, -0.678, -0.007)), ("CE3", 5, (0.622, -2.530, -0.007)), ("NE1", 5, (2.140, 0.690, -0.004)), ("CH2", 5, (3.028, -2.890, -0.013)), ("CZ2", 5, (3.283, -1.543, -0.011)), ("CZ3", 5, (1.715, -3.389, -0.011)), ], "TYR": [ ("N", 0, (-0.522, 1.362, 0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.524, -0.000, -0.000)), ("CB", 0, (-0.522, -0.776, -1.213)), ("O", 3, (0.627, 1.062, -0.000)), ("CG", 4, (0.607, 1.382, -0.000)), ("CD1", 5, (0.716, 1.195, -0.000)), ("CD2", 5, (0.713, -1.194, -0.001)), ("CE1", 
5, (2.107, 1.200, -0.002)), ("CE2", 5, (2.104, -1.201, -0.003)), ("OH", 5, (4.168, -0.002, -0.005)), ("CZ", 5, (2.791, -0.001, -0.003)), ], "VAL": [ ("N", 0, (-0.494, 1.373, -0.000)), ("CA", 0, (0.000, 0.000, 0.000)), ("C", 0, (1.527, -0.000, -0.000)), ("CB", 0, (-0.533, -0.795, -1.213)), ("O", 3, (0.627, 1.062, -0.000)), ("CG1", 4, (0.540, 1.429, -0.000)), ("CG2", 4, (0.533, -0.776, 1.203)), ], } # A list of atoms (excluding hydrogen) for each AA type. PDB naming convention. residue_atoms: dict[str, list[str]] = { "ALA": ["C", "CA", "CB", "N", "O"], "ARG": ["C", "CA", "CB", "CG", "CD", "CZ", "N", "NE", "O", "NH1", "NH2"], "ASP": ["C", "CA", "CB", "CG", "N", "O", "OD1", "OD2"], "ASN": ["C", "CA", "CB", "CG", "N", "ND2", "O", "OD1"], "CYS": ["C", "CA", "CB", "N", "O", "SG"], "GLU": ["C", "CA", "CB", "CG", "CD", "N", "O", "OE1", "OE2"], "GLN": ["C", "CA", "CB", "CG", "CD", "N", "NE2", "O", "OE1"], "GLY": ["C", "CA", "N", "O"], "HIS": ["C", "CA", "CB", "CG", "CD2", "CE1", "N", "ND1", "NE2", "O"], "ILE": ["C", "CA", "CB", "CG1", "CG2", "CD1", "N", "O"], "LEU": ["C", "CA", "CB", "CG", "CD1", "CD2", "N", "O"], "LYS": ["C", "CA", "CB", "CG", "CD", "CE", "N", "NZ", "O"], "MET": ["C", "CA", "CB", "CG", "CE", "N", "O", "SD"], "PHE": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O"], "PRO": ["C", "CA", "CB", "CG", "CD", "N", "O"], "SER": ["C", "CA", "CB", "N", "O", "OG"], "THR": ["C", "CA", "CB", "CG2", "N", "O", "OG1"], "TRP": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE2", "CE3", "CZ2", "CZ3", "CH2", "N", "NE1", "O"], "TYR": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O", "OH"], "VAL": ["C", "CA", "CB", "CG1", "CG2", "N", "O"], } # Naming swaps for ambiguous atom names. # Due to symmetries in the amino acids the naming of atoms is ambiguous in # 4 of the 20 amino acids. # (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities # in LEU, VAL and ARG can be resolved by using the 3d constellations of # the 'ambiguous' atoms and their neighbours) # TODO: ^ interpret this residue_atom_renaming_swaps: dict[str, dict[str, str]] = { "ASP": {"OD1": "OD2"}, "GLU": {"OE1": "OE2"}, "PHE": {"CD1": "CD2", "CE1": "CE2"}, "TYR": {"CD1": "CD2", "CE1": "CE2"}, } # Van der Waals radii [Angstroem] of the atoms (from Wikipedia) van_der_waals_radius: dict[str, float] = { "C": 1.7, "N": 1.55, "O": 1.52, "S": 1.8, } Bond = collections.namedtuple("Bond", ["atom1_name", "atom2_name", "length", "stddev"]) BondAngle = collections.namedtuple( "BondAngle", ["atom1_name", "atom2_name", "atom3name", "angle_rad", "stddev"], ) def map_structure_with_atom_order(in_list: list, first_call: bool = True) -> list: # Maps strings in a nested list structure to their corresponding index in atom_order if first_call: in_list = copy.deepcopy(in_list) for i in range(len(in_list)): if isinstance(in_list[i], list): in_list[i] = map_structure_with_atom_order(in_list[i], first_call=False) elif isinstance(in_list[i], str): in_list[i] = atom_order[in_list[i]] else: raise TypeError("Unexpected type when mapping nested lists!") return in_list @functools.cache def load_stereo_chemical_props() -> tuple[ Mapping[str, list[Bond]], Mapping[str, list[Bond]], Mapping[str, list[BondAngle]], ]: """Load stereo_chemical_props.txt into a nice structure. Load literature values for bond lengths and bond angles and translate bond angles into the length of the opposite edge of the triangle ("residue_virtual_bonds"). 
Returns: residue_bonds: dict that maps resname --> list of Bond tuples residue_virtual_bonds: dict that maps resname --> list of Bond tuples residue_bond_angles: dict that maps resname --> list of BondAngle tuples """ # TODO: this file should be downloaded in a setup script stereo_chemical_props = resources.read_text("openfold.resources", "stereo_chemical_props.txt") lines_iter = iter(stereo_chemical_props.splitlines()) # Load bond lengths. residue_bonds: dict[str, list[Bond]] = {} next(lines_iter) # Skip header line. for line in lines_iter: if line.strip() == "-": break bond, resname, bond_length, stddev = line.split() atom1, atom2 = bond.split("-") if resname not in residue_bonds: residue_bonds[resname] = [] residue_bonds[resname].append(Bond(atom1, atom2, float(bond_length), float(stddev))) residue_bonds["UNK"] = [] # Load bond angles. residue_bond_angles: dict[str, list[BondAngle]] = {} next(lines_iter) # Skip empty line. next(lines_iter) # Skip header line. for line in lines_iter: if line.strip() == "-": break bond, resname, angle_degree, stddev_degree = line.split() atom1, atom2, atom3 = bond.split("-") if resname not in residue_bond_angles: residue_bond_angles[resname] = [] residue_bond_angles[resname].append( BondAngle( atom1, atom2, atom3, float(angle_degree) / 180.0 * np.pi, float(stddev_degree) / 180.0 * np.pi, ) ) residue_bond_angles["UNK"] = [] def make_bond_key(atom1_name: str, atom2_name: str) -> str: """Unique key to lookup bonds.""" return "-".join(sorted([atom1_name, atom2_name])) # Translate bond angles into distances ("virtual bonds"). residue_virtual_bonds: dict[str, list[Bond]] = {} for resname, bond_angles in residue_bond_angles.items(): # Create a fast lookup dict for bond lengths. bond_cache: dict[str, Bond] = {} for b in residue_bonds[resname]: bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b residue_virtual_bonds[resname] = [] for ba in bond_angles: bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)] bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)] # Compute distance between atom1 and atom3 using the law of cosines # c^2 = a^2 + b^2 - 2ab*cos(gamma). gamma = ba.angle_rad length = np.sqrt(bond1.length**2 + bond2.length**2 - 2 * bond1.length * bond2.length * np.cos(gamma)) # Propagation of uncertainty assuming uncorrelated errors. dl_outer = 0.5 / length dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer stddev = np.sqrt( (dl_dgamma * ba.stddev) ** 2 + (dl_db1 * bond1.stddev) ** 2 + (dl_db2 * bond2.stddev) ** 2 ) residue_virtual_bonds[resname].append(Bond(ba.atom1_name, ba.atom3name, length, stddev)) return (residue_bonds, residue_virtual_bonds, residue_bond_angles) # Between-residue bond lengths for general bonds (first element) and for Proline # (second element). between_res_bond_length_c_n: tuple[float, float] = (1.329, 1.341) between_res_bond_length_stddev_c_n: tuple[float, float] = (0.014, 0.016) # Between-residue cos_angles. between_res_cos_angles_c_n_ca: tuple[float, float] = (-0.5203, 0.0353) # degrees: 121.352 +- 2.315 between_res_cos_angles_ca_c_n: tuple[float, float] = (-0.4473, 0.0311) # degrees: 116.568 +- 1.995 # This mapping is used when we need to store atom data in a format that requires # fixed atom data size for every residue (e.g. a numpy array). 
atom_types: list[str] = [ "N", "CA", "C", "CB", "O", "CG", "CG1", "CG2", "OG", "OG1", "SG", "CD", "CD1", "CD2", "ND1", "ND2", "OD1", "OD2", "SD", "CE", "CE1", "CE2", "CE3", "NE", "NE1", "NE2", "OE1", "OE2", "CH2", "NH1", "NH2", "OH", "CZ", "CZ2", "CZ3", "NZ", "OXT", ] atom_order: dict[str, int] = {atom_type: i for i, atom_type in enumerate(atom_types)} atom_type_num = len(atom_types) # := 37. # A compact atom encoding with 14 columns # pylint: disable=line-too-long # pylint: disable=bad-whitespace restype_name_to_atom14_names: dict[str, list[str]] = { "ALA": ["N", "CA", "C", "O", "CB", "", "", "", "", "", "", "", "", ""], "ARG": ["N", "CA", "C", "O", "CB", "CG", "CD", "NE", "CZ", "NH1", "NH2", "", "", ""], "ASN": ["N", "CA", "C", "O", "CB", "CG", "OD1", "ND2", "", "", "", "", "", ""], "ASP": ["N", "CA", "C", "O", "CB", "CG", "OD1", "OD2", "", "", "", "", "", ""], "CYS": ["N", "CA", "C", "O", "CB", "SG", "", "", "", "", "", "", "", ""], "GLN": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "NE2", "", "", "", "", ""], "GLU": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "OE2", "", "", "", "", ""], "GLY": ["N", "CA", "C", "O", "", "", "", "", "", "", "", "", "", ""], "HIS": ["N", "CA", "C", "O", "CB", "CG", "ND1", "CD2", "CE1", "NE2", "", "", "", ""], "ILE": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "CD1", "", "", "", "", "", ""], "LEU": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "", "", "", "", "", ""], "LYS": ["N", "CA", "C", "O", "CB", "CG", "CD", "CE", "NZ", "", "", "", "", ""], "MET": ["N", "CA", "C", "O", "CB", "CG", "SD", "CE", "", "", "", "", "", ""], "PHE": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "", "", ""], "PRO": ["N", "CA", "C", "O", "CB", "CG", "CD", "", "", "", "", "", "", ""], "SER": ["N", "CA", "C", "O", "CB", "OG", "", "", "", "", "", "", "", ""], "THR": ["N", "CA", "C", "O", "CB", "OG1", "CG2", "", "", "", "", "", "", ""], "TRP": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "NE1", "CE2", "CE3", "CZ2", "CZ3", "CH2"], "TYR": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "OH", "", ""], "VAL": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "", "", "", "", "", "", ""], "UNK": ["", "", "", "", "", "", "", "", "", "", "", "", "", ""], } # pylint: enable=line-too-long # pylint: enable=bad-whitespace # This is the standard residue order when coding AA type as a number. # Reproduce it by taking 3-letter AA codes and sorting them alphabetically. restypes: list[str] = [ "A", "R", "N", "D", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V", ] restype_order: dict[str, int] = {restype: i for i, restype in enumerate(restypes)} restype_num = len(restypes) # := 20. unk_restype_index = restype_num # Catch-all index for unknown restypes. restypes_with_x: list[str] = restypes + ["X"] restype_order_with_x: dict[str, int] = {restype: i for i, restype in enumerate(restypes_with_x)} def sequence_to_onehot(sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False) -> np.ndarray: """Maps the given sequence into a one-hot encoded matrix. Args: sequence: An amino acid sequence. mapping: A dictionary mapping amino acids to integers. map_unknown_to_x: If True, any amino acid that is not in the mapping will be mapped to the unknown amino acid 'X'. If the mapping doesn't contain amino acid 'X', an error will be thrown. If False, any amino acid not in the mapping will throw an error. Returns: A numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of the sequence. 
Raises: ValueError: If the mapping doesn't contain values from 0 to num_unique_aas - 1 without any gaps. """ num_entries = max(mapping.values()) + 1 if sorted(set(mapping.values())) != list(range(num_entries)): raise ValueError( "The mapping must have values from 0 to num_unique_aas-1 without any gaps. Got: %s" % sorted(mapping.values()) ) one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32) for aa_index, aa_type in enumerate(sequence): if map_unknown_to_x: if aa_type.isalpha() and aa_type.isupper(): aa_id = mapping.get(aa_type, mapping["X"]) else: raise ValueError(f"Invalid character in the sequence: {aa_type}") else: aa_id = mapping[aa_type] one_hot_arr[aa_index, aa_id] = 1 return one_hot_arr restype_1to3: dict[str, str] = { "A": "ALA", "R": "ARG", "N": "ASN", "D": "ASP", "C": "CYS", "Q": "GLN", "E": "GLU", "G": "GLY", "H": "HIS", "I": "ILE", "L": "LEU", "K": "LYS", "M": "MET", "F": "PHE", "P": "PRO", "S": "SER", "T": "THR", "W": "TRP", "Y": "TYR", "V": "VAL", } # NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple # 1-to-1 mapping of 3 letter names to one letter names. The latter contains # many more, and less common, three letter names as keys and maps many of these # to the same one letter name (including 'X' and 'U' which we don't use here). restype_3to1: dict[str, str] = {v: k for k, v in restype_1to3.items()} # Define a restype name for all unknown residues. unk_restype = "UNK" resnames: list[str] = [restype_1to3[r] for r in restypes] + [unk_restype] resname_to_idx: dict[str, int] = {resname: i for i, resname in enumerate(resnames)} # The mapping here uses hhblits convention, so that B is mapped to D, J and O # are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the # remaining 20 amino acids are kept in alphabetical order. # There are 2 non-amino acid codes, X (representing any amino acid) and # "-" representing a missing amino acid in an alignment. The id for these # codes is put at the end (20 and 21) so that they can easily be ignored if # desired. HHBLITS_AA_TO_ID: dict[str, int] = { "A": 0, "B": 2, "C": 1, "D": 2, "E": 3, "F": 4, "G": 5, "H": 6, "I": 7, "J": 20, "K": 8, "L": 9, "M": 10, "N": 11, "O": 20, "P": 12, "Q": 13, "R": 14, "S": 15, "T": 16, "U": 1, "V": 17, "W": 18, "X": 20, "Y": 19, "Z": 3, "-": 21, } # Partial inversion of HHBLITS_AA_TO_ID. ID_TO_HHBLITS_AA: dict[int, str] = { 0: "A", 1: "C", # Also U. 2: "D", # Also B. 3: "E", # Also Z. 4: "F", 5: "G", 6: "H", 7: "I", 8: "K", 9: "L", 10: "M", 11: "N", 12: "P", 13: "Q", 14: "R", 15: "S", 16: "T", 17: "V", 18: "W", 19: "Y", 20: "X", # Includes J and O. 21: "-", } restypes_with_x_and_gap: list[str] = restypes + ["X", "-"] MAP_HHBLITS_AATYPE_TO_OUR_AATYPE: tuple[int, ...] = tuple( restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i]) for i in range(len(restypes_with_x_and_gap)) ) def _make_standard_atom_mask() -> np.ndarray: """Returns [num_res_types, num_atom_types] mask array.""" # +1 to account for unknown (all 0s). mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32) for restype, restype_letter in enumerate(restypes): restype_name = restype_1to3[restype_letter] atom_names = residue_atoms[restype_name] for atom_name in atom_names: atom_type = atom_order[atom_name] mask[restype, atom_type] = 1 return mask STANDARD_ATOM_MASK = _make_standard_atom_mask() # A one hot representation for the first and second atoms defining the axis # of rotation for each chi-angle in each residue. 
def chi_angle_atom(atom_index: int) -> np.ndarray: """Define chi-angle rigid groups via one-hot representations.""" chi_angles_index = {} one_hots = [] for k, v in chi_angles_atoms.items(): indices = [atom_types.index(s[atom_index]) for s in v] indices.extend([-1] * (4 - len(indices))) chi_angles_index[k] = indices for r in restypes: res3 = restype_1to3[r] one_hot = np.eye(atom_type_num)[chi_angles_index[res3]] one_hots.append(one_hot) one_hots.append(np.zeros([4, atom_type_num])) # Add zeros for residue `X`. one_hot = np.stack(one_hots, axis=0) one_hot = np.transpose(one_hot, [0, 2, 1]) return one_hot chi_atom_1_one_hot = chi_angle_atom(1) chi_atom_2_one_hot = chi_angle_atom(2) # An array like chi_angles_atoms but using indices rather than names. chi_angles_atom_indices_list: list[list[list[str]]] = [chi_angles_atoms[restype_1to3[r]] for r in restypes] chi_angles_atom_indices_ours: list = map_structure_with_atom_order(chi_angles_atom_indices_list) chi_angles_atom_indices = np.array( [chi_atoms + ([[0, 0, 0, 0]] * (4 - len(chi_atoms))) for chi_atoms in chi_angles_atom_indices_list] ) # Mapping from (res_name, atom_name) pairs to the atom's chi group index # and atom index within that group. chi_groups_for_atom: dict[tuple[str, str], list[tuple[int, int]]] = collections.defaultdict(list) for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items(): for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res): for atom_i, atom in enumerate(chi_group): chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i)) chi_groups_for_atom = dict(chi_groups_for_atom) def _make_rigid_transformation_4x4(ex: np.ndarray, ey: np.ndarray, translation: np.ndarray) -> np.ndarray: """Create a rigid 4x4 transformation matrix from two axes and transl.""" # Normalize ex. 
ex_normalized = ex / np.linalg.norm(ex) # make ey perpendicular to ex ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized ey_normalized /= np.linalg.norm(ey_normalized) # compute ez as cross product eznorm = np.cross(ex_normalized, ey_normalized) m = np.stack([ex_normalized, ey_normalized, eznorm, translation]).transpose() m = np.concatenate([m, [[0.0, 0.0, 0.0, 1.0]]], axis=0) return m # create an array with (restype, atomtype) --> rigid_group_idx # and an array with (restype, atomtype, coord) for the atom positions # and compute affine transformation matrices (4,4) from one rigid group to the # previous group restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=int) restype_atom37_mask = np.zeros([21, 37], dtype=np.float32) restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32) restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=int) restype_atom14_mask = np.zeros([21, 14], dtype=np.float32) restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32) restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32) def _make_rigid_group_constants() -> None: """Fill the arrays above.""" for restype, restype_letter in enumerate(restypes): resname = restype_1to3[restype_letter] for atomname, group_idx, atom_position in rigid_group_atom_positions[resname]: atomtype = atom_order[atomname] restype_atom37_to_rigid_group[restype, atomtype] = group_idx restype_atom37_mask[restype, atomtype] = 1 restype_atom37_rigid_group_positions[restype, atomtype, :] = atom_position atom14idx = restype_name_to_atom14_names[resname].index(atomname) restype_atom14_to_rigid_group[restype, atom14idx] = group_idx restype_atom14_mask[restype, atom14idx] = 1 restype_atom14_rigid_group_positions[restype, atom14idx, :] = atom_position for restype, restype_letter in enumerate(restypes): resname = restype_1to3[restype_letter] atom_positions: dict[str, np.ndarray] = { name: np.array(pos) for name, _, pos in rigid_group_atom_positions[resname] } # backbone to backbone is the identity transform restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4) # pre-omega-frame to backbone (currently dummy identity matrix) restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4) # phi-frame to backbone mat = _make_rigid_transformation_4x4( ex=atom_positions["N"] - atom_positions["CA"], ey=np.array([1.0, 0.0, 0.0]), translation=atom_positions["N"], ) restype_rigid_group_default_frame[restype, 2, :, :] = mat # psi-frame to backbone mat = _make_rigid_transformation_4x4( ex=atom_positions["C"] - atom_positions["CA"], ey=atom_positions["CA"] - atom_positions["N"], translation=atom_positions["C"], ) restype_rigid_group_default_frame[restype, 3, :, :] = mat # chi1-frame to backbone if chi_angles_mask[restype][0]: base_atom_names = chi_angles_atoms[resname][0] base_atom_positions = [atom_positions[name] for name in base_atom_names] mat = _make_rigid_transformation_4x4( ex=base_atom_positions[2] - base_atom_positions[1], ey=base_atom_positions[0] - base_atom_positions[1], translation=base_atom_positions[2], ) restype_rigid_group_default_frame[restype, 4, :, :] = mat # chi2-frame to chi1-frame # chi3-frame to chi2-frame # chi4-frame to chi3-frame # luckily all rotation axes for the next frame start at (0,0,0) of the # previous frame for chi_idx in range(1, 4): if chi_angles_mask[restype][chi_idx]: axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2] axis_end_atom_position = atom_positions[axis_end_atom_name] mat = _make_rigid_transformation_4x4( 
ex=axis_end_atom_position, ey=np.array([-1.0, 0.0, 0.0]), translation=axis_end_atom_position, ) restype_rigid_group_default_frame[restype, 4 + chi_idx, :, :] = mat _make_rigid_group_constants() def make_atom14_dists_bounds( overlap_tolerance: float = 1.5, bond_length_tolerance_factor: int = 15, ) -> dict[str, np.ndarray]: """compute upper and lower bounds for bonds to assess violations.""" restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32) restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32) restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32) residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props() for restype, restype_letter in enumerate(restypes): resname = restype_1to3[restype_letter] atom_list = restype_name_to_atom14_names[resname] # create lower and upper bounds for clashes for atom1_idx, atom1_name in enumerate(atom_list): if not atom1_name: continue atom1_radius = van_der_waals_radius[atom1_name[0]] for atom2_idx, atom2_name in enumerate(atom_list): if (not atom2_name) or atom1_idx == atom2_idx: continue atom2_radius = van_der_waals_radius[atom2_name[0]] lower = atom1_radius + atom2_radius - overlap_tolerance upper = 1e10 restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper # overwrite lower and upper bounds for bonds and angles for b in residue_bonds[resname] + residue_virtual_bonds[resname]: atom1_idx = atom_list.index(b.atom1_name) atom2_idx = atom_list.index(b.atom2_name) lower = b.length - bond_length_tolerance_factor * b.stddev upper = b.length + bond_length_tolerance_factor * b.stddev restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper restype_atom14_bond_stddev[restype, atom1_idx, atom2_idx] = b.stddev restype_atom14_bond_stddev[restype, atom2_idx, atom1_idx] = b.stddev return { "lower_bound": restype_atom14_bond_lower_bound, # shape (21,14,14) "upper_bound": restype_atom14_bond_upper_bound, # shape (21,14,14) "stddev": restype_atom14_bond_stddev, # shape (21,14,14) } restype_atom14_ambiguous_atoms = np.zeros((21, 14), dtype=np.float32) restype_atom14_ambiguous_atoms_swap_idx: np.ndarray = np.tile(np.arange(14, dtype=int), (21, 1)) def _make_atom14_ambiguity_feats() -> None: for res, pairs in residue_atom_renaming_swaps.items(): res_idx = restype_order[restype_3to1[res]] for atom1, atom2 in pairs.items(): atom1_idx = restype_name_to_atom14_names[res].index(atom1) atom2_idx = restype_name_to_atom14_names[res].index(atom2) restype_atom14_ambiguous_atoms[res_idx, atom1_idx] = 1 restype_atom14_ambiguous_atoms[res_idx, atom2_idx] = 1 restype_atom14_ambiguous_atoms_swap_idx[res_idx, atom1_idx] = atom2_idx restype_atom14_ambiguous_atoms_swap_idx[res_idx, atom2_idx] = atom1_idx _make_atom14_ambiguity_feats() def aatype_to_str_sequence(aatype: Sequence[int]) -> str: return "".join([restypes_with_x[aatype[i]] for i in range(len(aatype))])
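The "virtual bonds" built in `load_stereo_chemical_props` above are just the law of cosines plus first-order error propagation, so they can be checked by hand. The sketch below uses approximate backbone values picked for illustration; they are not read from `stereo_chemical_props.txt`.

```python
# Standalone check of the virtual-bond math in load_stereo_chemical_props above.
# Given bonds a (N-CA) and b (CA-C) and the N-CA-C angle gamma, the N...C distance is
# c = sqrt(a^2 + b^2 - 2*a*b*cos(gamma)); the stddev uses uncorrelated error propagation.
import numpy as np

a, b = 1.459, 1.525                   # approximate bond lengths [Angstrom], illustrative
sigma_a, sigma_b = 0.020, 0.026       # illustrative standard deviations
gamma = np.deg2rad(111.0)             # approximate N-CA-C angle
sigma_gamma = np.deg2rad(2.8)

c = np.sqrt(a**2 + b**2 - 2 * a * b * np.cos(gamma))

dl_outer = 0.5 / c
dc_dgamma = (2 * a * b * np.sin(gamma)) * dl_outer
dc_da = (2 * a - 2 * b * np.cos(gamma)) * dl_outer
dc_db = (2 * b - 2 * a * np.cos(gamma)) * dl_outer
sigma_c = np.sqrt((dc_dgamma * sigma_gamma) ** 2 + (dc_da * sigma_a) ** 2 + (dc_db * sigma_b) ** 2)

print(f"virtual N...C bond: {c:.3f} +/- {sigma_c:.3f} Angstrom")  # roughly 2.46 A
```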
transformers/src/transformers/models/esm/openfold_utils/residue_constants.py/0
{ "file_path": "transformers/src/transformers/models/esm/openfold_utils/residue_constants.py", "repo_id": "transformers", "token_count": 19076 }
430
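For completeness, here is a quick usage sketch of the sequence helpers defined in the residue constants module above. The import path assumes the module is reachable as `transformers.models.esm.openfold_utils.residue_constants`; adjust it to wherever the file lives in your environment.

```python
# Quick usage sketch for the residue-constants helpers above (import path is an assumption).
from transformers.models.esm.openfold_utils import residue_constants as rc

seq = "MKTAYIAKQR"  # arbitrary example sequence
onehot = rc.sequence_to_onehot(seq, rc.restype_order_with_x, map_unknown_to_x=True)

print(onehot.shape)          # (10, 21): 20 standard residues plus 'X'
print(rc.restype_1to3["M"])  # 'MET'
print(rc.atom_order["CA"])   # 1: index of C-alpha in the 37-atom ("atom37") layout
```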
# coding=utf-8 # Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Falcon model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from torch.nn import functional as F from ...activations import get_activation from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( AttentionMaskConverter, ) from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import PreTrainedModel from ...utils import ( auto_docstring, logging, ) from .configuration_falcon import FalconConfig if is_flash_attn_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) # NOTE(Hesslow): Unfortunately we did not fuse matmul and bias during training, this means that there's one additional quantization to bfloat16 between the operations. # In order not to degrade the quality of our HF-port, we keep these characteristics in the final model. class FalconLinear(nn.Linear): def forward(self, input: torch.Tensor) -> torch.Tensor: hidden_states = input @ self.weight.T if self.bias is None: return hidden_states return hidden_states + self.bias # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. 
Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Falcon class FalconRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: FalconConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor( 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor( 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) # Note: alibi will added to the attention bias that will be applied to the query, key product of attention # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length) # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) # => the query_length dimension will then be broadcasted correctly # This is more or less identical to T5's relative position bias: # 
https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None].bfloat16() * arange_tensor return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) # Copied from transformers.models.bloom.modeling_bloom.dropout_add def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: """ Dropout add function Args: x (`torch.tensor`): input tensor residual (`torch.tensor`): residual tensor prob (`float`): dropout probability training (`bool`): training mode """ out = F.dropout(x, p=prob, training=training) out = residual + out return out class FalconAttention(nn.Module): def __init__(self, config: FalconConfig, layer_idx=None): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self._use_sdpa = config._attn_implementation == "sdpa" self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) if self.head_dim * self.num_heads != self.hidden_size: raise ValueError( f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:" f" {self.num_heads})." 
) # Layer-wise attention scaling self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) self.beta = self.inv_norm_factor if config.new_decoder_architecture: qkv_out_dim = (config.num_kv_heads * 2 + config.num_attention_heads) * self.head_dim elif config.multi_query: qkv_out_dim = self.hidden_size + 2 * self.head_dim else: qkv_out_dim = 3 * self.hidden_size self.query_key_value = FalconLinear(self.hidden_size, qkv_out_dim, bias=config.bias) self.new_decoder_architecture = config.new_decoder_architecture self.multi_query = config.multi_query self.dense = FalconLinear(self.hidden_size, self.hidden_size, bias=config.bias) self.attention_dropout = nn.Dropout(config.attention_dropout) self.num_kv_heads = config.num_kv_heads if (self.new_decoder_architecture or not self.multi_query) else 1 def _split_heads(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Split the last dimension into (num_heads, head_dim), results share same memory storage as `fused_qkv` Args: fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim] Returns: query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] value: [batch_size, seq_length, num_heads, head_dim] """ if self.new_decoder_architecture: batch, seq_len, _ = fused_qkv.shape qkv = fused_qkv.view(batch, seq_len, -1, self.num_heads // self.num_kv_heads + 2, self.head_dim) query = qkv[:, :, :, :-2] key = qkv[:, :, :, [-2]] value = qkv[:, :, :, [-1]] key = torch.broadcast_to(key, query.shape) value = torch.broadcast_to(value, query.shape) query, key, value = [x.flatten(2, 3) for x in (query, key, value)] return query, key, value elif not self.multi_query: batch_size, seq_length, three_times_hidden_size = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :] else: batch_size, seq_length, three_times_hidden_size = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads + 2, self.head_dim) return fused_qkv[..., :-2, :], fused_qkv[..., [-2], :], fused_qkv[..., [-1], :] # Copied from transformers.models.bloom.modeling_bloom.BloomAttention._merge_heads def _merge_heads(self, x: torch.Tensor) -> torch.Tensor: """ Merge heads together over the last dimension Args: x (`torch.tensor`): [batch_size * num_heads, seq_length, head_dim] Returns: torch.tensor: [batch_size, seq_length, num_heads * head_dim] """ # What we want to achieve is: # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim batch_size_and_num_heads, seq_length, _ = x.shape batch_size = batch_size_and_num_heads // self.num_heads # First view to decompose the batch size # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim x = x.view(batch_size, self.num_heads, seq_length, self.head_dim) # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim x = x.permute(0, 2, 1, 3) # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim) def forward( self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, layer_past: Optional[Cache] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, cache_position: 
Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ): fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads # 3 x [batch_size, seq_length, num_heads, head_dim] (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) batch_size, query_length, _, _ = query_layer.shape query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim) key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) if alibi is None: cos, sin = position_embeddings query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin) if layer_past is not None: cache_kwargs = {"cache_position": cache_position} if alibi is None: cache_kwargs.update({"sin": sin, "cos": cos}) key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) kv_length = key_layer.shape[-2] if self._use_sdpa and query_layer.device.type == "cuda" and attention_mask is not None: # For torch<=2.1.2, SDPA with memory-efficient backend is bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. query_layer = query_layer.contiguous() key_layer = key_layer.contiguous() value_layer = value_layer.contiguous() if attention_mask is not None: attention_mask = attention_mask[:, :, :, : key_layer.shape[-2]] if alibi is None: if self._use_sdpa and not output_attentions: # We dispatch to SDPA's Flash Attention or Efficient kernels via this if statement instead of an # inline conditional assignment to support both torch.compile's `dynamic=True` and `fullgraph=True` # The query_length > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not # create a causal mask in case query_length == 1. is_causal = self.is_causal and attention_mask is None and query_length > 1 attn_output = torch.nn.functional.scaled_dot_product_attention( query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=0.0, is_causal=is_causal, ) attention_scores = None else: attention_scores = query_layer @ key_layer.transpose(-1, -2) attention_scores /= math.sqrt(self.head_dim) attention_scores = F.softmax(attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype) # It is unclear why neither dropout nor head_mask is applied here (while it is with alibi). 
attn_output = attention_scores @ value_layer attn_output = attn_output.view(batch_size, self.num_heads, query_length, self.head_dim) attn_output = attn_output.permute(0, 2, 1, 3) attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_output) return attn_output, attention_scores else: if self._use_sdpa and not output_attentions and head_mask is None: # We dispatch to SDPA's Flash Attention or Efficient kernels via this if statement instead of an # inline conditional assignment to support both torch.compile's `dynamic=True` and `fullgraph=True` is_causal = self.is_causal and attention_mask is None and query_length > 1 attn_output = torch.nn.functional.scaled_dot_product_attention( query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.attention_dropout.p if self.training else 0.0, is_causal=is_causal, ) attention_probs = None attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_output) else: matmul_result = query_layer @ key_layer.transpose(-1, -2) # change view to [batch_size, num_heads, q_length, kv_length] attention_scores = matmul_result.view(batch_size, self.num_heads, query_length, kv_length) # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length] input_dtype = attention_scores.dtype # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38` if input_dtype == torch.float16 or input_dtype == torch.bfloat16: attention_scores = attention_scores.to(torch.float32) attention_logits = attention_scores + alibi.view(batch_size, self.num_heads, 1, -1) attention_logits *= self.inv_norm_factor attention_probs = F.softmax(attention_logits + attention_mask, dim=-1, dtype=hidden_states.dtype) # [batch_size, num_heads, q_length, kv_length] attention_probs = self.attention_dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask # change view [batch_size, num_heads, q_length, kv_length] attention_probs_reshaped = attention_probs.view(batch_size, self.num_heads, query_length, kv_length) # matmul: [batch_size * num_heads, q_length, head_dim] attn_output = (attention_probs_reshaped @ value_layer).flatten(0, 1) # change view [batch_size, q_length, num_heads * head_dim] attn_output = self._merge_heads(attn_output) attn_output = self.dense(attn_output) return attn_output, attention_probs class FalconFlashAttention2(FalconAttention): """ Falcon flash attention module. This module inherits from `FalconAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() def forward( self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, layer_past: Optional[Cache] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ): fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads # 3 x [batch_size, seq_length, num_heads, head_dim] (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) batch_size, query_length, _, _ = query_layer.shape query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim) key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) if alibi is None: cos, sin = position_embeddings query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin) if layer_past is not None: cache_kwargs = {"cache_position": cache_position} if alibi is None: cache_kwargs.update({"sin": sin, "cos": cos}) key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_layer = query_layer.transpose(1, 2) key_layer = key_layer.transpose(1, 2) value_layer = value_layer.transpose(1, 2) if alibi is not None: raise ValueError("`alibi` is not supported when `use_flash_attn` is True") attn_dropout = self.config.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_layer.dtype device_type = query_layer.device.type if query_layer.device.type != "mps" else "cpu" if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.query_key_value.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." 
) query_layer = query_layer.to(target_dtype) key_layer = key_layer.to(target_dtype) value_layer = value_layer.to(target_dtype) attn_output = _flash_attention_forward( query_layer, key_layer, value_layer, attention_mask, query_length, position_ids=position_ids, dropout=attn_dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_weights = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_weights) if not output_attentions: attn_weights = None return attn_output, attn_weights class FalconMLP(nn.Module): def __init__(self, config: FalconConfig): super().__init__() hidden_size = config.hidden_size self.dense_h_to_4h = FalconLinear(hidden_size, config.ffn_hidden_size, bias=config.bias) self.act = get_activation(config.activation) self.dense_4h_to_h = FalconLinear(config.ffn_hidden_size, hidden_size, bias=config.bias) self.hidden_dropout = config.hidden_dropout def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.act(self.dense_h_to_4h(x)) x = self.dense_4h_to_h(x) return x FALCON_ATTENTION_CLASSES = { "eager": FalconAttention, "sdpa": FalconAttention, # FalconAttention originally implemented both a forward with & without SDPA "flash_attention_2": FalconFlashAttention2, } class FalconDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: FalconConfig, layer_idx=None): super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.self_attention = FALCON_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.mlp = FalconMLP(config) self.hidden_dropout = config.hidden_dropout self.config = config if config.num_ln_in_parallel_attn is None and config.new_decoder_architecture: config.num_ln_in_parallel_attn = 2 if not config.parallel_attn: self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) else: if config.num_ln_in_parallel_attn == 2: # The layer norm before self-attention self.ln_attn = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) # The layer norm before the MLP self.ln_mlp = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) else: self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) def forward( self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, layer_past: Optional[Union[Cache, tuple[torch.Tensor, torch.Tensor]]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ): residual = hidden_states if self.config.new_decoder_architecture and self.config.num_ln_in_parallel_attn == 2: attention_layernorm_out = self.ln_attn(hidden_states) mlp_layernorm_out = self.ln_mlp(hidden_states) else: attention_layernorm_out = self.input_layernorm(hidden_states) # Self attention. 
attention_output, attn_weights = self.self_attention( attention_layernorm_out, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings, ) if not self.config.new_decoder_architecture: if self.config.parallel_attn: mlp_layernorm_out = attention_layernorm_out else: residual = dropout_add( attention_output, residual, self.config.attention_dropout, training=self.training ) mlp_layernorm_out = self.post_attention_layernorm(residual) if ( self.config.new_decoder_architecture and self.config.parallel_attn and self.config.num_ln_in_parallel_attn == 1 ): mlp_layernorm_out = attention_layernorm_out # MLP. mlp_output = self.mlp(mlp_layernorm_out) if self.config.new_decoder_architecture or self.config.parallel_attn: mlp_output += attention_output output = dropout_add(mlp_output, residual, self.config.hidden_dropout, training=self.training) return output, attn_weights @auto_docstring class FalconPreTrainedModel(PreTrainedModel): config: FalconConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["FalconDecoderLayer"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module: nn.Module): """Initialize the weights.""" if isinstance(module, (nn.Linear, FalconLinear)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) # Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa @classmethod def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False): _is_bettertransformer = getattr(cls, "use_bettertransformer", False) if _is_bettertransformer: return config if not hard_check_only: config._attn_implementation = "sdpa" return config @auto_docstring class FalconModel(FalconPreTrainedModel): def __init__(self, config: FalconConfig): super().__init__(config) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.use_alibi = config.alibi # Embedding + LN Embedding self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim) # Transformer blocks self.h = nn.ModuleList([FalconDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self._use_sdpa = config._attn_implementation == "sdpa" # Final Layer Norm self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.rotary_emb = FalconRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, new_embeddings: torch.Tensor): self.word_embeddings = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, 
tuple[tuple[torch.Tensor, torch.Tensor], ...]]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache if not isinstance(past_key_values, (type(None), Cache)): raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") if use_cache and past_key_values is None: past_key_values = DynamicCache() # Compute alibi tensor: check build_alibi_tensor documentation alibi = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 batch_size, seq_length, _ = inputs_embeds.shape if self.use_alibi: mask = ( torch.ones( (batch_size, seq_length + past_key_values_length), device=inputs_embeds.device, dtype=torch.long ) if attention_mask is None else attention_mask ) alibi = build_alibi_tensor(mask, self.num_heads, dtype=inputs_embeds.dtype) if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions, head_mask, alibi ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x N x N # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, layer_past=past_key_values, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) # Add last hidden state hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, head_mask: torch.Tensor, alibi: torch.Tensor, ): # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes. # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using # `fullgraph=True`. 
See more context in https://github.com/huggingface/transformers/pull/29114 if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if ( self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions and head_mask is None and alibi is None ): if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device min_dtype = torch.finfo(dtype).min batch_size, sequence_length, _ = input_tensor.shape if using_static_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) # We take care to integrate alibi bias in the causal_mask here if head_mask is None and alibi is not None: alibi = alibi.reshape(batch_size, -1, *alibi.shape[1:]) causal_mask = torch.masked_fill( alibi / math.sqrt(self.config.hidden_size // self.num_heads), causal_mask < -1, min_dtype, ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. 
target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask @auto_docstring( custom_intro=""" The Falcon Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """ ) class FalconForCausalLM(FalconPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: FalconConfig): super().__init__(config) self.transformer = FalconModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. 
Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = transformer_outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep lm_logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring( custom_intro=""" The Falcon Model transformer with a sequence classification head on top (linear layer). [`FalconForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """ ) class FalconForSequenceClassification(FalconPreTrainedModel): def __init__(self, config: FalconConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = FalconModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring class FalconForTokenClassification(FalconPreTrainedModel): def __init__(self, config: FalconConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = FalconModel(config) if getattr(config, "classifier_dropout", None) is not None: classifier_dropout = config.classifier_dropout elif getattr(config, "hidden_dropout", None) is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = 
nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: batch_size, seq_length = labels.shape loss_fct = CrossEntropyLoss() loss = loss_fct( logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length) ) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring class FalconForQuestionAnswering(FalconPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = FalconModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if 
`past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ]
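# ---------------------------------------------------------------------------
# Editor's note: the following is an illustrative sketch, not part of
# modeling_falcon.py. It replays the fused-QKV layout that
# FalconAttention.__init__ and FalconAttention._split_heads use when
# `new_decoder_architecture` is set: each KV group stores
# (num_heads // num_kv_heads) query heads plus one shared key and one shared
# value head, which are then broadcast back to the full head count.
# The toy sizes below (hidden_size=64, 8 query heads, 2 KV heads) are
# assumptions chosen for readability, not real checkpoint dimensions.
import torch

batch, seq_len = 2, 5
hidden_size, num_heads, num_kv_heads = 64, 8, 2
head_dim = hidden_size // num_heads  # 8

# Same formula as FalconAttention.__init__ for the new decoder architecture.
qkv_out_dim = (num_kv_heads * 2 + num_heads) * head_dim  # (2*2 + 8) * 8 = 96
fused_qkv = torch.randn(batch, seq_len, qkv_out_dim)

# Same reshape as _split_heads: [batch, seq, kv_groups, q_per_group + 2, head_dim]
qkv = fused_qkv.view(batch, seq_len, -1, num_heads // num_kv_heads + 2, head_dim)
query = qkv[:, :, :, :-2]                                    # [2, 5, 2, 4, 8]
key = torch.broadcast_to(qkv[:, :, :, [-2]], query.shape)    # shared key per group
value = torch.broadcast_to(qkv[:, :, :, [-1]], query.shape)  # shared value per group
query, key, value = (x.flatten(2, 3) for x in (query, key, value))

assert query.shape == key.shape == value.shape == (batch, seq_len, num_heads, head_dim)

# For end-to-end use, the classes above load the usual way, e.g. (assuming a
# real Falcon checkpoint such as "tiiuae/falcon-7b" and enough memory):
#   model = FalconForCausalLM.from_pretrained("tiiuae/falcon-7b")
# ---------------------------------------------------------------------------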
transformers/src/transformers/models/falcon/modeling_falcon.py/0
{ "file_path": "transformers/src/transformers/models/falcon/modeling_falcon.py", "repo_id": "transformers", "token_count": 28193 }
431
# coding=utf-8 # Copyright 2023 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for FastSpeech2Conformer.""" import json import os from typing import Optional import regex from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging, requires_backends logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"} class FastSpeech2ConformerTokenizer(PreTrainedTokenizer): """ Construct a FastSpeech2Conformer tokenizer. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<sos/eos>"`): The begin of sequence token. Note that for FastSpeech2, it is the same as the `eos_token`. eos_token (`str`, *optional*, defaults to `"<sos/eos>"`): The end of sequence token. Note that for FastSpeech2, it is the same as the `bos_token`. pad_token (`str`, *optional*, defaults to `"<blank>"`): The token used for padding, for example when batching sequences of different lengths. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. should_strip_spaces (`bool`, *optional*, defaults to `False`): Whether or not to strip the spaces from the list of tokens. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<sos/eos>", eos_token="<sos/eos>", pad_token="<blank>", unk_token="<unk>", should_strip_spaces=False, **kwargs, ): requires_backends(self, "g2p_en") with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) import g2p_en self.g2p = g2p_en.G2p() self.decoder = {v: k for k, v in self.encoder.items()} super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, should_strip_spaces=should_strip_spaces, **kwargs, ) self.should_strip_spaces = should_strip_spaces @property def vocab_size(self): return len(self.decoder) def get_vocab(self): "Returns vocab as a dict" return dict(self.encoder, **self.added_tokens_encoder) def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): # expand symbols text = regex.sub(";", ",", text) text = regex.sub(":", ",", text) text = regex.sub("-", " ", text) text = regex.sub("&", "and", text) # strip unnecessary symbols text = regex.sub(r"[\(\)\[\]\<\>\"]+", "", text) # strip whitespaces text = regex.sub(r"\s+", " ", text) text = text.upper() return text, kwargs def _tokenize(self, text): """Returns a tokenized string.""" # phonemize tokens = self.g2p(text) if self.should_strip_spaces: tokens = list(filter(lambda s: s != " ", tokens)) tokens.append(self.eos_token) return tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) # Override since phonemes cannot be converted back to strings def decode(self, token_ids, **kwargs): logger.warning( "Phonemes cannot be reliably converted to a string due to the one-many mapping, converting to tokens instead." ) return self.convert_ids_to_tokens(token_ids) # Override since phonemes cannot be converted back to strings def convert_tokens_to_string(self, tokens, **kwargs): logger.warning( "Phonemes cannot be reliably converted to a string due to the one-many mapping, returning the tokens." ) return tokens def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. Returns: `Tuple(str)`: Paths to the files saved. """ if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.get_vocab(), ensure_ascii=False)) return (vocab_file,) def __getstate__(self): state = self.__dict__.copy() state["g2p"] = None return state def __setstate__(self, d): self.__dict__ = d try: import g2p_en self.g2p = g2p_en.G2p() except ImportError: raise ImportError( "You need to install g2p-en to use FastSpeech2ConformerTokenizer. " "See https://pypi.org/project/g2p-en/ for installation." ) __all__ = ["FastSpeech2ConformerTokenizer"]
transformers/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py/0
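# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch for the FastSpeech2ConformerTokenizer
# defined above; it is not part of the source file. It assumes the optional
# `g2p_en` backend is installed and uses "espnet/fastspeech2_conformer" as an
# example checkpoint name -- substitute whichever vocabulary file or checkpoint
# you actually have.
from transformers import FastSpeech2ConformerTokenizer

tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")

# Text is normalized (symbols expanded, uppercased) and phonemized by g2p_en;
# the resulting ids end with the shared <sos/eos> token.
ids = tokenizer("Hello, how are you?")["input_ids"]

# Because the phoneme-to-text mapping is one-to-many, decode() is overridden to
# return phoneme tokens rather than a reconstructed string.
print(tokenizer.decode(ids))
# ---------------------------------------------------------------------------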
{ "file_path": "transformers/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py", "repo_id": "transformers", "token_count": 2654 }
432
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/florence2/modular_florence2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_florence2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 Microsoft and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class Florence2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Florence2VisionModel`]. It is used to instantiate a Florence2VisionModel according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Florence2VisionModel architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: in_channels (`int`, *optional*, defaults to 3): Number of input image channels. depths (`Tuple[int]`, *optional*, defaults to `(1, 1, 9, 1)`): The depth of the model. patch_size (`Tuple[int]`, *optional*, defaults to `(7, 3, 3, 3)`): The patch size of the image. patch_stride (`Tuple[int]`, *optional*, defaults to `(4, 2, 2, 2)`): The patch stride of the image. patch_padding (`Tuple[int]`, *optional*, defaults to `(3, 1, 1, 1)`): The patch padding of the image. patch_prenorm (`Tuple[bool]`, *optional*, defaults to `(False, True, True, True)`): Whether to apply layer normalization before the patch embedding layer. embed_dim (`Tuple[int]`, *optional*, defaults to `(128, 256, 512, 1024)`): The dimension of the embedding layer. num_heads (`Tuple[int]`, *optional*, defaults to `(4, 8, 16, 32)`): The number of attention heads. num_groups (`Tuple[int]`, *optional*, defaults to `(4, 8, 16, 32)`): The number of groups. window_size (`int`, *optional*, defaults to 12): The window size of the model. drop_path_rate (`float`, *optional*, defaults to 0.1): The dropout rate of the drop path layer. mlp_ratio (`int`, *optional*, defaults to 4.0): Ratio of mlp hidden dim to embedding dim. qkv_bias (`bool`, *optional*, defaults to `True`): If True, add a learnable bias to query, key, value. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. projection_dim (`int`, *optional*, defaults to 1024): The dimension of the projection layer. max_temporal_embeddings (`int`, *optional*, defaults to 100): The configuration of the visual temporal embedding. 
max_position_embeddings (`int`, *optional*, defaults to 50): The configuration of the image position embedding. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers import Florence2VisionConfig, Florence2VisionModel >>> # Initializing a Florence2 Vision style configuration >>> configuration = Florence2VisionConfig() >>> # Initializing a model (with random weights) >>> model = Florence2VisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "florence_vision" def __init__( self, in_channels=3, depths=(1, 1, 9, 1), patch_size=(7, 3, 3, 3), patch_stride=(4, 2, 2, 2), patch_padding=(3, 1, 1, 1), patch_prenorm=(False, True, True, True), embed_dim=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32), num_groups=(4, 8, 16, 32), window_size=12, drop_path_rate=0.1, mlp_ratio=4.0, qkv_bias=True, activation_function="gelu", projection_dim=1024, max_temporal_embeddings=100, max_position_embeddings=50, initializer_range=0.02, **kwargs, ): self.in_channels = in_channels self.depths = list(depths) self.patch_size = list(patch_size) self.patch_stride = list(patch_stride) self.patch_padding = list(patch_padding) self.patch_prenorm = list(patch_prenorm) self.embed_dim = list(embed_dim) self.num_heads = list(num_heads) self.num_groups = list(num_groups) self.window_size = window_size self.drop_path_rate = drop_path_rate self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.projection_dim = projection_dim self.max_temporal_embeddings = max_temporal_embeddings self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.activation_function = activation_function super().__init__(**kwargs) class Florence2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate an Florence-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Florence-2 [microsoft/Florence-2-base](https://huggingface.co/microsoft/Florence-2-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`AutoConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Florence2VisionConfig`]. image_token_id (`int`, *optional*, defaults to 51289): The image token index to encode the image prompt. is_encoder_decoder (bool, optional, *optional*, defaults to `True`): Whether the model is used as an encoder/decoder or not. 
Example: ```python >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig >>> # Initializing a clip-like vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Bart config >>> text_config = BartConfig() >>> # Initializing a Florence-2 configuration >>> configuration = Florence2Config(vision_config, text_config) >>> # Initializing a model from the florence-2 configuration >>> model = Florence2ForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "florence2" sub_configs = { "text_config": AutoConfig, "vision_config": Florence2VisionConfig, } def __init__( self, text_config=None, vision_config=None, image_token_id=51289, is_encoder_decoder=True, **kwargs, ): if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "bart") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["bart"]() if isinstance(vision_config, dict): vision_config = Florence2VisionConfig(**vision_config) elif vision_config is None: logger.info("vision_config is None. Initializing the Florence2VisionConfig with default values.") vision_config = Florence2VisionConfig() self.text_config = text_config self.vision_config = vision_config self.image_token_id = image_token_id super().__init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) __all__ = ["Florence2Config", "Florence2VisionConfig"]
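# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch for the Florence2 configuration classes
# above; not part of configuration_florence2.py. It assumes a transformers
# version that exports Florence2Config at the top level, and the reduced
# `d_model` passed to the text config is an arbitrary example value.
from transformers import Florence2Config

config = Florence2Config(
    vision_config={"window_size": 12, "drop_path_rate": 0.1},
    text_config={"model_type": "bart", "d_model": 768},
)

# Plain dicts are promoted to the proper sub-config classes, as in __init__ above.
assert config.vision_config.window_size == 12
assert config.text_config.model_type == "bart"
print(config.image_token_id)  # 51289 by default
# ---------------------------------------------------------------------------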
transformers/src/transformers/models/florence2/configuration_florence2.py/0
{ "file_path": "transformers/src/transformers/models/florence2/configuration_florence2.py", "repo_id": "transformers", "token_count": 3809 }
433
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """FSMT configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class DecoderConfig(PretrainedConfig): r""" Configuration class for FSMT's decoder specific things. note: this is a private helper class """ model_type = "fsmt_decoder" def __init__(self, vocab_size=0, bos_token_id=0, is_encoder_decoder=True, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.bos_token_id = bos_token_id self.is_encoder_decoder = is_encoder_decoder class FSMTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FSMTModel`]. It is used to instantiate a FSMT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FSMT [facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: langs (`list[str]`): A list with source language and target_language (e.g., ['en', 'ru']). src_vocab_size (`int`): Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the `inputs_ids` passed to the forward method in the encoder. tgt_vocab_size (`int`): Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the `inputs_ids` passed to the forward method in the decoder. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `Callable`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_embedding (`bool`, *optional*, defaults to `True`): Scale embeddings by diving by sqrt(d_model). bos_token_id (`int`, *optional*, defaults to 0) Beginning of stream token id. pad_token_id (`int`, *optional*, defaults to 1) Padding token id. eos_token_id (`int`, *optional*, defaults to 2) End of stream token id. decoder_start_token_id (`int`, *optional*): This model starts decoding with `eos_token_id` encoder_layerdrop (`float`, *optional*, defaults to 0.0): Google "layerdrop arxiv", as its not explainable in one line. decoder_layerdrop (`float`, *optional*, defaults to 0.0): Google "layerdrop arxiv", as its not explainable in one line. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether this is an encoder/decoder model. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie input and output embeddings. num_beams (`int`, *optional*, defaults to 5) Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means no beam search. length_penalty (`float`, *optional*, defaults to 1) Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences. early_stopping (`bool`, *optional*, defaults to `False`) Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
Examples: ```python >>> from transformers import FSMTConfig, FSMTModel >>> # Initializing a FSMT facebook/wmt19-en-ru style configuration >>> config = FSMTConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = FSMTModel(config) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "fsmt" attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} sub_configs = {"decoder": DecoderConfig} # update the defaults from config file def __init__( self, langs=["en", "de"], src_vocab_size=42024, tgt_vocab_size=42024, activation_function="relu", d_model=1024, max_length=200, max_position_embeddings=1024, encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16, encoder_layerdrop=0.0, decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, is_encoder_decoder=True, scale_embedding=True, tie_word_embeddings=False, num_beams=5, length_penalty=1.0, early_stopping=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **common_kwargs, ): self.langs = langs self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.d_model = d_model # encoder_embed_dim and decoder_embed_dim self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = self.num_hidden_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function self.decoder = DecoderConfig( vocab_size=tgt_vocab_size, bos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, num_hidden_layers=encoder_layers, ) if "decoder" in common_kwargs: del common_kwargs["decoder"] self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, is_encoder_decoder=is_encoder_decoder, tie_word_embeddings=tie_word_embeddings, forced_eos_token_id=forced_eos_token_id, max_length=max_length, num_beams=num_beams, length_penalty=length_penalty, early_stopping=early_stopping, **common_kwargs, ) __all__ = ["FSMTConfig"]
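A brief illustration of how the nested decoder sub-config and the `attribute_map` aliases behave once the config is instantiated. This is a minimal sketch: the vocabulary sizes below are illustrative values chosen for the example, not defaults from any released FSMT checkpoint.

```python
from transformers import FSMTConfig

# Minimal sketch with illustrative (non-default) vocabulary sizes.
config = FSMTConfig(langs=["en", "ru"], src_vocab_size=31000, tgt_vocab_size=31640)

# The private DecoderConfig helper mirrors the target-side vocabulary ...
assert config.decoder.vocab_size == 31640
# ... and FSMT starts decoding from the end-of-stream token, so the decoder's
# bos_token_id is set to eos_token_id (2 by default).
assert config.decoder.bos_token_id == config.eos_token_id == 2

# attribute_map exposes encoder-centric aliases used by generic Transformers utilities.
assert config.hidden_size == config.d_model == 1024
assert config.num_attention_heads == config.encoder_attention_heads == 16
```

The aliases matter because generic code paths (e.g. head pruning or resizing utilities) read `hidden_size` and `num_attention_heads` rather than the FSMT-specific names.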
transformers/src/transformers/models/fsmt/configuration_fsmt.py/0
{ "file_path": "transformers/src/transformers/models/fsmt/configuration_fsmt.py", "repo_id": "transformers", "token_count": 4166 }
434
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for Fuyu.
"""

import re
from typing import Optional, Union

import numpy as np

from ...image_utils import ImageInput
from ...processing_utils import (
    MultiModalData,
    ProcessingKwargs,
    ProcessorMixin,
    Unpack,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import is_torch_available, logging, requires_backends
from ...utils.import_utils import requires


if is_torch_available():
    from .image_processing_fuyu import FuyuBatchFeature


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch


TEXT_REPR_BBOX_OPEN = "<box>"
TEXT_REPR_BBOX_CLOSE = "</box>"
TEXT_REPR_POINT_OPEN = "<point>"
TEXT_REPR_POINT_CLOSE = "</point>"

TOKEN_BBOX_OPEN_STRING = "<0x00>"  # <bbox>
TOKEN_BBOX_CLOSE_STRING = "<0x01>"  # </bbox>
TOKEN_POINT_OPEN_STRING = "<0x02>"  # <point>
TOKEN_POINT_CLOSE_STRING = "<0x03>"  # </point>
BEGINNING_OF_ANSWER_STRING = "<0x04>"  # <boa>


class FuyuProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        "text_kwargs": {
            "add_special_tokens": True,
            "padding": False,
            "stride": 0,
            "return_attention_mask": True,
            "return_overflowing_tokens": False,
            "return_special_tokens_mask": False,
            "return_offsets_mapping": False,
            "return_token_type_ids": False,
            "return_length": False,
            "verbose": True,
            "return_mm_token_type_ids": False,
        },
        "images_kwargs": {},
    }


def full_unpacked_stream_to_tensor(
    all_bi_tokens_to_place: list[int],
    full_unpacked_stream: list["torch.Tensor"],
    fill_value: int,
    batch_size: int,
    new_seq_len: int,
    offset: int,
) -> "torch.Tensor":
    """Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does the
    required padding to create a single tensor for the batch of shape batch_size x new_seq_len.
    """

    assert len(all_bi_tokens_to_place) == batch_size
    assert len(full_unpacked_stream) == batch_size

    # Create padded tensors for the full batch.
    new_padded_tensor = torch.full(
        [batch_size, new_seq_len],
        fill_value=fill_value,
        dtype=full_unpacked_stream[0].dtype,
        device=full_unpacked_stream[0].device,
    )

    # Place each batch entry into the batch tensor.
    for bi in range(batch_size):
        tokens_to_place = all_bi_tokens_to_place[bi]
        new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset : tokens_to_place + offset]

    return new_padded_tensor


def construct_full_unpacked_stream(
    num_real_text_tokens: Union[list[list[int]], "torch.Tensor"],
    input_stream: "torch.Tensor",
    image_tokens: list[list["torch.Tensor"]],
    batch_size: int,
    num_sub_sequences: int,
) -> list["torch.Tensor"]:
    """Takes an input_stream tensor of shape B x S x ?. For each subsequence, adds any required
    padding to account for images and then unpacks the subsequences to create a single sequence per item in the batch.
Returns a list of tensors, one for each item in the batch.""" all_bi_stream = [] for batch_index in range(batch_size): all_si_stream = [] # First, construct full token stream (including image placeholder tokens) and loss mask for each subsequence # and append to lists. We use lists rather than tensors because each subsequence is variable-sized. # TODO Remove this logic in a subsequent release since subsequences are not supported. image_adjustment = image_tokens[batch_index][0] subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0) num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0] all_si_stream.append(subsequence_stream[:num_real_tokens]) all_bi_stream.append(torch.cat(all_si_stream, dim=0)) return all_bi_stream def _replace_string_repr_with_token_tags(prompt: str) -> str: prompt = prompt.replace(TEXT_REPR_POINT_OPEN, TOKEN_POINT_OPEN_STRING) prompt = prompt.replace(TEXT_REPR_POINT_CLOSE, TOKEN_POINT_CLOSE_STRING) prompt = prompt.replace(TEXT_REPR_BBOX_OPEN, TOKEN_BBOX_OPEN_STRING) prompt = prompt.replace(TEXT_REPR_BBOX_CLOSE, TOKEN_BBOX_CLOSE_STRING) return prompt def _segment_prompt_into_text_token_conversions(prompt: str) -> list: """ Given a string prompt, converts the prompt into a list of TextTokenConversions. """ # Wherever, we notice the [TOKEN_OPEN_STRING, TOKEN_CLOSE_STRING], we split the prompt prompt_text_list: list = [] regex_pattern = re.compile( f"({TOKEN_BBOX_OPEN_STRING}|{TOKEN_BBOX_CLOSE_STRING}|{TOKEN_POINT_OPEN_STRING}|{TOKEN_POINT_CLOSE_STRING})" ) # Split by the regex pattern prompt_split = regex_pattern.split(prompt) for i, elem in enumerate(prompt_split): if len(elem) == 0 or elem in [ TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING, ]: continue prompt_text_list.append( (elem, i > 1 and prompt_split[i - 1] in [TOKEN_BBOX_OPEN_STRING, TOKEN_POINT_OPEN_STRING]) ) return prompt_text_list def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> list[int]: """ This function transforms the prompt in the following fashion: - <box> <point> and </box> </point> to their respective token mappings - extract the coordinates from the tag - transform the coordinates into the transformed image space - return the prompt tokens with the transformed coordinates and new tags Bounding boxes and points MUST be in the following format: <box>y1, x1, y2, x2</box> <point>x, y</point> The spaces and punctuation added above are NOT optional. 
""" # Make a namedtuple that stores "text" and "is_bbox" # We want to do the following: Tokenize the code normally -> when we see a point or box, tokenize using the tokenize_within_tag function # When point or box close tag, continue tokenizing normally # First, we replace the point and box tags with their respective tokens prompt = _replace_string_repr_with_token_tags(prompt) # Tokenize the prompt # Convert prompt into a list split prompt_text_list = _segment_prompt_into_text_token_conversions(prompt) transformed_prompt_tokens: list[int] = [] for elem in prompt_text_list: if elem[1]: # This is a location, we need to tokenize it within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer) # Surround the text with the open and close tags transformed_prompt_tokens.extend(within_tag_tokenized) else: transformed_prompt_tokens.extend(tokenizer(elem[0], add_special_tokens=False).input_ids) return transformed_prompt_tokens def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> list[int]: """ Given a bounding box of the fashion <box>1, 2, 3, 4</box> | <point>1, 2</point> This function is responsible for converting 1, 2, 3, 4 into tokens of 1 2 3 4 without any commas. """ # Convert the text into a list of strings. num_int_strs = text.split(",") if len(num_int_strs) == 2: # If there are any open or close tags, remove them. token_space_open_string = tokenizer.vocab[TOKEN_POINT_OPEN_STRING] token_space_close_string = tokenizer.vocab[TOKEN_POINT_CLOSE_STRING] else: token_space_open_string = tokenizer.vocab[TOKEN_BBOX_OPEN_STRING] token_space_close_string = tokenizer.vocab[TOKEN_BBOX_CLOSE_STRING] # Remove all spaces from num_ints num_ints = [float(num.strip()) for num in num_int_strs] # scale to transformed image siz if len(num_ints) == 2: num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor) elif len(num_ints) == 4: num_ints_translated = scale_bbox_to_transformed_image( top=num_ints[0], left=num_ints[1], bottom=num_ints[2], right=num_ints[3], scale_factor=scale_factor, ) else: raise ValueError(f"Invalid number of ints: {len(num_ints)}") # Tokenize the text, skipping the tokens = [tokenizer.vocab[str(num)] for num in num_ints_translated] return [token_space_open_string] + tokens + [token_space_close_string] def _tokenize_prompts_with_image_and_batch( tokenizer, prompts: list[list[str]], scale_factors: Optional[list[list["torch.Tensor"]]], max_tokens_to_generate: int, max_position_embeddings: int, add_BOS: bool, # Same issue with types as above add_beginning_of_answer_token: bool, ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Given a set of prompts and number of tokens to generate: - tokenize prompts - set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate - pad all the sequences to this length so we can convert them into a 3D tensor. 
""" # If not tool use, transform the coordinates while tokenizing if scale_factors is not None: transformed_prompt_tokens = [] for prompt_seq, scale_factor_seq in zip(prompts, scale_factors): transformed_prompt_tokens.append( [ _transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer) for prompt, scale_factor in zip(prompt_seq, scale_factor_seq) ] ) else: transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts] prompts_tokens = transformed_prompt_tokens if add_BOS: bos_token = tokenizer.vocab["<s>"] else: bos_token = tokenizer.vocab["|ENDOFTEXT|"] prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens] if add_beginning_of_answer_token: beginning_of_answer = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING] # Only add bbox open token to the last subsequence since that is what will be completed for token_seq in prompts_tokens: token_seq[-1].append(beginning_of_answer) # Now we have a list of list of tokens which each list has a different # size. We want to extend this list to: # - incorporate the tokens that need to be generated # - make all the sequences equal length. # Get the prompts length. prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens] # Get the max prompts length. max_prompt_len: int = np.max(prompts_length) # Number of tokens in the each sample of the batch. samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings) if max_prompt_len + max_tokens_to_generate > max_position_embeddings: logger.warning( f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate}", f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible.", ) # Now update the list of list to be of the same size: samples_length. for prompt_tokens_seq, prompts_length_seq in zip(prompts_tokens, prompts_length): for prompt_tokens, prompt_length in zip(prompt_tokens_seq, prompts_length_seq): if len(prompt_tokens) > samples_length: raise ValueError("Length of subsequence prompt exceeds sequence length.") padding_size = samples_length - prompt_length prompt_tokens.extend([tokenizer.vocab["|ENDOFTEXT|"]] * padding_size) # Now we are in a structured format, we can convert to tensors. 
prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64) prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64) return prompts_tokens_tensor, prompts_length_tensor # Simplified assuming self.crop_top = self.padding_top = 0 def original_to_transformed_h_coords(original_coords, scale_h): return np.round(original_coords * scale_h).astype(np.int32) # Simplified assuming self.crop_left = self.padding_left = 0 def original_to_transformed_w_coords(original_coords, scale_w): return np.round(original_coords * scale_w).astype(np.int32) def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> list[int]: x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0] y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0] return [x_scaled, y_scaled] def scale_bbox_to_transformed_image( top: float, left: float, bottom: float, right: float, scale_factor: float ) -> list[int]: top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0] left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0] bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0] right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0] return [top_scaled, left_scaled, bottom_scaled, right_scaled] @requires(backends=("vision",)) class FuyuProcessor(ProcessorMixin): r""" Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor. [`FuyuProcessor`] offers all the functionalities of [`FuyuImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~FuyuProcessor.__call__`] and [`~FuyuProcessor.decode`] for more information. Args: image_processor ([`FuyuImageProcessor`]): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`]): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "FuyuImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor, tokenizer, **kwargs): super().__init__(image_processor=image_processor, tokenizer=tokenizer) self.image_processor = image_processor self.tokenizer = tokenizer self.max_tokens_to_generate = 10 self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it? 
self.pad_token_id = 0 self.dummy_image_index = -1 self.image_token_id = tokenizer.encode("|SPEAKER|", add_special_tokens=False)[1] self.image_newline_id = tokenizer.encode("|NEWLINE|", add_special_tokens=False)[1] def _left_pad_inputs_with_attention_mask(self, model_inputs: list[dict], return_attention_mask: bool): max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs) max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs) batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []} for entry in model_inputs: for key, tensor in entry.items(): if key == "input_ids": num_padding_tokens = max_length_input_ids - tensor.shape[1] padded_input_ids = torch.cat( [ torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long), tensor, ], dim=1, ) batched_inputs[key].append(padded_input_ids) attention_mask = torch.cat( [torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)], dim=1, ) batched_inputs["attention_mask"].append(attention_mask) elif key == "image_patches": # For image_patches, we don't pad but just append them to the list. batched_inputs[key].append(tensor) else: # for image_patches_indices num_padding_indices = max_length_image_patch_indices - tensor.shape[1] padded_indices = torch.cat( [ torch.full( (tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long ), tensor, ], dim=1, ) batched_inputs[key].append(padded_indices) batched_keys = ["input_ids", "image_patches_indices"] if return_attention_mask: batched_keys.append("attention_mask") for key in batched_keys: batched_inputs[key] = torch.cat(batched_inputs[key], dim=0) # Cast images to tensor as well, if only one image passed and no padding needed # NOTE: vLLM expects all processor outputs to be a tensor if len(batched_inputs["image_patches"]) == 1: batched_inputs["image_patches"] = torch.cat(batched_inputs["image_patches"], dim=0) return batched_inputs def get_sample_encoding( self, prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, image_placeholder_id, image_newline_id, tensor_batch_images, ): image_present = torch.ones(1, 1, 1) model_image_input = self.image_processor.preprocess_with_tokenizer_info( image_input=tensor_batch_images, image_present=image_present, image_unpadded_h=image_unpadded_heights, image_unpadded_w=image_unpadded_widths, image_placeholder_id=image_placeholder_id, image_newline_id=image_newline_id, variable_sized=True, ) # FIXME max_tokens_to_generate is embedded into this processor's call. prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch( tokenizer=self.tokenizer, prompts=prompts, scale_factors=scale_factors, max_tokens_to_generate=self.max_tokens_to_generate, max_position_embeddings=self.max_position_embeddings, add_BOS=True, add_beginning_of_answer_token=True, ) image_padded_unpacked_tokens = construct_full_unpacked_stream( num_real_text_tokens=prompts_length, input_stream=prompt_tokens, image_tokens=model_image_input["image_input_ids"], batch_size=1, num_sub_sequences=self.subsequence_length, ) # Construct inputs for image patch indices. 
unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream( num_real_text_tokens=prompts_length, input_stream=torch.full_like(prompt_tokens, -1), image_tokens=model_image_input["image_patch_indices_per_batch"], batch_size=1, num_sub_sequences=self.subsequence_length, ) max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens) max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings) tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0])) # Use same packing logic for the image patch indices. image_patch_input_indices = full_unpacked_stream_to_tensor( all_bi_tokens_to_place=[tokens_to_place], full_unpacked_stream=unpacked_image_patch_indices_per_batch, fill_value=-1, batch_size=1, new_seq_len=max_seq_len_batch, offset=0, ) image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]]) batch_encoding = { "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0), "image_patches": image_patches_tensor, "image_patches_indices": image_patch_input_indices, } return batch_encoding def __call__( self, images: ImageInput = None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[FuyuProcessorKwargs], ) -> "FuyuBatchFeature": """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `list[PIL.Image.Image]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). Returns: [`FuyuBatchEncoding`]: A [`FuyuBatchEncoding`] with the following fields: - **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`. - **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`. - **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model when `return_attention_mask=True`. """ requires_backends(self, ["torch"]) # --- Check input validity --- if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be None.") output_kwargs = self._merge_kwargs( FuyuProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False) if not output_kwargs["text_kwargs"].setdefault("return_attention_mask", True): raise ValueError("`return_attention_mask=False` is not supported for this model.") if text is not None and images is None: logger.warning("You are processing a text with no associated image. 
Make sure it is intended.") self.current_processor = self.tokenizer text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"]) return text_encoding if text is None and images is not None: logger.warning("You are processing an image with no associated text. Make sure it is intended.") prompts = [[""]] if text is not None and images is not None: if isinstance(text, str): prompts = [[text]] elif isinstance(text, list): prompts = [[text_seq] for text_seq in text] # --- Preprocess images using self.image_processor --- # FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors output_kwargs["images_kwargs"]["return_tensors"] = "pt" image_encoding = self.image_processor.preprocess(images, **output_kwargs["images_kwargs"]) batch_images = image_encoding["images"] image_unpadded_heights = image_encoding["image_unpadded_heights"] image_unpadded_widths = image_encoding["image_unpadded_widths"] scale_factors = image_encoding["image_scale_factors"] self.subsequence_length = 1 # Each batch contains only one sequence. self.batch_size = len(batch_images) # --- Use self.tokenizer to get the ids of special tokens to insert into image ids --- tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1) # --- Use self.image_processor again to obtain the full token ids and batch inputs --- all_encodings = [] for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip( prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images ): sample_encoding = self.get_sample_encoding( prompts=[prompt], scale_factors=[scale_factor], image_unpadded_heights=torch.tensor([image_unpadded_height]), image_unpadded_widths=torch.tensor([image_unpadded_width]), image_placeholder_id=self.image_token_id, image_newline_id=self.image_newline_id, tensor_batch_images=tensor_batch_image.unsqueeze(0), ) all_encodings.append(sample_encoding) batch_encoding = self._left_pad_inputs_with_attention_mask( model_inputs=all_encodings, return_attention_mask=True ) if return_mm_token_type_ids: input_ids = batch_encoding["input_ids"] mm_token_type_ids = torch.zeros_like(input_ids) mm_token_type_ids[input_ids == self.image_token_id] = 1 mm_token_type_ids[input_ids == self.image_newline_id] = 1 batch_encoding["mm_token_type_ids"] = mm_token_type_ids return FuyuBatchFeature(data=batch_encoding) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: size = kwargs.get("size") or self.image_processor.size padded_height, padded_width = size["height"], size["width"] num_image_tokens = [] num_image_patches = [1] * len(image_sizes) for image_size in image_sizes: height_scale_factor = padded_height / image_size[0] width_scale_factor = padded_width / image_size[1] optimal_scale_factor = min(height_scale_factor, width_scale_factor) image_unpadded_h = min(int(image_size[0] * optimal_scale_factor), image_size[0]) image_unpadded_w = min(int(image_size[0] * optimal_scale_factor), image_size[0]) # We can use torch here because Fuyu processor has hard dependency on torch. 
NOTE: Fuyu can't do multi-image # thus the below (1, 1, 1) is hardcoded. Same as when calling the processor model_image_input = self.image_processor.preprocess_with_tokenizer_info( image_input=torch.zeros(1, 1, 3, padded_height, padded_width), image_present=torch.ones(1, 1, 1), image_unpadded_h=torch.tensor([[image_unpadded_h]]), image_unpadded_w=torch.tensor([[image_unpadded_w]]), image_placeholder_id=0, # dummy ids, we can be sure `id=0` is never out-of-range image_newline_id=0, variable_sized=True, ) num_image_tokens.append(model_image_input["image_input_ids"][0][0].shape[-1]) vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches}) return MultiModalData(**vision_data) def post_process_box_coordinates(self, outputs, target_sizes=None): """ Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space. Coordinates will be returned in "box" format, with the following pattern: `<box>top, left, bottom, right</box>` Point coordinates are not supported yet. Args: outputs ([`GenerateOutput`]): Raw outputs from `generate`. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left to None, coordinates will not be rescaled. Returns: `GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with boxed and possible rescaled coordinates. """ def scale_factor_to_fit(original_size, target_size=None): height, width = original_size if target_size is None: max_height = self.image_processor.size["height"] max_width = self.image_processor.size["width"] else: max_height, max_width = target_size if width <= max_width and height <= max_height: return 1.0 return min(max_height / height, max_width / width) def find_delimiters_pair(tokens, start_token, end_token): start_id = self.tokenizer.convert_tokens_to_ids(start_token) end_id = self.tokenizer.convert_tokens_to_ids(end_token) starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0] ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0] if torch.any(starting_positions) and torch.any(ending_positions): return (starting_positions[0], ending_positions[0]) return (None, None) def tokens_to_boxes(tokens, original_size): while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != ( None, None, ): start, end = pair if end != start + 5: continue # Retrieve transformed coordinates from tokens coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end]) # Scale back to original image size and multiply by 2 scale = scale_factor_to_fit(original_size) top, left, bottom, right = [2 * int(float(c) / scale) for c in coords] # Replace the IDs so they get detokenized right replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}" replacement = self.tokenizer.tokenize(replacement)[1:] replacement = self.tokenizer.convert_tokens_to_ids(replacement) replacement = torch.tensor(replacement).to(tokens) tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0) return tokens def tokens_to_points(tokens, original_size): while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != ( None, None, ): start, end = pair if end != start + 3: continue # Retrieve transformed coordinates from tokens coords = 
self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end]) # Scale back to original image size and multiply by 2 scale = scale_factor_to_fit(original_size) x, y = [2 * int(float(c) / scale) for c in coords] # Replace the IDs so they get detokenized right replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}" replacement = self.tokenizer.tokenize(replacement)[1:] replacement = self.tokenizer.convert_tokens_to_ids(replacement) replacement = torch.tensor(replacement).to(tokens) tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0) return tokens if target_sizes is None: target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs) elif target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") if len(outputs) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as output sequences") results = [] for seq, size in zip(outputs, target_sizes): seq = tokens_to_boxes(seq, size) seq = tokens_to_points(seq, size) results.append(seq) return results def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs): """ Post-processes the output of `FuyuForConditionalGeneration` to only return the text output. Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model. The output is expected to be a tensor of shape `(batch_size, sequence_length)` containing the token ids of the generated sequences. skip_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. **kwargs: Additional arguments to be passed to the tokenizer's `batch_decode method`. Returns: `list[str]`: The decoded text output. """ beginning_of_answer = self.tokenizer.convert_tokens_to_ids(BEGINNING_OF_ANSWER_STRING) # get boa index for each outputted sequence tensor # start all generated sequences from the beginning of the answer token, pad to have consistent length unpadded_output_sequences = [ seq[(seq == beginning_of_answer).nonzero(as_tuple=True)[0] + 1 :] for seq in generated_outputs ] max_len = max(len(seq) for seq in unpadded_output_sequences) # convert to torch and pad sequences padded_output_sequences = torch.full((len(unpadded_output_sequences), max_len), self.pad_token_id) for i, seq in enumerate(unpadded_output_sequences): padded_output_sequences[i, : len(seq)] = torch.tensor(seq) return self.batch_decode(padded_output_sequences, skip_special_tokens=skip_special_tokens, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names # Make a copy of list when removing otherwise `self.image_processor.model_input_names` is also modified extra_image_inputs = [ "image_input_ids", "image_patch_indices_per_subsequence", "images", "image_patch_indices_per_batch", ] image_processor_input_names = [name for name in image_processor_input_names if name not in extra_image_inputs] return list(tokenizer_input_names + image_processor_input_names + ["image_patches_indices"]) __all__ = ["FuyuProcessor"]
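For orientation, the following is a hedged end-to-end sketch of how this processor is typically used for image captioning. It assumes the publicly released `adept/fuyu-8b` checkpoint, an arbitrary example image URL, and that `accelerate` is installed for `device_map="auto"`; the prompt string is illustrative, not a required format.

```python
import requests
import torch
from PIL import Image

from transformers import FuyuForCausalLM, FuyuProcessor

# Assumes the public adept/fuyu-8b checkpoint; any Fuyu checkpoint should work the same way.
processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b", torch_dtype=torch.bfloat16, device_map="auto")

# Illustrative image (the COCO cats picture used throughout the Transformers docs).
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# __call__ returns input_ids, image_patches, image_patches_indices and a left-padded attention_mask.
inputs = processor(text="Generate a coco-style caption.\n", images=image, return_tensors="pt").to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=16)

# post_process_image_text_to_text keeps only the tokens after the <boa> (beginning-of-answer) marker.
print(processor.post_process_image_text_to_text(generated_ids)[0])
```

For grounding tasks, `post_process_box_coordinates` can be applied to the raw `generate` output before decoding so that `<box>`/`<point>` tokens are rescaled back to the original image size.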
transformers/src/transformers/models/fuyu/processing_fuyu.py/0
{ "file_path": "transformers/src/transformers/models/fuyu/processing_fuyu.py", "repo_id": "transformers", "token_count": 15849 }
435
# coding=utf-8 # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Utility to convert Gemma models from Orbax to HF Transformers checkpoint. python -m transformers.models.gemma3.convert_gemma3_weights_orbax_to_hf \ --variant='gemma3_4b' \ --tokenizer_path="$HOME/gemma3/tokenizer/gemma3_cleaned_262144_v2.spiece.model" \ --checkpoint_path="$HOME/gemma3/gemma3_4b_pt_orbax/" \ --output_path="$HOME/gemma3/gemma3_4b_pt_safetensors/" """ from collections.abc import Iterator, Sequence from typing import Any import accelerate import numpy as np import torch import tree from absl import app, flags, logging from orbax import checkpoint as obc from transformers import ( Gemma3Config, Gemma3ForCausalLM, Gemma3ForConditionalGeneration, Gemma3ImageProcessor, Gemma3Processor, Gemma3TextConfig, GemmaTokenizerFast, GenerationConfig, SiglipVisionConfig, ) from transformers.image_utils import PILImageResampling # ==== Internal Constants and Classes ==== _CHAT_TEMPLATE = """{{ bos_token }} {%- if messages[0]['role'] == 'system' -%} {%- if messages[0]['content'] is string -%} {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%} {%- else -%} {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%} {%- endif -%} {%- set loop_messages = messages[1:] -%} {%- else -%} {%- set first_user_prefix = "" -%} {%- set loop_messages = messages -%} {%- endif -%} {%- for message in loop_messages -%} {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%} {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }} {%- endif -%} {%- if (message['role'] == 'assistant') -%} {%- set role = "model" -%} {%- else -%} {%- set role = message['role'] -%} {%- endif -%} {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else "") }} {%- if message['content'] is string -%} {{ message['content'] | trim }} {%- elif message['content'] is iterable -%} {%- for item in message['content'] -%} {%- if item['type'] == 'image' -%} {{ '<start_of_image>' }} {%- elif item['type'] == 'text' -%} {{ item['text'] | trim }} {%- endif -%} {%- endfor -%} {%- else -%} {{ raise_exception("Invalid content type") }} {%- endif -%} {{ '<end_of_turn>\n' }} {%- endfor -%} {%- if add_generation_prompt -%} {{'<start_of_turn>model\n'}} {%- endif -%} """ _DTYPES = {"float32", "bfloat16", "float16"} _SIGLIP_BASE = "SigLiPFromPatches_0/siglip_encoder" _SIGLIP_EMBEDDING = "SigLiPFromPatches_0/siglip_encoder/embedding" _SIGLIP_TRANSFORMER_ENCODER_BLOCK = "SigLiPFromPatches_0/siglip_encoder/Transformer/encoderblock_" _SIGLIP_TRANSFORMER_ENCODER_BLOCK_LEN = len(_SIGLIP_TRANSFORMER_ENCODER_BLOCK) _SIGLIP_TRANSFORMER_ENCODER_NORM = "SigLiPFromPatches_0/siglip_encoder/Transformer/encoder_norm" _TRANSFORMER_DECODER_BLOCK = "transformer/layer_" _TRANSFORMER_DECODER_BLOCK_LEN = len(_TRANSFORMER_DECODER_BLOCK) _TRANSFORMER_EMBEDDER = "transformer/embedder" _TRANSFORMER_FINAL_NORM = "transformer/final_norm" _TRANSFORMER_POST_TRAINING_PREFIX 
= "rlx_networks/policy_network/" _TRANSFORMER_POST_TRAINING_PREFIX_LEN = len(_TRANSFORMER_POST_TRAINING_PREFIX) _VISION_CONFIG = { "hidden_size": 1152, "intermediate_size": 4304, "num_hidden_layers": 27, "num_attention_heads": 16, "num_channels": 3, "image_size": 896, "patch_size": 14, "hidden_act": "gelu_pytorch_tanh", "layer_norm_eps": 1e-6, "attention_dropout": 0.0, "vision_use_head": False, } _VARIANT_GEMMA_3_1B = "gemma3_1b" _VARIANT_GEMMA_3_4B = "gemma3_4b" _VARIANT_GEMMA_3_12B = "gemma3_12b" _VARIANT_GEMMA_3_27B = "gemma3_27b" _VARIANTS = { _VARIANT_GEMMA_3_1B: Gemma3Config( text_config=Gemma3TextConfig( vocab_size=262_144, hidden_size=1152, intermediate_size=6 * 1152, num_attention_heads=4, num_hidden_layers=26, num_key_value_heads=1, head_dim=256, sliding_window=512, rope_theta=1_000_000, # used for global RoPE only rope_local_base_freq=10_000, attn_logit_softcapping=None, query_pre_attn_scalar=256, max_position_embeddings=32_768, ), vision_config=None, ), _VARIANT_GEMMA_3_4B: Gemma3Config( text_config=Gemma3TextConfig( vocab_size=262_208, hidden_size=2560, intermediate_size=2560 * 8 // 2, num_attention_heads=8, head_dim=256, num_hidden_layers=34, num_key_value_heads=4, sliding_window=1024, rope_scaling={"rope_type": "linear", "factor": 8.0}, # used for global RoPE only rope_theta=1_000_000, rope_local_base_freq=10_000, attn_logit_softcapping=None, query_pre_attn_scalar=256, ), vision_config=_VISION_CONFIG, ), _VARIANT_GEMMA_3_12B: Gemma3Config( text_config=Gemma3TextConfig( vocab_size=262_208, hidden_size=30 * 128, intermediate_size=30 * 128 * 8 // 2, num_attention_heads=16, head_dim=256, num_hidden_layers=48, num_key_value_heads=8, sliding_window=1024, rope_scaling={"rope_type": "linear", "factor": 8.0}, # used for global RoPE only rope_theta=1_000_000, rope_local_base_freq=10_000, attn_logit_softcapping=None, query_pre_attn_scalar=256, ), vision_config=_VISION_CONFIG, ), _VARIANT_GEMMA_3_27B: Gemma3Config( text_config=Gemma3TextConfig( vocab_size=262_208, hidden_size=42 * 128, intermediate_size=42 * 128 * 8 // 2, num_attention_heads=32, num_hidden_layers=62, num_key_value_heads=16, head_dim=128, sliding_window=1024, rope_scaling={"rope_type": "linear", "factor": 8.0}, # used for global RoPE only rope_theta=1_000_000, rope_local_base_freq=10_000, attn_logit_softcapping=None, query_pre_attn_scalar=(42 * 128 // 32), # 1 / sqrt(hidden_size // num_attention_heads) ), vision_config=_VISION_CONFIG, ), } # ==== Flags ==== _CHECKPOINT_PATH = flags.DEFINE_string( name="checkpoint_path", default=None, help="Path to the Orbax checkpoint.", required=True, ) _INCLUDE_CHAT_TEMPLATE = flags.DEFINE_bool( name="include_chat_template", default=False, help="If true, will save the default chat template with the tokenizer" ) _OUTPUT_PATH = flags.DEFINE_string( name="output_path", default=None, help="Path to store the HF checkpoint.", required=True, ) _TRANSFORMER_DTYPE = flags.DEFINE_enum( name="text_dtype", default="bfloat16", help="The floating point precision (aka dtype) of the model.", enum_values=_DTYPES, ) _TOKENIZER_PATH = flags.DEFINE_string( name="tokenizer_path", default=None, help="Path to the SentencePiece model file.", required=True, ) _VARIANT = flags.DEFINE_enum( name="variant", default=_VARIANT_GEMMA_3_4B, help="The model variant to convert.", enum_values=set(_VARIANTS.keys()), ) _VERBOSE = flags.DEFINE_bool( name="verbose", default=False, help="If true, log the path, shape, and dtype of every converted layer.", ) _VISION_DTYPE = flags.DEFINE_enum( name="vision_dtype", 
default="float32", help="The floating point precision (aka dtype) of the model.", enum_values=_DTYPES, ) def convert_siglip_weight( config: SiglipVisionConfig, paths: Sequence[str], weights: np.ndarray, ) -> tuple[str, np.ndarray]: path, prop = paths normalized_path: str = "" updated_weights: np.ndarray = None if path == _SIGLIP_BASE: normalized_path = "vision_tower.vision_model.embeddings.position_embedding.weight" updated_weights = weights.reshape(-1, config.hidden_size) elif path == _SIGLIP_EMBEDDING: if prop == "kernel": normalized_path = "vision_tower.vision_model.embeddings.patch_embedding.weight" updated_weights = weights.transpose(3, 2, 0, 1) elif prop == "bias": normalized_path = "vision_tower.vision_model.embeddings.patch_embedding.bias" updated_weights = weights else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. Should be `bias` or `kernel`.") elif path.startswith(_SIGLIP_TRANSFORMER_ENCODER_BLOCK): encoder_block_path = path[_SIGLIP_TRANSFORMER_ENCODER_BLOCK_LEN:] next_path_seperator_idx = encoder_block_path.find("/") layer_idx = encoder_block_path[:next_path_seperator_idx] encoder_block_path = encoder_block_path[next_path_seperator_idx:] normalized_path = f"vision_tower.vision_model.encoder.layers.{layer_idx}" if encoder_block_path.startswith("/LayerNorm"): normalized_path += ".layer_norm1" if path.endswith("_0") else ".layer_norm2" if prop == "scale": normalized_path += ".weight" updated_weights = weights.transpose() elif prop == "bias": normalized_path += ".bias" updated_weights = weights else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. Should be `bias` or `scale`.") elif encoder_block_path.startswith("/MlpBlock_0"): normalized_path += ".mlp.fc1" if "/Dense_0" in encoder_block_path else ".mlp.fc2" if prop == "kernel": normalized_path += ".weight" updated_weights = weights.transpose() elif prop == "bias": normalized_path += ".bias" updated_weights = weights else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. Should be `bias` or `kernel`.") elif encoder_block_path.startswith("/MultiHeadDotProductAttention_0"): if encoder_block_path.endswith("/key"): normalized_path += ".self_attn.k_proj" elif encoder_block_path.endswith("/out"): normalized_path += ".self_attn.out_proj" elif encoder_block_path.endswith("/query"): normalized_path += ".self_attn.q_proj" elif encoder_block_path.endswith("/value"): normalized_path += ".self_attn.v_proj" else: raise ValueError(f"Unexpected path `{path}` in SigLIP Transformer MultiHeadDotProductAttention_0.") if prop == "bias": normalized_path += ".bias" updated_weights = weights.reshape(-1, config.hidden_size).reshape(-1) elif prop == "kernel": normalized_path += ".weight" updated_weights = weights.reshape(-1, config.hidden_size).transpose() else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. Should be `bias` or `kernel`.") else: raise ValueError(f"Unexpected path `{path}` in SigLIP Transformer Encoder Block.") elif path == _SIGLIP_TRANSFORMER_ENCODER_NORM: if prop == "scale": normalized_path = "vision_tower.vision_model.post_layernorm.weight" updated_weights = weights.transpose() elif prop == "bias": normalized_path = "vision_tower.vision_model.post_layernorm.bias" updated_weights = weights else: raise ValueError(f"Unexpected member, `{prop}`, for path `{path}`. 
Should be `bias` or `scale`.") else: raise ValueError(f"Unexpected path `{path}`.") return normalized_path, updated_weights def convert_transformer_weights( config: Gemma3TextConfig, paths: Sequence[str], weights: np.ndarray, ) -> Iterator[tuple[str, np.ndarray]]: path, prop = paths if path.startswith(_TRANSFORMER_POST_TRAINING_PREFIX): path = path[_TRANSFORMER_POST_TRAINING_PREFIX_LEN:] converted_paths: list[str] = [] converted_weights: list[Any] = [] attn_head_dim = config.num_attention_heads * config.head_dim kv_head_dim = config.num_key_value_heads * config.head_dim if path == _TRANSFORMER_EMBEDDER: if prop == "input_embedding": # Tied to language_model.lm_head.weight, assigned at the end. converted_paths = ["language_model.model.embed_tokens.weight"] if _VARIANT.value != _VARIANT_GEMMA_3_1B: # Gemma3 model doesn't have image soft token in input and output embeddings, resize to avoid bugs we had with Mllama pre_expansion_embeddings = weights mu = np.mean(pre_expansion_embeddings, axis=0) sigma = np.cov(pre_expansion_embeddings, rowvar=False, bias=True) new_embeddings = np.random.multivariate_normal(mu, 1e-5 * sigma, size=64) weights = np.vstack([pre_expansion_embeddings, new_embeddings]) converted_weights = [weights] elif _VARIANT.value == _VARIANT_GEMMA_3_1B or prop in ("mm_output_embedding", "mm_input_embedding_extra"): return zip([], []) else: raise ValueError(f"Unexpected member, {prop}, in Embedder.") elif path.startswith(f"{_TRANSFORMER_EMBEDDER}/mm"): if _VARIANT.value == _VARIANT_GEMMA_3_1B: return zip([], []) if path.endswith("/mm_input_projection"): converted_paths = ["multi_modal_projector.mm_input_projection_weight"] converted_weights = [weights] elif path.endswith("/mm_soft_embedding_norm"): converted_paths = ["multi_modal_projector.mm_soft_emb_norm.weight"] converted_weights = [weights] else: raise ValueError(f"Unexpected subpath, `{path}`, in Embedder.") elif path == _TRANSFORMER_FINAL_NORM: converted_paths = ["language_model.model.norm.weight"] converted_weights = [weights] elif path.startswith(_TRANSFORMER_DECODER_BLOCK): decoder_block_path = path[_TRANSFORMER_DECODER_BLOCK_LEN:] next_path_seperator_idx = decoder_block_path.find("/") layer_idx = decoder_block_path[:next_path_seperator_idx] decoder_block_path = decoder_block_path[next_path_seperator_idx:] base_path = f"language_model.model.layers.{layer_idx}" if path.endswith("attn/attn_vec_einsum"): converted_paths = [f"{base_path}.self_attn.o_proj.weight"] converted_weights = [weights.transpose(2, 0, 1).reshape(config.hidden_size, attn_head_dim)] elif path.endswith("attn/_key_norm"): converted_paths = [f"{base_path}.self_attn.k_norm.weight"] converted_weights = [weights] elif path.endswith("attn/kv_einsum"): converted_paths = [ f"{base_path}.self_attn.k_proj.weight", f"{base_path}.self_attn.v_proj.weight", ] k_proj_weights, v_proj_weights = weights converted_weights = [ k_proj_weights.transpose(0, 2, 1).reshape(kv_head_dim, config.hidden_size), v_proj_weights.transpose(0, 2, 1).reshape(kv_head_dim, config.hidden_size), ] elif path.endswith("attn/q_einsum"): converted_paths = [f"{base_path}.self_attn.q_proj.weight"] converted_weights = [weights.transpose(0, 2, 1).reshape(attn_head_dim, config.hidden_size)] elif path.endswith("attn/_query_norm"): converted_paths = [f"{base_path}.self_attn.q_norm.weight"] converted_weights = [weights] elif path.endswith("mlp/gating_einsum"): converted_paths = [ f"{base_path}.mlp.gate_proj.weight", f"{base_path}.mlp.up_proj.weight", ] gate_proj_weight, up_proj_weight = weights 
converted_weights = [gate_proj_weight, up_proj_weight] elif path.endswith("mlp/linear"): converted_paths = [f"{base_path}.mlp.down_proj.weight"] converted_weights = [weights.transpose()] elif path.endswith("post_attention_norm"): converted_paths = [f"{base_path}.post_attention_layernorm.weight"] converted_weights = [weights] elif path.endswith("post_ffw_norm"): converted_paths = [f"{base_path}.post_feedforward_layernorm.weight"] converted_weights = [weights] elif path.endswith("pre_attention_norm"): converted_paths = [f"{base_path}.input_layernorm.weight"] converted_weights = [weights] elif path.endswith("pre_ffw_norm"): converted_paths = [f"{base_path}.pre_feedforward_layernorm.weight"] converted_weights = [weights] else: raise ValueError(f"Unexpected path `{path}` in Decoder Block.") else: raise ValueError(f"Unexpected path `{path}`.") if (cpl := len(converted_paths)) != (cwl := len(converted_weights)): raise ValueError( "The `converted_paths` and `converted_weights` should be the same " f"length. Got {cpl} and {cwl}, respectively, for {path}." ) return zip(converted_paths, converted_weights) def convert(checkpoint_path: str, config: Gemma3Config) -> dict[str, torch.Tensor]: """Loads Orbax checkpoint from `input_path` and converts it to HF tree.""" checkpointer = obc.PyTreeCheckpointer() ckpt = checkpointer.restore(checkpoint_path) hf_tree: dict[str, torch.Tensor] = {} def update_tree(path: str, weights: np.ndarray, target_dtype: torch.dtype) -> None: hf_tree[path] = torch.from_numpy(weights.astype("float32")).type(target_dtype) if _VERBOSE.value: logging.info( "%s converted shape=%s with dtype=%s", path, weights.shape, target_dtype, ) for paths, value in tree.flatten_with_path(ckpt): if paths[0].startswith("SigLiPFromPatches_"): if config.vision_config is None: continue path, weights = convert_siglip_weight(config=config.vision_config, paths=paths, weights=value) update_tree(path, weights, config.vision_config.dtype) else: for path, weights in convert_transformer_weights(config=config.text_config, paths=paths, weights=value): if config.vision_config is None: path = path[len("language_model.") :] update_tree(path, weights, config.text_config.dtype) if config.vision_config is None: hf_tree["lm_head.weight"] = hf_tree["model.embed_tokens.weight"] else: hf_tree["language_model.lm_head.weight"] = hf_tree["language_model.model.embed_tokens.weight"] return hf_tree def main(*args): del args output_path = _OUTPUT_PATH.value variant = _VARIANT.value config = _VARIANTS[variant] config.text_config.dtype = getattr(torch, _TRANSFORMER_DTYPE.value) if variant == _VARIANT_GEMMA_3_1B: config.vision_config = None else: config.vision_config.dtype = getattr(torch, _VISION_DTYPE.value) if _INCLUDE_CHAT_TEMPLATE.value: # Chat template is included for instruction tuned models, which treat # both "<eos>" and "<end_of_turn>" as generation stoppers. 
config.eos_token_id = [1, 106] logging.info( "Converting Gemma 3 (%s) @ %s (language) and %s (vision)", variant, _TRANSFORMER_DTYPE.value, _VISION_DTYPE.value, ) state_tree = convert(_CHECKPOINT_PATH.value, config) logging.info("Converted Gemma 3 (%s) state tree from Orbax to Hugging Face.", variant) with accelerate.init_empty_weights(): if variant == _VARIANT_GEMMA_3_1B: model = Gemma3ForCausalLM(config=config.text_config) else: model = Gemma3ForConditionalGeneration(config) model.load_state_dict(state_tree, assign=True, strict=True) logging.info( "Loaded Gemma 3 (%s) in Hugging Face Transformers as a %s instance.", variant, type(model).__name__, ) model.save_pretrained(output_path, safe_serialization=True) logging.info( "Saved Gemma 3 (%s) to SafeTensors in %s using %s", variant, output_path, type(model).__name__, ) del model del state_tree tokenizer = GemmaTokenizerFast( _TOKENIZER_PATH.value, add_bos_token=True, extra_special_tokens={ "image_token": "<image_soft_token>", # Should be ID=262_144 "boi_token": "<start_of_image>", # Should be ID=255_999 "eoi_token": "<end_of_image>", # Should be ID=256_000 }, chat_template=_CHAT_TEMPLATE if _INCLUDE_CHAT_TEMPLATE.value else None, ) tokenizer.save_pretrained(output_path) logging.info("Saved GemmaTokenizer for %s to %s", variant, output_path) if variant != _VARIANT_GEMMA_3_1B: image_processor = Gemma3ImageProcessor( image_seq_length=256, image_mean=(0.5,) * 3, image_std=(0.5,) * 3, size={"height": 896, "width": 896}, resample=PILImageResampling.BILINEAR, ) processor = Gemma3Processor( image_processor=image_processor, tokenizer=tokenizer, chat_template=tokenizer.chat_template, ) processor.save_pretrained(output_path) logging.info("Saved Gemma3Processor for %s to %s", variant, output_path) del processor del tokenizer generation_config = GenerationConfig( pad_token_id=config.pad_token_id, bos_token_id=config.bos_token_id, eos_token_id=config.eos_token_id, cache_implementation="hybrid", temperature=1.0, do_sample=True, top_k=64, top_p=0.95, ) generation_config.save_pretrained(output_path) if __name__ == "__main__": app.run(main)
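Once the converter has finished, a quick way to sanity-check the exported directory is to reload it with Transformers and run a short generation. The sketch below is not part of the converter; it assumes the `gemma3_4b` variant was converted with `--include_chat_template` (so a chat template was saved with the tokenizer) and uses an illustrative output path in place of the real `--output_path`.

```python
# Post-conversion smoke test (a sketch, not part of the converter itself).
import torch

from transformers import AutoProcessor, Gemma3ForConditionalGeneration

output_path = "/path/to/gemma3_4b_pt_safetensors"  # illustrative; use the --output_path passed above

processor = AutoProcessor.from_pretrained(output_path)
model = Gemma3ForConditionalGeneration.from_pretrained(output_path, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": [{"type": "text", "text": "Summarize what this converter does."}]}]
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(model.device)

with torch.inference_mode():
    generated = model.generate(**inputs, max_new_tokens=32)

# Strip the prompt tokens before decoding the newly generated text.
new_tokens = generated[:, inputs["input_ids"].shape[-1] :]
print(processor.batch_decode(new_tokens, skip_special_tokens=True)[0])
```

For the `gemma3_1b` variant the converter saves a text-only model, so the equivalent check would use `Gemma3ForCausalLM` and the tokenizer directly instead of a processor.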
transformers/src/transformers/models/gemma3/convert_gemma3_weights_orbax_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/gemma3/convert_gemma3_weights_orbax_to_hf.py", "repo_id": "transformers", "token_count": 10722 }
436
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch GIT model.""" import math from dataclasses import dataclass from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling, CausalLMOutputWithPast, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, auto_docstring, can_return_tuple, logging, torch_int, ) from ...utils.deprecation import deprecate_kwarg from .configuration_git import GitConfig, GitVisionConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. """ ) # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Git class GitVisionModelOutput(ModelOutput): r""" image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. 
""" image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None class GitEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: embeddings = self.word_embeddings(input_ids) else: embeddings = inputs_embeds if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class GitSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None, layer_idx=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." 
) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1) if config.num_image_with_embedding is not None: self.image_patch_tokens *= config.num_image_with_embedding self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, pixel_values_present: Optional[bool] = False, ) -> tuple[torch.Tensor]: batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) cutoff = self.image_patch_tokens if pixel_values_present else 0 key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) if past_key_values is not None: # NOTE: like in other caches, we store the text component. In GIT it means we discard the image component. key_layer_past, value_layer_past = past_key_values.update( key_layer[:, :, cutoff:, :], value_layer[:, :, cutoff:, :], self.layer_idx ) key_layer = torch.cat([key_layer[:, :, :cutoff, :], key_layer_past], dim=2) value_layer = torch.cat([value_layer[:, :, :cutoff, :], value_layer_past], dim=2) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if past_key_values is not None: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in GitModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) return context_layer, attention_probs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class GitSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states GIT_SELF_ATTENTION_CLASSES = { "eager": GitSelfAttention, } class GitAttention(nn.Module): def __init__(self, config, position_embedding_type=None, layer_idx=None): super().__init__() self.self = GIT_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type, layer_idx=layer_idx ) self.output = GitSelfOutput(config) self.pruned_heads = set() # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, pixel_values_present: Optional[bool] = False, ) -> tuple[torch.Tensor]: attn_output, self_attn_weights = self.self( hidden_states, attention_mask, head_mask, past_key_values, output_attentions, pixel_values_present, ) attention_output = self.output(attn_output, hidden_states) return attention_output, self_attn_weights # Copied from transformers.models.bert.modeling_bert.BertIntermediate class GitIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class GitOutput(nn.Module): def __init__(self, config): 
super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class GitLayer(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = GitAttention(config, layer_idx=layer_idx) self.intermediate = GitIntermediate(config) self.output = GitOutput(config) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, pixel_values_present: Optional[bool] = False, ) -> tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 attention_output, self_attention_weights = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_values=past_key_values, pixel_values_present=pixel_values_present, ) layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output, self_attention_weights def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class GitEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([GitLayer(config, i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.FloatTensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, pixel_values_present: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPast]: if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache if not isinstance(past_key_values, (type(None), Cache)): raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") if use_cache and past_key_values is None: past_key_values = DynamicCache() all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, past_key_values, output_attentions, pixel_values_present, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, past_key_values, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class GitPreTrainedModel(PreTrainedModel): config: GitConfig base_model_prefix = "git" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, GitVisionEmbeddings): nn.init.normal_(module.class_embedding, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.patch_embedding.weight, std=self.config.initializer_range) nn.init.normal_(module.position_embedding.weight, std=self.config.initializer_range) if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Git class GitVisionEmbeddings(nn.Module): def __init__(self, config: GitVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." ) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class GitVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.siglip.modeling_siglip.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class GitVisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def 
__init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.is_causal = False self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" batch_size, seq_length, embed_dim = hidden_states.shape queries = self.q_proj(hidden_states) keys = self.k_proj(hidden_states) values = self.v_proj(hidden_states) queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) # CLIP text model uses both `causal_attention_mask` and `attention_mask` # in case FA2 kernel is called, `is_causal` should be inferred from `causal_attention_mask` if self.config._attn_implementation != "flash_attention_2": if attention_mask is not None and causal_attention_mask is not None: attention_mask = attention_mask + causal_attention_mask elif causal_attention_mask is not None: attention_mask = causal_attention_mask else: self.is_causal = causal_attention_mask is not None attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout, ) attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer with AltCLIP->GitVision class GitVisionEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: GitVisionConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = GitVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = GitVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->GitVision, CLIPConfig class GitVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`GitVisionEncoderLayer`]. Args: config: GitVisionConfig """ def __init__(self, config: GitVisionConfig): super().__init__() self.config = config self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class GitVisionTransformer(nn.Module): # Copied from transformers.models.altclip.modeling_altclip.AltCLIPVisionTransformer.__init__ with AltCLIPEncoder->GitVisionEncoder, AltCLIP->Git def __init__(self, config: GitVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GitVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = GitVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) if not return_dict: 
return (last_hidden_state,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The vision model from CLIP, used in GIT, without any head or projection on top. """ ) class GitVisionModel(GitPreTrainedModel): config: GitVisionConfig main_input_name = "pixel_values" # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP->Git def __init__(self, config: GitVisionConfig): super().__init__(config) self.vision_model = GitVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, GitVisionModel >>> processor = AutoProcessor.from_pretrained("microsoft/git-base") >>> model = GitVisionModel.from_pretrained("microsoft/git-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) class GitProjection(nn.Module): def __init__(self, config: GitConfig): super().__init__() self.config = config self.visual_projection = nn.Sequential( nn.Linear(config.vision_config.hidden_size, config.hidden_size), nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps), ) def forward(self, embeddings: torch.Tensor) -> torch.Tensor: return self.visual_projection(embeddings) @auto_docstring( custom_intro=""" The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states """ ) class GitModel(GitPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = GitEmbeddings(config) self.image_encoder = GitVisionModel(config.vision_config) self.encoder = GitEncoder(config) self.visual_projection = GitProjection(config) if config.num_image_with_embedding is not None: self.img_temperal_embedding = nn.ParameterList( nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size)) for _ in range(config.num_image_with_embedding) ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor: # Default mask is for forward direction. Flip for backward direction. mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1) mask = mask.masked_fill(mask == 1, float("-inf")) return mask def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None): num_tgt = tgt.shape[1] num_memory = memory.shape[1] device = tgt.device dtype = tgt.dtype top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype) top_right = torch.full( (num_memory, num_tgt + past_key_values_length), float("-inf"), device=tgt.device, dtype=dtype, ) bottom_left = torch.zeros( (num_tgt, num_memory), dtype=dtype, device=tgt_mask.device, ) if past_key_values_length > 0: tgt_mask = torch.zeros( (tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length), dtype=dtype, device=tgt_mask.device, ) left = torch.cat((top_left, bottom_left), dim=0) right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0) full_attention_mask = torch.cat((left, right), dim=1)[None, :] if memory_key_padding_mask is None: memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device) # if it is False, it means valid. That is, it is not a padding if memory_key_padding_mask.dtype != torch.bool: raise ValueError("Memory key padding mask must be a boolean tensor.") zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype) zero_negative_infinity[memory_key_padding_mask] = float("-inf") full_attention_mask = full_attention_mask.expand( (memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt) ) full_attention_mask = full_attention_mask.clone() origin_left = full_attention_mask[:, :, :num_memory] update = zero_negative_infinity[:, None, :] full_attention_mask[:, :, :num_memory] = origin_left + update # add axis for multi-head full_attention_mask = full_attention_mask[:, None, :, :] return full_attention_mask @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]: r""" Examples: ```python >>> from transformers import AutoProcessor, AutoModel >>> import requests >>> from PIL import Image >>> processor = AutoProcessor.from_pretrained("microsoft/git-base") >>> model = AutoModel.from_pretrained("microsoft/git-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "this is an image of two cats" >>> inputs = processor(images=image, text=text, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions 
is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") seq_length = input_shape[1] # past_key_values_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = ( past_key_values.get_seq_length() if not isinstance(past_key_values, Cache) else past_key_values.get_seq_length() ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) projected_visual_features = None if pixel_values is not None: if pixel_values.ndim == 4: # here we assume pixel_values is of shape (batch_size, num_channels, height, width) visual_features = self.image_encoder( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding ).last_hidden_state elif pixel_values.ndim == 5: # here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width) visual_features = [] for frame_idx in range(pixel_values.shape[1]): visual_features_frame = self.image_encoder( pixel_values[:, frame_idx, :, :], interpolate_pos_encoding=interpolate_pos_encoding ).last_hidden_state visual_features_frame += self.img_temperal_embedding[frame_idx] visual_features.append(visual_features_frame) # finally, concatenate all features along sequence dimension visual_features = torch.cat(visual_features, dim=1) else: raise ValueError("pixel_values must be of rank 4 or 5") projected_visual_features = self.visual_projection(visual_features) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) if projected_visual_features is None: projected_visual_features = torch.zeros( (embedding_output.shape[0], 0, embedding_output.shape[2]), dtype=embedding_output.dtype, device=embedding_output.device, ) # Repeat visual features to match embedding batch size. projected_visual_features = projected_visual_features.repeat( embedding_output.size(0) // projected_visual_features.size(0), 1, 1 ) # concatenate patch token and text token embeddings hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1) # By default, an additive causal mask is created # for masking the future (one direction). 
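        # (Descriptive note added for clarity; not in the upstream file.) `hidden_states` is laid out as
        # [image patch tokens, text tokens], so the combined mask built below is block-structured: image
        # tokens attend only to other image tokens, while text tokens attend to every image token and
        # causally to earlier text tokens; a user-supplied `attention_mask` is folded into the text block.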
tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device) # Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len) combined_attention_mask = self.create_attention_mask( tgt=embedding_output, memory=projected_visual_features, tgt_mask=tgt_mask, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # if the user provides an attention mask, we add it to the default one # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _prepare_4d_attention_mask( attention_mask, embedding_output.dtype, tgt_len=input_shape[-1] ).to(embedding_output.device) if past_key_values_length > 0: expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :] else: combined_attention_mask[:, :, -input_shape[1] :, -input_shape[1] :] += expanded_attn_mask encoder_outputs = self.encoder( hidden_states, attention_mask=combined_attention_mask, head_mask=head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values_present=pixel_values is not None, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPast( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" GIT Model with a `language modeling` head on top for autoregressive language modeling. """ ) class GitForCausalLM(GitPreTrainedModel, GenerationMixin): _tied_weights_keys = ["output.weight"] def __init__(self, config): super().__init__(config) self.git = GitModel(config) self.output = nn.Linear(config.hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.output def set_output_embeddings(self, new_embeddings): self.output = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, past_key_values: Optional[Union[Cache, list[torch.Tensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` Examples: Image captioning example: ```python >>> from transformers import AutoProcessor, AutoModelForCausalLM >>> import requests >>> from PIL import Image >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-coco") >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50) >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_caption) two cats sleeping on a pink blanket next to remotes. ``` Visual question answering (VQA) example: ```python >>> from transformers import AutoProcessor, AutoModelForCausalLM >>> from huggingface_hub import hf_hub_download >>> from PIL import Image >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa") >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa") >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset") >>> image = Image.open(file_path).convert("RGB") >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values >>> question = "what does the front of the bus say at the top?" >>> input_ids = processor(text=question, add_special_tokens=False).input_ids >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids >>> input_ids = torch.tensor(input_ids).unsqueeze(0) >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50) >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True)) ['what does the front of the bus say at the top? special'] ``` Video captioning example: ```python >>> import av >>> import numpy as np >>> from PIL import Image >>> from huggingface_hub import hf_hub_download >>> from transformers import AutoProcessor, AutoModelForCausalLM >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-vatex") >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vatex") >>> # set seed for reproducibility >>> np.random.seed(45) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`list[int]`): List of sampled frame indices ... ''' ... 
converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # load video >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample frames >>> num_frames = model.config.num_image_with_embedding >>> indices = sample_frame_indices( ... clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames ... ) >>> frames = read_video_pyav(container, indices) >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50) >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True)) Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.'] ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.git( input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.output(sequence_output) loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens shifted_logits = logits[:, num_image_tokens:-1, :].contiguous() labels = labels[:, 1:].contiguous() loss = self.loss_function( shifted_logits.view(-1, self.config.vocab_size), labels.view(-1), vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs ): # Overwritten -- `git` has special cache handling and doesn't support generating from `inputs_embeds` atm # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values.get_seq_length() # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) return { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": kwargs.get("pixel_values"), "past_key_values": past_key_values, "use_cache": use_cache, } __all__ = ["GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel"]
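# ---------------------------------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream file). For a toy size, assuming
# no cache and no image padding, it reproduces the block layout of the additive mask that
# `GitModel.create_attention_mask` builds: image ("memory") tokens see only each other, while text
# tokens see every image token plus earlier text tokens. `num_memory` and `num_tgt` are made-up
# example sizes, and `_example_git_attention_mask` is a hypothetical helper, not a model API.
def _example_git_attention_mask(num_memory: int = 3, num_tgt: int = 4) -> torch.Tensor:
    top_left = torch.zeros(num_memory, num_memory)  # image -> image: allowed
    top_right = torch.full((num_memory, num_tgt), float("-inf"))  # image -> text: blocked
    bottom_left = torch.zeros(num_tgt, num_memory)  # text -> image: allowed
    # text -> future text: blocked (upper triangle set to -inf, as in `_generate_future_mask`)
    tgt_mask = torch.triu(torch.ones(num_tgt, num_tgt), diagonal=1)
    tgt_mask = tgt_mask.masked_fill(tgt_mask == 1, float("-inf"))
    left = torch.cat((top_left, bottom_left), dim=0)
    right = torch.cat((top_right, tgt_mask), dim=0)
    # additive mask of shape (num_memory + num_tgt, num_memory + num_tgt), added to attention scores
    return torch.cat((left, right), dim=1)
# ---------------------------------------------------------------------------------------------------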
transformers/src/transformers/models/git/modeling_git.py/0
{ "file_path": "transformers/src/transformers/models/git/modeling_git.py", "repo_id": "transformers", "token_count": 26937 }
437
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OpenAI GPT-2 model.""" import math import os import warnings from dataclasses import dataclass from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, get_activation from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import ( ModelOutput, add_start_docstrings, auto_docstring, logging, ) from ...utils.deprecation import deprecate_kwarg from ...utils.model_parallel_utils import assert_device_map, get_device_map from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): """Load tf checkpoints in a pytorch model""" try: import re import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(gpt2_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for name, array in zip(names, arrays): name = name[6:] # skip "model/" name = name.split("/") pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+\d+", m_name): scope_names = re.split(r"(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "w" or scope_names[0] == "g": pointer = getattr(pointer, "weight") elif scope_names[0] == "b": pointer = getattr(pointer, "bias") elif scope_names[0] == "wpe" or scope_names[0] == "wte": pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, "weight") else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except ValueError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model def eager_attention_forward(module, query, key, value, attention_mask, head_mask=None, **kwargs): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if module.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device ) # Layer-wise attention scaling if module.scale_attn_by_inverse_layer_idx: attn_weights = attn_weights / float(module.layer_idx + 1) if not module.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = module.bias[:, :, key_length - query_length : key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value) if attention_mask is not None: # Apply the attention mask causal_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise attn_weights = attn_weights.type(value.dtype) attn_weights = module.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights class GPT2Attention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.config = config max_positions = config.max_position_embeddings self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( 1, 1, max_positions, max_positions ), persistent=False, ) self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.split_size = self.embed_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention # Layer-wise attention scaling, reordering, and upcasting self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx self.layer_idx = layer_idx self.reorder_and_upcast_attn = config.reorder_and_upcast_attn if self.is_cross_attention: self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) self.q_attn = Conv1D(self.embed_dim, self.embed_dim) else: self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) self.c_proj = Conv1D(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.is_causal = True self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads) index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) # Prune conv1d layers self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) # Update hyper params self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads)) self.num_heads = self.num_heads - len(heads) self.pruned_heads = self.pruned_heads.union(heads) def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None): # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) bsz, num_heads, q_seq_len, dk = query.size() _, _, k_seq_len, _ = key.size() # Preallocate attn_weights for `baddbmm` attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device) # Compute Scale Factor scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= float(value.size(-1)) ** 0.5 if 
self.scale_attn_by_inverse_layer_idx: scale_factor /= float(self.layer_idx + 1) # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) with torch.autocast(query.device.type, enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise if attn_weights.dtype != torch.float32: raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32") attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]: is_cross_attention = encoder_hidden_states is not None if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_layer from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values if is_cross_attention: if not hasattr(self, "q_attn"): raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
) query_states = self.q_attn(hidden_states) attention_mask = encoder_attention_mask # Try to get key/value states from cache if possible if past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states, value_states = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) shape_kv = (*key_states.shape[:-1], -1, self.head_dim) key_states = key_states.view(shape_kv).transpose(1, 2) value_states = value_states.view(shape_kv).transpose(1, 2) else: query_states, key_states, value_states = self.c_attn(hidden_states).split(self.split_size, dim=2) shape_kv = (*key_states.shape[:-1], -1, self.head_dim) key_states = key_states.view(shape_kv).transpose(1, 2) value_states = value_states.view(shape_kv).transpose(1, 2) shape_q = (*query_states.shape[:-1], -1, self.head_dim) query_states = query_states.view(shape_q).transpose(1, 2) if (past_key_values is not None and not is_cross_attention) or ( past_key_values is not None and is_cross_attention and not is_updated ): # save all key/value_layer to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True is_causal = attention_mask is None and query_states.shape[-2] > 1 and not is_cross_attention using_eager = self.config._attn_implementation == "eager" attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] if using_eager and self.reorder_and_upcast_attn: attn_output, attn_weights = self._upcast_and_reordered_attn( query_states, key_states, value_states, attention_mask, head_mask ) else: attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, head_mask=head_mask, dropout=self.attn_dropout.p if self.training else 0.0, is_causal=is_causal, **kwargs, ) attn_output = attn_output.reshape(*attn_output.shape[:-2], -1).contiguous() attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) return attn_output, attn_weights class GPT2MLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPT2Block(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPT2Attention(config=config, layer_idx=layer_idx) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: 
self.crossattention = GPT2Attention(config=config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPT2MLP(inner_dim, config) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, **kwargs, ) -> Union[tuple[torch.Tensor], Optional[tuple[torch.Tensor, tuple[torch.FloatTensor, ...]]]]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output, self_attn_weights = self.attn( hidden_states, past_key_values=past_key_values, cache_position=cache_position, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, **kwargs, ) # residual connection hidden_states = attn_output + residual if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_output, cross_attn_weights = self.crossattention( hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) # residual connection hidden_states = residual + cross_attn_output residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) # residual connection hidden_states = residual + feed_forward_hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if encoder_hidden_states is not None: outputs += (cross_attn_weights,) return outputs # Copied from transformers.models.xlm.modeling_xlm.XLMSequenceSummary with XLM->GPT2 class GPT2SequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states. Args: config ([`GPT2Config`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. 
- **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. """ def __init__(self, config: GPT2Config): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = nn.Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity() self.first_dropout = nn.Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = nn.Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: `torch.FloatTensor`: The summary of the sequence hidden states. 
""" if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like( hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long, ) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output @auto_docstring class GPT2PreTrainedModel(PreTrainedModel): config: GPT2Config load_tf_weights = load_tf_weights_in_gpt2 base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ["GPT2Block"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _supports_attention_backend = True _can_compile_fullgraph = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. # > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py for name, p in module.named_parameters(): if name == "c_proj.weight": # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer))) @dataclass @auto_docstring( custom_intro=""" Base class for outputs of models predicting if two sentences are consecutive or not. """ ) class GPT2DoubleHeadsModelOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided): Multiple choice classification loss. logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). past_key_values (`tuple[tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. """ loss: Optional[torch.FloatTensor] = None mc_loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None mc_logits: Optional[torch.FloatTensor] = None past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None PARALLELIZE_DOCSTRING = r""" This is an experimental feature and is a subject to change at a moment's notice. Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices. Args: device_map (`dict[int, list]`, *optional*): A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the following number of attention modules: - openai-community/gpt2: 12 - openai-community/gpt2-medium: 24 - openai-community/gpt2-large: 36 - openai-community/gpt2-xl: 48 Example: ```python # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules: model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-xl") device_map = { 0: [0, 1, 2, 3, 4, 5, 6, 7, 8], 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34], 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47], } model.parallelize(device_map) ``` """ DEPARALLELIZE_DOCSTRING = r""" Moves the model to cpu from a model parallel state. 
Example: ```python # On a 4 GPU machine with openai-community/gpt2-large: model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-large") device_map = { 0: [0, 1, 2, 3, 4, 5, 6, 7], 1: [8, 9, 10, 11, 12, 13, 14, 15], 2: [16, 17, 18, 19, 20, 21, 22, 23], 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], } model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() ``` """ @auto_docstring class GPT2Model(GPT2PreTrainedModel): _supports_param_buffer_assignment = False def __init__(self, config): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False self._attn_implementation = config._attn_implementation # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map warnings.warn( "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your" " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1," " ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.h)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) self.wte = self.wte.to(self.first_device) self.wpe = self.wpe.to(self.first_device) # Load onto devices for k, v in self.device_map.items(): for block in v: cuda_device = "cuda:" + str(k) self.h[block] = self.h[block].to(cuda_device) # ln_f to last self.ln_f = self.ln_f.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" self.wte = self.wte.to("cpu") self.wpe = self.wpe.to("cpu") for index in range(len(self.h)): self.h[index] = self.h[index].to("cpu") self.ln_f = self.ln_f.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[tuple[tuple[torch.Tensor]], Cache]] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # based on pattern from src/transformers/models/whisper/modeling_whisper.py::WhisperDecoder if use_cache: if past_key_values is None: past_key_values = DynamicCache() elif isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.53.0. " "You should pass an instance of `Cache` instead, e.g. " "`past_key_values=DynamicCache.from_legacy_cache(past_key_values)`." 
) past_key_values = DynamicCache.from_legacy_cache(past_key_values) if self.config.add_cross_attention and not isinstance(past_key_values, EncoderDecoderCache): past_key_values = EncoderDecoderCache(past_key_values, DynamicCache()) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device) # Attention mask. # ._update_causal_mask() and ._prepare_4d_causal_attention_mask_with_cache_position() copied from LlamaModel if attention_mask is not None and attention_mask.ndim < 4: attention_mask = attention_mask.view(batch_size, -1) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] _use_sdpa = self._attn_implementation == "sdpa" and output_attentions is False and head_mask is None if self.config.add_cross_attention and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) if _use_sdpa: encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( mask=encoder_attention_mask, dtype=inputs_embeds.dtype, tgt_len=input_shape[-1] ) elif self._attn_implementation != "flash_attention_2": encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.h): # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, past_key_values if not (self.gradient_checkpointing and self.training) else None, cache_position, causal_mask, head_mask[i], encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, **kwargs, ) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) if self.config.add_cross_attention: all_cross_attentions = 
all_cross_attentions + (outputs[2],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_values = past_key_values if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) @auto_docstring( custom_intro=""" The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """ ) class GPT2LMHeadModel(GPT2PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':" " 0, 'transformer.h.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.transformer.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.transformer.deparallelize() self.transformer = self.transformer.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False torch.cuda.empty_cache() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> Union[tuple, CausalLMOutputWithCrossAttentions]: r""" input_ids 
(`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, cache_position=cache_position, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: # Flatten the tokens loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) @auto_docstring( custom_intro=""" The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). 
""" ) class GPT2DoubleHeadsModel(GPT2PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) config.num_labels = 1 self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = GPT2SequenceSummary(config) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should" " load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your" " own `device_map` but it needs to be a dictionary module_name to device, so for instance" " {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.transformer.first_device) self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.transformer.deparallelize() self.transformer = self.transformer.to("cpu") self.lm_head = self.lm_head.to("cpu") self.multiple_choice_head = self.multiple_choice_head.to("cpu") self.model_parallel = False torch.cuda.empty_cache() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, mc_token_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, mc_labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, GPT2DoubleHeadsModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`. labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): Labels for language modeling. 
Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]` mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) Example: ```python >>> import torch >>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2") >>> # Add a [CLS] to the vocabulary (we should train it also!) >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) >>> # Update the model embeddings with the new vocabulary size >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] >>> encoded_choices = [tokenizer.encode(s) for s in choices] >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2 >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1 >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) >>> lm_logits = outputs.logits >>> mc_logits = outputs.mc_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, cache_position=cache_position, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) lm_logits = self.lm_head(hidden_states) mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) mc_loss = None if mc_labels is not None: loss_fct = CrossEntropyLoss() mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)) lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits, mc_logits) + transformer_outputs[1:] if mc_loss is not None: output = (mc_loss,) + output return ((lm_loss,) + output) if lm_loss is not None else output return GPT2DoubleHeadsModelOutput( loss=lm_loss, mc_loss=mc_loss, logits=lm_logits, mc_logits=mc_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring( custom_intro=""" The GPT2 Model transformer with a sequence classification head on top (linear layer). [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. 
Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """ ) class GPT2ForSequenceClassification(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPT2Model(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring class GPT2ForTokenClassification(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPT2Model(config) if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: 
Optional[tuple[tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring class GPT2ForQuestionAnswering(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPT2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): 
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "GPT2DoubleHeadsModel", "GPT2ForQuestionAnswering", "GPT2ForSequenceClassification", "GPT2ForTokenClassification", "GPT2LMHeadModel", "GPT2Model", "GPT2PreTrainedModel", "load_tf_weights_in_gpt2", ]
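# ---------------------------------------------------------------------------
# Editor's note: the snippet below is a minimal usage sketch for the classes
# defined in this module, not part of the original file. It assumes the
# "openai-community/gpt2" checkpoint (already referenced in the docstrings
# above) is reachable via the Hub or a local cache and that `torch` is
# installed; the helper name `_gpt2_usage_sketch` and the toy inputs are
# illustrative only.
import torch

from transformers import AutoTokenizer, GPT2ForSequenceClassification, GPT2LMHeadModel


def _gpt2_usage_sketch():
    tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")

    # Causal LM head: `generate` re-uses past key/values through the Cache API
    # handled inside `GPT2Model.forward` above.
    lm = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")
    # The mixed-precision-safe attention path (`_upcast_and_reordered_attn`) is
    # toggled through the config, e.g.:
    #   GPT2LMHeadModel.from_pretrained("openai-community/gpt2", reorder_and_upcast_attn=True)
    lm.eval()
    inputs = tokenizer("The GPT-2 attention layer", return_tensors="pt")
    with torch.no_grad():
        generated = lm.generate(**inputs, max_new_tokens=20, do_sample=False)
    print(tokenizer.decode(generated[0], skip_special_tokens=True))

    # Sequence classification head: GPT-2 has no PAD token by default, so it is
    # commonly mapped to EOS before batching (see the pooling logic that picks
    # the last non-padding token in `GPT2ForSequenceClassification.forward`).
    tokenizer.pad_token = tokenizer.eos_token
    clf = GPT2ForSequenceClassification.from_pretrained("openai-community/gpt2", num_labels=2)
    clf.config.pad_token_id = tokenizer.eos_token_id
    clf.eval()
    batch = tokenizer(["a great movie", "a terrible movie"], padding=True, return_tensors="pt")
    with torch.no_grad():
        logits = clf(**batch).logits  # shape: (batch_size, num_labels)
    print(logits.argmax(-1))


if __name__ == "__main__":
    _gpt2_usage_sketch()
# ---------------------------------------------------------------------------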
transformers/src/transformers/models/gpt2/modeling_gpt2.py/0
{ "file_path": "transformers/src/transformers/models/gpt2/modeling_gpt2.py", "repo_id": "transformers", "token_count": 32624 }
438
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GroupViT model configuration""" from collections import OrderedDict from collections.abc import Mapping from typing import TYPE_CHECKING, Any, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType logger = logging.get_logger(__name__) class GroupViTTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate an GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GroupViTModel`]. hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 1024): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 77): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). 
Example: ```python >>> from transformers import GroupViTTextConfig, GroupViTTextModel >>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration >>> configuration = GroupViTTextConfig() >>> model = GroupViTTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "groupvit_text_model" base_config_key = "text_config" def __init__( self, vocab_size=49408, hidden_size=256, intermediate_size=1024, num_hidden_layers=12, num_attention_heads=4, max_position_embeddings=77, hidden_act="quick_gelu", layer_norm_eps=1e-5, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.dropout = dropout self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout class GroupViTVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate an GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 384): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 1536): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. depths (`list[int]`, *optional*, defaults to [6, 3, 3]): The number of layers in each encoder block. num_group_tokens (`list[int]`, *optional*, defaults to [64, 8, 0]): The number of group tokens for each stage. num_output_groups (`list[int]`, *optional*, defaults to [64, 8, 8]): The number of output groups for each stage, 0 means no group. num_attention_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization layers. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import GroupViTVisionConfig, GroupViTVisionModel >>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration >>> configuration = GroupViTVisionConfig() >>> model = GroupViTVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "groupvit_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=384, intermediate_size=1536, depths=[6, 3, 3], num_hidden_layers=12, num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=6, image_size=224, patch_size=16, num_channels=3, hidden_act="gelu", layer_norm_eps=1e-5, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, assign_eps=1.0, assign_mlp_ratio=[0.5, 4], **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.depths = depths if num_hidden_layers != sum(depths): logger.warning( f"Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers =" f" sum(depth) = {sum(depths)}" ) self.num_hidden_layers = num_hidden_layers self.num_group_tokens = num_group_tokens self.num_output_groups = num_output_groups self.num_attention_heads = num_attention_heads self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.assign_eps = assign_eps self.assign_mlp_ratio = assign_mlp_ratio class GroupViTConfig(PretrainedConfig): r""" [`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to instantiate a GroupViT model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`GroupViTTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`GroupViTVisionConfig`]. projection_dim (`int`, *optional*, defaults to 256): Dimensionality of text and vision projection layers. projection_intermediate_dim (`int`, *optional*, defaults to 4096): Dimensionality of intermediate layer of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original GroupViT implementation. kwargs (*optional*): Dictionary of keyword arguments. 
""" model_type = "groupvit" sub_configs = {"text_config": GroupViTTextConfig, "vision_config": GroupViTVisionConfig} def __init__( self, text_config=None, vision_config=None, projection_dim=256, projection_intermediate_dim=4096, logit_scale_init_value=2.6592, **kwargs, ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). text_config_dict = kwargs.pop("text_config_dict", None) vision_config_dict = kwargs.pop("vision_config_dict", None) super().__init__(**kwargs) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: text_config = {} # This is the complete result when using `text_config_dict`. _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: message = ( f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " f'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. " f'The value `text_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} # This is the complete result when using `vision_config_dict`. _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _vision_config_dict["id2label"] = { str(key): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: message = ( f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different " f'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`." f' The value `vision_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict) if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.") if vision_config is None: vision_config = {} logger.info("`vision_config` is `None`. 
initializing the `GroupViTVisionConfig` with default values.") self.text_config = GroupViTTextConfig(**text_config) self.vision_config = GroupViTVisionConfig(**vision_config) self.projection_dim = projection_dim self.projection_intermediate_dim = projection_intermediate_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_range = 0.02 self.initializer_factor = 1.0 self.output_segmentation = False class GroupViTOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("logits_per_image", {0: "batch"}), ("logits_per_text", {0: "batch"}), ("text_embeds", {0: "batch"}), ("image_embeds", {0: "batch"}), ] ) @property def atol_for_validation(self) -> float: return 1e-4 def generate_dummy_inputs( self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]: text_input_dict = super().generate_dummy_inputs( processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework ) image_input_dict = super().generate_dummy_inputs( processor.image_processor, batch_size=batch_size, framework=framework ) return {**text_input_dict, **image_input_dict} @property def default_onnx_opset(self) -> int: return 14 __all__ = ["GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig"]
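The configuration classes above compose in the usual Transformers way: the composite `GroupViTConfig` accepts the text and vision sub-configurations as plain dictionaries and instantiates `GroupViTTextConfig` / `GroupViTVisionConfig` from them. A minimal sketch, assuming a `transformers` install that ships GroupViT; the hyperparameter values are only illustrative, not a recommended setup:

```python
from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig

# Build the sub-configurations explicitly; any field left unset keeps its default value.
text_config = GroupViTTextConfig(hidden_size=256, num_attention_heads=4)
vision_config = GroupViTVisionConfig(hidden_size=384, depths=[6, 3, 3])

# The composite config accepts the sub-configs as plain dictionaries and
# re-instantiates the typed config objects internally.
config = GroupViTConfig(
    text_config=text_config.to_dict(),
    vision_config=vision_config.to_dict(),
    projection_dim=256,
)

print(config.text_config.hidden_size)  # 256
print(config.vision_config.depths)     # [6, 3, 3]
print(config.projection_dim)           # 256
```

Passing `text_config_dict` / `vision_config_dict` instead triggers the backward-compatibility merging path shown above, where values from the `_dict` variants override the plain `text_config` / `vision_config` entries.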
transformers/src/transformers/models/groupvit/configuration_groupvit.py/0
{ "file_path": "transformers/src/transformers/models/groupvit/configuration_groupvit.py", "repo_id": "transformers", "token_count": 7592 }
439
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Hiera model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class HieraConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`HieraModel`]. It is used to instantiate a Hiera model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Hiera [facebook/hiera-base-224](https://huggingface.co/facebook/hiera-base-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: embed_dim (`int`, *optional*, defaults to 96): Dimensionality of patch embedding. image_size (`list(int)`, *optional*, defaults to `[224, 224]`): The size (resolution) of input in the format (height, width) for images and (frames, height, width) for videos. patch_size (`list(int)`, *optional*, defaults to `[7, 7]`): The size (resolution) of each patch. patch_stride (`list(int)`, *optional*, defaults to `[4, 4]`): The stride of the patch. patch_padding (`list(int)`, *optional*, defaults to `[3, 3]`): The padding of the patch. mlp_ratio (`float`, *optional*, defaults to 4.0): The ratio of mlp hidden dim to embedding dim. depths (`list(int)`, *optional*, defaults to `[2, 3, 16, 3]`): Depth of each layer in the Transformer encoder. num_heads (`list(int)`, *optional*, defaults to `[1, 2, 4, 8]`): Number of attention heads in each layer of the Transformer encoder. embed_dim_multiplier (`float`, *optional*, defaults to 2.0): The multiplier to the dimensionality of patch embedding in each layer of the Transformer encoder. num_query_pool (`int`, *optional*, defaults to 3): The number of query pool stages. query_stride (`list(int)`, *optional*, defaults to `[2, 2]`): The stride of the query pool. masked_unit_size (`list(int)`, *optional*, defaults to `[8, 8]`): The size of the masked unit. masked_unit_attention (`list(bool)`, *optional*, defaults to `[True, True, False, False]`): Whether to use masked unit attention in each layer of the Transformer encoder. drop_path_rate (`float`, *optional*, defaults to 0.0): The drop path rate. num_channels (`int`, *optional*, defaults to 3): The number of input channels. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices and the zero_initializer for initializing all bias vectors. 
layer_norm_init (`float`, *optional*, defaults to 1.0): The initial weight value for layer normalization layers. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. decoder_hidden_size (`int`, *optional*): Dimensionality of decoder embeddings for MAE pretraining. decoder_depth (`int`, *optional*): Depth of the decoder for MAE pretraining. decoder_num_heads (`int`, *optional*): Number of attention heads in each layer of the decoder for MAE pretraining. normalize_pixel_loss (`bool`, *optional*, defaults to `True`): Whether to normalize the pixel loss by the number of pixels. mask_ratio (`float`, *optional*, defaults to 0.6): The ratio of masked tokens in the input. out_features (`list[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import HieraConfig, HieraModel >>> # Initializing a Hiera hiera-base-patch16-224 style configuration >>> configuration = HieraConfig() >>> # Initializing a model (with random weights) from the hiera-base-patch16-224 style configuration >>> model = HieraModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "hiera" attribute_map = {"num_hidden_layers": "num_layers"} def __init__( self, embed_dim=96, image_size=[224, 224], patch_size=[7, 7], patch_stride=[4, 4], patch_padding=[3, 3], mlp_ratio=4.0, depths=[2, 3, 16, 3], num_heads=[1, 2, 4, 8], embed_dim_multiplier=2.0, num_query_pool=3, query_stride=[2, 2], masked_unit_size=[8, 8], masked_unit_attention=[True, True, False, False], drop_path_rate=0.0, num_channels=3, hidden_act="gelu", initializer_range=0.02, layer_norm_init=1.0, layer_norm_eps=1e-6, decoder_hidden_size=None, decoder_depth=None, decoder_num_heads=None, normalize_pixel_loss=True, mask_ratio=0.6, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) if masked_unit_size[0] % query_stride[0] ** (len(depths) - 1) != 0: raise ValueError( f"masked_unit_size[0] ({masked_unit_size[0]}) must be divisible by query_stride[0] ({query_stride[0]}) " f"raised to the power of the number of layers ({len(depths) - 1})" ) if num_query_pool >= len(depths): raise ValueError( f"num_query_pool ({num_query_pool}) must be less than the number of layers ({len(depths)})" ) self.embed_dim = embed_dim self.image_size = image_size self.patch_size = patch_size self.patch_stride = patch_stride self.patch_padding = patch_padding self.mlp_ratio = mlp_ratio self.depths = depths self.num_heads = num_heads self.num_layers = len(depths) self.embed_dim_multiplier = embed_dim_multiplier self.num_query_pool = num_query_pool self.query_stride = query_stride self.masked_unit_size = masked_unit_size self.masked_unit_attention = masked_unit_attention self.drop_path_rate = drop_path_rate self.num_channels = 
num_channels self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_init = layer_norm_init self.layer_norm_eps = layer_norm_eps self.decoder_hidden_size = decoder_hidden_size self.decoder_depth = decoder_depth self.decoder_num_heads = decoder_num_heads self.normalize_pixel_loss = normalize_pixel_loss self.mask_ratio = mask_ratio # we set the hidden_size attribute in order to make Hiera work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * embed_dim_multiplier ** (len(depths) - 1)) self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) __all__ = ["HieraConfig"]
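A short sketch of how the derived attributes of `HieraConfig` behave, assuming a `transformers` install that ships Hiera; the values in the comments follow from the defaults shown above:

```python
from transformers import HieraConfig

# Default configuration: embed_dim=96, depths=[2, 3, 16, 3], embed_dim_multiplier=2.0.
config = HieraConfig()

# hidden_size is derived as embed_dim * embed_dim_multiplier ** (num_stages - 1),
# i.e. 96 * 2.0 ** 3 = 768, the channel dimension after the last stage.
print(config.hidden_size)   # 768
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']

# When the model is used as a backbone, output stages can be requested by name;
# the matching indices are aligned automatically from the stage_names list.
backbone_config = HieraConfig(out_features=["stage2", "stage4"])
print(backbone_config.out_features, backbone_config.out_indices)
```

Note the two validation checks in `__init__`: `masked_unit_size[0]` must be divisible by `query_stride[0] ** (len(depths) - 1)`, and `num_query_pool` must be strictly smaller than the number of stages, otherwise a `ValueError` is raised.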
transformers/src/transformers/models/hiera/configuration_hiera.py/0
{ "file_path": "transformers/src/transformers/models/hiera/configuration_hiera.py", "repo_id": "transformers", "token_count": 3701 }
440
# coding=utf-8 # Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF IdeficsVision model: a copy of CLIPVisionModel using a simpler config object""" import math from dataclasses import dataclass from typing import Optional, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling from ...modeling_tf_utils import TFPreTrainedModel, shape_list from ...tf_utils import flatten from ...utils import ModelOutput, logging from .configuration_idefics import IdeficsVisionConfig logger = logging.get_logger(__name__) @dataclass class TFIdeficsVisionModelOutput(ModelOutput): """ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. Args: image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" image_embeds: Optional[tf.Tensor] = None last_hidden_state: Optional[tf.Tensor] = None hidden_states: Optional[tuple[tf.Tensor]] = None attentions: Optional[tuple[tf.Tensor]] = None class TFIdeficsVisionEmbeddings(tf.keras.layers.Layer): def __init__(self, config: IdeficsVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = tf.keras.layers.Conv2D( filters=self.embed_dim, kernel_size=self.patch_size, strides=self.patch_size, use_bias=False, padding="valid", data_format="channels_last", name="patch_embedding", ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = tf.keras.layers.Embedding( self.num_positions, self.embed_dim, name="position_embedding" ) # self.position_ids = tf.range(self.num_positions)[tf.newaxis, :] def interpolate_pos_encoding(self, embeddings: tf.Tensor, height: int, width: int) -> tf.Tensor: num_patches = shape_list(embeddings)[1] - 1 pos_embed = self.position_embedding(self.position_ids) num_positions = shape_list(pos_embed)[1] - 1 if num_patches == num_positions and height == width: return pos_embed class_pos_embed = pos_embed[:, 0] patch_pos_embed = pos_embed[:, 1:] embed_dim = shape_list(embeddings)[-1] num_h_patches = height // self.config.patch_size num_w_patches = width // self.config.patch_size num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1 sqrt_num_positions = math.sqrt(float(num_positions)) patch_pos_embed = tf.reshape(patch_pos_embed, (1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)) scale_height = num_h_patches / sqrt_num_positions scale_width = num_w_patches / sqrt_num_positions original_height = tf.cast(tf.shape(patch_pos_embed)[1], tf.float32) original_width = tf.cast(tf.shape(patch_pos_embed)[2], tf.float32) # Apply scaling new_height = tf.cast(original_height * scale_height, tf.int32) new_width = tf.cast(original_width * scale_width, tf.int32) patch_pos_embed = tf.image.resize( patch_pos_embed, size=[new_height, new_width], method=tf.image.ResizeMethod.BICUBIC ) if ( int(num_h_patches) != shape_list(patch_pos_embed)[-3] or int(num_w_patches) != shape_list(patch_pos_embed)[-2] ): raise ValueError( f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the " f"shape of position embedding ({shape_list(patch_pos_embed)[-2], shape_list(patch_pos_embed)[-1]})" ) patch_pos_embed = tf.reshape(patch_pos_embed, (1, -1, embed_dim)) return tf.concat((class_pos_embed[tf.newaxis, :], patch_pos_embed), axis=1) def call(self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False) -> tf.Tensor: # Input `pixel_values` is NCHW format which doesn't run on CPU so first thing we do is # transpose it to change it to NHWC. We don't care to transpose it back because # the Conv2D layer is only hit once for each query if isinstance(pixel_values, dict): pixel_values = pixel_values["pixel_values"] pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) batch_size, height, width, num_channels = shape_list(pixel_values) if not interpolate_pos_encoding: if height != self.image_size or width != self.image_size: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size}*{self.image_size}). 
You should try to set `interpolate_pos_encoding=True`" ) patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] # Change the 2D spatial dimensions to a single temporal dimension. # shape = (batch_size, num_patches, out_channels=embed_dim) patch_embeds = flatten(patch_embeds, 1, 2) class_embeds = tf.broadcast_to( self.class_embedding[tf.newaxis, tf.newaxis, :], [batch_size, 1, self.embed_dim] ) embeddings = tf.concat([class_embeds, patch_embeds], axis=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings def build(self, input_shape=None): if self.built: return self.built = True self.position_ids = tf.range(self.num_positions, name="self.position_ids")[tf.newaxis, :] self.class_embedding = self.add_weight(shape=(self.embed_dim,), name="class_embedding") if getattr(self, "patch_embedding", None) is not None: with tf.name_scope(self.patch_embedding.name): self.patch_embedding.build([None, None, None, self.config.num_channels]) if getattr(self, "position_embedding", None) is not None: with tf.name_scope(self.position_embedding.name): self.position_embedding.build(None) class TFIdeficsVisionAttention(tf.keras.layers.Layer): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = tf.keras.layers.Dense(self.embed_dim, name="k_proj") self.v_proj = tf.keras.layers.Dense(self.embed_dim, name="v_proj") self.q_proj = tf.keras.layers.Dense(self.embed_dim, name="q_proj") self.out_proj = tf.keras.layers.Dense(self.embed_dim, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, causal_attention_mask: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[tf.Tensor, Optional[tf.Tensor], Optional[tuple[tf.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.linalg.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( tf.shape(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f"Attention weights should be of size {[bsz * self.num_heads, tgt_len, src_len]}, but is {tf.shape(attn_weights)}", ) # apply the causal_attention_mask first if causal_attention_mask is not None: if shape_list(causal_attention_mask) != [bsz, 1, tgt_len, src_len]: raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(causal_attention_mask)}" ) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + causal_attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) if attention_mask is not None: if shape_list(attention_mask) != [bsz, 1, tgt_len, src_len]: raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}" ) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = tf.nn.softmax(attn_weights, axis=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) attn_weights = tf.reshape(attn_weights_reshaped, (bsz * self.num_heads, tgt_len, src_len)) else: attn_weights_reshaped = None attn_probs = tf.nn.dropout(attn_weights, rate=self.dropout) attn_output = tf.linalg.matmul(attn_probs, value_states) tf.debugging.assert_equal( tf.shape(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f"Attention weights should be of size {[bsz * self.num_heads, tgt_len, self.head_dim]}, but is {tf.shape(attn_output)}", ) attn_output = tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)) attn_output = tf.transpose(attn_output, perm=[0, 2, 1, 3]) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build((self.embed_dim, self.embed_dim)) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build((self.embed_dim, self.embed_dim)) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build((self.embed_dim, self.embed_dim)) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build((self.embed_dim, self.embed_dim)) class TFIdeficsVisionMLP(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.activation_fn = get_tf_activation(config.hidden_act) self.fc1 = tf.keras.layers.Dense(config.intermediate_size, name="fc1") self.fc2 = tf.keras.layers.Dense(config.hidden_size, name="fc2") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build(self.config.hidden_size) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build(self.config.intermediate_size) class TFIdeficsVisionEncoderLayer(tf.keras.layers.Layer): def __init__(self, config: IdeficsVisionConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.self_attn = TFIdeficsVisionAttention(config, name="self_attn") self.layer_norm1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") self.mlp = TFIdeficsVisionMLP(config, name="mlp") self.layer_norm2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[tf.Tensor]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm1", None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, self.embed_dim]) if getattr(self, "layer_norm2", None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, self.embed_dim]) class TFIdeficsVisionEncoder(tf.keras.layers.Layer): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`TFIdeficsVisionEncoderLayer`]. Args: config: IdeficsVisionConfig """ def __init__(self, config: IdeficsVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layers = [ TFIdeficsVisionEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers) ] self.gradient_checkpointing = False def call( self, inputs_embeds, attention_mask: Optional[tf.Tensor] = None, causal_attention_mask: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[tuple, TFBaseModelOutput]: r""" Args: inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = tf.recompute_grad( create_custom_forward(encoder_layer), hidden_states, attention_mask, causal_attention_mask, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFIdeficsVisionTransformer(TFPreTrainedModel): def __init__(self, config: IdeficsVisionConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.embed_dim = config.hidden_size self.embeddings = TFIdeficsVisionEmbeddings(config, name="embeddings") self.pre_layrnorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="pre_layrnorm") self.encoder = TFIdeficsVisionEncoder(config, name="encoder") self.post_layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm") # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward def call( self, pixel_values: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[tuple, TFBaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return 
TFBaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "pre_layrnorm", None) is not None: with tf.name_scope(self.pre_layrnorm.name): self.pre_layrnorm.build([None, None, self.embed_dim]) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "post_layernorm", None) is not None: with tf.name_scope(self.post_layernorm.name): self.post_layernorm.build([None, self.embed_dim])
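A rough usage sketch for the vision tower above. It is not tied to any particular checkpoint and assumes TensorFlow plus a `transformers` build that still ships the TF Idefics implementation; the shapes in the comments are indicative only:

```python
import tensorflow as tf

from transformers.models.idefics.configuration_idefics import IdeficsVisionConfig
from transformers.models.idefics.vision_tf import TFIdeficsVisionTransformer

config = IdeficsVisionConfig()  # image_size and patch_size determine the patch count
model = TFIdeficsVisionTransformer(config)

# Inputs are expected in NCHW layout; the embedding layer transposes to NHWC before
# the Conv2D patch embedding. At the native resolution no interpolation is needed.
pixel_values = tf.random.uniform((1, config.num_channels, config.image_size, config.image_size))
outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # (1, num_patches + 1, hidden_size)
print(outputs.pooler_output.shape)      # (1, hidden_size)

# For a different input resolution, the pre-trained position embedding can be
# interpolated instead of raising the size-mismatch error shown in the code above.
large = tf.random.uniform((1, config.num_channels, config.image_size * 2, config.image_size * 2))
outputs = model(large, interpolate_pos_encoding=True)
```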
transformers/src/transformers/models/idefics/vision_tf.py/0
{ "file_path": "transformers/src/transformers/models/idefics/vision_tf.py", "repo_id": "transformers", "token_count": 11476 }
441
# coding=utf-8 # Copyright 2025 HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import numpy as np from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput, concatenate_list, make_flat_list_of_images from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...video_utils import VideoInput, make_batched_videos class InternVLImagesKwargs(ImagesKwargs, total=False): crop_to_patches: Optional[bool] min_patches: Optional[int] max_patches: Optional[int] class InternVLProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: InternVLImagesKwargs _defaults = { "text_kwargs": { "padding_side": "left", "return_mm_token_type_ids": False, }, "images_kwargs": { "crop_to_patches": True, }, "videos_kwargs": {}, } class InternVLProcessor(ProcessorMixin): r""" Constructs a InternVL processor which wraps a [`AutoImageProcessor`] and [`PretrainedTokenizerFast`] tokenizer into a single processor that inherits both the image processor and tokenizer functionalities. See the [`~InternVLProcessor.__call__`] and [`~InternVLProcessor.decode`] for more information. Args: image_processor ([`AutoImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`], *optional*): The tokenizer is a required input. video_processor ([`AutoVideoProcessor`], *optional*): The video processor is a required input. image_seq_length (`int`, *optional*, defaults to 256): The number of image token to use per image patch. it should be set so that: image_seq_length = (config.image_size // config.patch_size) ** 2 * (config.scale_factor**2) chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. 
""" attributes = ["image_processor", "tokenizer", "video_processor"] image_processor_class = "AutoImageProcessor" video_processor_class = "AutoVideoProcessor" tokenizer_class = "AutoTokenizer" def __init__( self, image_processor=None, tokenizer=None, video_processor=None, image_seq_length: int = 256, chat_template=None, **kwargs, ): self.image_seq_length = image_seq_length self.start_image_token = tokenizer.start_image_token self.end_image_token = tokenizer.end_image_token self.start_image_token_id = tokenizer.start_image_token_id self.end_image_token_id = tokenizer.end_image_token_id self.image_token = tokenizer.context_image_token self.video_token = tokenizer.video_token self.image_token_id = tokenizer.context_image_token_id self.image_ids = [self.image_token_id, self.start_image_token_id, self.end_image_token_id] super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template, **kwargs) def _insert_media_placeholders( self, text: list[str], image_pixel_values, video_pixel_values, image_num_patches: list[int], video_num_patches: list[int], image_num_patches_indices: np.ndarray, video_num_patches_indices: np.ndarray, video_patch_indices: np.ndarray, ): """ Processes interleaved text with <image> and <video> placeholders, replacing them with appropriate image and video tokens while keeping track of the patches used. """ image_index = 0 video_index = 0 processed_text = [] image_video_patches = [] replace_strings = [] # Support interleaved image and video in prompts: # Processed patches of images and videos are inserted in `image_video_patches` in the order they appear in the prompts for prompt in text: new_prompt = prompt while self.image_token in new_prompt or self.video_token in new_prompt: if self.image_token in new_prompt and ( self.video_token not in new_prompt or new_prompt.index(self.image_token) < new_prompt.index(self.video_token) ): # Get the slice of patches corresponding to the current image start_index = image_num_patches_indices[image_index - 1] if image_index > 0 else 0 end_index = image_num_patches_indices[image_index] image_video_patches.append(image_pixel_values[start_index:end_index]) # Replace the corresponding image placeholder with the correct number of image tokens new_prompt = new_prompt.replace(self.image_token, "<placeholder>", 1) replace_strings.append( f"{self.start_image_token}{self.image_token * self.image_seq_length * image_num_patches[image_index]}{self.end_image_token}" ) image_index += 1 else: # Get the slice of patches corresponding to the current video # Here we need to account for both the multiple video frames and the potential multiple patches per frame # As of now, InternVL only supports one patch per frame, but we keep the code flexible for future updates current_patch_index = video_patch_indices[video_index - 1] if video_index > 0 else 0 end_patch_index = video_patch_indices[video_index] start_index = video_num_patches_indices[current_patch_index] if video_index > 0 else 0 end_index = video_num_patches_indices[end_patch_index - 1] image_video_patches.append(video_pixel_values[start_index:end_index]) # Get the number of patches per frame and replace the video placeholder with the correct number of image tokens num_patches = list(video_num_patches[current_patch_index:end_patch_index]) video_prompt = "\n".join( f"Frame{i + 1}: {self.start_image_token}{self.image_token * self.image_seq_length * num_patches[i]}{self.end_image_token}" for i in range(len(num_patches)) ) replace_strings.append(video_prompt) new_prompt = 
new_prompt.replace(self.video_token, "<placeholder>", 1) video_index += 1 while "<placeholder>" in new_prompt: replace_str = replace_strings.pop(0) new_prompt = new_prompt.replace("<placeholder>", replace_str, 1) processed_text.append(new_prompt) return processed_text, image_video_patches, image_index, video_index def __call__( self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None, audio=None, videos: Optional[VideoInput] = None, **kwargs: Unpack[InternVLProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text if `text` is not `None`, otherwise encode default OCR queries which depends on the `format`, `box`, `color`, `multi_page` and `crop_to_patches` arguments. To prepare the vision inputs, this method forwards the `images` and `kwrags` arguments to GotOcr2ImageProcessor's [`~GotOcr2ImageProcessor.__call__`] if `images` is not `None`. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. 
""" if text is None: raise ValueError("You have to specify text.") output_kwargs = self._merge_kwargs( InternVLProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if not isinstance(text, (list, tuple)): text = [text] # Process images and videos separately, as videos don't support crop_to_patches image_num_patches = [] video_num_patches = [] image_videos_inputs = {} image_pixel_values = None video_pixel_values = None image_num_patches_indices = np.array([0]) video_patch_indices = np.array([0]) video_num_patches_indices = np.array([0]) if images is not None: images = make_flat_list_of_images(images) image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"]) image_num_patches = image_inputs.pop("num_patches") image_pixel_values = image_inputs.pop("pixel_values") image_num_patches_indices = np.cumsum(image_num_patches) if videos is not None: videos = make_batched_videos(videos) video_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"]) video_pixel_values = video_inputs.pop("pixel_values_videos") # Obtain per frame information first and then flatten to (BS * T, ...) num_frames_per_video = [len(video) for video in video_pixel_values] video_num_patches = [1 for frames in num_frames_per_video for _ in range(frames)] video_patch_indices = np.cumsum(num_frames_per_video) video_num_patches_indices = np.cumsum(video_num_patches) video_pixel_values = video_pixel_values.flatten(0, 1) if images is not None or videos is not None: text, image_video_patches, image_index, video_index = self._insert_media_placeholders( text, image_pixel_values, video_pixel_values, image_num_patches, video_num_patches, image_num_patches_indices, video_num_patches_indices, video_patch_indices, ) if images is not None and image_index != len(images): raise ValueError("Number of image placeholders in the prompt does not match the number of images.") if videos is not None and video_index != len(videos): raise ValueError("Number of video placeholders in the prompt does not match the number of videos.") # Concatenate the interleaved image and video patches (function agnostic to the patches type (list, numpy array, torch tensor)) image_videos_inputs = {"pixel_values": concatenate_list(image_video_patches)} return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None) text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) self._check_special_mm_tokens(text, text_inputs, modalities=["image"]) if return_mm_token_type_ids: array_ids = np.array(text_inputs["input_ids"]) mm_token_type_ids = np.zeros_like(text_inputs["input_ids"]) mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1 text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist() return BatchFeature(data={**text_inputs, **image_videos_inputs}, tensor_type=return_tensors) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. 
""" vision_data = {} if image_sizes is not None: images_kwargs = InternVLProcessorKwargs._defaults.get("images_kwargs", {}) images_kwargs.update(kwargs) num_image_patches = [ self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes ] # Add 2 for BOI and EOI tokens num_image_tokens = [2 + (self.image_seq_length * num_patches) for num_patches in num_image_patches] vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches}) return MultiModalData(**vision_data) __all__ = ["InternVLProcessor"]
transformers/src/transformers/models/internvl/processing_internvl.py/0
{ "file_path": "transformers/src/transformers/models/internvl/processing_internvl.py", "repo_id": "transformers", "token_count": 6500 }
442
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_kyutai_speech_to_text.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 Kyutai and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import types from typing import Optional, Union import torch import torch.nn as nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationConfig, GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from ..auto import AutoModel from .configuration_kyutai_speech_to_text import KyutaiSpeechToTextConfig if is_flash_attn_available(): from ...modeling_flash_attention_utils import _flash_attention_forward if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) class KyutaiSpeechToTextRMSNorm(nn.Module): def __init__(self, dim: int, eps: float = 1e-6): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(dim)) # Ignore copy def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) # Ignore copy def forward(self, x): output = self._norm(x.float()) output = output * self.weight.float() return output.type_as(x) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.eps}" class KyutaiSpeechToTextFlexibleLinear(nn.Module): def __init__(self, input_size, output_size, num_layers): super().__init__() # Stack the weights for N layers into a single tensor (num_layers, output_size, input_size) self.weight = nn.Parameter(torch.randn(num_layers, output_size, input_size)) def forward(self, x, layer_idx=None): """ `KyutaiSpeechToTextFlexibleLinear` creates one linear layer per codebook. There's multiple ways to use it. In the default case, `sequence_length=num_layers`, so each element of the sequence will be matmul to the weights corresponding to its index on the sequence. 
For more advanced cases, one can specify which codebook's layer(s) to use with `layer_idx`. If `layer_idx` indicates a single integer, all of the element of the sequence will be matmul to this single codebook's layer. But if `layer_idx` is a tensor of shape `(seq_length,)`, it will matmul each i-th element of the input sequence to the corresponding layer `weight[i]`. Args: x (`torch.FloatTensor): input to the layer of shape `(batch, num_layers, embed_dim)` or of shape `(batch, seq_length, embed_dim)` layer_idx (`torch.Tensor`, *optional*): Can be used to specify which codebook's layers(s) to use. If it's a tensor of shape `(seq_length,)`, will matmul each element of the sequence to the corresponding weights. But if `layer_idx` is a tensor of shape `(seq_length,)`, it will matmul each i-th element of the input sequence to the corresponding layer `weight[i]`. """ # Use torch.gather to select the corresponding weights for each sample # (codebooks, output_size, hidden_size) selected_weights = torch.index_select(self.weight, 0, layer_idx) if layer_idx is not None else self.weight # (1, codebooks, hidden_size, output_size) selected_weights = selected_weights.transpose(1, 2)[None, :, :, :] # (batch_size, codebooks, 1, hidden_size) x (1, codebooks, hidden_size, output_size) # -> (batch_size, codebooks, 1, output_size) x = torch.matmul(x[:, :, None, :], selected_weights) # (batch_size, codebooks, output_size) return x.squeeze(2) @auto_docstring class KyutaiSpeechToTextPreTrainedModel(PreTrainedModel): config: KyutaiSpeechToTextConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["KyutaiSpeechToTextDecoderLayer", "MimiTransformerLayer"] _supports_flash_attn = True _supports_sdpa = True main_input_name = "input_ids" def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, KyutaiSpeechToTextFlexibleLinear): module.weight.data.normal_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, KyutaiSpeechToTextRMSNorm): module.weight.data.fill_(1.0) class KyutaiSpeechToTextConv1dPaddingCache: """ Padding cache for KyutaiSpeechToTextConv1d causal convolutions in order to support streaming via cache padding. See: https://arxiv.org/pdf/2005.06720 & https://arxiv.org/pdf/2204.07064 A padding cache is a list of cached partial hidden states for each convolution layer. Hidden states are cached from the previous call to the KyutaiSpeechToTextConv1d forward pass, given the padding size. 
""" def __init__( self, num_layers: int, per_layer_padding: list[int], per_layer_padding_mode: list[str], per_layer_in_channels: list[int], ): # ensure correct number of layers for each arg from_args_num_layers = {len(per_layer_padding), len(per_layer_padding_mode), len(per_layer_in_channels)} if len(from_args_num_layers) != 1 or from_args_num_layers.pop() != num_layers: raise ValueError( f"Expected `num_layers` ({num_layers}) values in `per_layer_padding`, `per_layer_padding_mode` and `per_layer_in_channels`" ) elif not all(mode in ["constant", "replicate"] for mode in per_layer_padding_mode): raise NotImplementedError( "`padding_cache` is not supported for convolutions using other than `constant` or `replicate` padding mode" ) self.per_layer_padding = per_layer_padding self.per_layer_padding_mode = per_layer_padding_mode self.per_layer_in_channels = per_layer_in_channels self.per_layer_is_init = [True] * num_layers self.padding_cache = [None] * num_layers def update(self, hidden_states: torch.Tensor, layer_idx: int): """ Updates the padding cache with the new padding states for the layer `layer_idx` and returns the current cache. Parameters: hidden_states (`torch.Tensor`): The hidden states to be partially cached. layer_idx (`int`): The index of the layer to cache the states for. Returns: `torch.Tensor` or `None`, the current padding cache. """ batch_size, dtype, device = hidden_states.shape[0], hidden_states.dtype, hidden_states.device padding = self.per_layer_padding[layer_idx] padding_mode = self.per_layer_padding_mode[layer_idx] in_channels = self.per_layer_in_channels[layer_idx] if self.padding_cache[layer_idx] is None: if padding_mode == "constant": current_cache = torch.zeros( batch_size, in_channels, padding, device=device, dtype=dtype, ) elif padding_mode == "replicate": current_cache = ( torch.ones( batch_size, in_channels, padding, device=device, dtype=dtype, ) * hidden_states[..., :1] ) else: current_cache = self.padding_cache[layer_idx] # update the cache if padding > 0: padding_states = hidden_states[:, :, -padding:] else: padding_states = torch.empty(batch_size, in_channels, padding, dtype=dtype, device=device) self.padding_cache[layer_idx] = padding_states return current_cache class KyutaiSpeechToTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.embed_tokens = nn.Embedding( config.vocab_size + (config.num_codebooks * config.codebook_vocab_size) + 1, config.hidden_size, padding_idx=config.audio_pad_token_id, ) audio_tokens_offsets = torch.arange(config.num_codebooks) * config.codebook_vocab_size audio_tokens_offsets += config.vocab_size audio_tokens_offsets = nn.functional.pad( audio_tokens_offsets, (1, 0) ) # pad one 0 to the left for the text token self.register_buffer("audio_tokens_offsets", audio_tokens_offsets, persistent=False) def forward(self, input_ids): input_ids = torch.where( input_ids == self.embed_tokens.padding_idx, input_ids, input_ids + self.audio_tokens_offsets ) inputs_embeds = self.embed_tokens(input_ids) inputs_embeds = inputs_embeds.sum(dim=2) return inputs_embeds class KyutaiSpeechToTextLinear(nn.Module): def __init__(self, input_dim, output_dim, num_codebooks, use_flexible_linear=False): super().__init__() self.use_flexible_linear = use_flexible_linear if not use_flexible_linear: self.linear = nn.Linear(input_dim, output_dim, bias=False) else: self.linear = KyutaiSpeechToTextFlexibleLinear(input_dim, output_dim, num_layers=num_codebooks) def forward(self, x, layer_idx=None): if self.use_flexible_linear: return 
self.linear(x, layer_idx) else: return self.linear(x) class KyutaiSpeechToTextRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: KyutaiSpeechToTextConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class KyutaiSpeechToTextGatingMLP(nn.Module): def __init__(self, config, use_flexible_linear=False): super().__init__() self.activation_fn = ACT2FN[config.hidden_act] ffn_dim = config.ffn_dim hidden_size = config.hidden_size num_layers = config.num_codebooks if use_flexible_linear else 1 if num_layers == 1: self.fc1 = nn.Linear(hidden_size, ffn_dim, bias=False) self.fc2 = nn.Linear(ffn_dim // 2, hidden_size, bias=False) else: self.fc1 = KyutaiSpeechToTextFlexibleLinear(hidden_size, ffn_dim, num_layers) self.fc2 = KyutaiSpeechToTextFlexibleLinear(ffn_dim // 2, hidden_size, num_layers) def forward(self, hidden_states: torch.Tensor, layer_idx: Optional[int] = None) -> torch.Tensor: hidden_states = self.fc1(hidden_states) if layer_idx is None else self.fc1(hidden_states, layer_idx) batch_size, sequence_length, _ = hidden_states.shape hidden_states = hidden_states.view(batch_size, sequence_length, 2, -1) hidden_states = self.activation_fn(hidden_states[..., 0, :]) * hidden_states[..., 1, :] hidden_states = self.fc2(hidden_states) if layer_idx is None else self.fc2(hidden_states, layer_idx) return hidden_states def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. 
For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class KyutaiSpeechToTextAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: KyutaiSpeechToTextConfig, layer_idx: Optional[int] = None, use_flexible_linear=False, use_rope=True, ): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.is_causal = True self.scaling = 1 / math.sqrt(self.head_dim) if self.hidden_size % self.num_heads != 0: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.q_proj = KyutaiSpeechToTextLinear( self.hidden_size, self.num_heads * self.head_dim, config.num_codebooks, use_flexible_linear ) self.k_proj = KyutaiSpeechToTextLinear( self.hidden_size, self.num_key_value_heads * self.head_dim, config.num_codebooks, use_flexible_linear ) self.v_proj = KyutaiSpeechToTextLinear( self.hidden_size, self.num_key_value_heads * self.head_dim, config.num_codebooks, use_flexible_linear ) self.o_proj = KyutaiSpeechToTextLinear( self.num_heads * self.head_dim, self.hidden_size, config.num_codebooks, use_flexible_linear ) # rotary embeddings are not used in the depth decoder self.rotary_emb = None if use_rope: self.rope_theta = config.rope_theta self.rotary_emb = KyutaiSpeechToTextRotaryEmbedding(config) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states, cache_position) # Ignore copy key_states = self.k_proj(hidden_states, cache_position) # Ignore copy value_states = self.v_proj(hidden_states, cache_position) # Ignore copy query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.rotary_emb is not None: # Ignore copy cos, sin = self.rotary_emb(value_states, position_ids) # Ignore copy query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) # Ignore copy if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = ( {"sin": sin, "cos": cos, "cache_position": cache_position} if self.rotary_emb is not None else {"cache_position": cache_position} ) # Ignore copy key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output, cache_position) # Ignore copy if not output_attentions: attn_weights = None return attn_output, attn_weights # NO LONGER EXIST Copied from 
transformers.models.gemma.modeling_gemma.GemmaFlashAttention2 with Gemma->KyutaiSpeechToText # TODO cyril: modular class KyutaiSpeechToTextFlashAttention2(KyutaiSpeechToTextAttention): """ KyutaiSpeechToText flash attention module. This module inherits from `KyutaiSpeechToTextAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if isinstance(past_key_values, StaticCache): raise ValueError( "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers" ) output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states, cache_position) # Ignore copy key_states = self.k_proj(hidden_states, cache_position) # Ignore copy value_states = self.v_proj(hidden_states, cache_position) # Ignore copy # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.rotary_emb is not None: # Ignore copy cos, sin = self.rotary_emb(value_states, position_ids) # Ignore copy query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) # Ignore copy if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = ( {"sin": sin, "cos": cos, "cache_position": cache_position} if self.rotary_emb is not None else {"cache_position": cache_position} ) # Ignore copy key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. 
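        # Illustrative shape note (example sizes assumed): with bsz=2, q_len=5, num_heads=8, head_dim=64,
        # query_states goes from (2, 8, 5, 64) to (2, 5, 8, 64) below, i.e. back to the
        # [batch_size, sequence_length, num_heads, head_dim] layout expected by the flash attention kernel.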
query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (KyutaiSpeechToTextRMSNorm handles it correctly) input_dtype = query_states.dtype device_type = query_states.device.type if query_states.device.type != "mps" else "cpu" if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, "sliding_window", None), is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output, cache_position) # Ignore copy if not output_attentions: attn_weights = None return attn_output, attn_weights # NO LONGER EXIST Copied from transformers.models.gemma.modeling_gemma.GemmaSdpaAttention with Gemma->KyutaiSpeechToText # TODO cyril: modular class KyutaiSpeechToTextSdpaAttention(KyutaiSpeechToTextAttention): """ KyutaiSpeechToText attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `KyutaiSpeechToTextAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Adapted from KyutaiSpeechToTextAttention.forward @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "KyutaiSpeechToTextModel is using KyutaiSpeechToTextSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states, cache_position) # Ignore copy key_states = self.k_proj(hidden_states, cache_position) # Ignore copy value_states = self.v_proj(hidden_states, cache_position) # Ignore copy query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.rotary_emb is not None: # Ignore copy cos, sin = self.rotary_emb(value_states, position_ids) # Ignore copy query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) # Ignore copy if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = ( {"sin": sin, "cos": cos, "cache_position": cache_position} if self.rotary_emb is not None else {"cache_position": cache_position} ) # Ignore copy key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. 
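        # Concretely: let SDPA apply its own causal masking only when no explicit mask was materialized
        # and more than one query token is being processed (a single decoding step needs no causal mask).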
is_causal = causal_mask is None and q_len > 1 attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output, cache_position) # Ignore copy return attn_output, None KYUTAI_SPEECH_TO_TEXT_ATTENTION_CLASSES = { "eager": KyutaiSpeechToTextAttention, "flash_attention_2": KyutaiSpeechToTextFlashAttention2, "sdpa": KyutaiSpeechToTextSdpaAttention, } class KyutaiSpeechToTextDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: KyutaiSpeechToTextConfig, layer_idx: int, use_flexible_linear: bool, use_rope=True): super().__init__() self.hidden_size = config.hidden_size self.use_flexible_linear = use_flexible_linear self.self_attn = KYUTAI_SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation]( config=config, layer_idx=layer_idx, use_flexible_linear=use_flexible_linear, use_rope=use_rope ) self.mlp = KyutaiSpeechToTextGatingMLP(config, use_flexible_linear) self.input_layernorm = KyutaiSpeechToTextRMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = KyutaiSpeechToTextRMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.sliding_window = config.sliding_window self._attn_implementation = config._attn_implementation @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = ( self.mlp(hidden_states) if not self.use_flexible_linear else self.mlp(hidden_states, cache_position) ) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs @auto_docstring class KyutaiSpeechToTextModel(KyutaiSpeechToTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = KyutaiSpeechToTextEmbeddings(config) self.layers = nn.ModuleList( [ KyutaiSpeechToTextDecoderLayer(config, layer_idx, use_flexible_linear=False) for layer_idx in range(config.num_hidden_layers) ] ) self.norm = KyutaiSpeechToTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = None if attention_mask is not None: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # embed positions hidden_states = inputs_embeds # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache if not isinstance(past_key_values, (type(None), Cache)): raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") if use_cache and past_key_values is None: past_key_values = DynamicCache() # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and past_key_values is not None: is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0] if is_padding_right: raise ValueError( "You are attempting to perform batched generation with padding_side='right'" " this may lead to unexpected behaviour for Flash Attention version of KyutaiSpeechToText. Make sure to " " call `tokenizer.padding_side = 'left'` before tokenizing the input. " ) if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
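        # From here on: read how many tokens are already cached, check whether SDPA can safely run without
        # an explicit mask, and otherwise build a 4D additive causal mask matching the cache length below.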
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, sliding_window=self.config.sliding_window, is_training=self.training, ): return None dtype = input_tensor.dtype min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] # StaticCache if using_static_cache: target_length = past_key_values.get_max_cache_shape() # DynamicCache or no cache else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], config=self.config, past_key_values=past_key_values, ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, config: KyutaiSpeechToTextConfig, past_key_values: Cache, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. config (`KyutaiSpeechToTextConfig`): The model's configuration class past_key_values (`Cache`): The cache class that is being used currently to generate """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
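            # "Inverted form" means an additive mask: 0.0 where attention is allowed and a large negative
            # value (torch.finfo(dtype).min) where it is blocked, matching what the else branch builds.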
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape( -1, 1 ) text_config = config.get_text_config() if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None: # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also # the check is needed to verify is current checkpoint was trained with sliding window or not is_static_sliding_cache = isinstance(past_key_values, StaticCache) and all(past_key_values.is_sliding) if not is_static_sliding_cache or sequence_length > target_length: sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= ( cache_position.reshape(-1, 1) - text_config.sliding_window ) diagonal_attend_mask.bitwise_or_(sliding_attend_mask) causal_mask *= diagonal_attend_mask causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit if attention_mask.shape[-1] > target_length: attention_mask = attention_mask[:, :target_length] mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask @auto_docstring class KyutaiSpeechToTextForConditionalGeneration(KyutaiSpeechToTextPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} _keep_in_fp32_modules_strict = ["codec_model"] def __init__(self, config): super().__init__(config) self.model = KyutaiSpeechToTextModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.codec_model = AutoModel.from_config(config.codec_config) # we are in an edge case where for the codec_model self.can_generate is False, setting self.codec_model.generation_config to None # yet the codec_model needs a generation config to initalize it's cache for streaming inference # we therefore initialize a generation config for the codec model self.codec_model.generation_config = GenerationConfig.from_model_config(config.codec_config) # Initialize weights and apply final processing self.post_init() def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> import torch >>> from datasets import load_dataset, Audio >>> from transformers import KyutaiSpeechToTextProcessor, KyutaiSpeechToTextForConditionalGeneration >>> torch_device = "cuda" if torch.cuda.is_available() else "cpu" >>> model_id = "kyutai/stt-2.6b-en-trfs" >>> processor = KyutaiSpeechToTextProcessor.from_pretrained(model_id) >>> model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(model_id, device_map=torch_device) >>> ds = load_dataset( ... "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" ... ) >>> ds = ds.cast_column("audio", Audio(sampling_rate=24000)) >>> inputs = processor( ... ds[0]["audio"]["array"], ... ) >>> inputs.to(torch_device) >>> output_tokens = model.generate(**inputs) >>> print(processor.batch_decode(output_tokens, skip_special_tokens=True)) ```""" outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _prepare_generation_config(self, *args, **kwargs): generation_config, model_kwargs = super()._prepare_generation_config(*args, **kwargs) # this should be passed to the model kwargs for the input preparation model_kwargs["audio_window_size"] = ( generation_config.audio_window_size if hasattr(generation_config, "audio_window_size") else None ) return generation_config, model_kwargs def _prepare_model_inputs( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[torch.Tensor] = None, model_kwargs: Optional[dict[str, torch.Tensor]] = None, ) -> tuple[torch.Tensor, Optional[str], dict[str, torch.Tensor]]: inputs, input_name, model_kwargs = super()._prepare_model_inputs( inputs=inputs, bos_token_id=bos_token_id, model_kwargs=model_kwargs, ) audio_window_size = model_kwargs.get("audio_window_size", None) if audio_window_size is None: audio_window_size = self.codec_model.get_encoded_length(model_kwargs["input_values"].shape[-1]).item() model_kwargs["audio_window_size"] = audio_window_size batch_size = inputs.shape[0] device = inputs.device # initialize audio tokens model_kwargs["audio_tokens"] = torch.zeros( (batch_size, audio_window_size, self.config.num_codebooks), device=device, dtype=torch.long, ) model_kwargs["current_window"] = ( torch.tensor([0, 0], device=device, dtype=torch.long).expand(batch_size, -1).contiguous() ) # let's use generate's cache preparation to prepare the cache for the codec model temporary_model_kwargs = {} # monkey patching the codec model with cache preparation methods since we don't want it to inherit fully from GenerationMixin # Add cache-related methods from GenerationMixin to codec model cache_methods = [ "_prepare_cache_for_generation", 
"_get_cache", ] for method in cache_methods: setattr(self.codec_model, method, types.MethodType(getattr(self, method).__func__, self.codec_model)) setattr( self.codec_model, "_supports_default_dynamic_cache", types.MethodType(lambda x: True, self.codec_model) ) self.codec_model.generation_config.cache_implementation = "dynamic" self.codec_model._prepare_cache_for_generation( generation_config=self.codec_model.generation_config, model_kwargs=temporary_model_kwargs, assistant_model=None, batch_size=batch_size, max_cache_length=self.config.codec_config.sliding_window, ) if "past_key_values" in temporary_model_kwargs: model_kwargs["encoder_past_key_values"] = temporary_model_kwargs["past_key_values"] # initialize the padding cache for the codec model per_layer_padding, per_layer_padding_mode, per_layer_in_channels = [], [], [] for layer_name in self.codec_model.encoder._mimiconv1d_layer_names: per_layer_padding.append(self.codec_model.encoder.get_submodule(layer_name).padding_total) per_layer_padding_mode.append(self.codec_model.encoder.get_submodule(layer_name).pad_mode) per_layer_in_channels.append(self.codec_model.encoder.get_submodule(layer_name).in_channels) # downsample layer per_layer_padding.append(self.codec_model.downsample.padding_total) per_layer_padding_mode.append(self.codec_model.downsample.pad_mode) per_layer_in_channels.append(self.codec_model.downsample.in_channels) model_kwargs["padding_cache"] = KyutaiSpeechToTextConv1dPaddingCache( num_layers=len(self.codec_model.encoder._mimiconv1d_layer_names) + 1, per_layer_padding=per_layer_padding, per_layer_padding_mode=per_layer_padding_mode, per_layer_in_channels=per_layer_in_channels, ) return inputs, input_name, model_kwargs def prepare_inputs_for_generation( self, *args, audio_tokens: Optional[torch.LongTensor] = None, input_values: Optional[torch.FloatTensor] = None, padding_mask: Optional[torch.Tensor] = None, audio_window_size: Optional[int] = None, current_window: Optional[tuple[int, int]] = None, encoder_past_key_values: Optional[Cache] = None, padding_cache: Optional[KyutaiSpeechToTextConv1dPaddingCache] = None, **kwargs, ): model_inputs = super().prepare_inputs_for_generation(*args, **kwargs) if input_values is not None: cache_position = model_inputs["cache_position"] start, end = current_window[0] # first cache position is for bos token, so we need to offset by -1 if cache_position[-1] - 1 >= end: # we need to encode the new audio tokens with torch.no_grad(): input_values_start_idx = start * self.config.frame_size input_values_end_idx = (start + audio_window_size) * self.config.frame_size current_input_values = input_values[..., input_values_start_idx:input_values_end_idx] codec_model_output = self.codec_model.encode( current_input_values, encoder_past_key_values=encoder_past_key_values, padding_cache=padding_cache, ) new_audio_tokens = codec_model_output.audio_codes.transpose(1, 2) audio_tokens.copy_(new_audio_tokens) start = end.clone() end = end + audio_window_size current_window.copy_( torch.tensor([start, end], device=current_window.device).expand(current_window.shape[0], -1) ) # first cache position is for bos token, so we need to offset by -1 current_audio_tokens_idxs = (cache_position - start - 1).clamp(min=0) current_audio_tokens = audio_tokens[:, current_audio_tokens_idxs, :] current_audio_tokens[:, cache_position == 0, :] = self.config.audio_bos_token_id input_ids = model_inputs.pop("input_ids") input_ids = torch.cat( [input_ids.unsqueeze(2), current_audio_tokens], dim=2, ) model_inputs["input_ids"] = 
input_ids return model_inputs # TODO: @eustlb, this should be standardized @classmethod def from_pretrained(cls, *args, **kwargs): if kwargs.get("output_loading_info", False): model, loading_info = super().from_pretrained(*args, **kwargs) else: model = super().from_pretrained(*args, **kwargs) # copy depth decoder generation conf attr to the depth decoder generation config prefix = "codec_" prefix_len = len(prefix) codec_model_attrs = { attr[prefix_len:]: value for attr, value in vars(model.generation_config).items() if attr.startswith(prefix) } vars(model.codec_model.generation_config).update({"_from_model_config": False, **codec_model_attrs}) # remove the depth decoder generation conf attr from the model generation config for attr in codec_model_attrs: delattr(model.generation_config, prefix + attr) if "output_loading_info" in kwargs: return model, loading_info else: return model # TODO: @eustlb, this should be standardized def save_pretrained(self, *args, **kwargs): prefix = "codec_" codec_model_attrs = self.codec_model.generation_config.to_diff_dict() codec_model_attrs.pop("transformers_version", None) for attr, value in codec_model_attrs.items(): setattr(self.generation_config, prefix + attr, value) super().save_pretrained(*args, **kwargs) def generate(self, *args, **kwargs): r""" This method forwards all its arguments to GenerationMixin's [`~GenerationMixin.generate`]. Please refer to the docstring of this method for more information. """ max_new_tokens = kwargs.pop("max_new_tokens", None) input_values = kwargs.get("input_values") # TODO: @eustlb, we should have per-batch-idx values # here we do not use padding_mask to be aligned to what's done in the original codebase max_audio_frames = input_values.shape[-1] // self.config.codec_config.frame_size if max_new_tokens is None or max_new_tokens > max_audio_frames: if max_new_tokens is not None: logger.warning( f"`max_new_tokens` ({max_new_tokens}) is greater than the maximum number of audio frames ({max_audio_frames})." f"Setting `max_new_tokens` to {max_audio_frames}." ) max_new_tokens = max_audio_frames return super().generate( *args, max_new_tokens=max_new_tokens, **kwargs, ) __all__ = [ "KyutaiSpeechToTextPreTrainedModel", "KyutaiSpeechToTextModel", "KyutaiSpeechToTextForConditionalGeneration", ]
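
# Illustrative note on the `generate` override above (numbers assumed for the example, not taken from a
# released checkpoint): with 48_000 input samples and `codec_config.frame_size == 1920`, the audio spans
# 48_000 // 1920 == 25 frames, so a user-supplied `max_new_tokens` larger than 25 would be clamped to 25.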
transformers/src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py", "repo_id": "transformers", "token_count": 28229 }
443
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for LayoutLMv2.""" import collections import os import sys import unicodedata from typing import Optional, Union from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). 
- **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`). """ def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens table = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P")) def subfinder(mylist, pattern): matches = [] indices = [] for idx, i in enumerate(range(len(mylist))): if mylist[i] == pattern[0] and mylist[i : i + len(pattern)] == pattern: matches.append(pattern) indices.append(idx) if matches: return matches[0], indices[0] else: return None, 0 class LayoutLMv2Tokenizer(PreTrainedTokenizer): r""" Construct a LayoutLMv2 tokenizer. Based on WordPiece. [`LayoutLMv2Tokenizer`] can be used to turn words, word-level bounding boxes and optional word labels to token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and optional `labels` (for token classification). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. [`LayoutLMv2Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the word-level bounding boxes into token-level bounding boxes. """ vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, model_max_length: int = 512, additional_special_tokens: Optional[list[str]] = None, **kwargs, ): sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token mask_token = AddedToken(mask_token, special=True) if isinstance(mask_token, str) else mask_token if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained" " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, model_max_length=model_max_length, additional_special_tokens=additional_special_tokens, **kwargs, ) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
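        Example (illustrative only; the token ids below are made up, and 101/102 are just the usual
        BERT-style ids for `[CLS]`/`[SEP]`):

        ```python
        # single sequence: [CLS] X [SEP]
        tokenizer.build_inputs_with_special_tokens([7, 8, 9])        # -> [101, 7, 8, 9, 102]
        # pair of sequences: [CLS] A [SEP] B [SEP]
        tokenizer.build_inputs_with_special_tokens([7, 8], [9, 10])  # -> [101, 7, 8, 102, 9, 10, 102]
        ```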
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None, word_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). 
text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." 
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[list[list[list[int]]]] = None, word_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = 
None, boxes: Optional[list[list[list[int]]]] = None, word_labels: Optional[list[list[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." ) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: Optional[bool] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[list[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. 
Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward padding_side=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING) def encode( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> list[int]: encoded_inputs = self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) return encoded_inputs["input_ids"] @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: 
Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, `__call__` should be used instead. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: list[int], token_boxes: list[list[int]], pair_ids: Optional[list[int]] = None, pair_token_boxes: Optional[list[list[int]]] = None, labels: Optional[list[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> tuple[list[int], list[int], list[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`List[List[int]]`, *optional*): Bounding boxes of the second sequence. labels (`List[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. 
Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None ): if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: error_msg = ( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " ) if truncation_strategy == TruncationStrategy.ONLY_FIRST: error_msg = ( error_msg + "Please select another truncation strategy than " f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." ) logger.error(error_msg) elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: logger.warning( "Be aware, overflowing tokens are not returned for the setting you have chosen," f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " "truncation strategy. So the returned list will always be empty even if some " "tokens have been removed." 
) for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_first'." ) return ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) def _pad( self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side: The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. 
Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F) or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) ): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer class WordpieceTokenizer: """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. Args: text: A single token or whitespace separated tokens. This should have already been passed through *BasicTokenizer*. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens __all__ = ["LayoutLMv2Tokenizer"]
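# The block below is an illustrative usage sketch, not part of the original module. It assumes a local
# WordPiece vocabulary at the hypothetical path "vocab.txt" and uses made-up words, boxes and labels to
# show how word-level inputs become token-level `input_ids`, `bbox` and `labels`.
if __name__ == "__main__":
    tokenizer = LayoutLMv2Tokenizer(vocab_file="vocab.txt")
    words = ["hello", "world"]
    boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # one 0-1000 normalized box per word
    word_labels = [1, 2]
    encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=8)
    print(encoding["input_ids"])  # token ids, including [CLS]/[SEP] and [PAD]
    print(encoding["bbox"])       # one bounding box per token (special tokens get cls/sep/pad boxes)
    print(encoding["labels"])     # first subword keeps the word label, remaining subwords get -100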
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Callable, Optional, Union import torch import torch.nn.functional as F from torch import nn from ...masking_utils import create_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ...utils.deprecation import deprecate_kwarg from ...utils.import_utils import is_causal_conv1d_available from ..bamba.modeling_bamba import apply_mask_to_padding_states from ..llama.modeling_llama import ( LlamaAttention, LlamaForCausalLM, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm, LlamaRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, ) from .configuration_lfm2 import Lfm2Config if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_fn, causal_conv1d_update = None, None kernel_modules = (causal_conv1d_fn, causal_conv1d_update) is_fast_path_available = all(kernel_modules) logger = logging.get_logger(__name__) class Lfm2RMSNorm(LlamaRMSNorm): pass class Lfm2RotaryEmbedding(LlamaRotaryEmbedding): pass class Lfm2MLP(nn.Module): def __init__(self, config: Lfm2Config): super().__init__() intermediate_size = config.intermediate_size if config.block_auto_adjust_ff_dim: intermediate_size = int(2 * intermediate_size / 3) # custom dim factor multiplier if config.block_ffn_dim_multiplier is not None: intermediate_size = int(config.block_ffn_dim_multiplier * intermediate_size) intermediate_size = config.block_multiple_of * ( (intermediate_size + config.block_multiple_of - 1) // config.block_multiple_of ) self.w1 = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.w3 = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.w2 = nn.Linear(intermediate_size, config.hidden_size, bias=False) def forward(self, x): return self.w2(F.silu(self.w1(x)) * self.w3(x)) class Lfm2HybridConvCache: """ Attention and conv cache for Lfm2. It stores the Key and Value states as a list of tensors, one for each layer. Attention layer cache shape: `[batch_size, num_heads, seq_len, head_dim]`. Conv layer cache shape: `[batch_size, hidden_size, L_cache-1]`. 
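    Example (illustrative only; `config`, `key_states` and `value_states` are placeholders you would
    build yourself, with `config` being an `Lfm2Config` that defines `layer_types` and `conv_L_cache`):

    ```python
    cache = Lfm2HybridConvCache(config, max_batch_size=2, dtype=torch.float32, device="cpu")
    # Attention layers grow lazily: new key/value states are concatenated along the sequence dim.
    key, value = cache.update(key_states, value_states, layer_idx=cache.first_attention_layer)
    # Conv layers instead reuse the pre-allocated `cache.conv_cache[layer_idx]` state in place.
    ```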
""" # Override @property existing in Cache max_batch_size = None is_compileable = False key_cache = None value_cache = None def __init__( self, config: Lfm2Config, max_batch_size: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, str, None] = None, ): self.key_cache = [] self.value_cache = [] self.max_batch_size = max_batch_size self.layer_types = config.layer_types self.first_attention_layer = self.layer_types.index("full_attention") self.conv_L_cache = config.conv_L_cache self._dtype = dtype self.conv_cache: list[torch.Tensor] = [] device = torch.device(device) if device is not None else None for _ in range(config.num_hidden_layers): conv_state = torch.zeros( self.max_batch_size, config.hidden_size, self.conv_L_cache, dtype=self._dtype, device=device, ) torch._dynamo.mark_static_address(conv_state) self.conv_cache.append(conv_state) def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. Parameters: key_states (`torch.Tensor`): The new key states to cache. value_states (`torch.Tensor`): The new value states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`. Return: A tuple containing the updated key and value states. """ # Update the cache if key_states is not None: if len(self.key_cache) <= layer_idx: # There may be skipped layers, fill them with empty lists for _ in range(len(self.key_cache), layer_idx): self.key_cache.append(torch.tensor([])) self.value_cache.append(torch.tensor([])) self.key_cache.append(key_states) self.value_cache.append(value_states) elif ( not self.key_cache[layer_idx].numel() # prefers not t.numel() to len(t) == 0 to export the model ): # fills previously skipped layers; checking for tensor causes errors self.key_cache[layer_idx] = key_states self.value_cache[layer_idx] = value_states else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) return self.key_cache[layer_idx], self.value_cache[layer_idx] def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" for layer_idx in range(len(self.key_cache)): device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.conv_cache[layer_idx].device self.conv_cache[layer_idx] = self.conv_cache[layer_idx].index_select(0, beam_idx.to(device)) def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. 
A layer index can be optionally passed.""" # take any layer that contains cache and not empty tensor layer_idx = self.first_attention_layer if self.layer_types[layer_idx] != "full_attention" else layer_idx if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0: return 0 return self.key_cache[layer_idx].shape[-2] def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]: """ Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for the given layer at `layer_idx`. The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns (i.e. sliding_window, chunk_size), for each layer. """ full_mask_kv_offset = 0 query_length = cache_position.shape[0] past_seen_tokens = self.get_seq_length() kv_length = query_length + past_seen_tokens return kv_length, full_mask_kv_offset def crop(self, max_length: int): """Crop the cache to the given length""" if max_length < 0: max_length = self.get_seq_length() - abs(max_length) if self.get_seq_length() <= max_length: return for idx in range(len(self.key_cache)): if self.key_cache[idx].numel(): self.key_cache[idx] = self.key_cache[idx][..., :max_length, :] self.value_cache[idx] = self.value_cache[idx][..., :max_length, :] def __len__(self) -> int: return len(self.key_cache) def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]: return self.key_cache[layer_idx], self.value_cache[layer_idx] def reset(self): for layer_idx in range(len(self.conv_cache)): # In-place ops prevent breaking the static address self.conv_cache[layer_idx].zero_() class Lfm2Attention(LlamaAttention): def __init__(self, config: Lfm2Config, layer_idx: int): super().__init__(config, layer_idx) self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.out_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.q_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps) self.k_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps) del self.o_proj del self.attention_dropout @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_layernorm(self.q_proj(hidden_states).view(*hidden_shape)).transpose(1, 2) key_states = self.k_layernorm(self.k_proj(hidden_states).view(*hidden_shape)).transpose(1, 2) value_states = self.v_proj(hidden_states).view(*hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = 
ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() output = self.out_proj(attn_output) return output, attn_weights class Lfm2ShortConv(nn.Module): def __init__( self, config: Lfm2Config, layer_idx: int, ): super().__init__() self.config = config self.layer_idx = layer_idx self.L_cache = config.conv_L_cache self.bias = config.conv_bias self.conv = nn.Conv1d( in_channels=config.hidden_size, out_channels=config.hidden_size, kernel_size=self.L_cache, groups=config.hidden_size, bias=self.bias, padding=self.L_cache - 1, ) self.in_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=self.bias) self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=self.bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def cuda_kernels_forward( self, x: torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): x = apply_mask_to_padding_states(x, attention_mask) BCx = self.in_proj(x).transpose(-1, -2) B, C, x = BCx.chunk(3, dim=-2) Bx = B * x conv_weights = self.conv.weight.view(self.conv.weight.size(0), self.conv.weight.size(2)) if past_key_values is not None and cache_position[0] > 0: conv_out = causal_conv1d_update( Bx.squeeze(-1), past_key_values.conv_cache[self.layer_idx], conv_weights, self.conv.bias, None, ) conv_out = conv_out.unsqueeze(-1) else: if past_key_values is not None: conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0)) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = causal_conv1d_fn(Bx, conv_weights, self.conv.bias, activation=None) y = C * conv_out y = self.out_proj(y.transpose(-1, -2).contiguous()) return y @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def slow_forward( self, x: torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): seqlen = x.shape[1] x = apply_mask_to_padding_states(x, attention_mask) BCx = self.in_proj(x).transpose(-1, -2) B, C, x = BCx.chunk(3, dim=-2) Bx = B * x if past_key_values is not None and cache_position[0] > 0: conv_state = past_key_values.conv_cache[self.layer_idx] cache_position = cache_position.clamp(0, self.L_cache - 1) conv_state = conv_state.roll(shifts=-1, dims=-1) conv_state[:, :, cache_position] = Bx.to(device=conv_state.device, dtype=conv_state.dtype) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = torch.sum(conv_state.to(Bx.device) * self.conv.weight[:, 0, :], dim=-1) if self.bias: conv_out += self.conv.bias conv_out = conv_out.unsqueeze(-1) else: if past_key_values is not None: conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0)) past_key_values.conv_cache[self.layer_idx].copy_(conv_state) conv_out = self.conv(Bx)[..., :seqlen] y = C * conv_out y = y.transpose(-1, -2).contiguous() y = self.out_proj(y) return y @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, past_key_values: Optional[Lfm2HybridConvCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): if is_fast_path_available and "cuda" in 
hidden_states.device.type and not torch._dynamo.is_compiling(): return self.cuda_kernels_forward(hidden_states, past_key_values, cache_position, attention_mask) return self.slow_forward(hidden_states, past_key_values, cache_position, attention_mask) class Lfm2DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Lfm2Config, layer_idx: int): super().__init__() self.is_attention_layer = config.layer_types[layer_idx] == "full_attention" if self.is_attention_layer: self.self_attn = Lfm2Attention(config, layer_idx) else: self.conv = Lfm2ShortConv(config, layer_idx) self.feed_forward = Lfm2MLP(config) self.operator_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) self.ffn_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> torch.Tensor: residual = hidden_states if self.is_attention_layer: hidden_states, _ = self.self_attn( hidden_states=self.operator_norm(hidden_states), position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) else: hidden_states = self.conv( hidden_states=self.operator_norm(hidden_states), past_key_values=past_key_values, cache_position=cache_position, attention_mask=attention_mask, ) hidden_states = hidden_states + residual hidden_states = hidden_states + self.feed_forward(self.ffn_norm(hidden_states)) return hidden_states class Lfm2PreTrainedModel(LlamaPreTrainedModel): _can_compile_fullgraph = False class Lfm2Model(LlamaModel): def __init__(self, config: Lfm2Config): super().__init__(config) self.pos_emb = Lfm2RotaryEmbedding(config) self.embedding_norm = Lfm2RMSNorm(config.hidden_size, eps=config.norm_eps) del self.norm del self.rotary_emb def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Lfm2HybridConvCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: batch_size = inputs_embeds.shape[0] past_key_values = Lfm2HybridConvCache( config=self.config, max_batch_size=batch_size, dtype=self.dtype, device=self.device ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds position_embeddings = self.pos_emb(hidden_states,
position_ids) # decoder layers for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.embedding_norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) class Lfm2ForCausalLM(LlamaForCausalLM): pass __all__ = ["Lfm2ForCausalLM", "Lfm2Model", "Lfm2PreTrainedModel"]
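# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it shows how the hybrid
# conv/attention cache defined above is exercised end to end through the Auto
# classes during generation. The checkpoint id is an assumption for illustration;
# substitute any LFM2 checkpoint available on the Hub.
def _example_lfm2_generation(checkpoint: str = "LiquidAI/LFM2-1.2B") -> str:
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint)

    inputs = tokenizer("The hybrid cache keeps", return_tensors="pt")
    # With use_cache=True the model builds an Lfm2HybridConvCache internally: conv
    # layers get a fixed-size conv_cache, full-attention layers get growing
    # key/value tensors.
    output_ids = model.generate(**inputs, max_new_tokens=20, use_cache=True)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)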
transformers/src/transformers/models/lfm2/modular_lfm2.py/0
{ "file_path": "transformers/src/transformers/models/lfm2/modular_lfm2.py", "repo_id": "transformers", "token_count": 9563 }
445
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import Optional from tokenizers import processors from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_llama import LlamaTokenizer else: LlamaTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"} B_INST, E_INST = "[INST]", "[/INST]" B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" # fmt: off DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\ that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \ correct. If you don't know the answer to a question, please don't share false information.""" # fmt: on class LlamaTokenizerFast(PreTrainedTokenizerFast): """ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. This uses notably ByteFallback and no normalization. ```python >>> from transformers import LlamaTokenizerFast >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer") >>> tokenizer.encode("Hello this is a test") [1, 15043, 445, 338, 263, 1243] ``` If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the values of the first token and final token of an encoded sequence will not be correct). For more details, checkout [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that contains the vocabulary necessary to instantiate a tokenizer. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`): The end of sequence token. add_bos_token (`bool`, *optional*, defaults to `True`): Whether or not to add an `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Llama should be used legacy (`bool`, *optional*): Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622 and #25224 which includes fixes to properly handle tokens that appear after special tokens. Make sure to also set `from_slow` to `True`. A simple example: - `legacy=True`: ```python >>> from transformers import LlamaTokenizerFast >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True) >>> tokenizer.encode("Hello <s>.") # 869 is '▁.' [1, 15043, 29871, 1, 869] ``` - `legacy=False`: ```python >>> from transformers import LlamaTokenizerFast >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True) >>> tokenizer.encode("Hello <s>.") # 29889 is '.' [1, 15043, 29871, 1, 29889] ``` Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details. add_prefix_space (`bool`, *optional*): Whether or not the tokenizer should automatically add a prefix space """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = LlamaTokenizer padding_side = "left" model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token="<unk>", bos_token="<s>", eos_token="</s>", add_bos_token=True, add_eos_token=False, use_default_system_prompt=False, legacy=None, add_prefix_space=None, **kwargs, ): if legacy is None: logger.warning_once( f"You are using the default legacy behaviour of the {self.__class__}. This is" " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you." " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it" " means, and thoroughly read the reason why this was added as explained in" " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file" " you can ignore this message." ) legacy = True self.legacy = legacy if add_prefix_space is not None: kwargs["from_slow"] = True super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, use_default_system_prompt=use_default_system_prompt, add_prefix_space=add_prefix_space, legacy=legacy, **kwargs, ) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() self.use_default_system_prompt = use_default_system_prompt self.vocab_file = vocab_file def update_post_processor(self): """ Updates the underlying post processor with the current `bos_token` and `eos_token`. 
""" bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError("add_bos_token = True but bos_token = None") eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError("add_eos_token = True but eos_token = None") single = f"{(bos + ':0 ') if self.add_bos_token else ''}$A:0{(' ' + eos + ':0') if self.add_eos_token else ''}" pair = f"{single}{(' ' + bos + ':1') if self.add_bos_token else ''} $B:1{(' ' + eos + ':1') if self.add_eos_token else ''}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing( single=single, pair=pair, special_tokens=special_tokens ) @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output __all__ = ["LlamaTokenizerFast"]
transformers/src/transformers/models/llama/tokenization_llama_fast.py/0
{ "file_path": "transformers/src/transformers/models/llama/tokenization_llama_fast.py", "repo_id": "transformers", "token_count": 4405 }
446
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert LLaVa-NeXT (LLaVa-1.6) checkpoints from the original repository. URL: https://github.com/haotian-liu/LLaVA/tree/main. The command used to obtain original logits is the following: python llava/eval/run_llava.py --model-path "liuhaotian/llava-v1.6-mistral-7b" --image-file "images/llava_v1_5_radar.jpg" --query "What is shown in this image?" --max_new_tokens 100 --temperature 0 Note: logits are tested with torch==2.1.2. """ import argparse import gc import glob import json from pathlib import Path import requests import torch from accelerate import init_empty_weights from huggingface_hub import hf_hub_download, snapshot_download from PIL import Image from safetensors import safe_open from transformers import ( AddedToken, AutoConfig, AutoTokenizer, LlavaNextConfig, LlavaNextForConditionalGeneration, LlavaNextImageProcessor, LlavaNextProcessor, ) KEYS_TO_MODIFY_MAPPING = { "model.vision_tower.": "", "model.mm_projector": "multi_modal_projector", "model": "model.model", "vision_model.model": "vision_model", "lm_head": "language_model.lm_head", "model.model": "language_model.model", "multi_modal_projector.0": "multi_modal_projector.linear_1", "multi_modal_projector.2": "multi_modal_projector.linear_2", "language_model.model.image_newline": "image_newline", } def load_original_state_dict(model_id): directory_path = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"]) original_state_dict = {} for path in glob.glob(f"{directory_path}/*"): if path.endswith(".safetensors"): with safe_open(path, framework="pt", device="cpu") as f: for key in f.keys(): original_state_dict[key] = f.get_tensor(key) return original_state_dict def convert_state_dict_to_hf(state_dict): new_state_dict = {} for key, value in state_dict.items(): if key.endswith(".inv_freq"): continue for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) new_state_dict[key] = value.to(torch.float16) return new_state_dict def load_image(): url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true" image = Image.open(requests.get(url, stream=True).raw) return image def convert_llava_to_hf(model_id, pytorch_dump_folder_path, push_to_hub=False): # load original config filepath = hf_hub_download(repo_id=model_id, filename="config.json", repo_type="model") # read json with open(filepath) as f: data = json.load(f) print(data) if model_id == "liuhaotian/llava-v1.6-mistral-7b": text_model_id = "mistralai/Mistral-7B-Instruct-v0.2" image_token_id = 32000 elif model_id == "liuhaotian/llava-v1.6-vicuna-7b": text_model_id = "lmsys/vicuna-7b-v1.5" image_token_id = 32000 elif model_id == "liuhaotian/llava-v1.6-vicuna-13b": text_model_id = "lmsys/vicuna-13b-v1.5" image_token_id = 32000 elif model_id == "liuhaotian/llava-v1.6-34b": text_model_id = "NousResearch/Nous-Hermes-2-Yi-34B" image_token_id = 
64000 elif model_id == "lmms-lab/llama3-llava-next-8b": text_model_id = "meta-llama/Meta-Llama-3-8B-Instruct" image_token_id = 128256 elif model_id == "lmms-lab/llava-next-72b": text_model_id = "Qwen/Qwen1.5-72B-Chat" image_token_id = 151646 elif model_id == "lmms-lab/llava-next-110b": text_model_id = "Qwen/Qwen1.5-110B-Chat" image_token_id = 151646 vision_model_id = data["mm_vision_tower"] torch.set_default_dtype(torch.float16) text_config = AutoConfig.from_pretrained(text_model_id) use_fast = model_id != "liuhaotian/llava-v1.6-34b" tokenizer = AutoTokenizer.from_pretrained(text_model_id, use_fast=use_fast) tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True) if model_id in ("liuhaotian/llava-v1.6-mistral-7b", "lmms-lab/llama3-llava-next-8b"): # Mistral-7B doesn't have a padding token set yet tokenizer.add_special_tokens({"pad_token": "<pad>"}) image_processor = LlavaNextImageProcessor.from_pretrained(vision_model_id) processor = LlavaNextProcessor(tokenizer=tokenizer, image_processor=image_processor) config = LlavaNextConfig( text_config=text_config.to_dict(), image_grid_pinpoints=image_processor.image_grid_pinpoints, use_image_newline_parameter=True, image_token_id=image_token_id, ) with init_empty_weights(): model = LlavaNextForConditionalGeneration(config) # load original state dict state_dict = load_original_state_dict(model_id) state_dict = convert_state_dict_to_hf(state_dict) model.load_state_dict(state_dict, assign=True) model.eval() pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data mu = torch.mean(pre_expansion_embeddings, dim=0).float() n = pre_expansion_embeddings.size()[0] sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma) # We add an image token so we resize the model # Pad to 64 for performance reasons # Qwen-based models have extra unused space in the vocab size already, so no need to resize if model_id not in ["lmms-lab/llava-next-72b", "lmms-lab/llava-next-110b"]: pad_shape = 64 vocab_size = config.text_config.vocab_size if model_id == "liuhaotian/llava-v1.6-34b": # this one has 3 additional tokens, namely <|startoftext|>, <|endoftext|> and <image> num_tokens = vocab_size + 3 else: # this one has 2 additional tokens, namely <image> and <pad> num_tokens = vocab_size + 2 model.resize_token_embeddings(num_tokens, pad_to_multiple_of=pad_shape) model.language_model.model.embed_tokens.weight.data[vocab_size:] = torch.stack( tuple( dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[vocab_size:].shape[0]) ), dim=0, ) model.language_model.lm_head.weight.data[vocab_size:] = torch.stack( tuple(dist.sample() for _ in range(model.language_model.lm_head.weight.data[vocab_size:].shape[0])), dim=0, ) print(f"Saving model and processor for {model_id} to {pytorch_dump_folder_path}") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) # Make space so we can load the model properly now. 
del state_dict gc.collect() # Load everything back for inference tests in float32 because prev script was written as that # Though it's mostly loaded in fp16 as original weights are in fp16 model = LlavaNextForConditionalGeneration.from_pretrained(pytorch_dump_folder_path, device_map="auto") processor = LlavaNextProcessor.from_pretrained(pytorch_dump_folder_path) device = model.device # prepare inputs image = load_image() if model_id == "liuhaotian/llava-v1.6-mistral-7b": prompt = "[INST] <image>\nWhat is shown in this image? [/INST]" elif model_id in ["liuhaotian/llava-v1.6-vicuna-7b", "liuhaotian/llava-v1.6-vicuna-13b"]: prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:" elif model_id == "liuhaotian/llava-v1.6-34b": prompt = "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n" elif model_id == "lmms-lab/llama3-llava-next-8b": prompt = "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.<|eot_id|><|start_header_id|><|start_header_id|>user<|end_header_id|>\n\n<image>\nWhat is shown in this image?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" elif model_id in ["lmms-lab/llava-next-72b", "lmms-lab/llava-next-110b"]: prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>\n<|im_start|>assistant\n" inputs = processor(images=image, text=prompt, return_tensors="pt") # verify inputs filepath = hf_hub_download(repo_id="nielsr/test-image", filename="llava_1_6_pixel_values.pt", repo_type="dataset") original_pixel_values = torch.load(filepath, map_location="cpu", weights_only=True) assert torch.allclose(original_pixel_values, inputs.pixel_values.half()) if model_id == "liuhaotian/llava-v1.6-mistral-7b": filepath = hf_hub_download(repo_id="nielsr/test-image", filename="llava_1_6_input_ids.pt", repo_type="dataset") original_input_ids = torch.load(filepath, map_location="cpu", weights_only=True) # replace -200 by image_token_id (since we use token ID = 32000 for the image token) original_input_ids[original_input_ids == -200] = image_token_id assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist() elif model_id == "liuhaotian/llava-v1.6-34b": filepath = hf_hub_download( repo_id="nielsr/test-image", filename="llava_1_6_34b_input_ids.pt", repo_type="dataset" ) original_input_ids = torch.load(filepath, map_location="cpu", weights_only=True) # replace -200 by image_token_id original_input_ids[original_input_ids == -200] = image_token_id assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist() image_sizes = torch.tensor([[899, 1024]]) assert image_sizes[0].tolist() == inputs.image_sizes[0].tolist() # verify single forward pass print("Single forward pass") with torch.inference_mode(): inputs = inputs.to(device) outputs = model(**inputs) print("Shape of logits:", outputs.logits.shape) print("First values of logits:", outputs.logits[0, :3, :3]) if model_id == "liuhaotian/llava-v1.6-mistral-7b": expected_slice = torch.tensor( [[-4.8555, -4.6992, -0.1996], [-10.5703, -10.7344, -2.7246], [-7.0391, -7.3672, -0.2634]], dtype=torch.float32, device=device, ) elif 
model_id == "liuhaotian/llava-v1.6-vicuna-7b": expected_slice = torch.tensor( [[1.4883, 0.9976, -0.6992], [-9.7031, -5.7031, -1.5557], [-5.1328, -5.5586, 8.8281]], dtype=torch.float32, device=device, ) elif model_id == "liuhaotian/llava-v1.6-vicuna-13b": expected_slice = torch.tensor( [[-0.9614, 7.3125, 0.2106], [-7.2695, -8.5469, 3.6211], [-6.3750, -8.1875, 5.4688]], dtype=torch.float32, device=device, ) elif model_id == "liuhaotian/llava-v1.6-34b": expected_slice = torch.tensor( [[-9.0859, -9.1406, 5.9453], [-5.9570, -5.9766, 2.2754], [-5.7305, -5.7539, 4.0000]], dtype=torch.float32, device=device, ) elif model_id == "lmms-lab/llama3-llava-next-8b": expected_slice = torch.tensor( [[-3.9648, 1.1396, 3.3145], [-5.3594, -1.5654, -1.9619], [-12.3750, -10.6797, -9.3125]], dtype=torch.float32, device=device, ) elif model_id == "lmms-lab/llava-next-72b": # Not yet checked against reference expected_slice = torch.tensor( [[3.7148, 3.9277, 3.4395], [-0.4341, 1.1387, 6.5117], [3.2324, 3.4688, 4.1133]], dtype=torch.float32, device=device, ) elif model_id == "lmms-lab/llava-next-110b": # Not yet checked against reference expected_slice = torch.tensor( [[-2.5449, -1.6738, -2.0371], [1.0811, 3.4961, 5.0312], [1.7803, 2.5137, 2.4277]], dtype=torch.float32, device=device, ) else: raise ValueError(f"Model {model_id} not supported") assert torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4) print("Logits are ok!") # verify generation output_ids = model.generate( **inputs, max_new_tokens=100, use_cache=True, ) generated_text = processor.batch_decode(output_ids, skip_special_tokens=True)[0].strip() print("Generated text:", repr(generated_text)) if model_id == "liuhaotian/llava-v1.6-mistral-7b": expected_text = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular radar chart, there are several axes labeled with different metrics or benchmarks, such as "MMM-Vet," "MMM-Bench," "LLaVA-Bench," "SLED-Bench," "' elif model_id == "liuhaotian/llava-v1.6-vicuna-7b": expected_text = """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\'s questions. USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a graphical representation of a benchmarking study comparing the performance of various models or systems. It\'s a scatter plot with a circular layout, where each point represents a different model or system, and the axes represent different metrics or dimensions of comparison.\n\nThe metrics are likely related to machine learning or artificial intelligence performance, as indicated by the terms like "BLIP-2," "Instruct BLIP," "POE," "QWA," "V""" elif model_id == "liuhaotian/llava-v1.6-vicuna-13b": expected_text = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: \nWhat is shown in this image? 
ASSISTANT: The image appears to be a radar chart, also known as a spider chart or star chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular radar chart, there are several variables represented:\n\n- MM-Vet\n- LLa-Va-Bench\n- SEED-Bench\n- MM" elif model_id == "liuhaotian/llava-v1.6-34b": expected_text = "<|im_start|> system\nAnswer the questions. <|im_start|> user\n\nWhat is shown in this image? <|im_start|> assistant\nThe image appears to be a radar chart, also known as a spider chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular chart, there are several datasets represented by different colors and labeled with various acronyms such as MM-Vet, LLaVA-Bench, SEED-Bench, MM-Bench-CN, MM-" elif model_id == "lmms-lab/llama3-llava-next-8b": expected_text = 'system\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.user\n\n\nWhat is shown in this image?assistant\n\n\nThe image shows a radar chart, also known as a spider chart or a web chart, which is a type of graph used to display multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point. Each axis represents a different variable, and the values are plotted along each axis and connected to form a polygon.\n\nIn this particular radar chart, there are several axes labeled with different variables, such as "MM-Vet," "LL' elif model_id == "lmms-lab/llava-next-72b": expected_text = "system\nYou are a helpful assistant.\nuser\n\nWhat is shown in this image?\nassistant\nThe image displays a radar chart, also known as a spider chart or a star chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point. Each axis represents a different variable, and the value of each variable is represented by the distance from the center of the chart to the point where the axis intersects with the line representing that variable's value.\n\nIn this particular chart, there are several axes" elif model_id == "lmms-lab/llava-next-110b": expected_text = "system\nYou are a helpful assistant.\nuser\n\nWhat is shown in this image?\nassistant\nThe image shows a radar chart comparing the performance of different models on various visual question answering (VQA) benchmarks. Each colored line represents a different model, and the distance from the center of the chart indicates the score or performance level of the model on a particular benchmark. The benchmarks are labeled around the edges of the chart, and include VQA v2, GQA, VizWiz, TextVQA, MMBench-CN, MME, and others. 
The chart allows for a" else: raise ValueError(f"Model {model_id} not supported") assert generated_text == expected_text print("Generated text is ok!") # verify batched generation print("Batched generation...") url = "http://images.cocodataset.org/val2017/000000039769.jpg" cats_image = Image.open(requests.get(url, stream=True).raw) inputs = processor( images=[image, cats_image], text=[prompt, prompt], padding=True, return_tensors="pt", ).to(device) for k, v in inputs.items(): print(k, v.shape) print("Image sizes:", inputs.image_sizes) # make sure image_sizes are the same # as otherwise batched generation doesn't work inputs.image_sizes[1] = inputs.image_sizes[0] print("Batched generation...") output_ids = model.generate( **inputs, max_new_tokens=20, use_cache=True, ) outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) print(outputs) if push_to_hub: checkpoint_name = model_id.split("/")[-1] print(f"Pushing to repo llava-hf/{checkpoint_name}-hf") model.push_to_hub(f"llava-hf/{checkpoint_name}-hf") processor.push_to_hub(f"llava-hf/{checkpoint_name}-hf") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_id", help="Hub location of the model to convert", default="liuhaotian/llava-v1.6-mistral-7b", choices=[ "liuhaotian/llava-v1.6-mistral-7b", "liuhaotian/llava-v1.6-vicuna-7b", "liuhaotian/llava-v1.6-vicuna-13b", "liuhaotian/llava-v1.6-34b", "lmms-lab/llama3-llava-next-8b", "lmms-lab/llava-next-72b", "lmms-lab/llava-next-110b", ], required=False, ) parser.add_argument( "--pytorch_dump_folder_path", type=str, required=True, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_llava_to_hf(args.model_id, args.pytorch_dump_folder_path, args.push_to_hub)
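# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): once a checkpoint has
# been converted and written to --pytorch_dump_folder_path, it can be reloaded
# for inference as below. The folder path is an assumption, and the prompt
# format mirrors the mistral-7b prompt used in the verification above.
def _example_inference_from_dump(dump_folder: str = "./llava_next_converted") -> str:
    processor = LlavaNextProcessor.from_pretrained(dump_folder)
    model = LlavaNextForConditionalGeneration.from_pretrained(dump_folder, device_map="auto")

    image = load_image()
    prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device)

    output_ids = model.generate(**inputs, max_new_tokens=50)
    return processor.batch_decode(output_ids, skip_special_tokens=True)[0]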
transformers/src/transformers/models/llava_next/convert_llava_next_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/llava_next/convert_llava_next_weights_to_hf.py", "repo_id": "transformers", "token_count": 8457 }
447
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from torch import nn from transformers import M2M100Config, M2M100ForConditionalGeneration def remove_ignore_keys_(state_dict): ignore_keys = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(k, None) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path): m2m_100 = torch.load(checkpoint_path, map_location="cpu", weights_only=True) args = m2m_100["args"] or m2m_100["cfg"]["model"] state_dict = m2m_100["model"] remove_ignore_keys_(state_dict) vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0] config = M2M100Config( vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", ) state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"] model = M2M100ForConditionalGeneration(config) model.model.load_state_dict(state_dict, strict=False) model.lm_head = make_linear_from_emb(model.model.shared) return model if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") args = parser.parse_args() model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
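# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): translating with a
# converted checkpoint. The dump folder path is an assumption, and the tokenizer
# is taken from the public facebook/m2m100_418M repo; the converted fairseq
# checkpoint must use a compatible vocabulary for this to line up.
def _example_translate(dump_folder: str = "./m2m100_converted") -> str:
    from transformers import M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
    model = M2M100ForConditionalGeneration.from_pretrained(dump_folder)

    tokenizer.src_lang = "en"
    encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    # forced_bos_token_id selects the target language for M2M100 generation.
    generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]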
transformers/src/transformers/models/m2m_100/convert_m2m100_original_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/m2m_100/convert_m2m100_original_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1226 }
448
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from transformers.models.maskformer.image_processing_maskformer_fast import MaskFormerImageProcessorFast from ...utils import ( TensorType, is_torch_available, logging, ) from .image_processing_mask2former import ( compute_segments, convert_segmentation_to_rle, remove_low_and_no_objects, ) if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) class Mask2FormerImageProcessorFast(MaskFormerImageProcessorFast): def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[list[tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, return_binary_maps: Optional[bool] = False, ) -> list[dict]: """ Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into instance segmentation predictions. Only supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps to `True` to get the correct segmentation result. Args: outputs ([`Mask2FormerForUniversalSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. return_binary_maps (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps (one per detected instance). 
Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if return_coco_annotation and return_binary_maps: raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.") # [batch_size, num_queries, num_classes+1] class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, height, width] masks_queries_logits = outputs.masks_queries_logits # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) device = masks_queries_logits.device num_classes = class_queries_logits.shape[-1] - 1 num_queries = class_queries_logits.shape[-2] # Loop over items in batch size results: list[dict[str, TensorType]] = [] for i in range(class_queries_logits.shape[0]): mask_pred = masks_queries_logits[i] mask_cls = class_queries_logits[i] scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1] labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor") mask_pred = mask_pred[topk_indices] pred_masks = (mask_pred > 0).float() # Calculate average mask prob mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / ( pred_masks.flatten(1).sum(1) + 1e-6 ) pred_scores = scores_per_image * mask_scores_per_image pred_classes = labels_per_image segmentation = torch.zeros((384, 384)) - 1 if target_sizes is not None: segmentation = torch.zeros(target_sizes[i]) - 1 pred_masks = torch.nn.functional.interpolate( pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest" )[0] instance_maps, segments = [], [] current_segment_id = 0 for j in range(num_queries): score = pred_scores[j].item() if not torch.all(pred_masks[j] == 0) and score >= threshold: segmentation[pred_masks[j] == 1] = current_segment_id segments.append( { "id": current_segment_id, "label_id": pred_classes[j].item(), "was_fused": False, "score": round(score, 6), } ) current_segment_id += 1 instance_maps.append(pred_masks[j]) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) # Return a concatenated tensor of binary instance maps if return_binary_maps and len(instance_maps) != 0: segmentation = torch.stack(instance_maps, dim=0) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[set[int]] = None, 
target_sizes: Optional[list[tuple[int, int]]] = None, ) -> list[dict]: """ Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentationOutput`]): The outputs from [`Mask2FormerForUniversalSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. 
No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: list[dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_segmentation(): raise NotImplementedError("Segmentation post-processing is not implemented for Mask2Former yet.") __all__ = ["Mask2FormerImageProcessorFast"]
transformers/src/transformers/models/mask2former/modular_mask2former.py/0
{ "file_path": "transformers/src/transformers/models/mask2former/modular_mask2former.py", "repo_id": "transformers", "token_count": 6727 }
449
""" This script allows you to convert MetaCLIP 2 (worldwide) checkpoints from the original repository to the Hugging Face format. URL: https://github.com/facebookresearch/MetaCLIP To convert: 1. git clone the MetaCLIP repository 2. place it in the same directory as this script 3. move the conversion script to the MetaCLIP repository. Then run the script with: ```bash cd MetaCLIP python convert_metaclip_2_to_hf.py --checkpoint_path /path/to/checkpoint --model_name ViT-H-14-quickgelu-worldwide ``` """ import argparse import os from typing import Optional import torch from PIL import Image # Import MetaCLIP modules from src.mini_clip.factory import create_model_and_transforms from transformers import ( AutoTokenizer, CLIPImageProcessor, CLIPProcessor, MetaClip2Config, MetaClip2Model, ) def load_metaclip2_checkpoint(checkpoint_path: str, model_name: str) -> torch.nn.Module: """Load MetaCLIP 2 model from checkpoint.""" print(f"Loading MetaCLIP 2 model: {model_name}") # For worldwide models, use WorldWideCLIP class model_name_with_class = model_name if "worldwide" in model_name.lower(): model_name_with_class = f"{model_name}@WorldWideCLIP" print("Using WorldWideCLIP class for worldwide model") # Create model using the factory model, _, preprocess = create_model_and_transforms(model_name_with_class, pretrained=checkpoint_path, device="cpu") model.eval() return model, preprocess def create_hf_config(tokenizer: AutoTokenizer, model_name: str) -> tuple[MetaClip2Config, int]: """Create Hugging Face MetaClip2Config from MetaCLIP model. This is based on the configs found at https://github.com/facebookresearch/MetaCLIP/tree/main/src/mini_clip/model_configs. """ print("Creating Hugging Face config...") # Vision config vision_configs = { "ViT-H-14-quickgelu-worldwide": { "image_size": 224, "patch_size": 14, "hidden_size": 1280, "intermediate_size": 1280 * 4, "num_attention_heads": 16, "num_hidden_layers": 32, "hidden_act": "quick_gelu", "projection_dim": 1024, }, "ViT-H-14-378-worldwide": { "image_size": 378, "patch_size": 14, "hidden_size": 1280, "intermediate_size": 1280 * 4, "num_attention_heads": 16, "num_hidden_layers": 32, "hidden_act": "gelu", "projection_dim": 1024, }, "ViT-bigG-14-worldwide": { "image_size": 224, "patch_size": 14, "hidden_size": 1664, "intermediate_size": 8192, "num_attention_heads": 16, "num_hidden_layers": 48, "hidden_act": "gelu", "projection_dim": 1280, }, "ViT-bigG-14-378-worldwide": { "image_size": 378, "patch_size": 14, "hidden_size": 1664, "intermediate_size": 8192, "num_attention_heads": 16, "num_hidden_layers": 48, "hidden_act": "gelu", "projection_dim": 1280, }, } vision_config = vision_configs[model_name] image_size = vision_config["image_size"] # Text config text_configs = { "ViT-H-14-quickgelu-worldwide": { "hidden_size": 1024, "intermediate_size": 1024 * 4, "num_attention_heads": 16, "num_hidden_layers": 24, "max_position_embeddings": 77, "vocab_size": 901629, "eos_token_id": tokenizer.eos_token_id, "hidden_act": "quick_gelu", "projection_dim": 1024, }, "ViT-H-14-378-worldwide": { "hidden_size": 1024, "intermediate_size": 1024 * 4, "num_attention_heads": 16, "num_hidden_layers": 24, "max_position_embeddings": 77, "vocab_size": 901629, "eos_token_id": tokenizer.eos_token_id, "hidden_act": "gelu", "projection_dim": 1024, }, "ViT-bigG-14-worldwide": { "hidden_size": 1280, "intermediate_size": 1280 * 4, "num_attention_heads": 20, "num_hidden_layers": 32, "max_position_embeddings": 77, "vocab_size": 901629, "eos_token_id": tokenizer.eos_token_id, "hidden_act": "gelu", 
"projection_dim": 1280, }, "ViT-bigG-14-378-worldwide": { "hidden_size": 1280, "intermediate_size": 1280 * 4, "num_attention_heads": 20, "num_hidden_layers": 32, "max_position_embeddings": 77, "vocab_size": 901629, "eos_token_id": tokenizer.eos_token_id, "hidden_act": "gelu", "projection_dim": 1280, }, } text_config = text_configs[model_name] projection_dim = text_config["projection_dim"] # Create config config = MetaClip2Config( vision_config=vision_config, text_config=text_config, projection_dim=projection_dim, ) return config, image_size def convert_state_dict(metaclip_state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]: """Convert MetaCLIP state dict to Hugging Face format.""" print("Converting state dict...") hf_state_dict = {} for key, value in metaclip_state_dict.items(): new_key = key # Handle specific mappings first before general prefix replacements if key == "visual.proj": new_key = "visual_projection.weight" # Don't transpose! MetaCLIP: x @ proj, HF: Linear(x) = x @ weight.T # So we want weight.T = proj, which means weight = proj.T # But since we're storing proj as weight, we need proj.T value = value.T # This gives us the correct orientation for Linear layer elif key == "text_projection": new_key = "text_projection.weight" # Same logic as visual projection value = value.T elif key == "token_embedding.weight": new_key = "text_model.embeddings.token_embedding.weight" elif key == "positional_embedding": new_key = "text_model.embeddings.position_embedding.weight" elif key == "ln_final.weight": new_key = "text_model.final_layer_norm.weight" elif key == "ln_final.bias": new_key = "text_model.final_layer_norm.bias" # Vision encoder mappings elif key.startswith("visual."): new_key = key.replace("visual.", "vision_model.") # Handle specific vision model components if "conv1" in new_key: new_key = new_key.replace("conv1", "embeddings.patch_embedding") elif "class_embedding" in new_key: new_key = new_key.replace("class_embedding", "embeddings.class_embedding") elif "positional_embedding" in new_key: new_key = new_key.replace("positional_embedding", "embeddings.position_embedding.weight") elif "ln_pre" in new_key: new_key = new_key.replace("ln_pre", "pre_layrnorm") elif "ln_post" in new_key: new_key = new_key.replace("ln_post", "post_layernorm") elif "transformer.resblocks" in new_key: new_key = new_key.replace("transformer.resblocks", "encoder.layers") # Handle attention and MLP mappings within transformer blocks if "attn.in_proj" in new_key: # Split the in_proj into q, k, v projections if "weight" in new_key: # We'll handle this later in a special case continue elif "bias" in new_key: continue elif "attn.out_proj" in new_key: new_key = new_key.replace("attn.out_proj", "self_attn.out_proj") elif "ln_1" in new_key: new_key = new_key.replace("ln_1", "layer_norm1") elif "ln_2" in new_key: new_key = new_key.replace("ln_2", "layer_norm2") elif "mlp.c_fc" in new_key: new_key = new_key.replace("mlp.c_fc", "mlp.fc1") elif "mlp.c_proj" in new_key: new_key = new_key.replace("mlp.c_proj", "mlp.fc2") # Text encoder mappings elif key.startswith("transformer."): new_key = key.replace("transformer.", "text_model.encoder.") if "resblocks" in new_key: new_key = new_key.replace("resblocks", "layers") # Similar mappings as vision transformer if "attn.in_proj" in new_key: continue # Handle separately elif "attn.out_proj" in new_key: new_key = new_key.replace("attn.out_proj", "self_attn.out_proj") elif "ln_1" in new_key: new_key = new_key.replace("ln_1", "layer_norm1") elif "ln_2" in new_key: 
new_key = new_key.replace("ln_2", "layer_norm2") elif "mlp.c_fc" in new_key: new_key = new_key.replace("mlp.c_fc", "mlp.fc1") elif "mlp.c_proj" in new_key: new_key = new_key.replace("mlp.c_proj", "mlp.fc2") hf_state_dict[new_key] = value # Handle in_proj weights separately (split into q, k, v) for key, value in metaclip_state_dict.items(): if "attn.in_proj_weight" in key: # Split the combined qkv weight into separate q, k, v weights dim = value.shape[0] // 3 q_weight = value[:dim] k_weight = value[dim : 2 * dim] v_weight = value[2 * dim :] base_key = key.replace("attn.in_proj_weight", "") if key.startswith("visual."): base_key = base_key.replace("visual.transformer.resblocks", "vision_model.encoder.layers") else: base_key = base_key.replace("transformer.resblocks", "text_model.encoder.layers") hf_state_dict[f"{base_key}self_attn.q_proj.weight"] = q_weight hf_state_dict[f"{base_key}self_attn.k_proj.weight"] = k_weight hf_state_dict[f"{base_key}self_attn.v_proj.weight"] = v_weight elif "attn.in_proj_bias" in key: # Split the combined qkv bias into separate q, k, v biases dim = value.shape[0] // 3 q_bias = value[:dim] k_bias = value[dim : 2 * dim] v_bias = value[2 * dim :] base_key = key.replace("attn.in_proj_bias", "") if key.startswith("visual."): base_key = base_key.replace("visual.transformer.resblocks", "vision_model.encoder.layers") else: base_key = base_key.replace("transformer.resblocks", "text_model.encoder.layers") hf_state_dict[f"{base_key}self_attn.q_proj.bias"] = q_bias hf_state_dict[f"{base_key}self_attn.k_proj.bias"] = k_bias hf_state_dict[f"{base_key}self_attn.v_proj.bias"] = v_bias return hf_state_dict def verify_conversion( original_model, hf_model, preprocess, image_processor, tokenizer, test_image_path: Optional[str] = None ) -> bool: """Verify that the conversion produces the same outputs.""" print("Verifying conversion...") # Create test image if test_image_path and os.path.exists(test_image_path): image = Image.open(test_image_path) else: # Create a dummy image image = Image.new("RGB", (224, 224), color="red") # Verify image processor processed_image = preprocess(image).unsqueeze(0) pixel_values = image_processor(image, return_tensors="pt").pixel_values print("Shape of pixel_values:", pixel_values.shape) print("Shape of processed_image:", processed_image.shape) assert torch.allclose(pixel_values, processed_image) # Use tokenizer to get input_ids texts = ["a cat", "a dog", "a bird"] token_inputs = tokenizer(texts, return_tensors="pt", padding="max_length", truncation=True, max_length=77) input_ids = token_inputs.input_ids print(f"Processed text shape: {input_ids.shape}") print(f"Processed image shape: {processed_image.shape}") with torch.no_grad(): # Original model outputs orig_image_features = original_model.encode_image(processed_image) orig_text_features = original_model.encode_text(input_ids) # Normalize and compute logits orig_image_features = orig_image_features / orig_image_features.norm(dim=-1, keepdim=True) orig_text_features = orig_text_features / orig_text_features.norm(dim=-1, keepdim=True) orig_logits = original_model.logit_scale.exp() * orig_image_features @ orig_text_features.T print(f"Original text features: {orig_text_features[0][:5].tolist()}") print(f"Original image features: {orig_image_features[0][:5].tolist()}") with torch.no_grad(): hf_outputs = hf_model(input_ids=input_ids, pixel_values=pixel_values) hf_logits = hf_outputs.logits_per_image # Debug: Check HF model features print(f"HF text features: {hf_outputs.text_embeds[0][:5].tolist()}") 
print(f"HF image features: {hf_outputs.image_embeds[0][:5].tolist()}") print(f"HF model EOS token ID: {hf_model.config.text_config.eos_token_id}") # Compare outputs print(f"Original logits: {orig_logits}") print(f"HF logits: {hf_logits}") print(f"Logit scale - Original: {original_model.logit_scale.exp():.6f}, HF: {hf_model.logit_scale.exp():.6f}") # Check if they're close if orig_logits.shape == hf_logits.shape and torch.allclose(orig_logits, hf_logits, atol=1e-4): print("✅ Conversion verified! Outputs match.") return True else: print("❌ Conversion failed! Outputs don't match.") if orig_logits.numel() > 0 and hf_logits.numel() > 0: print(f"Max difference: {(orig_logits - hf_logits).abs().max()}") return False def push_to_hub(hf_model: MetaClip2Model, processor: CLIPProcessor, repo_name: str): """Push the converted model to Hugging Face Hub.""" print(f"Pushing to hub: {repo_name}") try: hf_model.push_to_hub(repo_name) processor.push_to_hub(repo_name) print(f"✅ Successfully pushed to {repo_name}") except Exception as e: print(f"❌ Failed to push to hub: {e}") def main(): parser = argparse.ArgumentParser(description="Convert MetaCLIP 2 to Hugging Face format") parser.add_argument("--checkpoint_path", required=True, help="Path to MetaCLIP 2 checkpoint") parser.add_argument("--model_name", required=True, help="MetaCLIP model name (e.g., ViT-H-14-quickgelu-worldwide)") parser.add_argument("--output_dir", default="./converted_models", help="Output directory for converted model") parser.add_argument("--push_to_hub", action="store_true", help="Push to Hugging Face Hub") parser.add_argument("--hub_repo_name", help="Hub repository name") parser.add_argument("--test_image", help="Path to test image for verification") args = parser.parse_args() # Load original model original_model, preprocess = load_metaclip2_checkpoint(args.checkpoint_path, args.model_name) # Create HF config # Requires the tokenizer for the eos token id tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-v-base") config, image_size = create_hf_config(tokenizer=tokenizer, model_name=args.model_name) # Create processor image_processor = CLIPImageProcessor( size={"height": image_size, "width": image_size}, crop_size={"height": image_size, "width": image_size} ) processor = CLIPProcessor(image_processor=image_processor, tokenizer=tokenizer) # Create HF model hf_model = MetaClip2Model(config) # Convert state dict converted_state_dict = convert_state_dict(original_model.state_dict()) for name, param in hf_model.named_parameters(): print(name, param.shape) # Load converted weights hf_model.load_state_dict(converted_state_dict) # Verify conversion if not verify_conversion(original_model, hf_model, preprocess, image_processor, tokenizer, args.test_image): print("Conversion verification failed. Please check the conversion logic.") return # Save model locally if args.output_dir: os.makedirs(args.output_dir, exist_ok=True) hf_model.save_pretrained(args.output_dir) processor.save_pretrained(args.output_dir) # Push to hub if requested if args.push_to_hub and args.hub_repo_name: push_to_hub(hf_model, processor, args.hub_repo_name) if __name__ == "__main__": main()
transformers/src/transformers/models/metaclip_2/convert_metaclip_2_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/metaclip_2/convert_metaclip_2_to_hf.py", "repo_id": "transformers", "token_count": 7819 }
450
# coding=utf-8 # Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Mixtral model.""" from typing import Optional, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ...utils.deprecation import deprecate_kwarg from ...utils.generic import OutputRecorder from ..mistral.modeling_mistral import ( MistralAttention, MistralForCausalLM, MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel, MistralRMSNorm, MistralRotaryEmbedding, ) from .configuration_mixtral import MixtralConfig logger = logging.get_logger(__name__) def load_balancing_loss_func( gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None], num_experts: Optional[int] = None, top_k=2, attention_mask: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, int]: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: gate_logits: Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of shape [batch_size X sequence_length, num_experts]. num_experts: Number of experts top_k: The number of experts to route per-token, can be also interpreted as the `top-k` routing parameter. attention_mask (`torch.Tensor`, *optional*): The attention_mask used in forward function shape [batch_size X sequence_length] if not None. Returns: The auxiliary loss. 
""" if gate_logits is None or not isinstance(gate_logits, tuple): return 0 if isinstance(gate_logits, tuple): compute_device = gate_logits[0].device concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0) routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1) _, selected_experts = torch.topk(routing_weights, top_k, dim=-1) expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts) if attention_mask is None: # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.mean(expert_mask.float(), dim=0) # Compute the average probability of routing to these experts router_prob_per_expert = torch.mean(routing_weights, dim=0) else: batch_size, sequence_length = attention_mask.shape num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length) # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask expert_attention_mask = ( attention_mask[None, :, :, None, None] .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts)) .reshape(-1, top_k, num_experts) .to(compute_device) ) # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum( expert_attention_mask, dim=0 ) # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert router_per_expert_attention_mask = ( attention_mask[None, :, :, None] .expand((num_hidden_layers, batch_size, sequence_length, num_experts)) .reshape(-1, num_experts) .to(compute_device) ) # Compute the average probability of routing to these experts router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum( router_per_expert_attention_mask, dim=0 ) overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) return overall_loss * num_experts class MixtralBlockSparseTop2MLP(nn.Module): def __init__(self, config: MixtralConfig): super().__init__() self.ffn_dim = config.intermediate_size self.hidden_dim = config.hidden_size self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False) self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False) self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_states): current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states) current_hidden_states = self.w2(current_hidden_states) return current_hidden_states class MixtralSparseMoeBlock(nn.Module): """ This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation and memory on padding. 
""" def __init__(self, config): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok # gating self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False) self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config) for _ in range(self.num_experts)]) # Jitter parameters self.jitter_noise = config.router_jitter_noise def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ """ batch_size, sequence_length, hidden_dim = hidden_states.shape if self.training and self.jitter_noise > 0: hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) hidden_states = hidden_states.view(-1, hidden_dim) # router_logits: (batch * sequence_length, n_experts) router_logits = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) routing_weights /= routing_weights.sum(dim=-1, keepdim=True) # we cast back to the input dtype routing_weights = routing_weights.to(hidden_states.dtype) final_hidden_states = torch.zeros( (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device ) # One hot encode the selected experts to create an expert mask # this will be used to easily index which expert is going to be sollicitated expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() for expert_idx in expert_hit: expert_layer = self.experts[expert_idx] idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0)) # Index the correct hidden states and compute the expert hidden state for # the current expert. We need to make sure to multiply the output hidden # states by `routing_weights` on the corresponding tokens (top-1 and top-2) current_state = hidden_states[None, top_x].reshape(-1, hidden_dim) current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None] # However `index_add_` only support torch tensors for indexing so we'll use # the `top_x` tensor here. 
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) return final_hidden_states, router_logits class MixtralRMSNorm(MistralRMSNorm): pass class MixtralAttention(MistralAttention): pass class MixtralDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MixtralConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MixtralAttention(config, layer_idx) self.block_sparse_moe = MixtralSparseMoeBlock(config) self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, _ = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states, _ = self.block_sparse_moe(hidden_states) hidden_states = residual + hidden_states return hidden_states class MixtralRotaryEmbedding(MistralRotaryEmbedding): pass class MixtralPreTrainedModel(MistralPreTrainedModel): _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) _can_record_outputs = { "router_logits": OutputRecorder(MixtralSparseMoeBlock, index=1), "hidden_states": MixtralDecoderLayer, "attentions": MixtralAttention, } class MixtralModel(MistralModel): def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask causal_mask = mask_function( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) 
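        # NOTE: create_sliding_window_causal_mask is selected above only when `config.sliding_window`
        # is set; otherwise the standard causal mask builder is used.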
hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE last_hidden_state=hidden_states, past_key_values=past_key_values, ) class MixtralForCausalLM(MistralForCausalLM): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = MixtralModel(config) self.router_aux_loss_coef = config.router_aux_loss_coef self.num_experts = config.num_local_experts self.num_experts_per_tok = config.num_experts_per_tok def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_router_logits: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> MoeCausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, MixtralForCausalLM >>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: MoeModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_router_logits=output_router_logits, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **kwargs) aux_loss = None if output_router_logits: aux_loss = load_balancing_loss_func( outputs.router_logits, self.num_experts, self.num_experts_per_tok, attention_mask, ) if labels is not None: loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device return MoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits, ) class MixtralForSequenceClassification(MistralForSequenceClassification): pass class MixtralForTokenClassification(MistralForTokenClassification): pass class MixtralForQuestionAnswering(MistralForQuestionAnswering): pass __all__ = [ "MixtralForCausalLM", "MixtralForQuestionAnswering", "MixtralModel", "MixtralPreTrainedModel", "MixtralForSequenceClassification", "MixtralForTokenClassification", ]
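# A minimal numeric sketch (illustrative only, not executed as part of the module) of the
# top-2 routing normalization performed in `MixtralSparseMoeBlock.forward` above:
#
#   >>> import torch
#   >>> router_logits = torch.tensor([[2.0, 1.0, 0.0, -1.0]])  # one token, 4 experts
#   >>> weights = torch.softmax(router_logits, dim=-1)         # ~[0.64, 0.24, 0.09, 0.03]
#   >>> top_w, top_e = torch.topk(weights, k=2, dim=-1)        # experts 0 and 1 are selected
#   >>> top_w / top_w.sum(dim=-1, keepdim=True)                # renormalized to ~[0.73, 0.27]
#
# The two selected experts' outputs are scaled by these renormalized weights and summed back
# into the token's hidden state via `index_add_`.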
transformers/src/transformers/models/mixtral/modular_mixtral.py/0
{ "file_path": "transformers/src/transformers/models/mixtral/modular_mixtral.py", "repo_id": "transformers", "token_count": 7809 }
451
# coding=utf-8 # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE """TensorFlow 2.0 MobileViT model.""" from __future__ import annotations import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFImageClassifierOutputWithNoAttention, TFSemanticSegmenterOutputWithNoAttention, ) from ...modeling_tf_utils import ( TFPreTrainedModel, TFSequenceClassificationLoss, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import logging from .configuration_mobilevit import MobileViTConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "MobileViTConfig" # Base docstring _CHECKPOINT_FOR_DOC = "apple/mobilevit-small" _EXPECTED_OUTPUT_SHAPE = [1, 640, 8, 8] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "apple/mobilevit-small" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" def make_divisible(value: int, divisor: int = 8, min_value: int | None = None) -> int: """ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the original TensorFlow repo. It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py """ if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_value < 0.9 * value: new_value += divisor return int(new_value) class TFMobileViTConvLayer(keras.layers.Layer): def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias: bool = False, dilation: int = 1, use_normalization: bool = True, use_activation: bool | str = True, **kwargs, ) -> None: super().__init__(**kwargs) logger.warning( f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. 
If you wish " "to train/fine-tune this model, you need a GPU or a TPU" ) padding = int((kernel_size - 1) / 2) * dilation self.padding = keras.layers.ZeroPadding2D(padding) if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") self.convolution = keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", dilation_rate=dilation, groups=groups, use_bias=bias, name="convolution", ) if use_normalization: self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="normalization") else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = get_tf_activation(use_activation) elif isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act else: self.activation = None self.in_channels = in_channels self.out_channels = out_channels def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: padded_features = self.padding(features) features = self.convolution(padded_features) if self.normalization is not None: features = self.normalization(features, training=training) if self.activation is not None: features = self.activation(features) return features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convolution", None) is not None: with tf.name_scope(self.convolution.name): self.convolution.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: if hasattr(self.normalization, "name"): with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels]) class TFMobileViTInvertedResidual(keras.layers.Layer): """ Inverted residual block (MobileNetv2): https://huggingface.co/papers/1801.04381 """ def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1, **kwargs ) -> None: super().__init__(**kwargs) expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8) if stride not in [1, 2]: raise ValueError(f"Invalid stride {stride}.") self.use_residual = (stride == 1) and (in_channels == out_channels) self.expand_1x1 = TFMobileViTConvLayer( config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1, name="expand_1x1" ) self.conv_3x3 = TFMobileViTConvLayer( config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation, name="conv_3x3", ) self.reduce_1x1 = TFMobileViTConvLayer( config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False, name="reduce_1x1", ) def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: residual = features features = self.expand_1x1(features, training=training) features = self.conv_3x3(features, training=training) features = self.reduce_1x1(features, training=training) return residual + features if self.use_residual else features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "expand_1x1", None) is not None: with tf.name_scope(self.expand_1x1.name): self.expand_1x1.build(None) if getattr(self, "conv_3x3", None) is not None: with tf.name_scope(self.conv_3x3.name): self.conv_3x3.build(None) if getattr(self, "reduce_1x1", None) is not None: with tf.name_scope(self.reduce_1x1.name): self.reduce_1x1.build(None) class 
TFMobileViTMobileNetLayer(keras.layers.Layer): def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1, **kwargs, ) -> None: super().__init__(**kwargs) self.layers = [] for i in range(num_stages): layer = TFMobileViTInvertedResidual( config, in_channels=in_channels, out_channels=out_channels, stride=stride if i == 0 else 1, name=f"layer.{i}", ) self.layers.append(layer) in_channels = out_channels def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: for layer_module in self.layers: features = layer_module(features, training=training) return features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer_module in self.layers: with tf.name_scope(layer_module.name): layer_module.build(None) class TFMobileViTSelfAttention(keras.layers.Layer): def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None: super().__init__(**kwargs) if hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size scale = tf.cast(self.attention_head_size, dtype=tf.float32) self.scale = tf.math.sqrt(scale) self.query = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="query") self.key = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="key") self.value = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="value") self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.hidden_size = hidden_size def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor: batch_size = tf.shape(x)[0] x = tf.reshape(x, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: batch_size = tf.shape(hidden_states)[0] key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(self.query(hidden_states)) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) attention_scores = attention_scores / self.scale # Normalize the attention scores to probabilities. attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape(context_layer, shape=(batch_size, -1, self.all_head_size)) return context_layer def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.hidden_size]) class TFMobileViTSelfOutput(keras.layers.Layer): def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None: super().__init__(**kwargs) self.dense = keras.layers.Dense(hidden_size, name="dense") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.hidden_size = hidden_size def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.hidden_size]) class TFMobileViTAttention(keras.layers.Layer): def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None: super().__init__(**kwargs) self.attention = TFMobileViTSelfAttention(config, hidden_size, name="attention") self.dense_output = TFMobileViTSelfOutput(config, hidden_size, name="output") def prune_heads(self, heads): raise NotImplementedError def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: self_outputs = self.attention(hidden_states, training=training) attention_output = self.dense_output(self_outputs, training=training) return attention_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFMobileViTIntermediate(keras.layers.Layer): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None: super().__init__(**kwargs) self.dense = keras.layers.Dense(intermediate_size, name="dense") if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.hidden_size = hidden_size def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.hidden_size]) class TFMobileViTOutput(keras.layers.Layer): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None: super().__init__(**kwargs) self.dense = keras.layers.Dense(hidden_size, name="dense") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.intermediate_size = 
intermediate_size def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = hidden_states + input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.intermediate_size]) class TFMobileViTTransformerLayer(keras.layers.Layer): def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None: super().__init__(**kwargs) self.attention = TFMobileViTAttention(config, hidden_size, name="attention") self.intermediate = TFMobileViTIntermediate(config, hidden_size, intermediate_size, name="intermediate") self.mobilevit_output = TFMobileViTOutput(config, hidden_size, intermediate_size, name="output") self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before") self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after") self.hidden_size = hidden_size def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: attention_output = self.attention(self.layernorm_before(hidden_states), training=training) hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.mobilevit_output(layer_output, hidden_states, training=training) return layer_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "mobilevit_output", None) is not None: with tf.name_scope(self.mobilevit_output.name): self.mobilevit_output.build(None) if getattr(self, "layernorm_before", None) is not None: with tf.name_scope(self.layernorm_before.name): self.layernorm_before.build([None, None, self.hidden_size]) if getattr(self, "layernorm_after", None) is not None: with tf.name_scope(self.layernorm_after.name): self.layernorm_after.build([None, None, self.hidden_size]) class TFMobileViTTransformer(keras.layers.Layer): def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int, **kwargs) -> None: super().__init__(**kwargs) self.layers = [] for i in range(num_stages): transformer_layer = TFMobileViTTransformerLayer( config, hidden_size=hidden_size, intermediate_size=int(hidden_size * config.mlp_ratio), name=f"layer.{i}", ) self.layers.append(transformer_layer) def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: for layer_module in self.layers: hidden_states = layer_module(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer_module in self.layers: with tf.name_scope(layer_module.name): layer_module.build(None) class TFMobileViTLayer(keras.layers.Layer): """ MobileViT block: https://huggingface.co/papers/2110.02178 """ def __init__( self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, hidden_size: int, num_stages: int, dilation: int = 1, **kwargs, ) -> None: 
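        # Layout of the block built below: an optional inverted-residual downsampling step
        # (when stride == 2), local kxk and 1x1 convolutions, a transformer applied to
        # unfolded patches, then a 1x1 projection and a fusion convolution over the
        # concatenation with the residual.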
super().__init__(**kwargs) self.patch_width = config.patch_size self.patch_height = config.patch_size if stride == 2: self.downsampling_layer = TFMobileViTInvertedResidual( config, in_channels=in_channels, out_channels=out_channels, stride=stride if dilation == 1 else 1, dilation=dilation // 2 if dilation > 1 else 1, name="downsampling_layer", ) in_channels = out_channels else: self.downsampling_layer = None self.conv_kxk = TFMobileViTConvLayer( config, in_channels=in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size, name="conv_kxk", ) self.conv_1x1 = TFMobileViTConvLayer( config, in_channels=in_channels, out_channels=hidden_size, kernel_size=1, use_normalization=False, use_activation=False, name="conv_1x1", ) self.transformer = TFMobileViTTransformer( config, hidden_size=hidden_size, num_stages=num_stages, name="transformer" ) self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") self.conv_projection = TFMobileViTConvLayer( config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1, name="conv_projection" ) self.fusion = TFMobileViTConvLayer( config, in_channels=2 * in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size, name="fusion", ) self.hidden_size = hidden_size def unfolding(self, features: tf.Tensor) -> tuple[tf.Tensor, dict]: patch_width, patch_height = self.patch_width, self.patch_height patch_area = tf.cast(patch_width * patch_height, "int32") batch_size = tf.shape(features)[0] orig_height = tf.shape(features)[1] orig_width = tf.shape(features)[2] channels = tf.shape(features)[3] new_height = tf.cast(tf.math.ceil(orig_height / patch_height) * patch_height, "int32") new_width = tf.cast(tf.math.ceil(orig_width / patch_width) * patch_width, "int32") interpolate = new_width != orig_width or new_height != orig_height if interpolate: # Note: Padding can be done, but then it needs to be handled in attention function. 
features = tf.image.resize(features, size=(new_height, new_width), method="bilinear") # number of patches along width and height num_patch_width = new_width // patch_width num_patch_height = new_height // patch_height num_patches = num_patch_height * num_patch_width # convert from shape (batch_size, orig_height, orig_width, channels) # to the shape (batch_size * patch_area, num_patches, channels) features = tf.transpose(features, [0, 3, 1, 2]) patches = tf.reshape( features, (batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width) ) patches = tf.transpose(patches, [0, 2, 1, 3]) patches = tf.reshape(patches, (batch_size, channels, num_patches, patch_area)) patches = tf.transpose(patches, [0, 3, 2, 1]) patches = tf.reshape(patches, (batch_size * patch_area, num_patches, channels)) info_dict = { "orig_size": (orig_height, orig_width), "batch_size": batch_size, "channels": channels, "interpolate": interpolate, "num_patches": num_patches, "num_patches_width": num_patch_width, "num_patches_height": num_patch_height, } return patches, info_dict def folding(self, patches: tf.Tensor, info_dict: dict) -> tf.Tensor: patch_width, patch_height = self.patch_width, self.patch_height patch_area = int(patch_width * patch_height) batch_size = info_dict["batch_size"] channels = info_dict["channels"] num_patches = info_dict["num_patches"] num_patch_height = info_dict["num_patches_height"] num_patch_width = info_dict["num_patches_width"] # convert from shape (batch_size * patch_area, num_patches, channels) # back to shape (batch_size, channels, orig_height, orig_width) features = tf.reshape(patches, (batch_size, patch_area, num_patches, -1)) features = tf.transpose(features, perm=(0, 3, 2, 1)) features = tf.reshape( features, (batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width) ) features = tf.transpose(features, perm=(0, 2, 1, 3)) features = tf.reshape( features, (batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width) ) features = tf.transpose(features, perm=(0, 2, 3, 1)) if info_dict["interpolate"]: features = tf.image.resize(features, size=info_dict["orig_size"], method="bilinear") return features def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: # reduce spatial dimensions if needed if self.downsampling_layer: features = self.downsampling_layer(features, training=training) residual = features # local representation features = self.conv_kxk(features, training=training) features = self.conv_1x1(features, training=training) # convert feature map to patches patches, info_dict = self.unfolding(features) # learn global representations patches = self.transformer(patches, training=training) patches = self.layernorm(patches) # convert patches back to feature maps features = self.folding(patches, info_dict) features = self.conv_projection(features, training=training) features = self.fusion(tf.concat([residual, features], axis=-1), training=training) return features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv_kxk", None) is not None: with tf.name_scope(self.conv_kxk.name): self.conv_kxk.build(None) if getattr(self, "conv_1x1", None) is not None: with tf.name_scope(self.conv_1x1.name): self.conv_1x1.build(None) if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, 
None, self.hidden_size]) if getattr(self, "conv_projection", None) is not None: with tf.name_scope(self.conv_projection.name): self.conv_projection.build(None) if getattr(self, "fusion", None) is not None: with tf.name_scope(self.fusion.name): self.fusion.build(None) if getattr(self, "downsampling_layer", None) is not None: with tf.name_scope(self.downsampling_layer.name): self.downsampling_layer.build(None) class TFMobileViTEncoder(keras.layers.Layer): def __init__(self, config: MobileViTConfig, **kwargs) -> None: super().__init__(**kwargs) self.config = config self.layers = [] # segmentation architectures like DeepLab and PSPNet modify the strides # of the classification backbones dilate_layer_4 = dilate_layer_5 = False if config.output_stride == 8: dilate_layer_4 = True dilate_layer_5 = True elif config.output_stride == 16: dilate_layer_5 = True dilation = 1 layer_1 = TFMobileViTMobileNetLayer( config, in_channels=config.neck_hidden_sizes[0], out_channels=config.neck_hidden_sizes[1], stride=1, num_stages=1, name="layer.0", ) self.layers.append(layer_1) layer_2 = TFMobileViTMobileNetLayer( config, in_channels=config.neck_hidden_sizes[1], out_channels=config.neck_hidden_sizes[2], stride=2, num_stages=3, name="layer.1", ) self.layers.append(layer_2) layer_3 = TFMobileViTLayer( config, in_channels=config.neck_hidden_sizes[2], out_channels=config.neck_hidden_sizes[3], stride=2, hidden_size=config.hidden_sizes[0], num_stages=2, name="layer.2", ) self.layers.append(layer_3) if dilate_layer_4: dilation *= 2 layer_4 = TFMobileViTLayer( config, in_channels=config.neck_hidden_sizes[3], out_channels=config.neck_hidden_sizes[4], stride=2, hidden_size=config.hidden_sizes[1], num_stages=4, dilation=dilation, name="layer.3", ) self.layers.append(layer_4) if dilate_layer_5: dilation *= 2 layer_5 = TFMobileViTLayer( config, in_channels=config.neck_hidden_sizes[4], out_channels=config.neck_hidden_sizes[5], stride=2, hidden_size=config.hidden_sizes[2], num_stages=3, dilation=dilation, name="layer.4", ) self.layers.append(layer_5) def call( self, hidden_states: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True, training: bool = False, ) -> tuple | TFBaseModelOutput: all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layers): hidden_states = layer_module(hidden_states, training=training) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer_module in self.layers: with tf.name_scope(layer_module.name): layer_module.build(None) @keras_serializable class TFMobileViTMainLayer(keras.layers.Layer): config_class = MobileViTConfig def __init__(self, config: MobileViTConfig, expand_output: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.expand_output = expand_output self.conv_stem = TFMobileViTConvLayer( config, in_channels=config.num_channels, out_channels=config.neck_hidden_sizes[0], kernel_size=3, stride=2, name="conv_stem", ) self.encoder = TFMobileViTEncoder(config, name="encoder") if self.expand_output: self.conv_1x1_exp = TFMobileViTConvLayer( config, in_channels=config.neck_hidden_sizes[5], out_channels=config.neck_hidden_sizes[6], kernel_size=1, name="conv_1x1_exp", ) 
self.pooler = keras.layers.GlobalAveragePooling2D(data_format="channels_first", name="pooler") def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, pixel_values: tf.Tensor | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple[tf.Tensor] | TFBaseModelOutputWithPooling: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) embedding_output = self.conv_stem(pixel_values, training=training) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) if self.expand_output: last_hidden_state = self.conv_1x1_exp(encoder_outputs[0]) # Change to NCHW output format to have uniformity in the modules last_hidden_state = tf.transpose(last_hidden_state, perm=[0, 3, 1, 2]) # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels) pooled_output = self.pooler(last_hidden_state) else: last_hidden_state = encoder_outputs[0] # Change to NCHW output format to have uniformity in the modules last_hidden_state = tf.transpose(last_hidden_state, perm=[0, 3, 1, 2]) pooled_output = None if not return_dict: output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,) # Change to NCHW output format to have uniformity in the modules if not self.expand_output: remaining_encoder_outputs = encoder_outputs[1:] remaining_encoder_outputs = tuple( tf.transpose(h, perm=(0, 3, 1, 2)) for h in remaining_encoder_outputs[0] ) remaining_encoder_outputs = (remaining_encoder_outputs,) return output + remaining_encoder_outputs else: return output + encoder_outputs[1:] # Change the other hidden state outputs to NCHW as well if output_hidden_states: hidden_states = tuple(tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]) return TFBaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv_stem", None) is not None: with tf.name_scope(self.conv_stem.name): self.conv_stem.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build([None, None, None, None]) if getattr(self, "conv_1x1_exp", None) is not None: with tf.name_scope(self.conv_1x1_exp.name): self.conv_1x1_exp.build(None) class TFMobileViTPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileViTConfig base_model_prefix = "mobilevit" main_input_name = "pixel_values" MOBILEVIT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`MobileViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ MOBILEVIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]`, `dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileViTImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. 
""" @add_start_docstrings( "The bare MobileViT model outputting raw hidden-states without any specific head on top.", MOBILEVIT_START_DOCSTRING, ) class TFMobileViTModel(TFMobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig, expand_output: bool = True, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.expand_output = expand_output self.mobilevit = TFMobileViTMainLayer(config, expand_output=expand_output, name="mobilevit") @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: tf.Tensor | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple[tf.Tensor] | TFBaseModelOutputWithPooling: output = self.mobilevit(pixel_values, output_hidden_states, return_dict, training=training) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilevit", None) is not None: with tf.name_scope(self.mobilevit.name): self.mobilevit.build(None) @add_start_docstrings( """ MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, MOBILEVIT_START_DOCSTRING, ) class TFMobileViTForImageClassification(TFMobileViTPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: MobileViTConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.mobilevit = TFMobileViTMainLayer(config, name="mobilevit") # Classifier head self.dropout = keras.layers.Dropout(config.classifier_dropout_prob) self.classifier = ( keras.layers.Dense(config.num_labels, name="classifier") if config.num_labels > 0 else tf.identity ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: tf.Tensor | None = None, output_hidden_states: bool | None = None, labels: tf.Tensor | None = None, return_dict: bool | None = None, training: bool | None = False, ) -> tuple | TFImageClassifierOutputWithNoAttention: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilevit( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(self.dropout(pooled_output, training=training)) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilevit", None) is not None: with tf.name_scope(self.mobilevit.name): self.mobilevit.build(None) if getattr(self, "classifier", None) is not None: if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.neck_hidden_sizes[-1]]) class TFMobileViTASPPPooling(keras.layers.Layer): def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, **kwargs) -> None: super().__init__(**kwargs) self.global_pool = keras.layers.GlobalAveragePooling2D(keepdims=True, name="global_pool") self.conv_1x1 = TFMobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, use_normalization=True, use_activation="relu", name="conv_1x1", ) def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: spatial_size = shape_list(features)[1:-1] features = self.global_pool(features) features = self.conv_1x1(features, training=training) features = tf.image.resize(features, size=spatial_size, method="bilinear") return features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "global_pool", None) is not None: with tf.name_scope(self.global_pool.name): self.global_pool.build([None, None, None, None]) if getattr(self, "conv_1x1", None) is not None: with tf.name_scope(self.conv_1x1.name): self.conv_1x1.build(None) class TFMobileViTASPP(keras.layers.Layer): """ ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587 """ def __init__(self, config: MobileViTConfig, **kwargs) -> None: super().__init__(**kwargs) in_channels = config.neck_hidden_sizes[-2] out_channels = config.aspp_out_channels if len(config.atrous_rates) != 3: raise ValueError("Expected 3 values for atrous_rates") self.convs = [] in_projection = TFMobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, use_activation="relu", name="convs.0", ) self.convs.append(in_projection) self.convs.extend( [ TFMobileViTConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=3, dilation=rate, use_activation="relu", name=f"convs.{i + 1}", ) for i, rate in enumerate(config.atrous_rates) ] ) pool_layer = TFMobileViTASPPPooling( config, in_channels, out_channels, name=f"convs.{len(config.atrous_rates) + 1}" ) self.convs.append(pool_layer) self.project = TFMobileViTConvLayer( config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu", name="project", ) self.dropout = keras.layers.Dropout(config.aspp_dropout_prob) def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: # since the hidden states were transposed to have `(batch_size, channels, height, width)` # 
layout we transpose them back to have `(batch_size, height, width, channels)` layout. features = tf.transpose(features, perm=[0, 2, 3, 1]) pyramid = [] for conv in self.convs: pyramid.append(conv(features, training=training)) pyramid = tf.concat(pyramid, axis=-1) pooled_features = self.project(pyramid, training=training) pooled_features = self.dropout(pooled_features, training=training) return pooled_features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "project", None) is not None: with tf.name_scope(self.project.name): self.project.build(None) if getattr(self, "convs", None) is not None: for conv in self.convs: with tf.name_scope(conv.name): conv.build(None) class TFMobileViTDeepLabV3(keras.layers.Layer): """ DeepLabv3 architecture: https://huggingface.co/papers/1706.05587 """ def __init__(self, config: MobileViTConfig, **kwargs) -> None: super().__init__(**kwargs) self.aspp = TFMobileViTASPP(config, name="aspp") self.dropout = keras.layers.Dropout(config.classifier_dropout_prob) self.classifier = TFMobileViTConvLayer( config, in_channels=config.aspp_out_channels, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True, name="classifier", ) def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: features = self.aspp(hidden_states[-1], training=training) features = self.dropout(features, training=training) features = self.classifier(features, training=training) return features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "aspp", None) is not None: with tf.name_scope(self.aspp.name): self.aspp.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC. 
""", MOBILEVIT_START_DOCSTRING, ) class TFMobileViTForSemanticSegmentation(TFMobileViTPreTrainedModel): def __init__(self, config: MobileViTConfig, **kwargs) -> None: super().__init__(config, **kwargs) self.num_labels = config.num_labels self.mobilevit = TFMobileViTMainLayer(config, expand_output=False, name="mobilevit") self.segmentation_head = TFMobileViTDeepLabV3(config, name="segmentation_head") def hf_compute_loss(self, logits, labels): # upsample logits to the images' original size # `labels` is of shape (batch_size, height, width) label_interp_shape = shape_list(labels)[1:] upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear") # compute weighted loss loss_fct = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none") def masked_loss(real, pred): unmasked_loss = loss_fct(real, pred) mask = tf.cast(real != self.config.semantic_loss_ignore_index, dtype=unmasked_loss.dtype) masked_loss = unmasked_loss * mask # Reduction strategy in the similar spirit with # https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_utils.py#L210 reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(mask) return tf.reshape(reduced_masked_loss, (1,)) return masked_loss(labels, upsampled_logits) @unpack_inputs @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSemanticSegmenterOutputWithNoAttention, config_class=_CONFIG_FOR_DOC) def call( self, pixel_values: tf.Tensor | None = None, labels: tf.Tensor | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple | TFSemanticSegmenterOutputWithNoAttention: r""" labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). 
Returns: Examples: ```python >>> from transformers import AutoImageProcessor, TFMobileViTForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small") >>> model = TFMobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small") >>> inputs = image_processor(images=image, return_tensors="tf") >>> outputs = model(**inputs) >>> # logits are of shape (batch_size, num_labels, height, width) >>> logits = outputs.logits ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and not self.config.num_labels > 1: raise ValueError("The number of labels should be greater than one") outputs = self.mobilevit( pixel_values, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, training=training, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.segmentation_head(encoder_hidden_states, training=training) loss = None if labels is not None: loss = self.hf_compute_loss(logits=logits, labels=labels) # make logits of shape (batch_size, num_labels, height, width) to # keep them consistent across APIs logits = tf.transpose(logits, perm=[0, 3, 1, 2]) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSemanticSegmenterOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "mobilevit", None) is not None: with tf.name_scope(self.mobilevit.name): self.mobilevit.build(None) if getattr(self, "segmentation_head", None) is not None: with tf.name_scope(self.segmentation_head.name): self.segmentation_head.build(None) __all__ = [ "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ]
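# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the ignore-index masking in
# `TFMobileViTForSemanticSegmentation.hf_compute_loss` above is easier to see
# on toy tensors. The snippet below reproduces the same upsample-then-mask
# reduction with plain TensorFlow; the shapes, the 255 ignore index and the
# mapping of ignored pixels to a dummy class before the loss are illustrative
# assumptions, not values taken from the model.
# ---------------------------------------------------------------------------
import tensorflow as tf

batch, height, width, num_labels, ignore_index = 2, 8, 8, 3, 255

# low-resolution, channels-last logits, as the segmentation head produces them
logits = tf.random.normal((batch, height // 2, width // 2, num_labels))
labels = tf.random.uniform((batch, height, width), maxval=num_labels, dtype=tf.int32)
# pretend the first row of every map is unlabeled padding
labels = tf.concat([tf.fill((batch, 1, width), ignore_index), labels[:, 1:, :]], axis=1)

# 1) upsample the logits to the labels' resolution (same call as hf_compute_loss)
upsampled_logits = tf.image.resize(logits, size=(height, width), method="bilinear")

# 2) per-pixel cross-entropy; ignored pixels are mapped to a valid dummy class so
#    the op stays in range, and their contribution is removed by the mask below
mask = tf.cast(labels != ignore_index, tf.float32)
safe_labels = tf.where(labels != ignore_index, labels, tf.zeros_like(labels))
loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
per_pixel_loss = loss_fct(safe_labels, upsampled_logits)  # shape (batch, height, width)

# 3) average only over pixels that carry a real label
loss = tf.reduce_sum(per_pixel_loss * mask) / tf.reduce_sum(mask)
print(float(loss))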
transformers/src/transformers/models/mobilevit/modeling_tf_mobilevit.py/0
{ "file_path": "transformers/src/transformers/models/mobilevit/modeling_tf_mobilevit.py", "repo_id": "transformers", "token_count": 24166 }
452
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/moonshine/modular_moonshine.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_moonshine.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Optional, Union import numpy as np import torch import torch.nn as nn from transformers.utils.generic import OutputRecorder, check_model_inputs from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...utils.deprecation import deprecate_kwarg from .configuration_moonshine import MoonshineConfig class MoonshineEncoderMLP(nn.Module): def __init__(self, config, hidden_act): super().__init__() self.config = config self.activation_fn = ACT2FN[hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class MoonshineDecoderMLP(nn.Module): def __init__(self, config, hidden_act): super().__init__() self.config = config self.activation_fn = ACT2FN[hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size * 2) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states, gate = hidden_states.chunk(2, dim=-1) hidden_states = self.activation_fn(gate) * hidden_states hidden_states = self.fc2(hidden_states) return hidden_states def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., 0::2] x2 = x[..., 1::2] return torch.stack((-x2, x1), dim=-1).flatten(-2) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) # Interleave them instead of usual shape cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1) sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1) # Keep half or full tensor for later concatenation rotary_dim = cos.shape[-1] q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] # Apply rotary embeddings on the first half or full tensor q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin) k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin) # Concatenate back to full shape q_embed = torch.cat([q_embed, q_pass], dim=-1) k_embed = torch.cat([k_embed, k_pass], dim=-1) return q_embed, k_embed class MoonshineAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: MoonshineConfig, layer_idx: int, is_causal: bool, num_attention_heads: int, num_key_value_heads: int, ): super().__init__() config.update({"num_attention_heads": num_attention_heads, "num_key_value_heads": num_key_value_heads}) self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = is_causal self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) # Pad head dimension to the next specified multiple. 
if self.config.pad_head_dim_to_multiple_of is not None: target_multiple = self.config.pad_head_dim_to_multiple_of target_head_dim = target_multiple * ((self.head_dim + target_multiple - 1) // target_multiple) self.head_dim_padding = target_head_dim - self.head_dim else: self.head_dim_padding = 0 @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, key_value_states: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len = hidden_states.shape[:-1] query_states = ( self.q_proj(hidden_states).view(bsz, q_len, self.config.num_key_value_heads, self.head_dim).transpose(1, 2) ) is_cross_attention = key_value_states is not None if past_key_values is not None: is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache past_key_values.is_updated[self.layer_idx] = True past_key_values = past_key_values.cross_attention_cache else: past_key_values = past_key_values.self_attention_cache # use key_value_states if cross attention current_states = key_value_states if key_value_states is not None else hidden_states if is_cross_attention and past_key_values and is_updated: key_states = past_key_values.layers[self.layer_idx].keys value_states = past_key_values.layers[self.layer_idx].values else: key_states = ( self.k_proj(current_states) .view(bsz, -1, self.config.num_key_value_heads, self.head_dim) .transpose(1, 2) ) value_states = ( self.v_proj(current_states) .view(bsz, -1, self.config.num_key_value_heads, self.head_dim) .transpose(1, 2) ) if is_cross_attention and past_key_values is not None: key_states, value_states = past_key_values.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) if not is_cross_attention: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update( key_states, value_states, self.layer_idx, cache_kwargs ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] is_causal = self.is_causal and attention_mask is None and q_len > 1 if self.head_dim_padding > 0: query_states = torch.nn.functional.pad(query_states, (0, self.head_dim_padding)) key_states = torch.nn.functional.pad(key_states, (0, self.head_dim_padding)) value_states = torch.nn.functional.pad(value_states, (0, self.head_dim_padding)) attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, is_causal=is_causal, **kwargs, ) if self.head_dim_padding > 0: attn_output = attn_output[..., : -self.head_dim_padding] attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class MoonshineRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for 
`register_buffer` def __init__(self, config: MoonshineConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class MoonshineEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: MoonshineConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MoonshineAttention( config=config, layer_idx=layer_idx, is_causal=False, num_attention_heads=config.encoder_num_attention_heads, num_key_value_heads=config.encoder_num_key_value_heads, ) self.mlp = MoonshineEncoderMLP(config, config.encoder_hidden_act) self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class MoonshineDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MoonshineConfig, layer_idx: Optional[int] = None): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MoonshineAttention( config=config, layer_idx=layer_idx, is_causal=True, num_attention_heads=config.decoder_num_attention_heads, num_key_value_heads=config.decoder_num_key_value_heads, ) self.encoder_attn 
= MoonshineAttention( config=config, layer_idx=layer_idx, is_causal=False, num_attention_heads=config.decoder_num_attention_heads, num_key_value_heads=config.decoder_num_key_value_heads, ) self.mlp = MoonshineDecoderMLP(config, config.decoder_hidden_act) self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False) self.final_layernorm = nn.LayerNorm(config.hidden_size, bias=False) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, encoder_position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, encoder_position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states, _ = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring class MoonshinePreTrainedModel(PreTrainedModel): config: MoonshineConfig base_model_prefix = "model" main_input_name = "input_values" supports_gradient_checkpointing = True _no_split_modules = ["MoonshineEncoderLayer", "MoonshineDecoderLayer"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True # TODO arthur, how do we separate when it cross / self coming from different layer? def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ output_conv1_length = int((input_lengths - 127) / 64 + 1) output_conv2_length = int((output_conv1_length - 7) / 3 + 1) output_conv3_length = int((output_conv2_length - 3) / 2 + 1) return output_conv3_length class MoonshineEncoder(MoonshinePreTrainedModel): """ Transformer encoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`MoonshineEncoderLayer`] Args: config: MoonshineConfig """ main_input_name = "input_values" _can_record_outputs = { "attentions": MoonshineAttention, "hidden_states": MoonshineEncoderLayer, } def __init__(self, config: MoonshineConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.conv1 = nn.Conv1d(1, embed_dim, kernel_size=127, stride=64, bias=False) self.conv2 = nn.Conv1d(embed_dim, 2 * embed_dim, kernel_size=7, stride=3) self.conv3 = nn.Conv1d(2 * embed_dim, embed_dim, kernel_size=3, stride=2) self.groupnorm = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=1e-5) self.rotary_emb = MoonshineRotaryEmbedding(config=config) self.layers = nn.ModuleList( [MoonshineEncoderLayer(config, idx) for idx in range(config.encoder_num_hidden_layers)] ) self.layer_norm = nn.LayerNorm(embed_dim, bias=False) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self) -> nn.Module: return self.conv1 def set_input_embeddings(self, value: nn.Module): self.conv1 = value @check_model_inputs def forward( self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`): Float values of the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoFeatureExtractor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) """ input_values = input_values.unsqueeze(1) hidden_states = nn.functional.tanh(self.conv1(input_values)) hidden_states = self.groupnorm(hidden_states) hidden_states = nn.functional.gelu(self.conv2(hidden_states)) hidden_states = nn.functional.gelu(self.conv3(hidden_states)) hidden_states = hidden_states.permute(0, 2, 1) # attention mask downsampling if attention_mask is not None: mask_len = self._get_feat_extract_output_lengths(attention_mask.shape[-1]) downsample_stride = 64 * 3 * 2 # conv strides attention_mask = attention_mask[..., ::downsample_stride][..., :mask_len] if self.config._attn_implementation == "flash_attention_2": attention_mask = attention_mask if (attention_mask == 0.0).any() else None elif self.config._attn_implementation == "sdpa": attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, hidden_states.dtype) else: attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) position_ids = torch.arange(0, hidden_states.shape[1], device=hidden_states.device).unsqueeze(0) position_embeddings = self.rotary_emb(hidden_states, position_ids) for encoder_layer in self.layers: hidden_states = encoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.layer_norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, ) @auto_docstring class MoonshineDecoder(MoonshinePreTrainedModel): main_input_name = "input_ids" _can_record_outputs = { "attentions": OutputRecorder(MoonshineAttention, index=1, layer_name="self_attn"), "hidden_states": MoonshineDecoderLayer, "cross_attentions": OutputRecorder(MoonshineAttention, index=1, layer_name="encoder_attn"), } def __init__(self, config: MoonshineConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [MoonshineDecoderLayer(config, idx) for idx in range(config.decoder_num_hidden_layers)] ) self.norm = nn.LayerNorm(config.hidden_size, bias=False) self.rotary_emb = MoonshineRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @check_model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) """ if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) if encoder_attention_mask is not None: mask_len = encoder_hidden_states.shape[-2] downsample_stride = 64 * 3 * 2 # conv strides encoder_attention_mask = encoder_attention_mask[..., ::downsample_stride][..., :mask_len] if self.config._attn_implementation == "flash_attention_2": encoder_attention_mask = encoder_attention_mask if (encoder_attention_mask == 0.0).any() else None elif self.config._attn_implementation == "sdpa": encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2] ) else: encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2] ) for decoder_layer in self.layers: hidden_states = decoder_layer( hidden_states, causal_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) def _compute_mask_indices( shape: tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.detach().sum(-1).tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask @auto_docstring class MoonshineModel(MoonshinePreTrainedModel): def __init__(self, config: MoonshineConfig): super().__init__(config) self.encoder = MoonshineEncoder(config) self.decoder = MoonshineDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_encoder(self): """ Calling this function will disable the gradient computation for the Moonshine encoder so that its parameters will not be updated during training. """ self.encoder._freeze_parameters() def _mask_input_features( self, input_features: torch.FloatTensor, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://huggingface.co/papers/1904.08779). 
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return input_features # generate indices & apply SpecAugment along time axis batch_size, hidden_size, sequence_length = input_features.size() if self.config.mask_time_prob > 0 and self.training: # generate indices & apply SpecAugment along time axis mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=input_features.device, dtype=torch.bool) mask_time_indices = mask_time_indices[:, None].expand(-1, hidden_size, -1) input_features[mask_time_indices] = 0 if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=input_features.device, dtype=torch.bool) input_features[mask_feature_indices] = 0 return input_features @can_return_tuple @auto_docstring def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]] = None, decoder_position_ids: Optional[tuple[torch.LongTensor]] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Seq2SeqModelOutput: r""" input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`): Float values of the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoFeatureExtractor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`): Indices of positions of each input sequence tokens in the position embeddings. 
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings` Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, MoonshineModel >>> from datasets import load_dataset >>> model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("UsefulSensors/moonshine-tiny") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_values = inputs.input_values >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_values, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 288] ``` """ if encoder_outputs is None: encoder_outputs: BaseModelOutput = self.encoder(input_values, attention_mask=attention_mask, **kwargs) decoder_outputs: BaseModelOutputWithPastAndCrossAttentions = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_attention_mask=attention_mask, encoder_hidden_states=encoder_outputs.last_hidden_state, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, position_ids=decoder_position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs, ) return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids @auto_docstring( custom_intro=""" The Moonshine Model with a language modeling head. Can be used for automatic speech recognition. 
""" ) class MoonshineForConditionalGeneration(MoonshinePreTrainedModel, GenerationMixin): _tied_weights_keys = ["proj_out.weight"] def __init__(self, config: MoonshineConfig): super().__init__(config) self.model = MoonshineModel(config) self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.proj_out def set_output_embeddings(self, new_embeddings): self.proj_out = new_embeddings def get_input_embeddings(self) -> nn.Module: return self.model.get_input_embeddings() @can_return_tuple @auto_docstring def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]] = None, decoder_position_ids: Optional[tuple[torch.LongTensor]] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Seq2SeqLMOutput: r""" input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`): Float values of the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoFeatureExtractor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`): Indices of positions of each input sequence tokens in the position embeddings. Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings` Example: ```python >>> import torch >>> from transformers import AutoProcessor, MoonshineForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny") >>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_values = inputs.input_values >>> generated_ids = model.generate(input_values, max_new_tokens=100) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription 'Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' 
```""" if labels is not None: if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs: Seq2SeqModelOutput = self.model( input_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, decoder_position_ids=decoder_position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs, ) logits = self.proj_out(outputs.last_hidden_state) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size) return Seq2SeqLMOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) __all__ = ["MoonshineModel", "MoonshinePreTrainedModel", "MoonshineForConditionalGeneration"]
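# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the Moonshine encoder above
# downsamples raw audio with three unpadded convolutions (kernel/stride 127/64,
# 7/3 and 3/2), and `_get_feat_extract_output_lengths` mirrors that arithmetic
# in closed form. This hedged check compares the two on a dummy waveform; the
# 16000-sample length and the small channel widths are illustrative choices.
# ---------------------------------------------------------------------------
import torch
import torch.nn as nn


def conv_out_len(n: int, kernel: int, stride: int) -> int:
    # standard output-length formula for an unpadded 1d convolution
    return (n - kernel) // stride + 1


audio_len = 16000
closed_form = conv_out_len(conv_out_len(conv_out_len(audio_len, 127, 64), 7, 3), 3, 2)

conv_stack = nn.Sequential(
    nn.Conv1d(1, 4, kernel_size=127, stride=64, bias=False),  # 4 stands in for embed_dim here
    nn.Conv1d(4, 8, kernel_size=7, stride=3),
    nn.Conv1d(8, 4, kernel_size=3, stride=2),
)
actual = conv_stack(torch.zeros(1, 1, audio_len)).shape[-1]
assert closed_form == actual  # both give the encoder's output sequence length

# the encoder subsamples the padding mask with the product of the three strides
downsample_stride = 64 * 3 * 2
mask = torch.ones(1, audio_len)[..., ::downsample_stride][..., :closed_form]
assert mask.shape[-1] == closed_form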
transformers/src/transformers/models/moonshine/modeling_moonshine.py/0
{ "file_path": "transformers/src/transformers/models/moonshine/modeling_moonshine.py", "repo_id": "transformers", "token_count": 21235 }
453
# coding=utf-8 # Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Musicgen Melody model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig logger = logging.get_logger(__name__) class MusicgenMelodyDecoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`MusicgenMelodyDecoder`]. It is used to instantiate a Musicgen Melody decoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Musicgen Melody [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 2048): Vocabulary size of the MusicgenMelodyDecoder model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MusicgenMelodyDecoder`]. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically, set this to something large just in case (e.g., 512 or 1024 or 2048). num_hidden_layers (`int`, *optional*, defaults to 24): Number of decoder layers. ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer block. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether the model should return the last key/values attentions (not used by all models) activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. initializer_factor (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(hidden_size). num_codebooks (`int`, *optional*, defaults to 4): The number of parallel codebooks forwarded to the model. audio_channels (`int`, *optional*, defaults to 1): Number of audio channels used by the model (either mono or stereo). Stereo models generate a separate audio stream for the left/right output channels. Mono models generate a single audio stream output. pad_token_id (`int`, *optional*, defaults to 2048): The id of the *padding* token. bos_token_id (`int`, *optional*, defaults to 2048): The id of the *beginning-of-sequence* token. eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie word embeddings with the text encoder. """ model_type = "musicgen_melody_decoder" base_config_key = "decoder_config" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=2048, max_position_embeddings=2048, num_hidden_layers=24, ffn_dim=4096, num_attention_heads=16, layerdrop=0.0, use_cache=True, activation_function="gelu", hidden_size=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, initializer_factor=0.02, scale_embedding=False, num_codebooks=4, audio_channels=1, pad_token_id=2048, bos_token_id=2048, eos_token_id=None, tie_word_embeddings=False, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.ffn_dim = ffn_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.initializer_factor = initializer_factor self.layerdrop = layerdrop self.use_cache = use_cache self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.num_codebooks = num_codebooks if audio_channels not in [1, 2]: raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.") self.audio_channels = audio_channels super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) class MusicgenMelodyConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MusicgenMelodyModel`]. It is used to instantiate a Musicgen Melody model according to the specified arguments, defining the text encoder, audio encoder and Musicgen Melody decoder configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Musicgen Melody [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_chroma (`int`, *optional*, defaults to 12): Number of chroma bins to use. chroma_length (`int`, *optional*, defaults to 235): Maximum chroma duration if audio is used to condition the model. Corresponds to the maximum duration used during training. kwargs (*optional*): Dictionary of keyword arguments. Notably: - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the text encoder config. 
- **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the audio encoder config. - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the decoder config. Example: ```python >>> from transformers import ( ... MusicgenMelodyConfig, ... MusicgenMelodyDecoderConfig, ... T5Config, ... EncodecConfig, ... MusicgenMelodyForConditionalGeneration, ... ) >>> # Initializing text encoder, audio encoder, and decoder model configurations >>> text_encoder_config = T5Config() >>> audio_encoder_config = EncodecConfig() >>> decoder_config = MusicgenMelodyDecoderConfig() >>> configuration = MusicgenMelodyConfig.from_sub_models_config( ... text_encoder_config, audio_encoder_config, decoder_config ... ) >>> # Initializing a MusicgenMelodyForConditionalGeneration (with random weights) from the facebook/musicgen-melody style configuration >>> model = MusicgenMelodyForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> config_text_encoder = model.config.text_encoder >>> config_audio_encoder = model.config.audio_encoder >>> config_decoder = model.config.decoder >>> # Saving the model, including its configuration >>> model.save_pretrained("musicgen_melody-model") >>> # loading model and config from pretrained folder >>> musicgen_melody_config = MusicgenMelodyConfig.from_pretrained("musicgen_melody-model") >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("musicgen_melody-model", config=musicgen_melody_config) ```""" model_type = "musicgen_melody" sub_configs = { "text_encoder": AutoConfig, "audio_encoder": AutoConfig, "decoder": MusicgenMelodyDecoderConfig, } has_no_defaults_at_init = True def __init__( self, num_chroma=12, chroma_length=235, **kwargs, ): super().__init__(**kwargs) if "text_encoder" not in kwargs or "audio_encoder" not in kwargs or "decoder" not in kwargs: raise ValueError("Config has to be initialized with text_encoder, audio_encoder and decoder config") text_encoder_config = kwargs.pop("text_encoder") text_encoder_model_type = text_encoder_config.pop("model_type") audio_encoder_config = kwargs.pop("audio_encoder") audio_encoder_model_type = audio_encoder_config.pop("model_type") decoder_config = kwargs.pop("decoder") self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config) self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config) self.decoder = MusicgenMelodyDecoderConfig(**decoder_config) self.is_encoder_decoder = False self.num_chroma = num_chroma self.chroma_length = chroma_length @classmethod def from_sub_models_config( cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenMelodyDecoderConfig, **kwargs, ): r""" Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from text encoder, audio encoder and decoder configurations. Returns: [`MusicgenMelodyConfig`]: An instance of a configuration object """ return cls( text_encoder=text_encoder_config.to_dict(), audio_encoder=audio_encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs, ) @property # This is a property because you might want to change the codec model on the fly def sampling_rate(self): return self.audio_encoder.sampling_rate __all__ = ["MusicgenMelodyConfig", "MusicgenMelodyDecoderConfig"]
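# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): two behaviours of the config
# classes above that the docstring example does not show. The decoder config
# validates `audio_channels`, and `MusicgenMelodyConfig.sampling_rate` simply
# delegates to the audio encoder sub-config. Assumes a `transformers` install
# with the T5 and Encodec configs available; printed values are whatever those
# defaults provide.
# ---------------------------------------------------------------------------
from transformers import EncodecConfig, T5Config
from transformers.models.musicgen_melody.configuration_musicgen_melody import (
    MusicgenMelodyConfig,
    MusicgenMelodyDecoderConfig,
)

# mono (1) and stereo (2) are the only accepted channel counts
stereo_decoder = MusicgenMelodyDecoderConfig(audio_channels=2)
try:
    MusicgenMelodyDecoderConfig(audio_channels=3)
except ValueError as err:
    print(err)  # "Expected 1 (mono) or 2 (stereo) audio channels, got 3 channels."

config = MusicgenMelodyConfig.from_sub_models_config(T5Config(), EncodecConfig(), stereo_decoder)
print(config.sampling_rate)  # forwarded from config.audio_encoder.sampling_rate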
transformers/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py/0
{ "file_path": "transformers/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py", "repo_id": "transformers", "token_count": 4480 }
454
# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Nemotron model.""" import math from typing import Optional, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import Size, Tensor, nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask from ...modeling_layers import ( GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer, ) from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from .configuration_nemotron import NemotronConfig if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) def _cast_if_autocast_enabled(device_type, *args): if not torch.is_autocast_enabled(): return args else: # NOTE: `torch.get_autocast_dtype` is there starting from PyTorch 2.4 target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) return torch.amp.autocast_mode._cast(args, device_type, target_dtype) class NemotronLayerNorm1P(nn.LayerNorm): def __init__( self, normalized_shape: Union[int, list[int], Size], eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True, device=None, dtype=None, ): super().__init__(normalized_shape, eps, elementwise_affine, bias, device, dtype) def forward(self, input: Tensor) -> Tensor: device_type = input.device.type if input.device.type != "mps" else "cpu" args = _cast_if_autocast_enabled( device_type, input, self.normalized_shape, self.weight + 1, self.bias, self.eps ) with torch.autocast(device_type=input.device.type, enabled=False): return F.layer_norm(*args) # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with LLAMA->NEMOTRON,Llama->Nemotron,llama->nemotron class NemotronRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` # Ignore copy def __init__( self, config: NemotronConfig, device=None, ): super().__init__() self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) 
self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) rot_dim = cos.shape[-1] # If q_pass/k_pass is empty, rotary pos embedding is applied to all tensor q/k q, q_pass = q[..., :rot_dim], q[..., rot_dim:] k, k_pass = k[..., :rot_dim], k[..., rot_dim:] q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return torch.cat((q_embed, q_pass), dim=-1), torch.cat((k_embed, k_pass), dim=-1) class NemotronMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.up_proj(x))) # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class NemotronAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: NemotronConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.partial_rotary_factor = config.partial_rotary_factor self.is_causal = True self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.head_dim * self.num_heads, self.hidden_size, bias=config.attention_bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if position_embeddings is not None: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: # no matter the length, we just slice it 
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights # NO LONGER EXIST Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 with LLAMA->NEMOTRON,Llama->Nemotron,llama->nemotron # TODO cyril: modular class NemotronFlashAttention2(NemotronAttention): """ Nemotron flash attention module. This module inherits from `NemotronAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if isinstance(past_key_values, StaticCache): raise ValueError( "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers" ) output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if position_embeddings is not None: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} 
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (NemotronRMSNorm handles it correctly) input_dtype = query_states.dtype device_type = query_states.device.type if query_states.device.type != "mps" else "cpu" if input_dtype == torch.float32: if torch.is_autocast_enabled(): # NOTE: `torch.get_autocast_dtype` is there starting from PyTorch 2.4 target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, "sliding_window", None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal, ) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights # NO LONGER EXIST Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with LLAMA->NEMOTRON,Llama->Nemotron,llama->nemotron # TODO cyril: modular class NemotronSdpaAttention(NemotronAttention): """ Nemotron attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `NemotronAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. 
logger.warning_once( "NemotronModel is using NemotronSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if position_embeddings is not None: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. 
is_causal = causal_mask is None and q_len > 1 attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, None NEMOTRON_ATTENTION_CLASSES = { "eager": NemotronAttention, "flash_attention_2": NemotronFlashAttention2, "sdpa": NemotronSdpaAttention, } # copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer with LLAMA->NEMOTRON,Llama->Nemotron,llama->nemotron # no longer copied after attention refactors class NemotronDecoderLayer(GradientCheckpointingLayer): # Ignore copy def __init__(self, config: NemotronConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = NEMOTRON_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = NemotronMLP(config) self.input_layernorm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps) self.post_attention_layernorm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. 
kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs @auto_docstring class NemotronPreTrainedModel(PreTrainedModel): config: NemotronConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["NemotronDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, NemotronLayerNorm1P): module.weight.data.fill_(1.0) module.bias.data.zero_() @auto_docstring class NemotronModel(NemotronPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`NemotronDecoderLayer`] Args: config: NemotronConfig """ def __init__(self, config: NemotronConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [NemotronDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps) self.rotary_emb = NemotronRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if use_cache and past_key_values is None: past_key_values = DynamicCache() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # embed positions hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask # TODO: re-enable check: Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->NEMOTRON,Llama->Nemotron,llama->nemotron class NemotronForCausalLM(NemotronPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = NemotronModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, NemotronForCausalLM >>> model = NemotronForCausalLM.from_pretrained("nvidia/nemotron-3-8b-base-4k-hf") >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/nemotron-3-8b-base-4k-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class NemotronForSequenceClassification(GenericForSequenceClassification, NemotronPreTrainedModel): ... class NemotronForQuestionAnswering(GenericForQuestionAnswering, NemotronPreTrainedModel): base_model_prefix = "transformer" class NemotronForTokenClassification(GenericForTokenClassification, NemotronPreTrainedModel): ... __all__ = [ "NemotronForQuestionAnswering", "NemotronForCausalLM", "NemotronModel", "NemotronPreTrainedModel", "NemotronForSequenceClassification", "NemotronForTokenClassification", ]
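The eager and SDPA attention paths above both call `repeat_kv` to expand grouped key/value heads to the full number of attention heads before computing attention scores. The snippet below is a self-contained sketch of that expansion (the helper is re-declared locally so it runs without importing the model file) and checks the equivalence with `torch.repeat_interleave` stated in the original docstring; the tensor shapes are arbitrary illustrative values.

```python
import torch


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq_len, head_dim) -> (batch, num_kv_heads * n_rep, seq_len, head_dim)
    batch, num_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq_len, head_dim)
    return hidden_states.reshape(batch, num_kv_heads * n_rep, seq_len, head_dim)


# Illustrative shapes: 2 KV heads shared by 8 query heads -> n_rep = 4.
kv = torch.randn(1, 2, 5, 16)
expanded = repeat_kv(kv, n_rep=4)
assert expanded.shape == (1, 8, 5, 16)
# Same result as repeating each KV head in place along the head dimension.
assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=4, dim=1))
```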
transformers/src/transformers/models/nemotron/modeling_nemotron.py/0
{ "file_path": "transformers/src/transformers/models/nemotron/modeling_nemotron.py", "repo_id": "transformers", "token_count": 18700 }
455
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...utils import logging logger = logging.get_logger(__name__) class Ovis2ProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "padding": False, }, "image_kwargs": {}, } class Ovis2Processor(ProcessorMixin): r""" Constructs a Ovis2 processor which wraps Ovis2 image processor and a Qwen2 tokenizer into a single processor. [`Ovis2Processor`] offers all the functionalities of [`Ovis2VideoProcessor`], [`Ovis2ImageProcessor`] and [`Qwen2TokenizerFast`]. See the [`~Ovis2Processor.__call__`] and [`~Ovis2Processor.decode`] for more information. Args: image_processor ([`Ovis2ImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`Qwen2TokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. image_seq_length (`int`, *optional*, defaults to 256): The number of image tokens to be used for each image in the input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" def __init__( self, image_processor=None, tokenizer=None, chat_template=None, image_token="<image>", image_seq_length=256, **kwargs, ): self.image_seq_length = image_seq_length self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token self.image_token_id = ( tokenizer.image_token_id if getattr(tokenizer, "image_token_id", None) else tokenizer.convert_tokens_to_ids(self.image_token) ) super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs) def __call__( self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, **kwargs: Unpack[Ovis2ProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to Ovis2ImageProcessor's [`~Ovis2ImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. 
Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs( Ovis2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise ValueError("Invalid input text. Please provide a string, or a list of strings") image_inputs = {} if images is not None: image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"]) image_grids = image_inputs.pop("grids").tolist() text = self._expand_image_tokens(text, image_grids) text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) return BatchFeature(data={**text_inputs, **image_inputs}) def _expand_image_tokens( self, text: list[TextInput], grids: list[list[int]], ): processed_text = [] grid_index = 0 for sample in text: while "<image>" in sample: grid = grids[grid_index] row, col = grid[0], grid[1] placeholder = f"<IMG_START>{'<IMG_ATOM>' * self.image_seq_length}<IMG_GRID>" if row * col > 1: for r in range(row): for c in range(col): placeholder += f"{'<IMG_ATOM>' * self.image_seq_length}" if c < col - 1: placeholder += "<IMG_COL>" if r < row - 1: placeholder += "<IMG_ROW>" placeholder += "<IMG_END>" sample = sample.replace("<image>", placeholder, 1) grid_index += 1 processed_text.append(sample) return processed_text def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to Qwen2TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(tokenizer_input_names) + list(image_processor_input_names) __all__ = ["Ovis2Processor"]
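`Ovis2Processor._expand_image_tokens` above rewrites each `<image>` placeholder into a structured sequence of special tokens: a global view (`<IMG_START>`, a run of `<IMG_ATOM>`, `<IMG_GRID>`), then one run of `<IMG_ATOM>` per tile with `<IMG_COL>`/`<IMG_ROW>` separators, closed by `<IMG_END>`. The sketch below mirrors that expansion as a standalone function so it can be run in isolation; the loop nesting is reconstructed from the flattened source above, and `image_seq_length` is shrunk to 2 purely to keep the printed string readable (the processor defaults to 256).

```python
# Standalone sketch of the placeholder expansion; `grid` is (rows, cols), matching
# the per-image entries of the image processor's "grids" output in the code above.
def expand_image_token(grid, image_seq_length=2):
    row, col = grid
    placeholder = f"<IMG_START>{'<IMG_ATOM>' * image_seq_length}<IMG_GRID>"
    if row * col > 1:
        for r in range(row):
            for c in range(col):
                placeholder += "<IMG_ATOM>" * image_seq_length
                if c < col - 1:
                    placeholder += "<IMG_COL>"
            if r < row - 1:
                placeholder += "<IMG_ROW>"
    placeholder += "<IMG_END>"
    return placeholder


# A 1x2 grid: the global view followed by two tiles separated by <IMG_COL>.
print(expand_image_token((1, 2)))
# <IMG_START><IMG_ATOM><IMG_ATOM><IMG_GRID><IMG_ATOM><IMG_ATOM><IMG_COL><IMG_ATOM><IMG_ATOM><IMG_END>
```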
transformers/src/transformers/models/ovis2/processing_ovis2.py/0
{ "file_path": "transformers/src/transformers/models/ovis2/processing_ovis2.py", "repo_id": "transformers", "token_count": 3282 }
456
from typing import Callable, Optional import torch import torch.nn as nn from ...cache_utils import Cache, DynamicCache from ...masking_utils import create_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPast, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ...utils.deprecation import deprecate_kwarg from ..clip.modeling_clip import CLIPMLP from ..llama.modeling_llama import ( LlamaAttention, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, # copied from Llama ) from .configuration_phi import PhiConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "microsoft/phi-1" _CONFIG_FOR_DOC = "PhiConfig" class PhiAttention(LlamaAttention): def __init__(self, config: PhiConfig, layer_idx: int): super().__init__(config, layer_idx) self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True) self.dense = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=True) del self.o_proj self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor) self.qk_layernorm = config.qk_layernorm if self.qk_layernorm: self.q_layernorm = nn.LayerNorm( config.hidden_size // config.num_attention_heads, eps=config.layer_norm_eps, elementwise_affine=True ) self.k_layernorm = nn.LayerNorm( config.hidden_size // config.num_attention_heads, eps=config.layer_norm_eps, elementwise_affine=True ) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) if self.qk_layernorm: query_states = self.q_layernorm(query_states) key_states = self.k_layernorm(key_states) cos, sin = position_embeddings # Partial rotary embedding query_rot, query_pass = ( query_states[..., : self.rotary_ndims], query_states[..., self.rotary_ndims :], ) key_rot, key_pass = ( key_states[..., : self.rotary_ndims], key_states[..., self.rotary_ndims :], ) # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor] query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) # [batch_size, seq_length, num_heads, head_dim] query_states = torch.cat((query_rot, query_pass), dim=-1) key_states = torch.cat((key_rot, key_pass), dim=-1) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = 
eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.dense(attn_output) return attn_output, attn_weights class PhiMLP(CLIPMLP): pass class PhiDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: PhiConfig, layer_idx: int): super().__init__() self.self_attn = PhiAttention(config, layer_idx=layer_idx) self.mlp = PhiMLP(config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.resid_dropout = nn.Dropout(config.resid_pdrop) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention attn_outputs, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) attn_outputs = self.resid_dropout(attn_outputs) feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states)) hidden_states = attn_outputs + feed_forward_hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs class PhiRotaryEmbedding(LlamaRotaryEmbedding): pass class PhiModel(LlamaModel): def __init__(self, config: PhiConfig): super().__init__(config) self.layers = nn.ModuleList( [PhiDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.embed_dropout = nn.Dropout(config.embd_pdrop) self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) del self.norm def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and 
self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) inputs_embeds = self.embed_dropout(inputs_embeds) # diff with Llama hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.final_layernorm(hidden_states) # diff with Llama # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) class PhiForCausalLM(LlamaForCausalLM): def __init__(self, config): super().__init__(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True) class PhiForSequenceClassification(LlamaForSequenceClassification): pass class PhiForTokenClassification(LlamaForTokenClassification): pass __all__ = [ "PhiPreTrainedModel", # noqa: F822 "PhiModel", "PhiForCausalLM", "PhiForSequenceClassification", "PhiForTokenClassification", ]
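`PhiAttention.forward` above applies rotary position embeddings only to the first `rotary_ndims = int(head_dim * partial_rotary_factor)` dimensions of each head and passes the remaining dimensions through untouched. The sketch below isolates that split/rotate/concatenate bookkeeping so it runs without the model code; the shapes and the 0.5 factor are illustrative, and the random `cos`/`sin` tensors merely stand in for the values a real rotary embedding module would produce.

```python
import torch


def rotate_half(x):
    # Same helper as in the modeling code: swap and negate the two halves of the last dim.
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


batch, heads, seq, head_dim = 1, 4, 6, 32
partial_rotary_factor = 0.5                            # illustrative value
rotary_ndims = int(head_dim * partial_rotary_factor)  # 16

query = torch.randn(batch, heads, seq, head_dim)
# Placeholders for the position-dependent cos/sin a rotary embedding module would return.
cos = torch.randn(seq, rotary_ndims)[None, None]
sin = torch.randn(seq, rotary_ndims)[None, None]

# Rotate only the first `rotary_ndims` dims; the rest pass through unchanged.
q_rot, q_pass = query[..., :rotary_ndims], query[..., rotary_ndims:]
q_rot = (q_rot * cos) + (rotate_half(q_rot) * sin)
query_states = torch.cat((q_rot, q_pass), dim=-1)

assert query_states.shape == query.shape
assert torch.equal(query_states[..., rotary_ndims:], query[..., rotary_ndims:])
```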
transformers/src/transformers/models/phi/modular_phi.py/0
{ "file_path": "transformers/src/transformers/models/phi/modular_phi.py", "repo_id": "transformers", "token_count": 5128 }
457
# coding=utf-8 # Copyright 2023 The Pop2Piano Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Pop2Piano model.""" import copy import math from typing import Optional, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from transformers.generation import GenerationConfig from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging from ...utils.deprecation import deprecate_kwarg from .configuration_pop2piano import Pop2PianoConfig if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) _load_pop2piano_layer_norm = True try: from apex.normalization import FusedRMSNorm _load_pop2piano_layer_norm = False logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pop2PianoLayerNorm") except ImportError: # using the normal Pop2PianoLayerNorm pass except Exception: logger.warning("Discovered apex but it failed to load, falling back to Pop2PianoLayerNorm") pass # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pop2Piano class Pop2PianoLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the Pop2Piano style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # Pop2Piano uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states if not _load_pop2piano_layer_norm: Pop2PianoLayerNorm = FusedRMSNorm # noqa # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->Pop2Piano,t5->pop2piano class Pop2PianoDenseActDense(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pop2Piano class Pop2PianoDenseGatedActDense(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. 
# See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->Pop2Piano class Pop2PianoLayerFF(nn.Module): def __init__(self, config: Pop2PianoConfig): super().__init__() if config.is_gated_act: self.DenseReluDense = Pop2PianoDenseGatedActDense(config) else: self.DenseReluDense = Pop2PianoDenseActDense(config) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->Pop2Piano,t5->pop2piano class Pop2PianoAttention(nn.Module): def __init__( self, config: Pop2PianoConfig, has_relative_attention_bias=False, layer_idx: Optional[int] = None, ): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once( f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and " "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. 
The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" # Input is (batch_size, seq_length, dim) # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder) batch_size, seq_length = hidden_states.shape[:2] # if key_value_states are provided this layer is used as a cross-attention layer for the decoder is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) # Check is encoder-decoder model is being used. Otherwise we'll get `DynamicCache` if past_key_values is not None and isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past) real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias( real_seq_length, key_length, device=scores.device, cache_position=cache_position ) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, : key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: 
attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, -1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->Pop2Piano,t5->pop2piano class Pop2PianoLayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.SelfAttention = Pop2PianoAttention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->Pop2Piano,t5->pop2piano class Pop2PianoLayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int] = None): super().__init__() self.EncDecAttention = Pop2PianoAttention(config, has_relative_attention_bias=False, layer_idx=layer_idx) self.layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5Block with T5->Pop2Piano,t5->pop2piano class Pop2PianoBlock(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append( Pop2PianoLayerSelfAttention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) ) if self.is_decoder: self.layer.append(Pop2PianoLayerCrossAttention(config, layer_idx=layer_idx)) 
self.layer.append(Pop2PianoLayerFF(config)) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None, ): self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[1:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) return ( outputs + attention_outputs ) # hidden-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) @auto_docstring class Pop2PianoPreTrainedModel(PreTrainedModel): config: Pop2PianoConfig base_model_prefix = "transformer" is_parallelizable = False supports_gradient_checkpointing = True _can_compile_fullgraph = False _no_split_modules = ["Pop2PianoBlock"] _keep_in_fp32_modules = ["wo"] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, Pop2PianoLayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, Pop2PianoConcatEmbeddingToMel): module.embedding.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, Pop2PianoForConditionalGeneration): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, Pop2PianoDenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, Pop2PianoDenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, Pop2PianoAttention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In Pop2Piano it is usually set to the pad_token_id." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. 
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class Pop2PianoStack(Pop2PianoPreTrainedModel): # Copied from transformers.models.t5.modeling_t5.T5Stack.__init__ with T5->Pop2Piano,t5->pop2piano def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [ Pop2PianoBlock(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers) ] ) self.final_layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) else: past_key_values = DynamicCache() elif not self.is_decoder: # do not pass cache object down the line for encoder stack # it messes indexing later in decoder-stack because cache object is modified in-place past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past cache mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions, ) else: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, # as a positional argument for gradient checkpointing layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = layer_outputs[0] # We share the position biases between the layers - the first layer store them # 
layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
# Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask class Pop2PianoConcatEmbeddingToMel(nn.Module): """Embedding Matrix for `composer` tokens.""" def __init__(self, config): super().__init__() self.embedding = nn.Embedding(num_embeddings=config.composer_vocab_size, embedding_dim=config.d_model) def forward(self, feature, index_value, embedding_offset): index_shifted = index_value - embedding_offset composer_embedding = self.embedding(index_shifted).unsqueeze(1) inputs_embeds = torch.cat([composer_embedding, feature], dim=1) return inputs_embeds @auto_docstring( custom_intro=""" Pop2Piano Model with a `language modeling` head on top. 
""" ) class Pop2PianoForConditionalGeneration(Pop2PianoPreTrainedModel, GenerationMixin): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: Pop2PianoConfig): super().__init__(config) self.config = config self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) self.mel_conditioner = Pop2PianoConcatEmbeddingToMel(config) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = Pop2PianoStack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = Pop2PianoStack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_mel_conditioner_outputs( self, input_features: torch.FloatTensor, composer: str, generation_config: GenerationConfig, attention_mask: Optional[torch.FloatTensor] = None, ): """ This method is used to concatenate mel conditioner tokens at the front of the input_features in order to control the type of MIDI token generated by the model. Args: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): input features extracted from the feature extractor. composer (`str`): composer token which determines the type of MIDI tokens to be generated. generation_config (`~generation.GenerationConfig`): The generation is used to get the composer-feature_token pair. attention_mask (``, *optional*): For batched generation `input_features` are padded to have the same shape across all examples. `attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**. """ composer_to_feature_token = generation_config.composer_to_feature_token if composer not in composer_to_feature_token: raise ValueError( f"Please choose a composer from {list(composer_to_feature_token.keys())}. 
Composer received - {composer}" ) composer_value = composer_to_feature_token[composer] composer_value = torch.tensor(composer_value, device=self.device) composer_value = composer_value.repeat(input_features.shape[0]) embedding_offset = min(composer_to_feature_token.values()) input_features = self.mel_conditioner( feature=input_features, index_value=composer_value, embedding_offset=embedding_offset, ) if attention_mask is not None: input_features[~attention_mask[:, 0].bool()] = 0.0 # since self.mel_conditioner adds a new array at the front of inputs_embeds we need to do the same for attention_mask to keep the shapes same attention_mask = torch.concatenate([attention_mask[:, 0].view(-1, 1), attention_mask], axis=1) return input_features, attention_mask return input_features, None @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, input_features: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Pop2Piano is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [Pop2Piano Training](./Pop2Piano#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Pop2Piano uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None and input_features is not None: raise ValueError("Both `inputs_embeds` and `input_features` received! Please provide only one of them") elif input_features is not None and inputs_embeds is None: inputs_embeds = input_features # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @torch.no_grad() def generate( self, input_features, attention_mask=None, 
composer="composer1", generation_config=None, **kwargs, ): """ Generates token ids for midi outputs. <Tip warning={true}> Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the model's default generation configuration. You can override any `generation_config` by passing the corresponding parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. For an overview of generation strategies and code examples, check out the [following guide](./generation_strategies). </Tip> Parameters: input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): This is the featurized version of audio generated by `Pop2PianoFeatureExtractor`. attention_mask: For batched generation `input_features` are padded to have the same shape across all examples. `attention_mask` helps to determine which areas were padded and which were not. - 1 for tokens that are **not padded**, - 0 for tokens that are **padded**. composer (`str`, *optional*, defaults to `"composer1"`): This value is passed to `Pop2PianoConcatEmbeddingToMel` to generate different embeddings for each `"composer"`. Please make sure that the composet value is present in `composer_to_feature_token` in `generation_config`. For an example please see https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json . generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which had the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. kwargs: Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. Since Pop2Piano is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible [`~utils.ModelOutput`] types are: - [`~generation.GenerateEncoderDecoderOutput`], - [`~generation.GenerateBeamEncoderDecoderOutput`] """ if generation_config is None: generation_config = self.generation_config generation_config.update(**kwargs) # check for composer_to_feature_token if not hasattr(generation_config, "composer_to_feature_token"): raise ValueError( "`composer_to_feature_token` was not found! Please refer to " "https://huggingface.co/sweetcocoa/pop2piano/blob/main/generation_config.json" "and parse a dict like that." ) if len(generation_config.composer_to_feature_token) != self.config.composer_vocab_size: raise ValueError( "config.composer_vocab_size must be same as the number of keys in " f"generation_config.composer_to_feature_token! " f"Found {self.config.composer_vocab_size} vs {len(generation_config.composer_to_feature_token)}." 
) # to control the variation of generated MIDI tokens we concatenate mel-conditioner tokens(which depends on composer_token) # at the front of input_features. input_features, attention_mask = self.get_mel_conditioner_outputs( input_features=input_features, attention_mask=attention_mask, composer=composer, generation_config=generation_config, ) return super().generate( inputs=None, inputs_embeds=input_features, attention_mask=attention_mask, generation_config=generation_config, **kwargs, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) __all__ = ["Pop2PianoForConditionalGeneration", "Pop2PianoPreTrainedModel"]
transformers/src/transformers/models/pop2piano/modeling_pop2piano.py/0
{ "file_path": "transformers/src/transformers/models/pop2piano/modeling_pop2piano.py", "repo_id": "transformers", "token_count": 27683 }
458
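The `_relative_position_bucket` docstring in the file above describes the T5-style bucketing rule only in prose. The following self-contained sketch restates the same rule outside the model class so its behaviour can be checked on a few offsets; the function is a standalone copy for illustration, and the example offsets and asserted values are just illustrative checks, not part of the model file.

import math
import torch

def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Standalone illustration of the bucketing rule described in the docstring above."""
    relative_buckets = torch.zeros_like(relative_position)
    if bidirectional:
        # half of the buckets are reserved for positive offsets, half for negative ones
        num_buckets //= 2
        relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
        relative_position = torch.abs(relative_position)
    else:
        relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
    max_exact = num_buckets // 2  # small distances each keep their own bucket
    is_small = relative_position < max_exact
    # larger distances share logarithmically sized buckets up to max_distance
    relative_position_if_large = max_exact + (
        torch.log(relative_position.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    relative_position_if_large = torch.min(
        relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
    )
    relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
    return relative_buckets

# nearby offsets keep distinct buckets; distant ones share log-spaced buckets and saturate
assert relative_position_bucket(torch.tensor([0, 3, 20, 130])).tolist() == [0, 19, 26, 31]
assert relative_position_bucket(torch.tensor([1000])).item() == 31  # beyond max_distance -> last bucket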
# coding=utf-8 # Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, # Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Pvt checkpoints from the original library.""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import PvtConfig, PvtForImageClassification, PvtImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] for i in range(config.num_encoder_blocks): # Rename embeddings' parameters rename_keys.append((f"pos_embed{i + 1}", f"pvt.encoder.patch_embeddings.{i}.position_embeddings")) rename_keys.append((f"patch_embed{i + 1}.proj.weight", f"pvt.encoder.patch_embeddings.{i}.projection.weight")) rename_keys.append((f"patch_embed{i + 1}.proj.bias", f"pvt.encoder.patch_embeddings.{i}.projection.bias")) rename_keys.append((f"patch_embed{i + 1}.norm.weight", f"pvt.encoder.patch_embeddings.{i}.layer_norm.weight")) rename_keys.append((f"patch_embed{i + 1}.norm.bias", f"pvt.encoder.patch_embeddings.{i}.layer_norm.bias")) for j in range(config.depths[i]): # Rename blocks' parameters rename_keys.append( (f"block{i + 1}.{j}.attn.q.weight", f"pvt.encoder.block.{i}.{j}.attention.self.query.weight") ) rename_keys.append( (f"block{i + 1}.{j}.attn.q.bias", f"pvt.encoder.block.{i}.{j}.attention.self.query.bias") ) rename_keys.append( (f"block{i + 1}.{j}.attn.kv.weight", f"pvt.encoder.block.{i}.{j}.attention.self.kv.weight") ) rename_keys.append((f"block{i + 1}.{j}.attn.kv.bias", f"pvt.encoder.block.{i}.{j}.attention.self.kv.bias")) if config.sequence_reduction_ratios[i] > 1: rename_keys.append( ( f"block{i + 1}.{j}.attn.norm.weight", f"pvt.encoder.block.{i}.{j}.attention.self.layer_norm.weight", ) ) rename_keys.append( (f"block{i + 1}.{j}.attn.norm.bias", f"pvt.encoder.block.{i}.{j}.attention.self.layer_norm.bias") ) rename_keys.append( ( f"block{i + 1}.{j}.attn.sr.weight", f"pvt.encoder.block.{i}.{j}.attention.self.sequence_reduction.weight", ) ) rename_keys.append( ( f"block{i + 1}.{j}.attn.sr.bias", f"pvt.encoder.block.{i}.{j}.attention.self.sequence_reduction.bias", ) ) rename_keys.append( (f"block{i + 1}.{j}.attn.proj.weight", f"pvt.encoder.block.{i}.{j}.attention.output.dense.weight") ) rename_keys.append( (f"block{i + 1}.{j}.attn.proj.bias", f"pvt.encoder.block.{i}.{j}.attention.output.dense.bias") ) rename_keys.append((f"block{i + 1}.{j}.norm1.weight", f"pvt.encoder.block.{i}.{j}.layer_norm_1.weight")) rename_keys.append((f"block{i + 1}.{j}.norm1.bias", f"pvt.encoder.block.{i}.{j}.layer_norm_1.bias")) rename_keys.append((f"block{i + 1}.{j}.norm2.weight", f"pvt.encoder.block.{i}.{j}.layer_norm_2.weight")) rename_keys.append((f"block{i + 1}.{j}.norm2.bias", f"pvt.encoder.block.{i}.{j}.layer_norm_2.bias")) 
rename_keys.append((f"block{i + 1}.{j}.mlp.fc1.weight", f"pvt.encoder.block.{i}.{j}.mlp.dense1.weight")) rename_keys.append((f"block{i + 1}.{j}.mlp.fc1.bias", f"pvt.encoder.block.{i}.{j}.mlp.dense1.bias")) rename_keys.append((f"block{i + 1}.{j}.mlp.fc2.weight", f"pvt.encoder.block.{i}.{j}.mlp.dense2.weight")) rename_keys.append((f"block{i + 1}.{j}.mlp.fc2.bias", f"pvt.encoder.block.{i}.{j}.mlp.dense2.bias")) # Rename cls token rename_keys.extend( [ ("cls_token", "pvt.encoder.patch_embeddings.3.cls_token"), ] ) # Rename norm layer and classifier layer rename_keys.extend( [ ("norm.weight", "pvt.encoder.layer_norm.weight"), ("norm.bias", "pvt.encoder.layer_norm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_k_v(state_dict, config): # for each of the encoder blocks: for i in range(config.num_encoder_blocks): for j in range(config.depths[i]): # read in weights + bias of keys and values (which is a single matrix in the original implementation) kv_weight = state_dict.pop(f"pvt.encoder.block.{i}.{j}.attention.self.kv.weight") kv_bias = state_dict.pop(f"pvt.encoder.block.{i}.{j}.attention.self.kv.bias") # next, add keys and values (in that order) to the state dict state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :] state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]] state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[ config.hidden_sizes[i] :, : ] state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :] def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_pvt_checkpoint(pvt_size, pvt_checkpoint, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our PVT structure. 
""" # define default Pvt configuration if pvt_size == "tiny": config_path = "Zetatech/pvt-tiny-224" elif pvt_size == "small": config_path = "Zetatech/pvt-small-224" elif pvt_size == "medium": config_path = "Zetatech/pvt-medium-224" elif pvt_size == "large": config_path = "Zetatech/pvt-large-224" else: raise ValueError(f"Available model's size: 'tiny', 'small', 'medium', 'large', but '{pvt_size}' was given") config = PvtConfig(name_or_path=config_path) # load original model from https://github.com/whai362/PVT state_dict = torch.load(pvt_checkpoint, map_location="cpu", weights_only=True) rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_k_v(state_dict, config) # load HuggingFace model model = PvtForImageClassification(config).eval() model.load_state_dict(state_dict) # Check outputs on an image, prepared by PVTFeatureExtractor image_processor = PvtImageProcessor(size=config.image_size) encoding = image_processor(images=prepare_img(), return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values) logits = outputs.logits.detach().cpu() if pvt_size == "tiny": expected_slice_logits = torch.tensor([-1.4192, -1.9158, -0.9702]) elif pvt_size == "small": expected_slice_logits = torch.tensor([0.4353, -0.1960, -0.2373]) elif pvt_size == "medium": expected_slice_logits = torch.tensor([-0.2914, -0.2231, 0.0321]) elif pvt_size == "large": expected_slice_logits = torch.tensor([0.3740, -0.7739, -0.4214]) else: raise ValueError(f"Available model's size: 'tiny', 'small', 'medium', 'large', but '{pvt_size}' was given") assert torch.allclose(logits[0, :3], expected_slice_logits, atol=1e-4) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model pytorch_model.bin to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pvt_size", default="tiny", type=str, help="Size of the PVT pretrained model you'd like to convert.", ) parser.add_argument( "--pvt_checkpoint", default="pvt_tiny.pth", type=str, help="Checkpoint of the PVT pretrained model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) args = parser.parse_args() convert_pvt_checkpoint(args.pvt_size, args.pvt_checkpoint, args.pytorch_dump_folder_path)
transformers/src/transformers/models/pvt/convert_pvt_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/pvt/convert_pvt_to_pytorch.py", "repo_id": "transformers", "token_count": 4513 }
459
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_qwen2_5_omni.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from dataclasses import dataclass from typing import Any, Callable, Optional, Union import numpy as np import torch import torch.nn.functional as F from torch import nn from torch.nn import Parameter from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, ModelOutput from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, check_torch_load_is_safe, logging from ...utils.deprecation import deprecate_kwarg from ...utils.hub import cached_file from ..qwen2.modeling_qwen2 import Qwen2RMSNorm from .configuration_qwen2_5_omni import ( Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniBigVGANConfig, Qwen2_5OmniConfig, Qwen2_5OmniDiTConfig, Qwen2_5OmniTalkerConfig, Qwen2_5OmniTextConfig, Qwen2_5OmniThinkerConfig, Qwen2_5OmniToken2WavConfig, Qwen2_5OmniVisionEncoderConfig, ) logger = logging.get_logger(__name__) @auto_docstring class Qwen2_5OmniPreTrainedModel(PreTrainedModel): config: Qwen2_5OmniConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Qwen2_5OmniDecoderLayer", "Qwen2_5OmniVisionBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = False _supports_attention_backend = True class Qwen2_5OmniPreTrainedModelForConditionalGeneration(Qwen2_5OmniPreTrainedModel): def _prepare_4d_causal_attention_mask_with_cache_position( self, attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. 
Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to place the 4D attention mask on. min_dtype (`float`): The minimum value representable with the dtype `dtype`. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask def get_llm_pos_ids_for_vision( self, start_idx: int, vision_idx: int, spatial_merge_size: int, t_index: list[int], grid_hs: list[int], grid_ws: list[int], ): llm_pos_ids_list = [] llm_grid_h = grid_hs[vision_idx] // spatial_merge_size llm_grid_w = grid_ws[vision_idx] // spatial_merge_size h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(len(t_index), -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(len(t_index), llm_grid_h, -1).flatten() t_index = torch.Tensor(t_index).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten().long() _llm_pos_ids = torch.stack([t_index, h_index, w_index]) llm_pos_ids_list.append(_llm_pos_ids + start_idx) # + 1 ) # 12.09 by malinhan llm_pos_ids = torch.cat(llm_pos_ids_list, dim=1) return llm_pos_ids def get_chunked_index( self, token_indices: torch.Tensor, tokens_per_chunk: int, remove_index: int ) -> list[tuple[int, int]]: """ Splits token index list into chunks based on token value ranges. Given a list of token indices, returns a list of (start, end) index tuples representing slices of the list where the token values fall within successive ranges of `t_ntoken_per_chunk`. For example, if `t_ntoken_per_chunk` is 1000, the function will create chunks such that: - the first chunk contains token values < 1000, - the second chunk contains values >= 1000 and < 2000, and so on. Parameters: token_indices (`torch.Tensor` of shape `(seq_len, )`): A monotonically increasing list of token index values. t_ntoken_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold). 
remove_index (`int`) An index id to subtract from `token_indices` before chunking Returns: `list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive) and end (exclusive) indices of a chunk in `token_indices`. """ def _iter(): i, start_idx = 0, 0 # skip bos token current_chunk = 1 while i < len(token_indices): # skip eos token if token_indices[i] - remove_index >= current_chunk * tokens_per_chunk: yield (start_idx, i) start_idx = i current_chunk += 1 i += 1 yield (start_idx, len(token_indices)) return list(_iter()) def get_rope_index( self, input_ids: Optional[torch.LongTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, use_audio_in_video: bool = False, audio_seqlens: Optional[torch.LongTensor] = None, second_per_grids: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Calculate the 3D rope index based on image and video's temporal, height and width in LLM. Explanation: Each embedding sequence contains vision embedding and text embedding or just contains text embedding. For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs. Examples: input_ids: [T T T T T], here T is for text. temporal position_ids: [0, 1, 2, 3, 4] height position_ids: [0, 1, 2, 3, 4] width position_ids: [0, 1, 2, 3, 4] For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part and 1D rotary position embedding for text part. Examples: Temporal (Time): 3 patches, representing different segments of the video in time. Height: 2 patches, dividing each frame vertically. Width: 2 patches, dividing each frame horizontally. We also have some important parameters: fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second. tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity. temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames. interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs. input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100] vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] text temporal position_ids: [101, 102, 103, 104, 105] text height position_ids: [101, 102, 103, 104, 105] text width position_ids: [101, 102, 103, 104, 105] Here we calculate the text start position_ids as the max vision position_ids plus 1. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. 
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. use_audio_in_video (`bool`, *optional*): If set to `True`, use the audio in video. audio_seqlens (`torch.LongTensor` of shape `(num_audios)`, *optional*): The length of feature shape of each audio in LLM. second_per_grids (`torch.LongTensor` of shape `(num_videos)`, *optional*): The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. Returns: position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) """ spatial_merge_size = self.spatial_merge_size image_token_id = self.config.image_token_id video_token_id = self.config.video_token_id audio_token_id = self.config.audio_token_id vision_start_token_id = self.config.vision_start_token_id audio_start_token_id = self.config.audio_start_token_id position_id_per_seconds = self.config.position_id_per_seconds seconds_per_chunk = self.config.seconds_per_chunk mrope_position_deltas = [] if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): total_input_ids = input_ids if attention_mask is None: attention_mask = torch.ones_like(total_input_ids) position_ids = torch.ones( 3, input_ids.shape[0], input_ids.shape[1], dtype=input_ids.dtype, device=input_ids.device, ) image_idx, video_idx, audio_idx = 0, 0, 0 attention_mask = attention_mask.to(total_input_ids.device) for i, input_ids in enumerate(total_input_ids): input_ids = input_ids[attention_mask[i] == 1] image_nums, video_nums, audio_nums = 0, 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1) vision_tokens = input_ids[vision_start_indices + 1] audio_nums = torch.sum(input_ids == audio_start_token_id) image_nums = (vision_tokens == image_token_id).sum() video_nums = ( (vision_tokens == audio_start_token_id).sum() if use_audio_in_video else (vision_tokens == video_token_id).sum() ) input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos, remain_audios = image_nums, video_nums, audio_nums multimodal_nums = ( image_nums + audio_nums if use_audio_in_video else image_nums + video_nums + audio_nums ) for _ in range(multimodal_nums): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if audio_token_id in input_tokens and remain_audios > 0: ed_audio = input_tokens.index(audio_token_id, st) else: ed_audio = len(input_tokens) + 1 min_ed = min(ed_image, ed_video, ed_audio) if min_ed == ed_audio: text_len = min_ed - st - 1 if text_len != 0: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 bos_len = 1 llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) st_idx = 
llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1 llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx llm_pos_ids_list.append(llm_pos_ids) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 eos_len = 1 llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) st += text_len + bos_len + audio_len + eos_len audio_idx += 1 remain_audios -= 1 elif min_ed == ed_image: text_len = min_ed - st - 1 if text_len != 0: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 bos_len = 1 llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 grid_t = image_grid_thw[image_idx][0] grid_hs = image_grid_thw[:, 1] grid_ws = image_grid_thw[:, 2] t_index = (torch.arange(grid_t) * 1 * position_id_per_seconds).long() llm_pos_ids = self.get_llm_pos_ids_for_vision( st_idx, image_idx, spatial_merge_size, t_index, grid_hs, grid_ws ) image_len = image_grid_thw[image_idx].prod() // (spatial_merge_size**2) llm_pos_ids_list.append(llm_pos_ids) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 eos_len = 1 llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) st += text_len + bos_len + image_len + eos_len image_idx += 1 remain_images -= 1 elif min_ed == ed_video and not use_audio_in_video: text_len = min_ed - st - 1 if text_len != 0: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 bos_len = 1 llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 grid_t = video_grid_thw[video_idx][0] grid_hs = video_grid_thw[:, 1] grid_ws = video_grid_thw[:, 2] t_index = ( torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds ).long() llm_pos_ids = self.get_llm_pos_ids_for_vision( st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws ) video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2) llm_pos_ids_list.append(llm_pos_ids) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 eos_len = 1 llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) st += text_len + bos_len + video_len + eos_len video_idx += 1 remain_videos -= 1 elif min_ed == ed_video and use_audio_in_video: text_len = min_ed - st - 2 if text_len != 0: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 bos_len = 1 llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1 audio_llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx grid_t = 
video_grid_thw[video_idx][0] grid_hs = video_grid_thw[:, 1] grid_ws = video_grid_thw[:, 2] t_index = ( torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds ).long() video_llm_pos_ids = self.get_llm_pos_ids_for_vision( st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws ) t_ntoken_per_chunk = int(position_id_per_seconds * seconds_per_chunk) video_chunk_indexes = self.get_chunked_index(video_llm_pos_ids[0], t_ntoken_per_chunk, st_idx) audio_chunk_indexes = self.get_chunked_index(audio_llm_pos_ids[0], t_ntoken_per_chunk, st_idx) sub_len = 0 for j in range(max(len(video_chunk_indexes), len(audio_chunk_indexes))): video_chunk_index = video_chunk_indexes[j] if j < len(video_chunk_indexes) else None audio_chunk_index = audio_chunk_indexes[j] if j < len(audio_chunk_indexes) else None if video_chunk_index is not None: sub_len += video_chunk_index[1] - video_chunk_index[0] llm_pos_ids_list.append( video_llm_pos_ids[:, video_chunk_index[0] : video_chunk_index[1]] ) if audio_chunk_index is not None: sub_len += audio_chunk_index[1] - audio_chunk_index[0] llm_pos_ids_list.append( audio_llm_pos_ids[:, audio_chunk_index[0] : audio_chunk_index[1]] ) video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 eos_len = 1 llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) st += text_len + bos_len * 2 + audio_len + video_len + eos_len * 2 audio_idx += 1 video_idx += 1 remain_videos -= 1 remain_audios -= 1 if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) mrope_position_deltas.append(llm_positions.max() + 1 - len(input_ids)) mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) return position_ids, mrope_position_deltas else: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] mrope_position_deltas = max_position_ids + 1 - torch.sum(attention_mask, dim=-1, keepdim=True) return position_ids, mrope_position_deltas ############################ # Start Thinker # ############################ @dataclass @auto_docstring( custom_intro=""" Base class for Qwen2.5OmniThinker causal language model (or autoregressive) outputs. """ ) class Qwen2_5OmniThinkerCausalLMOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class Qwen2_5OmniAudioAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: Qwen2_5OmniAudioEncoderConfig, ): super().__init__() self.embed_dim = config.d_model self.num_heads = config.encoder_attention_heads self.dropout = config.attention_dropout self.head_dim = self.embed_dim // self.num_heads self.num_key_value_groups = 1 # needed for eager attention self.config = config if (self.head_dim * self.num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {self.num_heads})." 
) self.scaling = self.head_dim**-0.5 self.attention_dropout = 0.0 self.is_decoder = False self.is_causal = False self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) def forward( self, hidden_states: torch.Tensor, cu_seqlens: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" seq_length, _ = hidden_states.size() query_states = self.q_proj(hidden_states).reshape(seq_length, self.num_heads, -1) key_states = self.k_proj(hidden_states).reshape(seq_length, self.num_heads, -1) value_states = self.v_proj(hidden_states).reshape(seq_length, self.num_heads, -1) query_states = query_states.transpose(0, 1).unsqueeze(0) key_states = key_states.transpose(0, 1).unsqueeze(0) value_states = value_states.transpose(0, 1).unsqueeze(0) max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, _ = attention_interface( self, query_states, key_states, value_states, attention_mask=attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, cu_seq_lens_q=cu_seqlens, # pass cu seq lens for FA2 cu_seq_lens_k=cu_seqlens, max_length_q=max_seqlen, max_length_k=max_seqlen, is_causal=False, **kwargs, ) attn_output = attn_output.reshape(seq_length, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output class Qwen2_5OmniAudioEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: Qwen2_5OmniAudioEncoderConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Qwen2_5OmniAudioAttention(config) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
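        Returns:
            `tuple(torch.FloatTensor)`: a one-element tuple containing the layer's output hidden states, with the
            same shape as the input `hidden_states`.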
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states = self.self_attn( hidden_states=hidden_states, cu_seqlens=cu_seqlens, attention_mask=attention_mask, **kwargs, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16: clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) return outputs class SinusoidsPositionEmbedding(nn.Module): def __init__(self, length, channels, max_timescale=10000): super().__init__() if channels % 2 != 0: raise ValueError("SinusoidsPositionEmbedding needs even channels input") log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1) inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float()) scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :] self.register_buffer( "positional_embedding", torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1), persistent=False, ) def forward(self, seqlen: int): return self.positional_embedding[:seqlen, :] @auto_docstring( custom_intro=""" Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`Qwen2_5OmniAudioEncoderLayer`]. """ ) class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniAudioEncoderConfig main_input_name = "input_features" _no_split_modules = ["Qwen2_5OmniAudioEncoderLayer"] _supports_sdpa = True def __init__(self, config: Qwen2_5OmniAudioEncoderConfig): super().__init__(config) self.dropout = config.dropout embed_dim = config.d_model self.num_mel_bins = config.num_mel_bins self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.n_window = config.n_window self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1) self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1) self.positional_embedding = SinusoidsPositionEmbedding(self.max_source_positions, embed_dim) self.audio_bos_eos_token = nn.Embedding(2, config.output_dim) self.layers = nn.ModuleList([Qwen2_5OmniAudioEncoderLayer(config) for _ in range(config.encoder_layers)]) self.ln_post = nn.LayerNorm(config.d_model) self.avg_pooler = nn.AvgPool1d(2, stride=2) self.proj = nn.Linear(config.d_model, config.output_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def get_input_embeddings(self) -> nn.Module: return self.conv1 def set_input_embeddings(self, value: nn.Module): self.conv1 = value def _prepare_attention_mask(self, inputs_tensor: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor: # Flash Attention 2 doesn't need a 4D mask and relies on `cu_seqlens/max_seqlen` # NOTE: the created attention masl only approximates the ragged FA2 attention by # allowing bidirectional attention within `cu_seqlens` blocks, and not attending between # blocks. 
Though it will not be a 100% match for FA2's `varlen` path if self.config._attn_implementation == "flash_attention_2": return None seq_length = inputs_tensor.shape[0] attention_mask = torch.full( [1, 1, seq_length, seq_length], torch.finfo(inputs_tensor.dtype).min, device=inputs_tensor.device, dtype=inputs_tensor.dtype, ) for i in range(1, len(cu_seqlens)): attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0 return attention_mask @auto_docstring def forward( self, input_features, feature_lens=None, aftercnn_lens=None, **kwargs, ): r""" feature_lens (`torch.LongTensor` of shape `(batch_size,)`): mel length aftercnn_lens (`torch.LongTensor` of shape `(batch_size,)`): mel length after cnn """ chunk_num = torch.ceil(feature_lens / (self.n_window * 2)).long() chunk_lengths = torch.tensor( [self.n_window * 2] * chunk_num.sum(), dtype=torch.long, device=feature_lens.device, ) tail_chunk_index = F.pad(chunk_num, (1, 0), value=-1).cumsum(0)[1:] chunk_lengths[tail_chunk_index] = feature_lens % (self.n_window * 2) chunk_lengths = torch.where(chunk_lengths == 0, self.n_window * 2, chunk_lengths) chunk_list = input_features.split(chunk_lengths.tolist(), dim=1) padded_feature, padded_mask, padded_mask_after_cnn = self.padded_and_mask_function( chunk_list, chunk_lengths, padding_value=0, padding_side="right" ) padded_embed = nn.functional.gelu(self.conv1(padded_feature)) * padded_mask padded_embed = nn.functional.gelu(self.conv2(padded_embed)).transpose(1, 2) padded_embed = padded_embed + self.positional_embedding.positional_embedding[ : padded_embed.shape[1], : ].unsqueeze(0).to(padded_embed.dtype) hidden_states = padded_embed[padded_mask_after_cnn] cu_seqlens = torch.cat( ( torch.zeros(1, device=padded_mask_after_cnn.device, dtype=torch.int32), padded_mask_after_cnn.sum(1).cumsum(0), ) ).to(torch.int32) attention_mask = self._prepare_attention_mask(hidden_states, cu_seqlens) for encoder_layer in self.layers: layer_outputs = encoder_layer( hidden_states, cu_seqlens=cu_seqlens, attention_mask=attention_mask, **kwargs, ) hidden_states = layer_outputs[0] hidden_states_list = hidden_states.split(aftercnn_lens.tolist(), dim=0) token_audio_list = [] for each_audio_states in hidden_states_list: each_audio_states = self.avg_pooler(each_audio_states.transpose(0, 1)).transpose_(0, 1) each_audio_states = self.ln_post(each_audio_states) each_audio_states = self.proj(each_audio_states) token_audio_list.append(each_audio_states) token_audio = torch.cat(token_audio_list, dim=0) return BaseModelOutput(last_hidden_state=token_audio) def padded_and_mask_function(self, tensor_list, tensor_len, padding_value=0, padding_side="right"): """ Pads a sequence of tensors to their maximum length on indicated `padding_side`. Then prepares a mask so that pad tokens are not attended to. 
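        Returns a tuple of the padded tensor of shape `(batch, dim, max_len)`, the padding mask of shape
        `(batch, 1, max_len)`, and a boolean mask of shape `(batch, max_len_after_cnn)` marking the positions that
        remain valid after the convolutional downsampling.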
""" max_len = tensor_len.max() dim = tensor_list[0].shape[0] padded_tensor = torch.full( size=(len(tensor_list), dim, max_len), fill_value=padding_value, dtype=self.dtype, device=tensor_list[0].device, ) batch_mask = torch.zeros( (len(tensor_len), max_len), dtype=torch.long, device=padded_tensor.device, ) for i, length in enumerate(tensor_len): batch_mask[i, :length] = 1 padded_tensor[i, :, :length] = tensor_list[i] feature_lens_after_cnn = (tensor_len - 1) // 2 + 1 max_len_after_cnn = feature_lens_after_cnn.max() batch_mask_after_cnn = torch.zeros( (len(tensor_len), max_len_after_cnn), dtype=torch.long, device=padded_tensor.device, ) for i, length in enumerate(feature_lens_after_cnn): batch_mask_after_cnn[i, :length] = 1 return ( padded_tensor, batch_mask.unsqueeze(1), batch_mask_after_cnn.bool(), ) # Ignore copy def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers and the output length of the audio encoder """ input_lengths = (input_lengths - 1) // 2 + 1 output_lengths = (input_lengths - 2) // 2 + 1 return input_lengths, output_lengths def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb_vision(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor: orig_dtype = tensor.dtype tensor = tensor.float() cos = freqs.cos() sin = freqs.sin() cos = cos.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float() sin = sin.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float() output = (tensor * cos) + (rotate_half(tensor) * sin) output = output.to(orig_dtype) return output class Qwen2_5OmniVisionAttention(nn.Module): def __init__(self, config: Qwen2_5OmniVisionEncoderConfig = None) -> None: super().__init__() self.dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.dim // self.num_heads self.q = nn.Linear(self.dim, self.dim, bias=True) self.k = nn.Linear(self.dim, self.dim, bias=True) self.v = nn.Linear(self.dim, self.dim, bias=True) self.proj = nn.Linear(self.dim, self.dim) self.scaling = self.head_dim**-0.5 self.num_key_value_groups = 1 # needed for eager attention self.config = config self.attention_dropout = 0.0 self.is_causal = False def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: seq_length = hidden_states.shape[0] query_states = self.q(hidden_states).reshape(seq_length, self.num_heads, -1) key_states = self.k(hidden_states).reshape(seq_length, self.num_heads, -1) value_states = self.v(hidden_states).reshape(seq_length, self.num_heads, -1) query_states = apply_rotary_pos_emb_vision(query_states.unsqueeze(0), rotary_pos_emb).squeeze(0) key_states = apply_rotary_pos_emb_vision(key_states.unsqueeze(0), rotary_pos_emb).squeeze(0) query_states = query_states.transpose(0, 1).unsqueeze(0) key_states = key_states.transpose(0, 1).unsqueeze(0) value_states = value_states.transpose(0, 1).unsqueeze(0) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] if self.config._attn_implementation == "flash_attention_2": # Flash Attention 2: Use cu_seqlens for variable length attention max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() attn_output, _ = attention_interface( self, query_states, key_states, value_states, attention_mask=None, 
scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, cu_seq_lens_q=cu_seqlens, cu_seq_lens_k=cu_seqlens, max_length_q=max_seqlen, max_length_k=max_seqlen, is_causal=False, **kwargs, ) else: # Other implementations: Process each chunk separately lengths = cu_seqlens[1:] - cu_seqlens[:-1] splits = [ torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states) ] attn_outputs = [ attention_interface( self, q, k, v, attention_mask=None, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, is_causal=False, **kwargs, )[0] for q, k, v in zip(*splits) ] attn_output = torch.cat(attn_outputs, dim=1) attn_output = attn_output.reshape(seq_length, -1).contiguous() attn_output = self.proj(attn_output) return attn_output class Qwen2_5OmniMLP(nn.Module): def __init__(self, config, bias: bool = False): super().__init__() self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_state): return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state)) class Qwen2_5OmniVisionBlock(GradientCheckpointingLayer): def __init__(self, config: Qwen2_5OmniVisionEncoderConfig) -> None: super().__init__() self.norm1 = Qwen2RMSNorm(config.hidden_size, eps=1e-6) self.norm2 = Qwen2RMSNorm(config.hidden_size, eps=1e-6) self.attn = Qwen2_5OmniVisionAttention(config=config) self.mlp = Qwen2_5OmniMLP(config, bias=True) def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: hidden_states = hidden_states + self.attn( self.norm1(hidden_states), cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb, **kwargs, ) hidden_states = hidden_states + self.mlp(self.norm2(hidden_states)) return hidden_states class Qwen2_5_VisionPatchEmbed(nn.Module): def __init__( self, patch_size: int = 14, temporal_patch_size: int = 2, in_channels: int = 3, embed_dim: int = 1152, ) -> None: super().__init__() self.patch_size = patch_size self.temporal_patch_size = temporal_patch_size self.in_channels = in_channels self.embed_dim = embed_dim kernel_size = [temporal_patch_size, patch_size, patch_size] self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: target_dtype = self.proj.weight.dtype hidden_states = hidden_states.view( -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size ) hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim) return hidden_states class Qwen2_5_VisionRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim: int, theta: float = 10000.0) -> None: super().__init__() inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) def forward(self, seqlen: int) -> torch.Tensor: seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype) freqs = torch.outer(seq, self.inv_freq) return freqs class Qwen2_5OmniPatchMerger(nn.Module): def __init__(self, dim: int, context_dim: 
int, spatial_merge_size: int = 2) -> None: super().__init__() self.hidden_size = context_dim * (spatial_merge_size**2) self.ln_q = Qwen2RMSNorm(context_dim, eps=1e-6) self.mlp = nn.Sequential( nn.Linear(self.hidden_size, self.hidden_size), nn.GELU(), nn.Linear(self.hidden_size, dim), ) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.mlp(self.ln_q(x).view(-1, self.hidden_size)) return x class Qwen2_5OmniVisionEncoder(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniVisionEncoderConfig _no_split_modules = ["Qwen2_5OmniVisionBlock"] def __init__(self, config: Qwen2_5OmniVisionEncoderConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.spatial_merge_size = config.spatial_merge_size self.patch_size = config.patch_size self.fullatt_block_indexes = config.fullatt_block_indexes self.window_size = config.window_size self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size self.patch_embed = Qwen2_5_VisionPatchEmbed( patch_size=config.patch_size, temporal_patch_size=config.temporal_patch_size, in_channels=config.in_channels, embed_dim=config.hidden_size, ) head_dim = config.hidden_size // config.num_heads self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2) self.blocks = nn.ModuleList([Qwen2_5OmniVisionBlock(config) for _ in range(config.depth)]) self.merger = Qwen2_5OmniPatchMerger( dim=config.out_hidden_size, context_dim=config.hidden_size, spatial_merge_size=config.spatial_merge_size, ) self.gradient_checkpointing = False def rot_pos_emb(self, grid_thw): pos_ids = [] for t, h, w in grid_thw: hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) hpos_ids = hpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) hpos_ids = hpos_ids.permute(0, 2, 1, 3) hpos_ids = hpos_ids.flatten() wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) wpos_ids = wpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) wpos_ids = wpos_ids.permute(0, 2, 1, 3) wpos_ids = wpos_ids.flatten() pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) pos_ids = torch.cat(pos_ids, dim=0) max_grid_size = grid_thw[:, 1:].max() rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) return rotary_pos_emb def get_window_index(self, grid_thw): window_index: list = [] cu_window_seqlens: list = [0] window_index_id = 0 vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size for grid_t, grid_h, grid_w in grid_thw: llm_grid_h, llm_grid_w = ( grid_h // self.spatial_merge_size, grid_w // self.spatial_merge_size, ) index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) index_padded = index_padded.reshape( grid_t, num_windows_h, vit_merger_window_size, num_windows_w, vit_merger_window_size, ) index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( grid_t, num_windows_h * num_windows_w, vit_merger_window_size, vit_merger_window_size, ) seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) index_padded = index_padded.reshape(-1) index_new = 
index_padded[index_padded != -100] window_index.append(index_new + window_index_id) cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1] cu_window_seqlens.extend(cu_seqlens_tmp.tolist()) window_index_id += (grid_t * llm_grid_h * llm_grid_w).item() window_index = torch.cat(window_index, dim=0) return window_index, cu_window_seqlens def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor: """ Args: hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`): The final hidden states of the model. grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`): The temporal, height and width of feature shape of each image in LLM. Returns: `torch.Tensor`: hidden_states. """ hidden_states = self.patch_embed(hidden_states) rotary_pos_emb = self.rot_pos_emb(grid_thw) window_index, cu_window_seqlens = self.get_window_index(grid_thw) cu_window_seqlens = torch.tensor( cu_window_seqlens, device=hidden_states.device, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, ) cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) seq_len, _ = hidden_states.size() hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) hidden_states = hidden_states[window_index, :, :] hidden_states = hidden_states.reshape(seq_len, -1) rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) rotary_pos_emb = rotary_pos_emb[window_index, :, :] rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( dim=0, # Select dtype based on the following factors: # - FA2 requires that cu_seqlens_q must have dtype int32 # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw # See https://github.com/huggingface/transformers/pull/34852 for more information dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, ) cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) # Modification here for layer_num, blk in enumerate(self.blocks): if layer_num in self.fullatt_block_indexes: cu_seqlens_now = cu_seqlens else: cu_seqlens_now = cu_window_seqlens hidden_states = blk( hidden_states, cu_seqlens=cu_seqlens_now, rotary_pos_emb=rotary_pos_emb, **kwargs, ) hidden_states = self.merger(hidden_states) reverse_indices = torch.argsort(window_index) hidden_states = hidden_states[reverse_indices, :] return hidden_states class Qwen2_5OmniRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: Qwen2_5OmniThinkerConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): # In contrast to other models, Qwen2_5Omni has different position ids for the grids # So we expand the inv_freq to shape (3, ...) inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1) position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions) device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1): """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/). Explanation: Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately. Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding. For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal, height and width) of text embedding is always the same, so the text embedding rotary position embedding has no difference with modern LLMs. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. mrope_section(`List(int)`): Multimodal rope section is for channel dimension of temporal, height and width in rope calculation. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ mrope_section = mrope_section * 2 cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class Qwen2_5OmniAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. 
Modified to use sliding window attention: Longformer and "Generating Long Sequences with Sparse Transformers". """ def __init__(self, config: Qwen2_5OmniConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will " "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads) self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.is_causal = True self.attention_dropout = config.attention_dropout self.rope_scaling = config.rope_scaling self.scaling = self.head_dim**-0.5 self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None self.rotary_emb = Qwen2_5OmniRotaryEmbedding(config=config) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_multimodal_rotary_pos_emb( query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] ) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, position_ids=position_ids, # pass positions for FA2 **kwargs, ) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Qwen2MLP(nn.Module): def 
__init__(self, config, bias: bool = False): super().__init__() self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, hidden_state): return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state)) class Qwen2_5OmniDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Qwen2_5OmniTextConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size if config.use_sliding_window and config._attn_implementation != "flash_attention_2": logger.warning_once( f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; " "unexpected results may be encountered." ) self.self_attn = Qwen2_5OmniAttention(config, layer_idx) self.mlp = Qwen2MLP(config) self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.attention_type = config.layer_types[layer_idx] @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. 
kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs @auto_docstring class Qwen2_5OmniThinkerTextModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniTextConfig _no_split_modules = ["Qwen2_5OmniDecoderLayer"] def __init__(self, config: Qwen2_5OmniTextConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Qwen2_5OmniDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Qwen2_5OmniRotaryEmbedding(config=config) self.has_sliding_layers = "sliding_attention" in self.config.layer_types self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # torch.jit.trace() doesn't support cache objects in the output if use_cache and past_key_values is None and not torch.jit.is_tracing(): past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) # the hard coded `3` is for temporal, height and width. if position_ids is None: position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1) elif position_ids.ndim == 2: position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) # NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions # where each dim indicates visual spatial positions for temporal/height/width grids. # There are two scenarios when FA2-like packed masking might be activated. # 1. User specifically passed packed `position_ids` and no attention mask. # In this case we expect the user to create correct position ids for all 3 grids # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len] # 2. User runs forward with no attention mask and no position ids. In this case, position ids # are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are # prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass # text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation` if position_ids.ndim == 3 and position_ids.shape[0] == 4: text_position_ids = position_ids[0] position_ids = position_ids[1:] else: text_position_ids = position_ids[0] # It may already have been prepared by e.g. 
`generate` if not isinstance(causal_mask_mapping := attention_mask, dict): # Prepare mask arguments mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": text_position_ids, } # Create the masks causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), } # The sliding window alternating layers are not always activated depending on the config if self.has_sliding_layers: causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_ids=text_position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @auto_docstring( custom_intro=""" The Qwen2.5OmniThinker model which consists of a audio backbone and a language model. """ ) class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin): config: Qwen2_5OmniThinkerConfig base_model_prefix = "thinker" _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"] _no_split_modules = ["Qwen2_5OmniAudioEncoder", "Qwen2_5OmniVisionEncoder"] def __init__(self, config: Qwen2_5OmniThinkerConfig): super().__init__(config) self.audio_tower = Qwen2_5OmniAudioEncoder._from_config(config.audio_config) self.visual = Qwen2_5OmniVisionEncoder._from_config(config.vision_config) self.vocab_size = config.text_config.vocab_size self.model = Qwen2_5OmniThinkerTextModel._from_config(config.text_config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 self.spatial_merge_size = config.vision_config.spatial_merge_size self.rope_deltas = None self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def get_video_features( self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None ): """ Encodes videos into continuous embeddings that can be forwarded to the language model. 
Args: pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input videos. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. """ pixel_values_videos = pixel_values_videos.type(self.visual.dtype) video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) return video_embeds def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None): """ Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. """ pixel_values = pixel_values.type(self.visual.dtype) image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) return image_embeds def get_audio_features( self, input_features: torch.FloatTensor, feature_attention_mask: Optional[torch.LongTensor] = None, audio_feature_lengths: Optional[torch.LongTensor] = None, ): """ Encodes audios into continuous embeddings that can be forwarded to the language model. Args: input_features (`torch.FloatTensor`): The tensors corresponding to the input audios. feature_attention_mask (`torch.LongTensor`, *optional*): Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*): The length of feature shape of each audio in LLM. """ if feature_attention_mask is not None: audio_feature_lengths = torch.sum(feature_attention_mask, dim=1) input_features = input_features.permute(0, 2, 1)[feature_attention_mask.bool()].permute(1, 0) else: audio_feature_lengths = None audio_feat_lengths, audio_output_lengths = self.audio_tower._get_feat_extract_output_lengths( audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1) ) feature_lens = audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1) audio_outputs = self.audio_tower( input_features, feature_lens=feature_lens, aftercnn_lens=audio_feat_lengths, ) audio_features = audio_outputs.last_hidden_state if audio_features.shape[0] != sum(audio_output_lengths.tolist()): raise ValueError("length of audio_features should match audio_output_lengths") return audio_features def get_placeholder_mask( self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor = None, video_features: torch.FloatTensor = None, ): """ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. 
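The expanded masks returned here are consumed with `masked_scatter` in `forward`. A minimal sketch of that pattern, with toy, assumed shapes (not tied to any real checkpoint):

```python
>>> import torch
>>> # hypothetical sequence of length 5 with 2 image placeholder positions and hidden size 4
>>> inputs_embeds = torch.zeros(1, 5, 4)
>>> special_image_mask = torch.tensor([[False, True, True, False, False]])
>>> image_features = torch.ones(2, 4)
>>> expanded_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds)
>>> inputs_embeds.masked_scatter(expanded_mask, image_features).shape
torch.Size([1, 5, 4])
```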
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) special_video_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_video_mask = special_video_mask.all(-1) special_audio_mask = ( inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device) ) ).all(-1) else: special_image_mask = input_ids == self.config.image_token_id special_video_mask = input_ids == self.config.video_token_id special_audio_mask = input_ids == self.config.audio_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel(): raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}" ) n_video_tokens = special_video_mask.sum() special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel(): raise ValueError( f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" ) special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) return special_image_mask, special_video_mask, special_audio_mask @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, input_features: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, feature_attention_mask: Optional[torch.Tensor] = None, audio_feature_lengths: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, use_audio_in_video: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, video_second_per_grid: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Qwen2_5OmniThinkerCausalLMOutputWithPast]: r""" image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. feature_attention_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`, *optional*): Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*): The length of feature shape of each audio in LLM. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_audio_in_video (`bool`, *optional*): Whether or not to use the audio track in the video; should be the same as the parameter in `process_audio_info`. video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*): Number of seconds per grid for each video, used for temporal feature mapping. Example: ```python >>> from io import BytesIO >>> from urllib.request import urlopen >>> import librosa >>> from qwen_vl_utils import process_vision_info >>> from transformers import Qwen2_5OmniProcessor, Qwen2_5OmniThinkerForConditionalGeneration >>> thinker = Qwen2_5OmniThinkerForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-Omni-7B") >>> processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B") >>> conversations = [ >>> {'role': 'system', 'content': 'You are a helpful voice chat bot, and please respond to me in a casual conversation manner using random voice.'}, >>> {"role": "user", "content": [ >>> {"type": "image", "image_url": "https://www.ilankelman.org/stopsigns/australia.jpg"}, >>> {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"}, >>> ]}, >>> ] >>> text = processor.apply_chat_template(conversations, add_generation_prompt=True, tokenize=False) >>> audios = [ librosa.load(BytesIO(urlopen( conversations[1]['content'][1]['audio_url'] ).read()), sr=processor.feature_extractor.sampling_rate) ] >>> images, videos = process_vision_info(conversations) >>> inputs = processor(text=text, audios=audios, images=images, videos=videos, return_tensors="pt", padding=True) >>> # Generate >>> inputs['use_audio_in_video'] = True  # or False >>> generation = thinker.generate(**inputs, max_new_tokens=2048) >>> generate_ids = generation[:, inputs.input_ids.size(1):] >>> response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is None: # 1. Extract the input embeddings inputs_embeds = self.get_input_embeddings()(input_ids) # 2. 
Merge text , audios , image and video if input_features is not None: audio_features = self.get_audio_features( input_features, feature_attention_mask=feature_attention_mask, audio_feature_lengths=audio_feature_lengths, ) audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype) _, _, audio_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(audio_mask, audio_features) if pixel_values is not None: image_embeds = self.get_image_features(pixel_values, image_grid_thw) image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) image_mask, _, _ = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds ) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if pixel_values_videos is not None: video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw) video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) _, video_mask, _ = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds ) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) if feature_attention_mask is not None: audio_feature_lengths = torch.sum(feature_attention_mask, dim=1) else: audio_feature_lengths = None if attention_mask is not None and position_ids is None: if ( cache_position is None or (cache_position is not None and cache_position[0] == 0) or self.rope_deltas is None ): delta0 = (1 - attention_mask).sum(dim=-1).unsqueeze(1) position_ids, rope_deltas = self.get_rope_index( input_ids, image_grid_thw, video_grid_thw, attention_mask, use_audio_in_video, audio_feature_lengths, video_second_per_grid, ) rope_deltas = rope_deltas - delta0 self.rope_deltas = rope_deltas else: batch_size, seq_length = input_ids.shape delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0 position_ids = torch.arange(seq_length, device=input_ids.device) position_ids = position_ids.view(1, -1).expand(batch_size, -1) position_ids = position_ids.add(delta) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) outputs = self.model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.get_text_config().vocab_size ) if not return_dict: output = (logits,) + outputs return (loss,) + output if loss is not None else output return Qwen2_5OmniThinkerCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=self.rope_deltas, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, pixel_values_videos=None, image_grid_thw=None, video_grid_thw=None, input_features=None, feature_attention_mask=None, use_audio_in_video=False, video_second_per_grid=None, **kwargs, ): model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, 
cache_position=cache_position, position_ids=position_ids, use_cache=use_cache, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, input_features=input_features, feature_attention_mask=feature_attention_mask, use_audio_in_video=use_audio_in_video, video_second_per_grid=video_second_per_grid, **kwargs, ) model_inputs["position_ids"] = None if cache_position[0] != 0: model_inputs["pixel_values"] = None model_inputs["pixel_values_videos"] = None model_inputs["input_features"] = None return model_inputs ############################ # Start Talker # ############################ @dataclass @auto_docstring( custom_intro=""" Base class for Qwen2.5OmniTalker causal language model (or autoregressive) outputs. """ ) class Qwen2_5OmniTalkerCausalLMOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Hidden states from the thinker model that are used as input for the talker model. These represent the encoded response that the talker model will use to generate speech tokens. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None thinker_reply_part: torch.FloatTensor = None @auto_docstring class Qwen2_5OmniTalkerModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniTalkerConfig _no_split_modules = ["Qwen2_5OmniTalkerDecoderLayer"] def __init__(self, config: Qwen2_5OmniTalkerConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.embedding_size, self.padding_idx) self.layers = nn.ModuleList( [Qwen2_5OmniDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Qwen2_5OmniRotaryEmbedding(config=config) self.has_sliding_layers = "sliding_attention" in self.config.layer_types self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # torch.jit.trace() doesn't support cache objects in the output if use_cache and past_key_values is None and not torch.jit.is_tracing(): past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) # the hard coded `3` is for temporal, height and width. if position_ids is None: position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1) elif position_ids.ndim == 2: position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) # NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions # where each dim indicates visual spatial positions for temporal/height/width grids. # There are two scenarios when FA2-like packed masking might be activated. # 1. 
User specifically passed packed `position_ids` and no attention mask. # In this case we expect the useer to create correct position ids for all 3 grids # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len] # 2. User runs forward with no attention mask and no position ids. In this case, position ids # are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are # prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass # text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation` if position_ids.ndim == 3 and position_ids.shape[0] == 4: text_position_ids = position_ids[0] position_ids = position_ids[1:] else: text_position_ids = position_ids[0] # It may already have been prepared by e.g. `generate` if not isinstance(causal_mask_mapping := attention_mask, dict): # Prepare mask arguments mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": text_position_ids, } # Create the masks causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), } # The sliding window alternating layers are not always activated depending on the config if self.has_sliding_layers: causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_ids=text_position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin): config: Qwen2_5OmniTalkerConfig base_model_prefix = "talker" def __init__(self, config: Qwen2_5OmniTalkerConfig): super().__init__(config) self.thinker_to_talker_proj = nn.Linear(config.embedding_size, config.hidden_size) self.model = Qwen2_5OmniTalkerModel(config) self.codebook_size = config.vocab_size self.codec_head = nn.Linear(config.hidden_size, self.codebook_size, bias=False) self.codec_bos_token = config.tts_codec_start_token_id self.codec_eos_token = config.tts_codec_end_token_id self.codec_pad_token = config.tts_codec_pad_token_id self.codec_mask_token = config.tts_codec_mask_token_id self.text_bos_token = config.tts_text_start_token_id self.text_eos_token = config.tts_text_end_token_id 
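# NOTE (descriptive comment): the codec_* ids above delimit and pad the generated speech-codec token stream, while the text_* ids play the same role for the text stream that conditions the talker.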
self.text_pad_token = config.tts_text_pad_token_id self.spatial_merge_size = self.config.spatial_merge_size self.rope_deltas = None self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) @auto_docstring def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, thinker_reply_part: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, input_text_ids: Optional[torch.LongTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, use_audio_in_video: Optional[bool] = None, audio_feature_lengths: Optional[torch.LongTensor] = None, video_second_per_grid: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, Qwen2_5OmniTalkerCausalLMOutputWithPast]: r""" thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Hidden states from the thinker model's output that represent the text reply part to be processed. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. input_text_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Input token IDs for text-only content, used for position calculation in multimodal contexts. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. use_audio_in_video (`bool`, *optional*): Whether or not use audio track in video, should same as the parameter in `process_audio_info`. audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*): The length of feature shape of each audio in LLM. video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*): Number of seconds per grid for each video, used for temporal feature mapping. Example: ```python >>> from io import BytesIO >>> from urllib.request import urlopen >>> import librosa >>> from transformers import AutoProcessor, Qwen2_5OmniTalkerForConditionalGeneration >>> model = Qwen2_5OmniTalkerForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B") >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B") >>> prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>Generate the caption in English:" >>> url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3" >>> audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=self.processor.feature_extractor.sampling_rate) >>> inputs = processor(text=prompt, audios=audio, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs, max_length=30) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Generate the caption in English: Glass is breaking." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if attention_mask is not None and position_ids is None: if ( cache_position is None or (cache_position is not None and cache_position[0] == 0) or self.rope_deltas is None ): position_ids, rope_deltas = self.get_rope_index( input_text_ids, image_grid_thw, video_grid_thw, attention_mask, use_audio_in_video, audio_feature_lengths, video_second_per_grid, ) inputs_embeds[:, -1, :] += self.get_input_embeddings()( torch.tensor([self.codec_bos_token], dtype=torch.long, device=inputs_embeds.device) ) inputs_embeds[:, -2, :] += self.get_input_embeddings()( torch.tensor([self.codec_pad_token], dtype=torch.long, device=inputs_embeds.device) ) self.rope_deltas = rope_deltas else: batch_size, seq_length = input_ids.shape delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0 position_ids = torch.arange(seq_length, device=input_ids.device) position_ids = position_ids.view(1, -1).expand(batch_size, -1) position_ids = position_ids.add(delta) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) if inputs_embeds is None: # 1. Inference tokens after second token codec_embeds = self.get_input_embeddings()(input_ids) inputs_embeds = codec_embeds + thinker_reply_part[:, :1, :] if thinker_reply_part.shape[1] > 1: thinker_reply_part = thinker_reply_part[:, 1:, :] talker_lm_input = self.thinker_to_talker_proj(inputs_embeds) if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) outputs = self.model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=talker_lm_input, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.codec_head(hidden_states) logits = logits.float() loss = None if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Qwen2_5OmniTalkerCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=hidden_states, attentions=outputs.attentions, rope_deltas=self.rope_deltas, thinker_reply_part=thinker_reply_part, ) def _get_initial_cache_position(self, seq_length, device, model_kwargs): # Talker needs to calculate cache_position with input_ids, so pop inputs_embeds temporarily inputs_embeds = model_kwargs.pop("inputs_embeds") model_kwargs = super()._get_initial_cache_position(seq_length, device, model_kwargs) model_kwargs["inputs_embeds"] = inputs_embeds return model_kwargs # prepare inputs for talker lm generation def prepare_inputs_for_generation( self, input_ids, input_text_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, thinker_reply_part=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, pixel_values_videos=None, image_grid_thw=None, video_grid_thw=None, input_audio_features=None, audio_feature_attention_mask=None, audio_feature_lengths=None, use_audio_in_video=False, video_second_per_grid=None, **kwargs, ): model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values, attention_mask, inputs_embeds, cache_position, use_cache=use_cache, thinker_reply_part=thinker_reply_part, 
input_text_ids=input_text_ids, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, use_audio_in_video=use_audio_in_video, audio_feature_lengths=audio_feature_lengths, video_second_per_grid=video_second_per_grid, **kwargs, ) model_inputs["position_ids"] = None return model_inputs def _update_model_kwargs_for_generation( self, outputs: ModelOutput, model_kwargs: dict[str, Any], is_encoder_decoder: bool = False, num_new_tokens: int = 1, ) -> dict[str, Any]: model_kwargs = super()._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder, num_new_tokens ) if getattr(outputs, "thinker_reply_part", None) is not None: model_kwargs["thinker_reply_part"] = outputs.thinker_reply_part return model_kwargs ############################ # Start Token2Wav # ############################ # Using custom RoPE, will use LlamaRotaryEmbedding next version class Qwen2_5OmniDiTRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim, base=10000): super().__init__() inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer("inv_freq", inv_freq) def forward(self, x): batch_size, seq_len = x.shape[0], x.shape[1] t = torch.arange(seq_len, device=x.device) device_type = x.device.type device_type = device_type if device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = t.unsqueeze(1).float() @ self.inv_freq.unsqueeze(0).float() freqs = torch.stack((freqs, freqs), dim=-1) freqs = freqs.reshape(*freqs.shape[:-2], -1) freqs = freqs.repeat(batch_size, *([1] * freqs.dim())) cos = freqs.cos() sin = freqs.sin() return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class TimeDelayNetBlock(nn.Module): def __init__( self, in_channels, out_channels, kernel_size, dilation, ): super().__init__() self.conv = nn.Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, dilation=dilation, padding="same", padding_mode="reflect", ) self.activation = nn.ReLU() def forward(self, hidden_states: torch.Tensor): return self.activation(self.conv(hidden_states)) class Res2NetBlock(torch.nn.Module): def __init__(self, in_channels, out_channels, scale=8, kernel_size=3, dilation=1): super().__init__() in_channel = in_channels // scale hidden_channel = out_channels // scale self.blocks = nn.ModuleList( [ TimeDelayNetBlock( in_channel, hidden_channel, kernel_size=kernel_size, dilation=dilation, ) for i in range(scale - 1) ] ) self.scale = scale def forward(self, hidden_states): outputs = [] for i, hidden_part in enumerate(torch.chunk(hidden_states, self.scale, dim=1)): if i == 0: output_part = hidden_part elif i == 1: output_part = self.blocks[i - 1](hidden_part) else: output_part = self.blocks[i - 1](hidden_part + output_part) outputs.append(output_part) output = torch.cat(outputs, dim=1) return output class SqueezeExcitationBlock(nn.Module): def __init__(self, in_channels, se_channels, out_channels): super().__init__() self.conv1 = nn.Conv1d( in_channels=in_channels, out_channels=se_channels, kernel_size=1, padding="same", padding_mode="reflect", ) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv1d( in_channels=se_channels, out_channels=out_channels, kernel_size=1, padding="same", padding_mode="reflect", ) self.sigmoid = nn.Sigmoid() def forward(self, hidden_states): hidden_states_mean = hidden_states.mean(dim=2, keepdim=True) hidden_states_mean = self.relu(self.conv1(hidden_states_mean)) hidden_states_mean = self.sigmoid(self.conv2(hidden_states_mean)) return 
hidden_states * hidden_states_mean class AttentiveStatisticsPooling(nn.Module): """This class implements an attentive statistic pooling layer for each channel. It returns the concatenated mean and std of the input tensor. """ def __init__(self, channels, attention_channels=128): super().__init__() self.eps = 1e-12 self.tdnn = TimeDelayNetBlock(channels * 3, attention_channels, 1, 1) self.tanh = nn.Tanh() self.conv = nn.Conv1d( in_channels=attention_channels, out_channels=channels, kernel_size=1, padding="same", padding_mode="reflect", ) def _length_to_mask(self, length, max_len=None, dtype=None, device=None): """Creates a binary mask for each sequence. Reference: https://discuss.pytorch.org/t/how-to-generate-variable-length-mask/23397/3 Arguments --------- length : torch.LongTensor Containing the length of each sequence in the batch. Must be 1D. max_len : int Max length for the mask, also the size of the second dimension. dtype : torch.dtype, default: None The dtype of the generated mask. device: torch.device, default: None The device to put the mask variable. Returns ------- mask : tensor The binary mask. """ if max_len is None: max_len = length.max().long().item() # using arange to generate mask mask = torch.arange(max_len, device=length.device, dtype=length.dtype).expand( len(length), max_len ) < length.unsqueeze(1) mask = torch.as_tensor(mask, dtype=dtype, device=device) return mask def _compute_statistics(self, x, m, dim=2): mean = (m * x).sum(dim) std = torch.sqrt((m * (x - mean.unsqueeze(dim)).pow(2)).sum(dim).clamp(self.eps)) return mean, std def forward(self, hidden_states): seq_length = hidden_states.shape[-1] lengths = torch.ones(hidden_states.shape[0], device=hidden_states.device) # Make binary mask of shape [N, 1, L] mask = self._length_to_mask( lengths * seq_length, max_len=seq_length, dtype=hidden_states.dtype, device=hidden_states.device ) mask = mask.unsqueeze(1) # Expand the temporal context of the pooling layer by allowing the # self-attention to look at global properties of the utterance. total = mask.sum(dim=2, keepdim=True) mean, std = self._compute_statistics(hidden_states, mask / total) mean = mean.unsqueeze(2).repeat(1, 1, seq_length) std = std.unsqueeze(2).repeat(1, 1, seq_length) attention = torch.cat([hidden_states, mean, std], dim=1) # Apply layers attention = self.conv(self.tanh(self.tdnn(attention))) # Filter out zero-paddings attention = attention.masked_fill(mask == 0, float("-inf")) attention = F.softmax(attention, dim=2) mean, std = self._compute_statistics(hidden_states, attention) # Append mean and std of the batch pooled_stats = torch.cat((mean, std), dim=1) pooled_stats = pooled_stats.unsqueeze(2) return pooled_stats class SqueezeExcitationRes2NetBlock(nn.Module): """An implementation of building block in ECAPA-TDNN, i.e., TDNN-Res2Net-TDNN-SqueezeExcitationBlock. 
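A shape-preserving toy example (sizes assumed for illustration; `in_channels` must equal `out_channels` for the residual sum, and `res2net_scale` must divide the channel count):

```python
>>> block = SqueezeExcitationRes2NetBlock(in_channels=64, out_channels=64, res2net_scale=8)
>>> hidden = torch.randn(2, 64, 50)  # (batch, channels, time)
>>> block(hidden).shape
torch.Size([2, 64, 50])
```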
""" def __init__( self, in_channels, out_channels, res2net_scale=8, se_channels=128, kernel_size=1, dilation=1, ): super().__init__() self.out_channels = out_channels self.tdnn1 = TimeDelayNetBlock( in_channels, out_channels, kernel_size=1, dilation=1, ) self.res2net_block = Res2NetBlock(out_channels, out_channels, res2net_scale, kernel_size, dilation) self.tdnn2 = TimeDelayNetBlock( out_channels, out_channels, kernel_size=1, dilation=1, ) self.se_block = SqueezeExcitationBlock(out_channels, se_channels, out_channels) def forward(self, hidden_state): residual = hidden_state hidden_state = self.tdnn1(hidden_state) hidden_state = self.res2net_block(hidden_state) hidden_state = self.tdnn2(hidden_state) hidden_state = self.se_block(hidden_state) return hidden_state + residual class ECAPA_TimeDelayNet(torch.nn.Module): """An implementation of the speaker embedding model in a paper. "ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification" (https://huggingface.co/papers/2005.07143). """ def __init__(self, config: Qwen2_5OmniDiTConfig): super().__init__() if len(config.enc_channels) != len(config.enc_kernel_sizes) or len(config.enc_channels) != len( config.enc_dilations ): raise ValueError("enc_channels, enc_kernel_sizes and enc_dilations should have same length") self.channels = config.enc_channels self.blocks = nn.ModuleList() # The initial TDNN layer self.blocks.append( TimeDelayNetBlock( config.mel_dim, config.enc_channels[0], config.enc_kernel_sizes[0], config.enc_dilations[0], ) ) # SE-Res2Net layers for i in range(1, len(config.enc_channels) - 1): self.blocks.append( SqueezeExcitationRes2NetBlock( config.enc_channels[i - 1], config.enc_channels[i], res2net_scale=config.enc_res2net_scale, se_channels=config.enc_se_channels, kernel_size=config.enc_kernel_sizes[i], dilation=config.enc_dilations[i], ) ) # Multi-layer feature aggregation self.mfa = TimeDelayNetBlock( config.enc_channels[-1], config.enc_channels[-1], config.enc_kernel_sizes[-1], config.enc_dilations[-1], ) # Attentive Statistical Pooling self.asp = AttentiveStatisticsPooling( config.enc_channels[-1], attention_channels=config.enc_attention_channels, ) # Final linear transformation self.fc = nn.Conv1d( in_channels=config.enc_channels[-1] * 2, out_channels=config.enc_dim, kernel_size=1, padding="same", padding_mode="reflect", ) def forward(self, hidden_states): # Minimize transpose for efficiency hidden_states = hidden_states.transpose(1, 2) hidden_states_list = [] for layer in self.blocks: hidden_states = layer(hidden_states) hidden_states_list.append(hidden_states) # Multi-layer feature aggregation hidden_states = torch.cat(hidden_states_list[1:], dim=1) hidden_states = self.mfa(hidden_states) # Attentive Statistical Pooling hidden_states = self.asp(hidden_states) # Final linear transformation hidden_states = self.fc(hidden_states) hidden_states = hidden_states.squeeze(-1) return hidden_states class DiTInputEmbedding(nn.Module): def __init__(self, config: Qwen2_5OmniDiTConfig): super().__init__() self.proj = nn.Linear( config.mel_dim + config.enc_dim + config.enc_emb_dim + config.emb_dim, config.hidden_size, ) self.spk_encoder = ECAPA_TimeDelayNet(config) def forward( self, hidden_states: torch.Tensor, speaker_embedding: torch.Tensor, condition_vector: torch.Tensor, code_embed: torch.Tensor, drop_audio_cond: Optional[bool] = False, code_embed_uncond: Optional[bool] = None, apply_cfg: Optional[bool] = True, ): if apply_cfg: hidden_states = torch.cat([hidden_states, hidden_states], 
dim=0) speaker_embedding = torch.cat([speaker_embedding, torch.zeros_like(speaker_embedding)], dim=0) condition_vector = torch.cat([condition_vector, torch.zeros_like(condition_vector)], dim=0) code_embed = torch.cat([code_embed, code_embed_uncond], dim=0) elif drop_audio_cond: # cfg for cond audio condition_vector = torch.zeros_like(condition_vector) speaker_embedding = torch.zeros_like(speaker_embedding) condition_vector = self.spk_encoder(condition_vector).unsqueeze(1).repeat(1, hidden_states.size(1), 1) hidden_states = self.proj(torch.cat((hidden_states, condition_vector, code_embed, speaker_embedding), dim=-1)) return hidden_states # Transformer backbone using DiT blocks class DiTCodecEmbedding(nn.Module): def __init__(self, codec_num_embeds, codec_dim, repeats): super().__init__() self.repeats = repeats self.codec_embed = nn.Embedding(codec_num_embeds + 1, codec_dim) def forward(self, code, drop_code=False): if drop_code: code = torch.zeros_like(code) code_embed = self.codec_embed(code) code_embed = torch.repeat_interleave(code_embed, repeats=self.repeats, dim=1) return code_embed # AdaLayerNormZero # return with modulated x for attn input, and params for later mlp modulation class Qwen2_5_OmniAdaLayerNormZero(nn.Module): def __init__(self, dim): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(dim, dim * 6) self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) def forward(self, hidden_states, emb=None): emb = self.linear(self.silu(emb)) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.chunk(emb, 6, dim=1) hidden_states = self.norm(hidden_states) * (1 + scale_msa[:, None]) + shift_msa[:, None] return hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp # AdaLayerNormZero for final layer # return only with modulated x for attn input, cuz no more mlp modulation class Qwen2_5_OmniAdaLayerNormZero_Final(nn.Module): def __init__(self, dim): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(dim, dim * 2) self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) def forward(self, hidden_states, emb): emb = self.linear(self.silu(emb)) scale, shift = torch.chunk(emb, 2, dim=1) hidden_states = self.norm(hidden_states) * (1 + scale)[:, None, :] + shift[:, None, :] return hidden_states # FeedForward class DiTMLP(nn.Module): def __init__(self, dim, mult=4, dropout=0.0): super().__init__() inner_dim = int(dim * mult) self.ff = nn.ModuleList( [ nn.Linear(dim, inner_dim), nn.GELU(approximate="tanh"), nn.Dropout(dropout), nn.Linear(inner_dim, dim), ] ) def forward(self, hidden_states): for layer in self.ff: hidden_states = layer(hidden_states) return hidden_states # Modified from Llama with a different rotate function, will fixed in next release def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. 
Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ def rotate_half_codec(x): # x = rearrange(x, "... (d r) -> ... d r", r=2) x = x.reshape(*x.shape[:-1], -1, 2) x1, x2 = x.unbind(dim=-1) x = torch.stack((-x2, x1), dim=-1) return x.reshape(*x.shape[:-2], -1) cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half_codec(q) * sin) k_embed = (k * cos) + (rotate_half_codec(k) * sin) return q_embed, k_embed class DiTAttention(nn.Module): def __init__(self, config: Qwen2_5OmniDiTConfig): super().__init__() self.config = config self.dim = config.hidden_size self.heads = config.num_attention_heads self.inner_dim = config.head_dim * config.num_attention_heads self.dropout = config.dropout self.is_causal = False self.to_q = nn.Linear(config.hidden_size, self.inner_dim) self.to_k = nn.Linear(config.hidden_size, self.inner_dim) self.to_v = nn.Linear(config.hidden_size, self.inner_dim) self.to_out = nn.ModuleList([nn.Linear(self.inner_dim, config.hidden_size), nn.Dropout(config.dropout)]) def forward( self, hidden_states, # noised input x position_embeddings=None, # rotary position embedding for x attention_mask=None, ) -> torch.Tensor: batch_size = hidden_states.shape[0] # `sample` projections. query = self.to_q(hidden_states) key = self.to_k(hidden_states) value = self.to_v(hidden_states) # attention inner_dim = key.shape[-1] head_dim = inner_dim // self.heads query = query.view(batch_size, -1, self.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, self.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, self.heads, head_dim).transpose(1, 2) # apply rotary position embedding # Due to training process, only first head is applied with RoPE, will be fixed at next release cos, sin = position_embeddings query[:, :1], key[:, :1] = apply_rotary_pos_emb(query[:, :1], key[:, :1], cos, sin) attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attention_weights, _ = attention_interface( self, query, key, value, attention_mask=attention_mask, is_causal=False, ) # mask. e.g. 
inference got a batch with different target durations, mask out the padding attention_weights = attention_weights.reshape(batch_size, -1, self.heads * head_dim) attention_weights = attention_weights.to(query.dtype) # linear proj attention_output = self.to_out[0](attention_weights) attention_output = self.to_out[1](attention_output) return attention_output # time step conditioning embedding class SinusPositionEmbedding(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, hidden_states, scale=1000): device = hidden_states.device half_dim = self.dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb) emb = scale * hidden_states.unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat((emb.sin(), emb.cos()), dim=-1) return emb.type_as(hidden_states) class DiTTimestepEmbedding(nn.Module): def __init__(self, dim, freq_embed_dim=256): super().__init__() self.time_embed = SinusPositionEmbedding(freq_embed_dim) self.time_mlp = nn.ModuleList([nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim)]) def forward(self, timestep): # noqa: F821 time_hidden = self.time_embed(timestep) time_hidden = time_hidden.to(timestep.dtype) for layer in self.time_mlp: time_hidden = layer(time_hidden) # b d return time_hidden class DiTDecoderLayer(nn.Module): def __init__(self, config: Qwen2_5OmniDiTConfig, look_ahead_block=0, look_backward_block=0): super().__init__() self.attn_norm = Qwen2_5_OmniAdaLayerNormZero(config.hidden_size) self.attn = DiTAttention(config) self.look_ahead_block = look_ahead_block self.look_backward_block = look_backward_block self.ff_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False, eps=1e-6) self.ff = DiTMLP(dim=config.hidden_size, mult=config.ff_mult, dropout=config.dropout) def forward( self, hidden_states, timestep, position_embeddings=None, block_diff=None ): # x: noised input, t: time embedding # pre-norm & modulation for attention input norm, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.attn_norm(hidden_states, emb=timestep) # attention attn_output = self.attn( hidden_states=norm, position_embeddings=position_embeddings, attention_mask=(block_diff >= -float(self.look_backward_block)) & (block_diff <= float(self.look_ahead_block)), ) # process attention output for input x hidden_states = hidden_states + gate_msa.unsqueeze(1) * attn_output norm = self.ff_norm(hidden_states) * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm) hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output return hidden_states class SnakeBeta(nn.Module): """ A modified Snake function which uses separate parameters for the magnitude of the periodic components Shape: - Input: (B, C, T) - Output: (B, C, T), same shape as the input Parameters: - alpha - trainable parameter that controls frequency - beta - trainable parameter that controls magnitude References: - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: https://huggingface.co/papers/2006.08195 """ def __init__(self, in_features, alpha=1.0): super().__init__() self.in_features = in_features # initialize alpha self.alpha = Parameter(torch.zeros(in_features) * alpha) self.beta = Parameter(torch.zeros(in_features) * alpha) self.no_div_by_zero = 0.000000001 def forward(self, hidden_states): """ Forward pass of the function. Applies the function to the input elementwise. 
SnakeBeta ∶= x + 1/b * sin^2 (xa) """ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] beta = self.beta.unsqueeze(0).unsqueeze(-1) alpha = torch.exp(alpha) beta = torch.exp(beta) hidden_states = hidden_states + (1.0 / (beta + self.no_div_by_zero)) * torch.pow( torch.sin(hidden_states * alpha), 2 ) return hidden_states def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): """Generates a 1D Kaiser-windowed sinc filter. Args: cutoff (float): Normalized cutoff frequency (0 to 0.5). half_width (float): Transition bandwidth. kernel_size (int): Number of filter taps. Returns: torch.Tensor: A tensor of shape (1, 1, kernel_size) representing the filter. """ is_even = kernel_size % 2 == 0 half_size = kernel_size // 2 # Compute Kaiser window parameters delta_f = 4 * half_width attenuation = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 if attenuation > 50.0: beta = 0.1102 * (attenuation - 8.7) elif attenuation >= 21.0: beta = 0.5842 * (attenuation - 21) ** 0.4 + 0.07886 * (attenuation - 21.0) else: beta = 0.0 kaiser_window = torch.kaiser_window(kernel_size, beta=beta, periodic=False, dtype=torch.float32) # Compute time indices if is_even: time_indices = torch.arange(-half_size, half_size) + 0.5 else: time_indices = torch.arange(kernel_size) - half_size # Compute sinc filter if cutoff == 0: return torch.zeros((1, 1, kernel_size), dtype=torch.float32) # Ensures correct shape sinc_filter = torch.sinc(2 * cutoff * time_indices) normalized_filter = 2 * cutoff * kaiser_window * sinc_filter # Normalize to ensure sum = 1 (avoid leakage of constant component) normalized_filter /= normalized_filter.sum() return normalized_filter.view(1, 1, kernel_size) class UpSample1d(nn.Module): def __init__(self, ratio=2, kernel_size=None): super().__init__() self.ratio = ratio self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size self.stride = ratio self.pad = self.kernel_size // ratio - 1 self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2 filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size) self.register_buffer("filter", filter, persistent=False) def forward(self, hidden_states): channels = hidden_states.shape[1] hidden_states = F.pad(hidden_states, (self.pad, self.pad), mode="replicate") hidden_states = self.ratio * F.conv_transpose1d( hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels ) hidden_states = hidden_states[..., self.pad_left : -self.pad_right] return hidden_states class DownSample1d(nn.Module): def __init__(self, ratio=2, kernel_size=None): super().__init__() cutoff = 0.5 / ratio half_width = 0.6 / ratio if cutoff < 0.0: raise ValueError("Minimum cutoff must be larger than zero.") if cutoff > 0.5: raise ValueError("A cutoff above 0.5 does not make sense.") self.even = kernel_size % 2 == 0 self.pad_left = kernel_size // 2 - int(self.even) self.pad_right = kernel_size // 2 self.stride = ratio filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) self.register_buffer("filter", filter, persistent=False) def forward(self, hidden_states): channels = hidden_states.shape[1] hidden_states = F.pad(hidden_states, (self.pad_left, self.pad_right), mode="replicate") out = F.conv1d(hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels) return out class TorchActivation1d(nn.Module): def __init__( self, activation, 
up_ratio: int = 2, down_ratio: int = 2, up_kernel_size: int = 12, down_kernel_size: int = 12, ): super().__init__() if not callable(activation): raise TypeError("Activation function must be callable") self.act = activation self.upsample = UpSample1d(up_ratio, up_kernel_size) self.downsample = DownSample1d(down_ratio, down_kernel_size) def forward(self, hidden_states): hidden_states = self.upsample(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.downsample(hidden_states) return hidden_states class AMPBlock(torch.nn.Module): def __init__( self, channels, kernel_size=3, dilation=(1, 3, 5), ): super().__init__() self.convs1 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0], padding=self._get_padding(kernel_size, dilation[0]), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[1], padding=self._get_padding(kernel_size, dilation[1]), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[2], padding=self._get_padding(kernel_size, dilation[2]), ), ] ) self.convs2 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), ] ) self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers self.activations = nn.ModuleList( [TorchActivation1d(activation=SnakeBeta(channels)) for _ in range(self.num_layers)] ) def _get_padding(self, kernel_size, dilation=1): return int((kernel_size * dilation - dilation) / 2) def forward(self, hidden_states): acts1, acts2 = self.activations[::2], self.activations[1::2] for conv1, conv2, act1, act2 in zip(self.convs1, self.convs2, acts1, acts2): residual = hidden_states hidden_states = act1(hidden_states) hidden_states = conv1(hidden_states) hidden_states = act2(hidden_states) hidden_states = conv2(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring( custom_intro=""" The full Qwen2.5Omni Token2WavBigVGAN model. Which take mel spectrogram as input and predict waveform. 
""" ) class Qwen2_5OmniToken2WavBigVGANModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniBigVGANConfig def __init__(self, config: Qwen2_5OmniBigVGANConfig): super().__init__(config) self.num_residual_blocks = len(config.resblock_kernel_sizes) self.num_upsample_layers = len(config.upsample_rates) self.conv_pre = nn.Conv1d(config.mel_dim, config.upsample_initial_channel, 7, 1, padding=3) # Removing extra ModuleList breaks official state dict ups = [ nn.ModuleList( [ nn.ConvTranspose1d( config.upsample_initial_channel // (2**layer_idx), config.upsample_initial_channel // (2 ** (layer_idx + 1)), kernel_size, stride, padding=(kernel_size - stride) // 2, ) ] ) for layer_idx, (stride, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)) ] self.ups = nn.ModuleList(ups) self.resblocks = nn.ModuleList( [ AMPBlock(config.upsample_initial_channel // (2 ** (layer_idx + 1)), kernel_size, dilation) for layer_idx in range(self.num_upsample_layers) for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes) ] ) self.activation_post = TorchActivation1d( activation=SnakeBeta(config.upsample_initial_channel // (2**self.num_upsample_layers)) ) self.conv_post = nn.Conv1d( config.upsample_initial_channel // (2**self.num_upsample_layers), 1, 7, 1, padding=3, bias=False ) def normalize_spectrogram(self, spectrogram, max_value, min_db): return torch.clamp((2 * max_value) * ((spectrogram - min_db) / (-min_db)) - max_value, -max_value, max_value) def amplitude_to_db(self, amplitude, min_db_level): min_level = torch.exp( torch.tensor(min_db_level / 20.0 * np.log(10), device=amplitude.device, dtype=amplitude.dtype) ) return 20 * torch.log10(torch.clamp(amplitude, min=min_level)) def process_mel_spectrogram(self, mel_spectrogram): amplitude_spectrum = torch.exp(mel_spectrogram) decibel_spectrum = self.amplitude_to_db(amplitude_spectrum, -115) - 20 return self.normalize_spectrogram(decibel_spectrum, 1, -115) def forward(self, mel_spectrogram): processed_spectrogram = self.process_mel_spectrogram(mel_spectrogram) hidden_representation = self.conv_pre(processed_spectrogram) for layer_index in range(self.num_upsample_layers): hidden_representation = self.ups[layer_index][0](hidden_representation) residual_output = sum( self.resblocks[layer_index * self.num_residual_blocks + block_index](hidden_representation) for block_index in range(self.num_residual_blocks) ) residual_output = residual_output / self.num_residual_blocks hidden_representation = residual_output hidden_representation = self.activation_post(hidden_representation) output_waveform = self.conv_post(hidden_representation) return torch.clamp(output_waveform, min=-1.0, max=1.0).squeeze().cpu() class RungeKutta4ODESolver: def __init__(self, function, initial_value): self.function = function self.initial_value = initial_value self._one_third = 1 / 3 self._two_thirds = 2 / 3 def _rk4_step(self, function, time_start, time_step, time_end, value_start, function_value_start=None): k1 = function_value_start if function_value_start is not None else function(time_start, value_start) k2 = function(time_start + time_step * self._one_third, value_start + time_step * k1 * self._one_third) k3 = function(time_start + time_step * self._two_thirds, value_start + time_step * (k2 - k1 * self._one_third)) k4 = function(time_end, value_start + time_step * (k1 - k2 + k3)) return (k1 + 3 * (k2 + k3) + k4) * time_step / 8 def _compute_step(self, function, time_start, time_step, time_end, value_start): function_value_start 
= function(time_start, value_start) return self._rk4_step( function, time_start, time_step, time_end, value_start, function_value_start=function_value_start ), function_value_start def _linear_interpolation(self, time_start, time_end, value_start, value_end, time_point): if time_point == time_start: return value_start if time_point == time_end: return value_end weight = (time_point - time_start) / (time_end - time_start) return value_start + weight * (value_end - value_start) def integrate(self, time_points): solution = torch.empty( len(time_points), *self.initial_value.shape, dtype=self.initial_value.dtype, device=self.initial_value.device, ) solution[0] = self.initial_value current_index = 1 current_value = self.initial_value for time_start, time_end in zip(time_points[:-1], time_points[1:]): time_step = time_end - time_start delta_value, _ = self._compute_step(self.function, time_start, time_step, time_end, current_value) next_value = current_value + delta_value while current_index < len(time_points) and time_end >= time_points[current_index]: solution[current_index] = self._linear_interpolation( time_start, time_end, current_value, next_value, time_points[current_index] ) current_index += 1 current_value = next_value return solution @auto_docstring( custom_intro=""" The full Qwen2.5Omni Token2WavDiT model. Which take speech tokens as input and predict mel spectrogram. """ ) class Qwen2_5OmniToken2WavDiTModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniDiTConfig _no_split_modules = ["DiTDecoderLayer"] def __init__(self, config: Qwen2_5OmniDiTConfig): super().__init__(config) self.mel_dim = config.mel_dim self.repeats = config.repeats self.time_embed = DiTTimestepEmbedding(config.hidden_size) self.text_embed = DiTCodecEmbedding(config.num_embeds, config.emb_dim, config.repeats) self.input_embed = DiTInputEmbedding(config) self.rotary_embed = Qwen2_5OmniDiTRotaryEmbedding(config.head_dim) self.hidden_size = config.hidden_size self.layers = config.num_hidden_layers self.block_size = config.block_size self.num_attention_heads = config.num_attention_heads self.transformer_blocks = nn.ModuleList() for i in range(config.num_hidden_layers): self.transformer_blocks.append( DiTDecoderLayer( config, look_ahead_block=1 if i in config.look_ahead_layers else 0, look_backward_block=1 if i in config.look_backward_layers else 0, ) ) self.norm_out = Qwen2_5_OmniAdaLayerNormZero_Final(config.hidden_size) # final modulation self.proj_out = nn.Linear(config.hidden_size, config.mel_dim) def _create_block_diff(self, hidden_states): batch, seq_len = hidden_states.shape[0], hidden_states.shape[1] block_indices = torch.arange(seq_len, device=hidden_states.device) // self.block_size # [seq_length] block_i = block_indices.unsqueeze(1) # [seq_length, 1] block_j = block_indices.unsqueeze(0) # [1, seq_length] block_diff = block_j - block_i # (n, n) return block_diff.expand(batch, self.num_attention_heads, seq_len, seq_len) def forward( self, hidden_states, condition_vector, speaker_embedding, quantized_code, time_step, drop_audio_conditioning=False, drop_code=False, apply_cfg=True, ): batch_size = hidden_states.shape[0] if time_step.ndim == 0: time_step = time_step.repeat(batch_size) # Compute embeddings time_embedding = self.time_embed(time_step) text_embedding = self.text_embed(quantized_code, drop_code=False if apply_cfg else drop_code) text_embedding_unconditioned = self.text_embed(quantized_code, drop_code=True) if apply_cfg else None hidden_states = self.input_embed( hidden_states, speaker_embedding, 
condition_vector, text_embedding, drop_audio_cond=drop_audio_conditioning, code_embed_uncond=text_embedding_unconditioned, apply_cfg=apply_cfg, ) # Compute positional encodings position_embeddings = self.rotary_embed(hidden_states) blockwise_difference = self._create_block_diff(hidden_states) # Transformer blocks for transformer_block in self.transformer_blocks: hidden_states = transformer_block( hidden_states, time_embedding, position_embeddings=position_embeddings, block_diff=blockwise_difference, ) hidden_states = self.norm_out(hidden_states, time_embedding) output = self.proj_out(hidden_states) return output @torch.no_grad() def sample( self, conditioning_vector, reference_mel_spectrogram, quantized_code, num_steps=10, guidance_scale=0.5, sway_coefficient=-1.0, ): noise_initialization = torch.randn([1, 30000, self.mel_dim], dtype=reference_mel_spectrogram.dtype) maximum_duration = quantized_code.shape[1] * self.repeats initial_state = noise_initialization[:, :maximum_duration].to(quantized_code.device) batch_size = reference_mel_spectrogram.shape[0] conditioning_vector = conditioning_vector.unsqueeze(1).repeat(1, maximum_duration, 1) if batch_size != 1: raise ValueError("Only batch size = 1 is currently supported") def ode_function(time_step, hidden_states): if guidance_scale < 1e-5: prediction = self( hidden_states=hidden_states, speaker_embedding=conditioning_vector, condition_vector=reference_mel_spectrogram, quantized_code=quantized_code, time_step=time_step, drop_audio_conditioning=False, drop_code=False, ) return prediction model_output = self( hidden_states=hidden_states, quantized_code=quantized_code, speaker_embedding=conditioning_vector, condition_vector=reference_mel_spectrogram, time_step=time_step, apply_cfg=True, ) guided_prediction, null_prediction = torch.chunk(model_output, 2, dim=0) return guided_prediction + (guided_prediction - null_prediction) * guidance_scale initial_time = 0 time_embedding = torch.linspace( initial_time, 1, num_steps, device=quantized_code.device, dtype=conditioning_vector.dtype ) if sway_coefficient is not None: time_embedding += sway_coefficient * (torch.cos(torch.pi / 2 * time_embedding) - 1 + time_embedding) ode_solver = RungeKutta4ODESolver(function=ode_function, initial_value=initial_state) solution_trajectory = ode_solver.integrate(time_embedding) generated_waveform = solution_trajectory[-1] generated_mel_spectrogram = generated_waveform.permute(0, 2, 1) return generated_mel_spectrogram @auto_docstring( custom_intro=""" The full Qwen2.5Omni Token2Wav model. Consists a DiT model take speech tokens as input and predict mel spectrogram and a BigVGAN vocoder take mel spectrogram as input and predict waveform. """ ) class Qwen2_5OmniToken2WavModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniToken2WavConfig base_model_prefix = "model" _no_split_modules = ["Qwen2_5OmniToken2WavDiTModel", "Qwen2_5OmniToken2WavBigVGANModel"] def __init__(self, config: Qwen2_5OmniToken2WavConfig): super().__init__(config) attn_impl = config._attn_implementation if config._attn_implementation == "flash_attention_2": logger.warning_once( "Qwen2_5OmniToken2WavModel must inference with fp32, but flash_attention_2 only supports fp16 and bf16, " "attention implementation of Qwen2_5OmniToken2WavModel will fallback to sdpa." 
) attn_impl = "sdpa" elif config._attn_implementation == "eager": logger.warning_once( "Qwen2_5OmniToken2WavModel does not support eager attention implementation, fall back to sdpa" ) attn_impl = "sdpa" self.code2wav_dit_model = Qwen2_5OmniToken2WavDiTModel._from_config( config.dit_config, attn_implementation=attn_impl ) self.code2wav_bigvgan_model = Qwen2_5OmniToken2WavBigVGANModel._from_config( config.bigvgan_config, attn_implementation=attn_impl ) def forward( self, code, conditioning, reference_mel, num_steps=10, guidance_scale=0.5, sway_coefficient=-1.0, **kwargs, ): """Generates a waveform from input code and conditioning parameters.""" mel_spectrogram = self.code2wav_dit_model.sample( conditioning, reference_mel, code, num_steps=num_steps, guidance_scale=guidance_scale, sway_coefficient=sway_coefficient, ) waveform = self.code2wav_bigvgan_model(mel_spectrogram) return waveform ############################ # Start Qwen2.5Omni # ############################ @auto_docstring( custom_intro=""" The full Qwen2.5Omni model, a multimodal model composed of 3 sub-models: - [`Qwen2_5OmniThinkerForConditionalGeneration`]: a causal auto-regressive transformer takes text, audio, image, video as input and predict text tokens. - [`Qwen2_5OmniTalkerForConditionalGeneration`]: a causal auto-regressive transformer takes thinker hidden states and response as input and predict speech tokens. - [`Qwen2_5OmniToken2WavModel`]: a DiT model take speech tokens as input and predict mel spectrogram and a BigVGAN vocoder take mel spectrogram as input and predict waveform. """ ) class Qwen2_5OmniForConditionalGeneration(Qwen2_5OmniPreTrainedModel, GenerationMixin): config: Qwen2_5OmniConfig _no_split_modules = [ "Qwen2_5OmniTalkerForConditionalGeneration", "Qwen2_5OmniToken2WavModel", ] def __init__(self, config): super().__init__(config) self.thinker = Qwen2_5OmniThinkerForConditionalGeneration(config.thinker_config) self.has_talker = config.enable_audio_output self.speaker_map = {} if config.enable_audio_output: self.enable_talker() self.post_init() def enable_talker(self): self.talker = Qwen2_5OmniTalkerForConditionalGeneration(self.config.talker_config) self.token2wav = Qwen2_5OmniToken2WavModel(self.config.token2wav_config) self.token2wav.float() self.has_talker = True def load_speakers(self, path): check_torch_load_is_safe() for key, value in torch.load(path, weights_only=True).items(): self.speaker_map[key] = value logger.info(f"Speaker {list(self.speaker_map.keys())} loaded") def disable_talker(self): if hasattr(self, "talker"): del self.talker if hasattr(self, "token2wav"): del self.token2wav self.has_talker = False @classmethod def from_pretrained( cls, pretrained_model_name_or_path, *model_args, config=None, cache_dir=None, ignore_mismatched_sizes=False, force_download=False, local_files_only=False, token=None, revision="main", use_safetensors=None, weights_only=True, **kwargs, ): model = super().from_pretrained( pretrained_model_name_or_path, *model_args, config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes, force_download=force_download, local_files_only=local_files_only, token=token, revision=revision, use_safetensors=use_safetensors, weights_only=weights_only, **kwargs, ) spk_path = cached_file( pretrained_model_name_or_path, "spk_dict.pt", subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", None), 
local_files_only=kwargs.pop("local_files_only", False), token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), ) if spk_path is None: raise ValueError(f"""{pretrained_model_name_or_path}/{spk_path} not exists""") model.load_speakers(spk_path) return model @torch.no_grad() # TODO: raushan, defaults should be saved in generation config def generate( self, input_ids: Optional[torch.Tensor] = None, speaker: str = "Chelsie", use_audio_in_video: bool = False, return_audio: Optional[bool] = None, thinker_max_new_tokens: int = 1024, talker_max_new_tokens: int = 4096, talker_do_sample: bool = True, talker_top_k: int = 40, talker_top_p: float = 0.8, talker_temperature: float = 0.9, talker_eos_token_id: list[int] = [8292, 8294], talker_repetition_penalty: float = 1.05, **kwargs, ): r""" Generate text response and audio from input. Args: input_ids (`Optional[torch.Tensor]`, *optional*): Input ids, should obtain from processor. speaker (`str` , defaults to "Chelsie"): Which speaker should be used in audio response. use_audio_in_video (`bool`, defaults to False): Whether or not use audio track in video, should same as the parameter in `process_audio_info`. return_audio (`Optional[bool]`, *optional*): Whether or not return response in audio format. When `return_audio=None`, this parameter is same as `config.enable_audio_output`. kwargs (*optional*): - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. - With a *thinker_*, *talker_*, *token2wav_* prefix, they will be input for the `generate` method of the thinker, talker and token2wav respectively. It has the priority over the keywords without a prefix. Returns: When `return_audio=False`: - **Text** (`torch.Tensor`): Generated text token sequence. When `return_audio=True`: - **Text** (`torch.Tensor`): Generated text token sequence. - **Audio waveform** (`torch.Tensor`): Generated audio waveform. """ if speaker not in self.speaker_map: raise ValueError(f"{speaker} is not available, available speakers: {self.speaker_map.keys()}") if return_audio and not self.has_talker: raise ValueError( "Cannot use talker when talker module not initialized. Use `enable_talker` method or set enable_talker in config to enable talker." 
) if return_audio is None: return_audio = self.has_talker if input_ids.shape[0] != 1 and return_audio: raise NotImplementedError("Qwen2.5-Omni currently does not support batched inference with audio output") shared_kwargs = {"use_audio_in_video": use_audio_in_video} thinker_kwargs = { "max_new_tokens": thinker_max_new_tokens, } talker_kwargs = { "max_new_tokens": talker_max_new_tokens, "do_sample": talker_do_sample, "top_k": talker_top_k, "top_p": talker_top_p, "temperature": talker_temperature, "eos_token_id": talker_eos_token_id, "repetition_penalty": talker_repetition_penalty, } token2wav_kwargs = {} for key, value in kwargs.items(): if key.startswith("thinker_"): thinker_kwargs[key[len("thinker_") :]] = value elif key.startswith("talker_"): talker_kwargs[key[len("talker_") :]] = value elif key.startswith("token2wav_"): token2wav_kwargs[key[len("token2wav_") :]] = value # Process special input values elif key == "feature_attention_mask": thinker_kwargs[key] = value talker_kwargs["audio_feature_lengths"] = torch.sum(value, dim=1) elif key == "input_features" or key == "attention_mask": thinker_kwargs[key] = value # Put other key to shared kwargs else: shared_kwargs[key] = value # Merge kwargs for key, value in shared_kwargs.items(): if key not in thinker_kwargs: thinker_kwargs[key] = value if key not in talker_kwargs: talker_kwargs[key] = value if key not in token2wav_kwargs: token2wav_kwargs[key] = value speaker_params = self.speaker_map[speaker] # 1. Generate from thinker module generate_audio = return_audio and self.has_talker if generate_audio: thinker_kwargs["output_hidden_states"] = True thinker_kwargs["return_dict_in_generate"] = True thinker_result = self.thinker.generate(input_ids=input_ids, **thinker_kwargs) if not generate_audio: return thinker_result # 2. 
Generate speech tokens from talker module embeds_to_talker = thinker_result.hidden_states[0][0].clone().to(input_ids.device) if thinker_kwargs.get("input_features") is not None: audio_ids_mask = input_ids == self.config.thinker_config.audio_token_index audio_mask = audio_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker) audio_mask_tensor = torch.zeros( [audio_ids_mask.sum(), embeds_to_talker.shape[-1]], dtype=embeds_to_talker.dtype, device=input_ids.device, ) embeds_to_talker.masked_scatter_(audio_mask, audio_mask_tensor) if thinker_kwargs.get("pixel_values") is not None: image_ids_mask = input_ids == self.config.thinker_config.image_token_index image_mask = image_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker) image_mask_tensor = torch.zeros( [image_ids_mask.sum(), embeds_to_talker.shape[-1]], dtype=embeds_to_talker.dtype, device=input_ids.device, ) embeds_to_talker.masked_scatter_(image_mask, image_mask_tensor) if thinker_kwargs.get("pixel_values_videos") is not None: video_ids_mask = input_ids == self.config.thinker_config.video_token_index video_mask = video_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker) video_mask_tensor = torch.zeros( [video_ids_mask.sum(), embeds_to_talker.shape[-1]], dtype=embeds_to_talker.dtype, device=input_ids.device, ) embeds_to_talker.masked_scatter_(video_mask, video_mask_tensor) processed_thinker_hidden = ( (embeds_to_talker,) + thinker_result.hidden_states[0][1:], ) + thinker_result.hidden_states[1:] thinker_generate_ids = thinker_result.sequences[:, input_ids.size(1) :].to(input_ids.device) thinker_token_embeds = [ token_hidden_states[0].to(input_ids.device) for token_hidden_states in processed_thinker_hidden ] thinker_hidden_states = [ token_hidden_states[-1].to(input_ids.device) for token_hidden_states in processed_thinker_hidden ] talker_text_bos_token = speaker_params["bos_token"] talker_input_text_ids = torch.cat( [ input_ids, torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device), thinker_generate_ids[:, :1], ], dim=-1, ) talker_input_ids = torch.cat( [ torch.full_like(input_ids, fill_value=self.talker.codec_mask_token), torch.tensor([[self.talker.codec_pad_token]], dtype=torch.long, device=input_ids.device), torch.tensor([[self.talker.codec_bos_token]], dtype=torch.long, device=input_ids.device), ], dim=1, ) thinker_embed_tokens = self.thinker.get_input_embeddings() thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1) talker_inputs_embeds = thinker_hidden_states[0] + thinker_token_embeds[0] talker_text_bos_token = torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device) talker_text_bos_embed = thinker_embed_tokens(talker_text_bos_token).to(input_ids.device) talker_inputs_embeds = torch.cat( [ talker_inputs_embeds, talker_text_bos_embed, thinker_reply_part[:, :1, :], ], dim=1, ) eos_embedding = thinker_embed_tokens( torch.tensor([[self.talker.text_eos_token]], dtype=torch.long, device=input_ids.device) ) pad_embedding = thinker_embed_tokens( torch.tensor([[self.talker.text_pad_token]], dtype=torch.long, device=input_ids.device) ) thinker_reply_part = torch.cat( [ thinker_reply_part[:, 1:, :], eos_embedding, pad_embedding, ], dim=1, ) talker_attention_mask = None if "attention_mask" in kwargs: talker_attention_mask = torch.cat( [kwargs["attention_mask"], kwargs["attention_mask"].new_ones((1, 2))], dim=1 ).to(input_ids.device) talker_result = self.talker.generate( input_ids=talker_input_ids, input_text_ids=talker_input_text_ids, 
thinker_reply_part=thinker_reply_part, inputs_embeds=talker_inputs_embeds, attention_mask=talker_attention_mask, suppress_tokens=[self.talker.codec_bos_token], **{k: (v.to(input_ids.device) if torch.is_tensor(v) else v) for k, v in talker_kwargs.items()}, ) talker_generate_codes = talker_result[:, talker_input_ids.shape[1] : -1] # 3. Generate wavs from code if self.token2wav.dtype != torch.float: self.token2wav.float() wav = self.token2wav( talker_generate_codes.to(input_ids.device), conditioning=speaker_params["cond"].to(input_ids.device).float(), reference_mel=speaker_params["ref_mel"].to(input_ids.device).float(), **token2wav_kwargs, ) return thinker_result.sequences, wav.float() __all__ = [ "Qwen2_5OmniForConditionalGeneration", "Qwen2_5OmniThinkerTextModel", "Qwen2_5OmniThinkerForConditionalGeneration", "Qwen2_5OmniTalkerModel", "Qwen2_5OmniTalkerForConditionalGeneration", "Qwen2_5OmniToken2WavDiTModel", "Qwen2_5OmniToken2WavBigVGANModel", "Qwen2_5OmniToken2WavModel", "Qwen2_5OmniPreTrainedModel", "Qwen2_5OmniPreTrainedModelForConditionalGeneration", ]
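# ---------------------------------------------------------------------------
# Usage sketch (not part of the model definitions above): a minimal example of
# the thinker -> talker -> token2wav pipeline implemented by
# `Qwen2_5OmniForConditionalGeneration.generate`. The checkpoint id
# ("Qwen/Qwen2.5-Omni-7B"), the processor call signature and the 24 kHz output
# sample rate are assumptions based on the public Qwen2.5-Omni release; only
# the `generate(..., speaker=..., return_audio=...)` interface and the default
# "Chelsie" voice come from the code above.
if __name__ == "__main__":
    import soundfile as sf  # assumed available for writing the waveform to disk

    from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor

    model = Qwen2_5OmniForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-Omni-7B", device_map="auto")
    processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")

    conversation = [{"role": "user", "content": [{"type": "text", "text": "Say hello in one short sentence."}]}]
    text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
    inputs = processor(text=text, return_tensors="pt", padding=True).to(model.device)

    # `speaker` must be one of the voices loaded from `spk_dict.pt` (see `load_speakers`).
    # With `return_audio=True` the call returns both the text token ids and the waveform tensor.
    text_ids, audio = model.generate(**inputs, speaker="Chelsie", return_audio=True)
    print(processor.batch_decode(text_ids, skip_special_tokens=True)[0])
    sf.write("hello.wav", audio.reshape(-1).detach().cpu().numpy(), samplerate=24000)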
transformers/src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py/0
{ "file_path": "transformers/src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py", "repo_id": "transformers", "token_count": 82674 }
460
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RAG model implementation.""" import copy from dataclasses import dataclass from typing import Callable, Optional, Union import torch from torch import nn from ...cache_utils import Cache, EncoderDecoderCache from ...configuration_utils import PretrainedConfig from ...generation import GenerationConfig, GenerationMixin, LogitsProcessorList, StoppingCriteriaList from ...modeling_outputs import ModelOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Base class for retriever augmented marginalized models outputs. """ ) class RetrievAugLMMarginOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. 
question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None doc_scores: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None retrieved_doc_embeds: Optional[torch.FloatTensor] = None retrieved_doc_ids: Optional[torch.LongTensor] = None context_input_ids: Optional[torch.LongTensor] = None context_attention_mask: Optional[torch.LongTensor] = None question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None question_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None question_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None generator_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None generator_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None generator_dec_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None generator_dec_attentions: Optional[tuple[torch.FloatTensor, ...]] = None generator_cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring class RetrievAugLMOutput(ModelOutput): r""" logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
""" logits: Optional[torch.FloatTensor] = None doc_scores: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None retrieved_doc_embeds: Optional[torch.FloatTensor] = None retrieved_doc_ids: Optional[torch.LongTensor] = None context_input_ids: Optional[torch.LongTensor] = None context_attention_mask: Optional[torch.LongTensor] = None question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None question_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None question_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None generator_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None generator_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None generator_dec_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None generator_dec_attentions: Optional[tuple[torch.FloatTensor, ...]] = None generator_cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None @auto_docstring( custom_intro=""" RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://huggingface.co/papers/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al. RAG is a retriever augmented model and encapsulate three components: a question encoder, a dataset retriever and a generator, the encoder and generator are trainable while the retriever is just an indexed dataset. """ ) @auto_docstring class RagPreTrainedModel(PreTrainedModel): config: RagConfig base_model_prefix = "rag" _supports_flash_attn = True _supports_sdpa = True @classmethod def from_pretrained_question_encoder_generator( cls, question_encoder_pretrained_model_name_or_path: Optional[str] = None, generator_pretrained_model_name_or_path: Optional[str] = None, retriever: RagRetriever = None, **kwargs, ) -> PreTrainedModel: r""" Instantiates an question encoder and a generator from one or two base classes of the library from pretrained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the question encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the generator. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). 
In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. retriever ([`RagRetriever`], *optional*): The retriever to use. kwwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the question_encoder configuration, use the prefix *question_encoder_* for each configuration parameter. - To update the generator configuration, use the prefix *generator_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import RagModel >>> # initialize a RAG from two pretrained models. >>> model = RagModel.from_pretrained_question_encoder_generator( ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small" ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./rag") >>> # load fine-tuned model >>> model = RagModel.from_pretrained("./rag") ```""" kwargs_question_encoder = { argument[len("question_encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("question_encoder_") } kwargs_generator = { argument[len("generator_") :]: value for argument, value in kwargs.items() if argument.startswith("generator_") } # remove question_encoder, generator kwargs from kwargs for key in kwargs_question_encoder: del kwargs["question_encoder_" + key] for key in kwargs_generator: del kwargs["generator_" + key] # Load and initialize the question_encoder and generator # The distinction between question_encoder and generator at the model level is made # by the value of the flag `is_generator` that we need to set correctly. 
question_encoder = kwargs_question_encoder.pop("model", None) if question_encoder is None: assert question_encoder_pretrained_model_name_or_path is not None, ( "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to" " be defined" ) from ..auto.modeling_auto import AutoModel if "config" not in kwargs_question_encoder: from ..auto.configuration_auto import AutoConfig question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained( question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder, return_unused_kwargs=True, ) kwargs_question_encoder["config"] = question_encoder_config question_encoder = AutoModel.from_pretrained( question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder ) generator = kwargs_generator.pop("model", None) if generator is None: assert generator_pretrained_model_name_or_path is not None, ( "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has" " to be defined" ) from ..auto.modeling_auto import AutoModelForSeq2SeqLM if "config" not in kwargs_generator: from ..auto.configuration_auto import AutoConfig generator_config, kwargs_generator = AutoConfig.from_pretrained( generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True ) kwargs_generator["config"] = generator_config generator = AutoModelForSeq2SeqLM.from_pretrained( generator_pretrained_model_name_or_path, **kwargs_generator ) # instantiate config with corresponding kwargs config = kwargs.get("config") if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever) @auto_docstring class RagModel(RagPreTrainedModel): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, # or maybe just use a `set_retriever(...)` method **kwargs, ): r""" question_encoder (`PreTrainedModel`, *optional*): The model responsible for encoding the question into hidden states for retrieval. generator (`PreTrainedModel`, *optional*): The model responsible for generating text based on retrieved documents. retriever (`RagRetriever`, *optional*): The component responsible for retrieving documents from a knowledge base given the encoded question. """ assert config is not None or (question_encoder is not None and generator is not None), ( "Either a configuration or an question_encoder and a generator has to be provided." 
) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) else: assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}" super().__init__(config) if question_encoder is None: from ..auto.modeling_auto import AutoModel question_encoder = AutoModel.from_config(config.question_encoder) if generator is None: from ..auto.modeling_auto import AutoModelForSeq2SeqLM generator = AutoModelForSeq2SeqLM.from_config(config.generator) self.retriever = retriever if self.retriever is not None: assert isinstance(retriever, RagRetriever), ( f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" ) self.retriever = retriever self.question_encoder = question_encoder self.generator = generator self.ctx_encoder = None self.context_encoder_training = False @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Cache] = None, doc_scores: Optional[torch.FloatTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, n_docs: Optional[int] = None, ) -> Union[tuple[torch.Tensor], RetrievAugLMOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices. [What are input IDs?](../glossary#input-ids) encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*) Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`, *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs * sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the generator's encoder. Used by the ([`RagModel`]) model during decoding. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for generation tasks. `None` by default, construct as per instructions for the generator model you're using with your RAG instance. decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever` `doc_scores` has to be provided to the forward pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information. 
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model was not initialized with a `retriever` ``context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever` `context_attention_mask` has to be provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`]. output_retrieved (`bool`, *optional*): Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask`. See returned tensors for more detail. n_docs (`int`, *optional*): The number of documents to retrieve. Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"]) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved # whether retriever has to be used has_to_retrieve = ( self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and encoder_outputs is None ) # encoder_outputs are pre-computed during RAG-token generation if encoder_outputs is None: if has_to_retrieve: question_enc_outputs = self.question_encoder( input_ids, attention_mask=attention_mask, return_dict=True ) question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder retriever_outputs = self.retriever( input_ids, question_encoder_last_hidden_state.detach().to(device="cpu", dtype=torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", ) if self.context_encoder_training: ( context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_input_ids, retrieved_doc_attention_mask, retrieved_doc_ids, ) = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["tokenized_doc_ids"], retriever_outputs["tokenized_doc_attention_mask"], retriever_outputs["doc_ids"], ) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) retrieved_doc_input_ids = 
retrieved_doc_input_ids.to(input_ids) retrieved_doc_attention_mask = retrieved_doc_attention_mask.to(input_ids) retrieved_doc_embeds = self.ctx_encoder( retrieved_doc_input_ids, attention_mask=retrieved_doc_attention_mask, return_dict=True ).pooler_output retrieved_doc_embeds = retrieved_doc_embeds.view( -1, n_docs, question_encoder_last_hidden_state.shape[1] ) # reshaping # compute doc_scores involving ctx_encoder doc_scores = torch.bmm( question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2) ).squeeze(1) else: context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["doc_ids"], ) # set to correct device retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm( question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2) ).squeeze(1) else: assert context_input_ids is not None, ( "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can" " set a retriever using the `set_retriever(...)` function." ) assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function." ) assert (doc_scores.shape[1] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." 
) # Decoder input without context documents if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0) gen_outputs = self.generator( input_ids=context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=True, ) if not has_to_retrieve: question_encoder_last_hidden_state = None question_enc_hidden_states = None question_enc_attentions = None retrieved_doc_embeds = None retrieved_doc_ids = None else: question_enc_hidden_states = question_enc_outputs.hidden_states question_enc_attentions = question_enc_outputs.attentions if not has_to_retrieve or not output_retrieved: # don't output retrieved docs context_input_ids = (None,) context_attention_mask = None retrieved_doc_embeds = None retrieved_doc_ids = None return RetrievAugLMOutput( logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, generator_cross_attentions=gen_outputs.cross_attentions, ) @auto_docstring( custom_intro=""" A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass. """ ) class RagSequenceForGeneration(RagPreTrainedModel): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): r""" question_encoder (`PreTrainedModel`, *optional*): The model responsible for encoding the question into hidden states for retrieval. generator (`PreTrainedModel`, *optional*): The model responsible for generating text based on retrieved documents. retriever (`RagRetriever`, *optional*): The component responsible for retrieving documents from a knowledge base given the encoded question. """ assert config is not None or (question_encoder is not None and generator is not None), ( "Either a configuration or an encoder and a generator has to be provided." 
) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel): self.rag.context_encoder_training = True self.rag.ctx_encoder = ctx_encoder @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Cache] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, exclude_bos_score: Optional[bool] = None, reduce_loss: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, n_docs: Optional[int] = None, **kwargs, # needs kwargs for generation ) -> RetrievAugLMMarginOutput: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices. [What are input IDs?](../glossary#input-ids) encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*) Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`, *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs * sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the generator's encoder. Used by the ([`RagModel`]) model during decoding. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for generation tasks. `None` by default, construct as per instructions for the generator model you're using with your RAG instance. decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model was not initialized with a `retriever` ``context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever` `context_attention_mask` has to be provided to the forward pass. 
`context_attention_mask` are returned by [`~RagRetriever.__call__`]. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever` `doc_scores` has to be provided to the forward pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information. output_retrieved (`bool`, *optional*): Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask`. See returned tensors for more detail. exclude_bos_score (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing the loss. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum` operation. n_docs (`int`, *optional*): The number of documents to retrieve. Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt") >>> input_ids = inputs["input_ids"] >>> labels = targets["input_ids"] >>> outputs = model(input_ids=input_ids, labels=labels) >>> # or use retriever separately >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True) >>> # 1. Encode >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") >>> doc_scores = torch.bmm( ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) ... ).squeeze(1) >>> # 3. Forward to generator >>> outputs = model( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=labels, ... 
) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, ) loss = None if labels is not None: loss = self.get_nll( outputs.logits, outputs.doc_scores, decoder_input_ids, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, exclude_bos_score=exclude_bos_score, n_docs=n_docs, ) return RetrievAugLMMarginOutput( loss=loss, logits=outputs.logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions, ) @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, do_deduplication: Optional[bool] = None, # defaults to True num_return_sequences: Optional[int] = None, # defaults to 1 num_beams: Optional[int] = None, # defaults to 1 n_docs: Optional[int] = None, **model_kwargs, ) -> torch.LongTensor: """ Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation for more information on how to set other generate input parameters. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are returned by [`~RagRetriever.__call__`]. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`]. do_deduplication (`bool`, *optional*): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. num_return_sequences(`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the `generator`'s `[`~generation.GenerationMixin.generate`]` function, where we set `num_return_sequences` to `num_beams`. num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. n_docs (`int`, *optional*, defaults to `config.n_docs`) Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs (`dict[str, Any]`, *optional*): Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]. Return: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication num_doc_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) num_beams = num_beams if num_beams is not None else self.config.num_beams assert input_ids is not None or context_input_ids is not None, ( " At least one of input_ids or context_input_ids must be given" ) if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] context_input_ids = self.retriever( input_ids, question_hidden_states.detach().to(device="cpu", dtype=torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", )["context_input_ids"] # set to correct device context_input_ids = context_input_ids.to(input_ids) hypos = [] model_kwargs["num_beams"] = num_beams model_kwargs["num_return_sequences"] = num_beams model_kwargs["attention_mask"] = None batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs for index in range(batch_size): # first, generate beams from documents: generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) output_sequences = self.generator.generate( generator_input_ids, **model_kwargs, ) # n_docs * n_beam, tgt_len if do_deduplication: # do_deduplication, max_output_len output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values())) num_candidates = output_sequences.shape[ 0 ] # after deduplication, this number can be less than n_docs*n_beam # then, run model forwards to get nll scores: if input_ids is not None: new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1) outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) else: # input_ids is None, need context_input_ids/mask and doc_scores assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." 
) individual_input_ids = generator_input_ids.repeat( num_candidates, 1 ) # (num_candidates*n_docs, max_len) individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1) individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs] outputs = self( context_input_ids=individual_input_ids, context_attention_mask=individual_attention_mask, doc_scores=individual_doc_scores, labels=output_sequences, exclude_bos_score=True, ) top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1] # add hypothesis hypos.append(output_sequences[top_cand_inds]) return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) def get_nll( self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None ): # shift tokens left target = torch.cat( [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1 ) n_docs = n_docs if n_docs is not None else self.config.n_docs # bos_token_id is None for T5 bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all() def _mask_pads(ll, smooth_obj): pad_mask = target.eq(self.config.generator.pad_token_id) if pad_mask.any(): ll.masked_fill_(pad_mask, 0.0) smooth_obj.masked_fill_(pad_mask, 0.0) return ll.squeeze(-1), smooth_obj.squeeze(-1) # seq_logits dim = (batch*n_docs, tgt_len , #vocabs) seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view( seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1) ) # batch_size x n_docs x tgt_len x #vocab_size doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1) # RAG-sequence marginalization first_token_scores = seq_logprobs[:, :, :1, :] second_token_scores = seq_logprobs[:, :, 1:2, :] remainder = seq_logprobs[:, :, 2:, :] rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2) # calculate loss target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1) assert target.dim() == rag_logprobs.dim() ll = rag_logprobs.gather(dim=-1, index=target) smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) # sum over tokens, exclude bos while scoring ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2) smooth_obj = smooth_obj.sum(2) ll = ll.logsumexp(1) # logsumexp over docs smooth_obj = smooth_obj.logsumexp(1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = nll_loss.sum() smooth_loss = smooth_loss.sum() eps_i = epsilon / rag_logprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss @staticmethod def _cat_and_pad(tensors, pad_token_id): output = ( tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id) ) ind = 0 for t in tensors: output[ind : ind + t.shape[0], : t.shape[1]] = t ind += t.shape[0] return output @auto_docstring( custom_intro=""" A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass. 
""" ) class RagTokenForGeneration(RagPreTrainedModel, GenerationMixin): def __init__( self, config: Optional[PretrainedConfig] = None, question_encoder: Optional[PreTrainedModel] = None, generator: Optional[PreTrainedModel] = None, retriever: Optional[RagRetriever] = None, **kwargs, ): r""" question_encoder (`PreTrainedModel`, *optional*): The model responsible for encoding the question into hidden states for retrieval. generator (`PreTrainedModel`, *optional*): The model responsible for generating text based on retrieved documents. retriever (`RagRetriever`, *optional*): The component responsible for retrieving documents from a knowledge base given the encoded question. """ assert config is not None or (question_encoder is not None and generator is not None), ( "Either a configuration or an encoder and a generator has to be provided." ) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel): self.rag.context_encoder_training = True self.rag.ctx_encoder = ctx_encoder def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, doc_scores=None, n_docs=None, **kwargs, ): # Overwritten -- `do_marginalize` is explicitly set in the output if past_key_values is not None: # if past is defined use only last decoder_input_ids decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, "encoder_outputs": encoder_outputs, "doc_scores": doc_scores, "context_attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "past_key_values": past_key_values, "use_cache": use_cache, "do_marginalize": True, "n_docs": n_docs, } @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @staticmethod def _reorder_cache(past_key_values, beam_idx): """Reorders cache for generation. 
BART-inspired but we need to take care of the extra dimension for docs""" def _reorder_stacked(hidden_states, new_order): n_docs = hidden_states.shape[0] // new_order.shape[0] hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:]) hidden_states = hidden_states.index_select(0, new_order) result = hidden_states.view(-1, *hidden_states.shape[2:]) return result reordered_past = () for layer_past in past_key_values: # get the correct batch idx from decoder layer's batch dim for cross and self-attn reordered_past += ( tuple(_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past), ) if isinstance(past_key_values, EncoderDecoderCache): reordered_past = EncoderDecoderCache.from_legacy_cache(reordered_past) return reordered_past def marginalize(self, seq_logits, doc_scores, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # RAG-token marginalization seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view( seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1) ) doc_logprobs = torch.log_softmax(doc_scores, dim=1) log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1) return torch.logsumexp(log_prob_sum, dim=1) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Cache] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_retrieved: Optional[bool] = None, do_marginalize: Optional[bool] = None, reduce_loss: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, n_docs: Optional[int] = None, **kwargs, # needs kwargs for generation ) -> RetrievAugLMMarginOutput: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices. [What are input IDs?](../glossary#input-ids) encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*) Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`, *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs * sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the generator's encoder. Used by the ([`RagModel`]) model during decoding. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for generation tasks. `None` by default, construct as per instructions for the generator model you're using with your RAG instance. decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. 
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model was not initialized with a `retriever` ``context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever` `context_attention_mask` has to be provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`]. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever` `doc_scores` has to be provided to the forward pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information. output_retrieved (`bool`, *optional*): Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask`. See returned tensors for more detail. do_marginalize (`bool`, *optional*): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum` operation. n_docs (`int`, *optional*): The number of documents to retrieve. Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt") >>> input_ids = inputs["input_ids"] >>> labels = targets["input_ids"] >>> outputs = model(input_ids=input_ids, labels=labels) >>> # or use retriever separately >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True) >>> # 1. Encode >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") >>> doc_scores = torch.bmm( ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) ... ).squeeze(1) >>> # 3. Forward to generator >>> outputs = model( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=labels, ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... 
context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... ) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, ) loss = None logits = outputs.logits if labels is not None: assert decoder_input_ids is not None loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) if do_marginalize: logits = self.marginalize(logits, outputs.doc_scores, n_docs) return RetrievAugLMMarginOutput( loss=loss, logits=logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions, ) @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, context_input_ids: Optional[torch.LongTensor] = None, context_attention_mask: Optional[torch.LongTensor] = None, doc_scores: Optional[torch.FloatTensor] = None, n_docs: Optional[int] = None, generation_config: Optional[GenerationConfig] = None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None, logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(), stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(), **kwargs, ) -> torch.LongTensor: """ Implements RAG token decoding. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. n_docs (`int`, *optional*, defaults to `config.n_docs`) Number of documents to retrieve and/or number of documents for which to generate an answer. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*): If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments `inputs_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens `inputs_ids` and the batch ID `batch_id`. This argument is useful for constrained generation conditioned on the prefix, as described in [Autoregressive Entity Retrieval](https://huggingface.co/papers/2010.00904). logits_processor (`LogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logit processor is passed that is already created with the arguments or a model's config an error is thrown. stopping_criteria (`StoppingCriteriaList`, *optional*): Custom stopping criteria that complement the default stopping criteria built from arguments and a model's config. If a stopping criteria is passed that is already created with the arguments or a model's config an error is thrown. kwargs (`dict[str, Any]`, *optional*): Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. 
Return: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. """ # Handle `generation_config` and kwargs that might update it if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None self._prepare_special_tokens(generation_config, kwargs_has_attention_mask) # set default parameters n_docs = n_docs if n_docs is not None else self.config.n_docs # retrieve docs if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] out = self.retriever( input_ids, question_hidden_states.detach().to(device="cpu", dtype=torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="pt", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) # set to correct device retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states) context_input_ids = context_input_ids.to(input_ids) context_attention_mask = context_attention_mask.to(input_ids) # compute doc_scores doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze( 1 ) assert (context_input_ids.shape[0] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." 
) # batch_size batch_size = context_input_ids.shape[0] // n_docs encoder = self.rag.generator.get_encoder() encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True) input_ids = torch.full( (batch_size * generation_config.num_beams, 1), generation_config.decoder_start_token_id, dtype=torch.long, device=next(self.parameters()).device, ) input_ids_seq_length = input_ids.shape[-1] last_hidden_state = encoder_outputs["last_hidden_state"] def extend_enc_output(tensor, num_beams=None): # split into `batch_size`, `num_beams`, `num_docs` tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:]) # repeat same last hidden states over `num_beams` dimension tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:]) # merge `batch_size`, `num_beams`, `num_docs` dims again return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:]) # correctly extend last_hidden_state and attention mask context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams) encoder_outputs["last_hidden_state"] = extend_enc_output( last_hidden_state, num_beams=generation_config.num_beams ) doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0) # define start_len & additional parameters model_kwargs["doc_scores"] = doc_scores model_kwargs["encoder_outputs"] = encoder_outputs model_kwargs["attention_mask"] = context_attention_mask model_kwargs["n_docs"] = n_docs pre_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, encoder_input_ids=context_input_ids, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, logits_processor=logits_processor, device=input_ids.device, ) prepared_stopping_criteria = self._get_stopping_criteria( generation_config=generation_config, stopping_criteria=stopping_criteria ) self._prepare_cache_for_generation( generation_config, model_kwargs, assistant_model=None, batch_size=input_ids.shape[0], max_cache_length=generation_config.max_length - 1, ) if generation_config.num_beams == 1: if generation_config.num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" " greedy search." ) return self._sample( input_ids, logits_processor=pre_processor, stopping_criteria=prepared_stopping_criteria, generation_config=generation_config, synced_gpus=False, streamer=None, **model_kwargs, ) elif generation_config.num_beams > 1: if generation_config.num_return_sequences > generation_config.num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") return self._beam_search( input_ids, logits_processor=pre_processor, stopping_criteria=prepared_stopping_criteria, generation_config=generation_config, synced_gpus=False, **model_kwargs, ) else: raise ValueError( f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}" ) # Auxiliary functions for beam search def _temporary_reorder_cache(self, past_key_values, beam_idx): # RAG should always use the legacy path even though the LM backbone (T5) uses new cache format # because RAG expands input for doc-size internally. 
TODO: raushan, remove me when all models support # new cache format past_key_values = self._reorder_cache(past_key_values, beam_idx) return past_key_values def get_input_embeddings(self): return self.rag.generator.get_input_embeddings() def get_output_embeddings(self): return self.rag.generator.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.rag.generator.set_output_embeddings(new_embeddings) def shift_tokens_right(self, input_ids, start_token_id=None): """Shift input ids one token to the right, and pad with start_token_id""" if start_token_id is None: start_token_id = self.config.decoder_start_token_id shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = start_token_id return shifted_input_ids def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # shift tokens left target = torch.cat( [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1 ) def _mask_pads(ll, smooth_obj): pad_mask = target.eq(self.config.generator.pad_token_id) if pad_mask.any(): ll.masked_fill_(pad_mask, 0.0) smooth_obj.masked_fill_(pad_mask, 0.0) return ll.squeeze(-1), smooth_obj.squeeze(-1) rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) target = target.unsqueeze(-1) assert target.dim() == rag_logprobs.dim() ll = rag_logprobs.gather(dim=-1, index=target) smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) ll = ll.sum(1) # sum over tokens smooth_obj = smooth_obj.sum(1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = nll_loss.sum() smooth_loss = smooth_loss.sum() eps_i = epsilon / rag_logprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss __all__ = ["RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration"]
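

# ---------------------------------------------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a minimal, self-contained demonstration of the two
# marginalization schemes implemented above, using toy tensors instead of real model outputs. All shapes and
# values below are assumptions chosen only to make the math visible, and the snippet reuses the module-level
# `torch`/`nn` imports. `RagTokenForGeneration.marginalize` mixes documents at every decoding step, while
# `RagSequenceForGeneration.get_nll` scores whole sequences per document and only then reduces over documents
# with logsumexp.
if __name__ == "__main__":
    batch_size, n_docs, tgt_len, vocab_size = 2, 3, 4, 11

    # fake generator logits, flattened over documents as the models expect: (batch * n_docs, tgt_len, vocab)
    seq_logits = torch.randn(batch_size * n_docs, tgt_len, vocab_size)
    # fake retrieval scores, one per (example, document) pair: (batch, n_docs)
    doc_scores = torch.randn(batch_size, n_docs)
    # fake target token ids: (batch, tgt_len)
    target = torch.randint(0, vocab_size, (batch_size, tgt_len))

    seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(batch_size, n_docs, tgt_len, vocab_size)
    doc_logprobs = torch.log_softmax(doc_scores, dim=1)

    # RAG-token: p(y_t | x) = sum_d p(d | x) * p(y_t | x, d), applied independently at every decoding step
    token_marginal = torch.logsumexp(seq_logprobs + doc_logprobs[:, :, None, None], dim=1)
    # every position is again a normalized distribution over the vocabulary
    assert torch.allclose(token_marginal.exp().sum(-1), torch.ones(batch_size, tgt_len), atol=1e-4)

    # RAG-sequence: p(y | x) = sum_d p(d | x) * prod_t p(y_t | x, d, y_<t), i.e. marginalize whole sequences
    per_doc_token_ll = seq_logprobs.gather(-1, target[:, None, :, None].expand(-1, n_docs, -1, -1)).squeeze(-1)
    per_doc_seq_ll = per_doc_token_ll.sum(-1) + doc_logprobs  # (batch, n_docs)
    sequence_nll = -per_doc_seq_ll.logsumexp(dim=1)  # (batch,)
    print("toy RAG-sequence NLL per example:", sequence_nll.tolist())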
transformers/src/transformers/models/rag/modeling_rag.py/0
{ "file_path": "transformers/src/transformers/models/rag/modeling_rag.py", "repo_id": "transformers", "token_count": 37154 }
461
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RegNet 10B checkpoints vissl.""" # You need to install a specific version of classy vision # pip install git+https://github.com/FrancescoSaverioZuppichini/ClassyVision.git@convert_weights import argparse import json import os import re from collections import OrderedDict from dataclasses import dataclass, field from functools import partial from pathlib import Path from pprint import pprint from typing import Optional import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams from huggingface_hub import hf_hub_download from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.modeling_utils import _load_state_dict_into_meta_model, load_state_dict from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger() @dataclass class Tracker: module: nn.Module traced: list[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) name2module: dict[str, nn.Module] = field(default_factory=OrderedDict) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, (nn.Conv2d, nn.BatchNorm2d)) if has_not_submodules: self.traced.append(m) self.name2module[name] = m def __call__(self, x: Tensor): for name, m in self.module.named_modules(): self.handles.append(m.register_forward_hook(partial(self._forward_hook, name=name))) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): # check the len of the state_dict keys to see if we have learnable params return {k: v for k, v in self.name2module.items() if len(list(v.state_dict().keys())) > 0} class FakeRegNetVisslWrapper(nn.Module): """ Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file. """ def __init__(self, model: nn.Module): super().__init__() feature_blocks: list[tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem)) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block"), f"Unexpected layer name {k}" block_index = len(feature_blocks) + 1 feature_blocks.append((f"res{block_index}", v)) self._feature_blocks = nn.ModuleDict(feature_blocks) def forward(self, x: Tensor): return get_trunk_forward_outputs( x, out_feat_keys=None, feature_blocks=self._feature_blocks, ) class FakeRegNetParams(RegNetParams): """ Used to instantiace a RegNet model from classy vision with the same depth as the 10B one but with super small parameters, so we can trace it in memory. 
""" def get_expanded_params(self): return [(8, 2, 2, 8, 1.0), (8, 2, 7, 8, 1.0), (8, 2, 17, 8, 1.0), (8, 2, 1, 8, 1.0)] def get_from_to_our_keys(model_name: str) -> dict[str, str]: """ Returns a dictionary that maps from original model's key -> our implementation's keys """ # create our model (with small weights) our_config = RegNetConfig(depths=[2, 7, 17, 1], hidden_sizes=[8, 8, 8, 8], groups_width=8) if "in1k" in model_name: our_model = RegNetForImageClassification(our_config) else: our_model = RegNetModel(our_config) # create from model (with small weights) from_model = FakeRegNetVisslWrapper( RegNet(FakeRegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)) ) with torch.no_grad(): from_model = from_model.eval() our_model = our_model.eval() x = torch.randn((1, 3, 32, 32)) # trace both dest_tracker = Tracker(our_model) dest_traced = dest_tracker(x).parametrized pprint(dest_tracker.name2module) src_tracker = Tracker(from_model) src_traced = src_tracker(x).parametrized # convert the keys -> module dict to keys -> params def to_params_dict(dict_with_modules): params_dict = OrderedDict() for name, module in dict_with_modules.items(): for param_name, param in module.state_dict().items(): params_dict[f"{name}.{param_name}"] = param return params_dict from_to_ours_keys = {} src_state_dict = to_params_dict(src_traced) dst_state_dict = to_params_dict(dest_traced) for (src_key, src_param), (dest_key, dest_param) in zip(src_state_dict.items(), dst_state_dict.items()): from_to_ours_keys[src_key] = dest_key logger.info(f"{src_key} -> {dest_key}") # if "in1k" was in the model_name it means it must have a classification head (was finetuned) if "in1k" in model_name: from_to_ours_keys["0.clf.0.weight"] = "classifier.1.weight" from_to_ours_keys["0.clf.0.bias"] = "classifier.1.bias" return from_to_ours_keys def convert_weights_and_push(save_directory: Path, model_name: Optional[str] = None, push_to_hub: bool = True): filename = "imagenet-1k-id2label.json" num_labels = 1000 repo_id = "huggingface/label-files" num_labels = num_labels id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text()) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label label2id = {v: k for k, v in id2label.items()} ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_config = { "regnet-y-10b-seer": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), # finetuned on imagenet "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), } # add seer weights logic def load_using_classy_vision(checkpoint_url: str) -> tuple[dict, dict]: files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu") # check if we have a head, if yes add it model_state_dict = files["classy_state_dict"]["base_model"]["model"] return model_state_dict["trunk"], model_state_dict["heads"] names_to_from_model = { "regnet-y-10b-seer": partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", ), "regnet-y-10b-seer-in1k": partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", ), } from_to_ours_keys = get_from_to_our_keys(model_name) if not (save_directory / 
f"{model_name}.pth").exists(): logger.info("Loading original state_dict.") from_state_dict_trunk, from_state_dict_head = names_to_from_model[model_name]() from_state_dict = from_state_dict_trunk if "in1k" in model_name: # add the head from_state_dict = {**from_state_dict_trunk, **from_state_dict_head} logger.info("Done!") converted_state_dict = {} not_used_keys = list(from_state_dict.keys()) regex = r"\.block.-part." # this is "interesting", so the original checkpoints have `block[0,1]-part` in each key name, we remove it for key in from_state_dict: # remove the weird "block[0,1]-part" from the key src_key = re.sub(regex, "", key) # now src_key from the model checkpoints is the one we got from the original model after tracing, so use it to get the correct destination key dest_key = from_to_ours_keys[src_key] # store the parameter with our key converted_state_dict[dest_key] = from_state_dict[key] not_used_keys.remove(key) # check that all keys have been updated assert len(not_used_keys) == 0, f"Some keys where not used {','.join(not_used_keys)}" logger.info(f"The following keys were not used: {','.join(not_used_keys)}") # save our state dict to disk torch.save(converted_state_dict, save_directory / f"{model_name}.pth") del converted_state_dict else: logger.info("The state_dict was already stored on disk.") if push_to_hub: logger.info(f"Token is {os.environ['HF_TOKEN']}") logger.info("Loading our model.") # create our model our_config = names_to_config[model_name] our_model_func = RegNetModel if "in1k" in model_name: our_model_func = RegNetForImageClassification with torch.device("meta"): our_model = our_model_func(our_config) logger.info("Loading state_dict in our model.") # load state dict state_dict_keys = our_model.state_dict().keys() state_dict = load_state_dict(save_directory / f"{model_name}.pth", weights_only=True) fixed_state_dict = state_dict = {our_model._fix_state_dict_key_on_load(k)[0]: v for k, v in state_dict.items()} _load_state_dict_into_meta_model( our_model, fixed_state_dict, start_prefix="", expected_keys=state_dict_keys, ) logger.info("Finally, pushing!") # push it to hub our_model.push_to_hub( repo_path_or_name=save_directory / model_name, commit_message="Add model", output_dir=save_directory / model_name, ) size = 384 # we can use the convnext one image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size) image_processor.push_to_hub( repo_path_or_name=save_directory / model_name, commit_message="Add image processor", output_dir=save_directory / model_name, ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported regnet* architecture," " currently: regnetx-*, regnety-*. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
transformers/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py", "repo_id": "transformers", "token_count": 5006 }
462
# coding=utf-8 # Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ResNet model.""" import math from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig logger = logging.get_logger(__name__) class ResNetConvLayer(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu" ): super().__init__() self.convolution = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False ) self.normalization = nn.BatchNorm2d(out_channels) self.activation = ACT2FN[activation] if activation is not None else nn.Identity() def forward(self, input: Tensor) -> Tensor: hidden_state = self.convolution(input) hidden_state = self.normalization(hidden_state) hidden_state = self.activation(hidden_state) return hidden_state class ResNetEmbeddings(nn.Module): """ ResNet Embeddings (stem) composed of a single aggressive convolution. """ def __init__(self, config: ResNetConfig): super().__init__() self.embedder = ResNetConvLayer( config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act ) self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.num_channels = config.num_channels def forward(self, pixel_values: Tensor) -> Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embedding = self.embedder(pixel_values) embedding = self.pooler(embedding) return embedding class ResNetShortCut(nn.Module): """ ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ def __init__(self, in_channels: int, out_channels: int, stride: int = 2): super().__init__() self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False) self.normalization = nn.BatchNorm2d(out_channels) def forward(self, input: Tensor) -> Tensor: hidden_state = self.convolution(input) hidden_state = self.normalization(hidden_state) return hidden_state class ResNetBasicLayer(nn.Module): """ A classic ResNet's residual layer composed by two `3x3` convolutions. 
""" def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"): super().__init__() should_apply_shortcut = in_channels != out_channels or stride != 1 self.shortcut = ( ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() ) self.layer = nn.Sequential( ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None), ) self.activation = ACT2FN[activation] def forward(self, hidden_state): residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state class ResNetBottleNeckLayer(nn.Module): """ A classic ResNet's bottleneck layer composed by three `3x3` convolutions. The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3` convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`. If `downsample_in_bottleneck` is true, downsample will be in the first layer instead of the second layer. """ def __init__( self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4, downsample_in_bottleneck: bool = False, ): super().__init__() should_apply_shortcut = in_channels != out_channels or stride != 1 reduces_channels = out_channels // reduction self.shortcut = ( ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity() ) self.layer = nn.Sequential( ResNetConvLayer( in_channels, reduces_channels, kernel_size=1, stride=stride if downsample_in_bottleneck else 1 ), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride if not downsample_in_bottleneck else 1), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None), ) self.activation = ACT2FN[activation] def forward(self, hidden_state): residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state class ResNetStage(nn.Module): """ A ResNet stage composed by stacked layers. 
""" def __init__( self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ): super().__init__() layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer if config.layer_type == "bottleneck": first_layer = layer( in_channels, out_channels, stride=stride, activation=config.hidden_act, downsample_in_bottleneck=config.downsample_in_bottleneck, ) else: first_layer = layer(in_channels, out_channels, stride=stride, activation=config.hidden_act) self.layers = nn.Sequential( first_layer, *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)] ) def forward(self, input: Tensor) -> Tensor: hidden_state = input for layer in self.layers: hidden_state = layer(hidden_state) return hidden_state class ResNetEncoder(nn.Module): def __init__(self, config: ResNetConfig): super().__init__() self.stages = nn.ModuleList([]) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) ) in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]): self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth)) def forward( self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True ) -> BaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=hidden_state, hidden_states=hidden_states, ) @auto_docstring class ResNetPreTrainedModel(PreTrainedModel): config: ResNetConfig base_model_prefix = "resnet" main_input_name = "pixel_values" _no_split_modules = ["ResNetConvLayer", "ResNetShortCut"] def _init_weights(self, module): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") # copied from the `reset_parameters` method of `class Linear(Module)` in `torch`. 
elif isinstance(module, nn.Linear): nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5)) if module.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(module.bias, -bound, bound) elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) @auto_docstring class ResNetModel(ResNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embedder = ResNetEmbeddings(config) self.encoder = ResNetEncoder(config) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @auto_docstring( custom_intro=""" ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """ ) class ResNetForImageClassification(ResNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.resnet = ResNetModel(config) # classification head self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), ) # initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) @auto_docstring( custom_intro=""" ResNet backbone, to be used with frameworks like DETR and MaskFormer. """ ) class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embedding_size] + config.hidden_sizes self.embedder = ResNetEmbeddings(config) self.encoder = ResNetEncoder(config) # initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> BackboneOutput: r""" Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") >>> model = AutoBackbone.from_pretrained( ... "microsoft/resnet-50", out_features=["stage1", "stage2", "stage3", "stage4"] ... ) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 2048, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) embedding_output = self.embedder(pixel_values) outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True) hidden_states = outputs.hidden_states feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) __all__ = ["ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone"]
transformers/src/transformers/models/resnet/modeling_resnet.py/0
{ "file_path": "transformers/src/transformers/models/resnet/modeling_resnet.py", "repo_id": "transformers", "token_count": 7214 }
463
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RT Detr checkpoints with Timm backbone""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import RTDetrConfig, RTDetrForObjectDetection, RTDetrImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_rt_detr_config(model_name: str) -> RTDetrConfig: config = RTDetrConfig() config.num_labels = 80 repo_id = "huggingface/label-files" filename = "coco-detection-mmdet-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if model_name == "rtdetr_r18vd": config.backbone_config.hidden_sizes = [64, 128, 256, 512] config.backbone_config.depths = [2, 2, 2, 2] config.backbone_config.layer_type = "basic" config.encoder_in_channels = [128, 256, 512] config.hidden_expansion = 0.5 config.decoder_layers = 3 elif model_name == "rtdetr_r34vd": config.backbone_config.hidden_sizes = [64, 128, 256, 512] config.backbone_config.depths = [3, 4, 6, 3] config.backbone_config.layer_type = "basic" config.encoder_in_channels = [128, 256, 512] config.hidden_expansion = 0.5 config.decoder_layers = 4 elif model_name == "rtdetr_r50vd_m": pass elif model_name == "rtdetr_r50vd": pass elif model_name == "rtdetr_r101vd": config.backbone_config.depths = [3, 4, 23, 3] config.encoder_ffn_dim = 2048 config.encoder_hidden_dim = 384 config.decoder_in_channels = [384, 384, 384] elif model_name == "rtdetr_r18vd_coco_o365": config.backbone_config.hidden_sizes = [64, 128, 256, 512] config.backbone_config.depths = [2, 2, 2, 2] config.backbone_config.layer_type = "basic" config.encoder_in_channels = [128, 256, 512] config.hidden_expansion = 0.5 config.decoder_layers = 3 elif model_name == "rtdetr_r50vd_coco_o365": pass elif model_name == "rtdetr_r101vd_coco_o365": config.backbone_config.depths = [3, 4, 23, 3] config.encoder_ffn_dim = 2048 config.encoder_hidden_dim = 384 config.decoder_in_channels = [384, 384, 384] return config def create_rename_keys(config): # here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] # stem # fmt: off last_key = ["weight", "bias", "running_mean", "running_var"] for level in range(3): rename_keys.append((f"backbone.conv1.conv1_{level+1}.conv.weight", f"model.backbone.model.embedder.embedder.{level}.convolution.weight")) for last in last_key: rename_keys.append((f"backbone.conv1.conv1_{level+1}.norm.{last}", f"model.backbone.model.embedder.embedder.{level}.normalization.{last}")) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): # shortcut if layer_idx == 0: if stage_idx == 0: 
rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.0.short.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.0.shortcut.convolution.weight", ) ) for last in last_key: rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.0.short.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.0.shortcut.normalization.{last}", ) ) else: rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.0.short.conv.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.0.shortcut.1.convolution.weight", ) ) for last in last_key: rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.0.short.conv.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.0.shortcut.1.normalization.{last}", ) ) rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2a.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.convolution.weight", ) ) for last in last_key: rename_keys.append(( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2a.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.{last}", )) rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2b.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.convolution.weight", ) ) for last in last_key: rename_keys.append(( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2b.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.{last}", )) # https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/nn/backbone/presnet.py#L171 if config.backbone_config.layer_type != "basic": rename_keys.append( ( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2c.conv.weight", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.2.convolution.weight", ) ) for last in last_key: rename_keys.append(( f"backbone.res_layers.{stage_idx}.blocks.{layer_idx}.branch2c.norm.{last}", f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.2.normalization.{last}", )) # fmt: on for i in range(config.encoder_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"encoder.encoder.{i}.layers.0.self_attn.out_proj.weight", f"model.encoder.encoder.{i}.layers.0.self_attn.out_proj.weight", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.self_attn.out_proj.bias", f"model.encoder.encoder.{i}.layers.0.self_attn.out_proj.bias", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.linear1.weight", f"model.encoder.encoder.{i}.layers.0.fc1.weight", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.linear1.bias", f"model.encoder.encoder.{i}.layers.0.fc1.bias", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.linear2.weight", f"model.encoder.encoder.{i}.layers.0.fc2.weight", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.linear2.bias", f"model.encoder.encoder.{i}.layers.0.fc2.bias", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.norm1.weight", f"model.encoder.encoder.{i}.layers.0.self_attn_layer_norm.weight", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.norm1.bias", f"model.encoder.encoder.{i}.layers.0.self_attn_layer_norm.bias", ) ) rename_keys.append( ( f"encoder.encoder.{i}.layers.0.norm2.weight", f"model.encoder.encoder.{i}.layers.0.final_layer_norm.weight", ) ) 
rename_keys.append( ( f"encoder.encoder.{i}.layers.0.norm2.bias", f"model.encoder.encoder.{i}.layers.0.final_layer_norm.bias", ) ) for j in range(0, 3): rename_keys.append((f"encoder.input_proj.{j}.0.weight", f"model.encoder_input_proj.{j}.0.weight")) for last in last_key: rename_keys.append((f"encoder.input_proj.{j}.1.{last}", f"model.encoder_input_proj.{j}.1.{last}")) block_levels = 3 if config.backbone_config.layer_type != "basic" else 4 for i in range(len(config.encoder_in_channels) - 1): # encoder layers: hybridencoder parts for j in range(1, block_levels): rename_keys.append( (f"encoder.fpn_blocks.{i}.conv{j}.conv.weight", f"model.encoder.fpn_blocks.{i}.conv{j}.conv.weight") ) for last in last_key: rename_keys.append( ( f"encoder.fpn_blocks.{i}.conv{j}.norm.{last}", f"model.encoder.fpn_blocks.{i}.conv{j}.norm.{last}", ) ) rename_keys.append((f"encoder.lateral_convs.{i}.conv.weight", f"model.encoder.lateral_convs.{i}.conv.weight")) for last in last_key: rename_keys.append( (f"encoder.lateral_convs.{i}.norm.{last}", f"model.encoder.lateral_convs.{i}.norm.{last}") ) for j in range(3): for k in range(1, 3): rename_keys.append( ( f"encoder.fpn_blocks.{i}.bottlenecks.{j}.conv{k}.conv.weight", f"model.encoder.fpn_blocks.{i}.bottlenecks.{j}.conv{k}.conv.weight", ) ) for last in last_key: rename_keys.append( ( f"encoder.fpn_blocks.{i}.bottlenecks.{j}.conv{k}.norm.{last}", f"model.encoder.fpn_blocks.{i}.bottlenecks.{j}.conv{k}.norm.{last}", ) ) for j in range(1, block_levels): rename_keys.append( (f"encoder.pan_blocks.{i}.conv{j}.conv.weight", f"model.encoder.pan_blocks.{i}.conv{j}.conv.weight") ) for last in last_key: rename_keys.append( ( f"encoder.pan_blocks.{i}.conv{j}.norm.{last}", f"model.encoder.pan_blocks.{i}.conv{j}.norm.{last}", ) ) for j in range(3): for k in range(1, 3): rename_keys.append( ( f"encoder.pan_blocks.{i}.bottlenecks.{j}.conv{k}.conv.weight", f"model.encoder.pan_blocks.{i}.bottlenecks.{j}.conv{k}.conv.weight", ) ) for last in last_key: rename_keys.append( ( f"encoder.pan_blocks.{i}.bottlenecks.{j}.conv{k}.norm.{last}", f"model.encoder.pan_blocks.{i}.bottlenecks.{j}.conv{k}.norm.{last}", ) ) rename_keys.append( (f"encoder.downsample_convs.{i}.conv.weight", f"model.encoder.downsample_convs.{i}.conv.weight") ) for last in last_key: rename_keys.append( (f"encoder.downsample_convs.{i}.norm.{last}", f"model.encoder.downsample_convs.{i}.norm.{last}") ) for i in range(config.decoder_layers): # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"decoder.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.value_proj.weight", 
f"model.decoder.layers.{i}.encoder_attn.value_proj.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight", ) ) rename_keys.append( ( f"decoder.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias", ) ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"decoder.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight")) rename_keys.append((f"decoder.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias")) rename_keys.append((f"decoder.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight")) rename_keys.append((f"decoder.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"decoder.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append( (f"decoder.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") ) for i in range(config.decoder_layers): # decoder + class and bounding box heads rename_keys.append( ( f"decoder.dec_score_head.{i}.weight", f"model.decoder.class_embed.{i}.weight", ) ) rename_keys.append( ( f"decoder.dec_score_head.{i}.bias", f"model.decoder.class_embed.{i}.bias", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.0.weight", f"model.decoder.bbox_embed.{i}.layers.0.weight", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.0.bias", f"model.decoder.bbox_embed.{i}.layers.0.bias", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.1.weight", f"model.decoder.bbox_embed.{i}.layers.1.weight", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.1.bias", f"model.decoder.bbox_embed.{i}.layers.1.bias", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.2.weight", f"model.decoder.bbox_embed.{i}.layers.2.weight", ) ) rename_keys.append( ( f"decoder.dec_bbox_head.{i}.layers.2.bias", f"model.decoder.bbox_embed.{i}.layers.2.bias", ) ) # decoder projection for i in range(len(config.decoder_in_channels)): rename_keys.append( ( f"decoder.input_proj.{i}.conv.weight", f"model.decoder_input_proj.{i}.0.weight", ) ) for last in last_key: rename_keys.append( ( f"decoder.input_proj.{i}.norm.{last}", f"model.decoder_input_proj.{i}.1.{last}", ) ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("decoder.denoising_class_embed.weight", "model.denoising_class_embed.weight"), ("decoder.query_pos_head.layers.0.weight", "model.decoder.query_pos_head.layers.0.weight"), ("decoder.query_pos_head.layers.0.bias", "model.decoder.query_pos_head.layers.0.bias"), ("decoder.query_pos_head.layers.1.weight", "model.decoder.query_pos_head.layers.1.weight"), ("decoder.query_pos_head.layers.1.bias", "model.decoder.query_pos_head.layers.1.bias"), ("decoder.enc_output.0.weight", 
"model.enc_output.0.weight"), ("decoder.enc_output.0.bias", "model.enc_output.0.bias"), ("decoder.enc_output.1.weight", "model.enc_output.1.weight"), ("decoder.enc_output.1.bias", "model.enc_output.1.bias"), ("decoder.enc_score_head.weight", "model.enc_score_head.weight"), ("decoder.enc_score_head.bias", "model.enc_score_head.bias"), ("decoder.enc_bbox_head.layers.0.weight", "model.enc_bbox_head.layers.0.weight"), ("decoder.enc_bbox_head.layers.0.bias", "model.enc_bbox_head.layers.0.bias"), ("decoder.enc_bbox_head.layers.1.weight", "model.enc_bbox_head.layers.1.weight"), ("decoder.enc_bbox_head.layers.1.bias", "model.enc_bbox_head.layers.1.bias"), ("decoder.enc_bbox_head.layers.2.weight", "model.enc_bbox_head.layers.2.weight"), ("decoder.enc_bbox_head.layers.2.bias", "model.enc_bbox_head.layers.2.bias"), ] ) return rename_keys def rename_key(state_dict, old, new): try: val = state_dict.pop(old) state_dict[new] = val except Exception: pass def read_in_q_k_v(state_dict, config): prefix = "" encoder_hidden_dim = config.encoder_hidden_dim # first: transformer encoder for i in range(config.encoder_layers): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}encoder.encoder.{i}.layers.0.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}encoder.encoder.{i}.layers.0.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.q_proj.weight"] = in_proj_weight[ :encoder_hidden_dim, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.q_proj.bias"] = in_proj_bias[:encoder_hidden_dim] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.k_proj.weight"] = in_proj_weight[ encoder_hidden_dim : 2 * encoder_hidden_dim, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.k_proj.bias"] = in_proj_bias[ encoder_hidden_dim : 2 * encoder_hidden_dim ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.v_proj.weight"] = in_proj_weight[ -encoder_hidden_dim:, : ] state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.v_proj.bias"] = in_proj_bias[-encoder_hidden_dim:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(config.decoder_layers): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"{prefix}decoder.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}decoder.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_rt_detr_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, repo_id): """ Copy/paste/tweak model's 
weights to our RTDETR structure. """ # load default config config = get_rt_detr_config(model_name) # load original model from torch hub model_name_to_checkpoint_url = { "rtdetr_r18vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r18vd_dec3_6x_coco_from_paddle.pth", "rtdetr_r34vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r34vd_dec4_6x_coco_from_paddle.pth", "rtdetr_r50vd_m": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r50vd_m_6x_coco_from_paddle.pth", "rtdetr_r50vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r50vd_6x_coco_from_paddle.pth", "rtdetr_r101vd": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r101vd_6x_coco_from_paddle.pth", "rtdetr_r18vd_coco_o365": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r18vd_5x_coco_objects365_from_paddle.pth", "rtdetr_r50vd_coco_o365": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r50vd_2x_coco_objects365_from_paddle.pth", "rtdetr_r101vd_coco_o365": "https://github.com/lyuwenyu/storage/releases/download/v0.1/rtdetr_r101vd_2x_coco_objects365_from_paddle.pth", } logger.info(f"Converting model {model_name}...") state_dict = torch.hub.load_state_dict_from_url(model_name_to_checkpoint_url[model_name], map_location="cpu")[ "ema" ]["module"] # rename keys for src, dest in create_rename_keys(config): rename_key(state_dict, src, dest) # query, key and value matrices need special treatment read_in_q_k_v(state_dict, config) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them for key in state_dict.copy(): if key.endswith("num_batches_tracked"): del state_dict[key] # for two_stage if "bbox_embed" in key or ("class_embed" in key and "denoising_" not in key): state_dict[key.split("model.decoder.")[-1]] = state_dict[key] # finally, create HuggingFace model and load state dict model = RTDetrForObjectDetection(config) model.load_state_dict(state_dict) model.eval() # load image processor image_processor = RTDetrImageProcessor() # prepare image img = prepare_img() # preprocess image transformations = transforms.Compose( [ transforms.Resize([640, 640], interpolation=transforms.InterpolationMode.BILINEAR), transforms.ToTensor(), ] ) original_pixel_values = transformations(img).unsqueeze(0) # insert batch dimension encoding = image_processor(images=img, return_tensors="pt") pixel_values = encoding["pixel_values"] assert torch.allclose(original_pixel_values, pixel_values) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) pixel_values = pixel_values.to(device) # Pass image by the model outputs = model(pixel_values) if model_name == "rtdetr_r18vd": expected_slice_logits = torch.tensor( [ [-4.3364253, -6.465683, -3.6130402], [-4.083815, -6.4039373, -6.97881], [-4.192215, -7.3410473, -6.9027247], ] ) expected_slice_boxes = torch.tensor( [ [0.16868353, 0.19833282, 0.21182671], [0.25559652, 0.55121744, 0.47988364], [0.7698693, 0.4124569, 0.46036878], ] ) elif model_name == "rtdetr_r34vd": expected_slice_logits = torch.tensor( [ [-4.3727384, -4.7921476, -5.7299604], [-4.840536, -8.455345, -4.1745796], [-4.1277084, -5.2154565, -5.7852697], ] ) expected_slice_boxes = torch.tensor( [ [0.258278, 0.5497808, 0.4732004], [0.16889669, 0.19890057, 0.21138911], [0.76632994, 0.4147879, 0.46851268], ] ) elif model_name == "rtdetr_r50vd_m": expected_slice_logits = torch.tensor( [ [-4.319764, -6.1349025, -6.094794], [-5.1056995, 
-7.744766, -4.803956], [-4.7685347, -7.9278393, -4.5751696], ] ) expected_slice_boxes = torch.tensor( [ [0.2582739, 0.55071366, 0.47660282], [0.16811174, 0.19954777, 0.21292639], [0.54986024, 0.2752091, 0.0561416], ] ) elif model_name == "rtdetr_r50vd": expected_slice_logits = torch.tensor( [ [-4.6476398, -5.001154, -4.9785104], [-4.1593494, -4.7038546, -5.946485], [-4.4374595, -4.658361, -6.2352347], ] ) expected_slice_boxes = torch.tensor( [ [0.16880608, 0.19992264, 0.21225442], [0.76837635, 0.4122631, 0.46368608], [0.2595386, 0.5483334, 0.4777486], ] ) elif model_name == "rtdetr_r101vd": expected_slice_logits = torch.tensor( [ [-4.6162, -4.9189, -4.6656], [-4.4701, -4.4997, -4.9659], [-5.6641, -7.9000, -5.0725], ] ) expected_slice_boxes = torch.tensor( [ [0.7707, 0.4124, 0.4585], [0.2589, 0.5492, 0.4735], [0.1688, 0.1993, 0.2108], ] ) elif model_name == "rtdetr_r18vd_coco_o365": expected_slice_logits = torch.tensor( [ [-4.8726, -5.9066, -5.2450], [-4.8157, -6.8764, -5.1656], [-4.7492, -5.7006, -5.1333], ] ) expected_slice_boxes = torch.tensor( [ [0.2552, 0.5501, 0.4773], [0.1685, 0.1986, 0.2104], [0.7692, 0.4141, 0.4620], ] ) elif model_name == "rtdetr_r50vd_coco_o365": expected_slice_logits = torch.tensor( [ [-4.6491, -3.9252, -5.3163], [-4.1386, -5.0348, -3.9016], [-4.4778, -4.5423, -5.7356], ] ) expected_slice_boxes = torch.tensor( [ [0.2583, 0.5492, 0.4747], [0.5501, 0.2754, 0.0574], [0.7693, 0.4137, 0.4613], ] ) elif model_name == "rtdetr_r101vd_coco_o365": expected_slice_logits = torch.tensor( [ [-4.5152, -5.6811, -5.7311], [-4.5358, -7.2422, -5.0941], [-4.6919, -5.5834, -6.0145], ] ) expected_slice_boxes = torch.tensor( [ [0.7703, 0.4140, 0.4583], [0.1686, 0.1991, 0.2107], [0.2570, 0.5496, 0.4750], ] ) else: raise ValueError(f"Unknown rt_detr_name: {model_name}") assert torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits.to(outputs.logits.device), atol=1e-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes.to(outputs.pred_boxes.device), atol=1e-3) if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: # Upload model, image processor and config to the hub logger.info("Uploading PyTorch model and image processor to the hub...") config.push_to_hub( repo_id=repo_id, commit_message="Add config from convert_rt_detr_original_pytorch_checkpoint_to_pytorch.py" ) model.push_to_hub( repo_id=repo_id, commit_message="Add model from convert_rt_detr_original_pytorch_checkpoint_to_pytorch.py" ) image_processor.push_to_hub( repo_id=repo_id, commit_message="Add image processor from convert_rt_detr_original_pytorch_checkpoint_to_pytorch.py", ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="rtdetr_r50vd", type=str, help="model_name of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." 
) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") parser.add_argument( "--repo_id", type=str, help="repo_id where the model will be pushed to.", ) args = parser.parse_args() convert_rt_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.repo_id)
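# Example invocation (a sketch; the dump folder and repo id below are placeholders, not values
# taken from this script):
#
#   python convert_rt_detr_original_pytorch_checkpoint_to_hf.py \
#       --model_name rtdetr_r50vd \
#       --pytorch_dump_folder_path ./rtdetr_r50vd_hf \
#       --push_to_hub \
#       --repo_id <your-hub-username>/rtdetr_r50vd
#
# The original checkpoint is downloaded from the URL table in `convert_rt_detr_checkpoint`, its keys are
# renamed via `create_rename_keys` and `read_in_q_k_v`, the converted model's outputs are checked against
# the expected logit/box slices, and the result is saved (and optionally pushed) as an
# `RTDetrForObjectDetection` model.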
transformers/src/transformers/models/rt_detr/convert_rt_detr_original_pytorch_checkpoint_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/rt_detr/convert_rt_detr_original_pytorch_checkpoint_to_hf.py", "repo_id": "transformers", "token_count": 17771 }
464
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SAM model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class SamPromptEncoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SamPromptEncoder`]. The [`SamPromptEncoder`] module is used to encode the input 2D points and bounding boxes. Instantiating a configuration defaults will yield a similar configuration to that of the SAM-vit-h [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. image_size (`int`, *optional*, defaults to 1024): The expected output resolution of the image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. mask_input_channels (`int`, *optional*, defaults to 16): The number of channels to be fed to the `MaskDecoder` module. num_point_embeddings (`int`, *optional*, defaults to 4): The number of point embeddings to be used. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the encoder and pooler. """ base_config_key = "prompt_encoder_config" def __init__( self, hidden_size=256, image_size=1024, patch_size=16, mask_input_channels=16, num_point_embeddings=4, hidden_act="gelu", layer_norm_eps=1e-6, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.image_size = image_size self.patch_size = patch_size self.image_embedding_size = image_size // patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps class SamMaskDecoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SamMaskDecoder`]. It is used to instantiate a SAM mask decoder to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of the SAM-vit-h [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function used inside the `SamMaskDecoder` module. mlp_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. 
num_hidden_layers (`int`, *optional*, defaults to 2): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. attention_downsample_rate (`int`, *optional*, defaults to 2): The downsampling rate of the attention layer. num_multimask_outputs (`int`, *optional*, defaults to 3): The number of outputs from the `SamMaskDecoder` module. In the Segment Anything paper, this is set to 3. iou_head_depth (`int`, *optional*, defaults to 3): The number of layers in the IoU head module. iou_head_hidden_dim (`int`, *optional*, defaults to 256): The dimensionality of the hidden states in the IoU head module. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. """ base_config_key = "mask_decoder_config" def __init__( self, hidden_size=256, hidden_act="relu", mlp_dim=2048, num_hidden_layers=2, num_attention_heads=8, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=256, layer_norm_eps=1e-6, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.layer_norm_eps = layer_norm_eps class SamVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SamVisionModel`]. It is used to instantiate a SAM vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of the SAM ViT-h [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. output_channels (`int`, *optional*, defaults to 256): Dimensionality of the output channels in the Patch Encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input image. image_size (`int`, *optional*, defaults to 1024): Expected resolution. Target size of the resized input image. patch_size (`int`, *optional*, defaults to 16): Size of the patches to be extracted from the input image. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 1e-10): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to query, key, value projections. 
mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of mlp hidden dim to embedding dim. use_abs_pos (`bool`, *optional*, defaults to `True`): Whether to use absolute position embedding. use_rel_pos (`bool`, *optional*, defaults to `True`): Whether to use relative position embedding. window_size (`int`, *optional*, defaults to 14): Window size for relative position. global_attn_indexes (`list[int]`, *optional*, defaults to `[2, 5, 8, 11]`): The indexes of the global attention layers. num_pos_feats (`int`, *optional*, defaults to 128): The dimensionality of the position embedding. mlp_dim (`int`, *optional*): The dimensionality of the MLP layer in the Transformer encoder. If `None`, defaults to `mlp_ratio * hidden_size`. Example: ```python >>> from transformers import ( ... SamVisionConfig, ... SamVisionModel, ... ) >>> # Initializing a SamVisionConfig with `"facebook/sam-vit-huge"` style configuration >>> configuration = SamVisionConfig() >>> # Initializing a SamVisionModel (with random weights) from the `"facebook/sam-vit-huge"` style configuration >>> model = SamVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" base_config_key = "vision_config" model_type = "sam_vision_model" def __init__( self, hidden_size=768, output_channels=256, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=1024, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=128, mlp_dim=None, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.output_channels = output_channels self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.use_abs_pos = use_abs_pos self.use_rel_pos = use_rel_pos self.window_size = window_size self.global_attn_indexes = global_attn_indexes self.num_pos_feats = num_pos_feats self.mlp_dim = int(hidden_size * mlp_ratio) if mlp_dim is None else mlp_dim class SamConfig(PretrainedConfig): r""" [`SamConfig`] is the configuration class to store the configuration of a [`SamModel`]. It is used to instantiate a SAM model according to the specified arguments, defining the vision model, prompt-encoder model and mask decoder configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the SAM-ViT-H [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (Union[`dict`, `SamVisionConfig`], *optional*): Dictionary of configuration options used to initialize [`SamVisionConfig`]. prompt_encoder_config (Union[`dict`, `SamPromptEncoderConfig`], *optional*): Dictionary of configuration options used to initialize [`SamPromptEncoderConfig`]. mask_decoder_config (Union[`dict`, `SamMaskDecoderConfig`], *optional*): Dictionary of configuration options used to initialize [`SamMaskDecoderConfig`]. kwargs (*optional*): Dictionary of keyword arguments. 
    Example:

    ```python
    >>> from transformers import (
    ...     SamVisionConfig,
    ...     SamPromptEncoderConfig,
    ...     SamMaskDecoderConfig,
    ...     SamModel,
    ... )

    >>> # Initializing a SamConfig with `"facebook/sam-vit-huge"` style configuration
    >>> configuration = SamConfig()

    >>> # Initializing a SamModel (with random weights) from the `"facebook/sam-vit-huge"` style configuration
    >>> model = SamModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a SamConfig from a SamVisionConfig, SamPromptEncoderConfig, and SamMaskDecoderConfig

    >>> # Initializing SAM vision, prompt encoder and mask decoder configurations
    >>> vision_config = SamVisionConfig()
    >>> prompt_encoder_config = SamPromptEncoderConfig()
    >>> mask_decoder_config = SamMaskDecoderConfig()

    >>> config = SamConfig(vision_config, prompt_encoder_config, mask_decoder_config)
    ```"""

    model_type = "sam"
    sub_configs = {
        "prompt_encoder_config": SamPromptEncoderConfig,
        "mask_decoder_config": SamMaskDecoderConfig,
        "vision_config": SamVisionConfig,
    }

    def __init__(
        self,
        vision_config=None,
        prompt_encoder_config=None,
        mask_decoder_config=None,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        vision_config = vision_config if vision_config is not None else {}
        prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
        mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}

        if isinstance(vision_config, SamVisionConfig):
            vision_config = vision_config.to_dict()
        if isinstance(prompt_encoder_config, SamPromptEncoderConfig):
            prompt_encoder_config = prompt_encoder_config.to_dict()
        if isinstance(mask_decoder_config, SamMaskDecoderConfig):
            mask_decoder_config = mask_decoder_config.to_dict()
        self.vision_config = SamVisionConfig(**vision_config)
        self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config)
        self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config)
        self.initializer_range = initializer_range


__all__ = ["SamConfig", "SamMaskDecoderConfig", "SamPromptEncoderConfig", "SamVisionConfig"]
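# Usage sketch: overriding a couple of vision-encoder fields and round-tripping the composite
# config through save/load. The field values below are illustrative assumptions, not recommended settings.
if __name__ == "__main__":
    from transformers import SamConfig, SamVisionConfig

    vision_config = SamVisionConfig(image_size=512, global_attn_indexes=[1, 3, 5])
    config = SamConfig(vision_config=vision_config)

    config.save_pretrained("./sam-custom-config")  # writes config.json with the nested sub-configs
    reloaded = SamConfig.from_pretrained("./sam-custom-config")
    assert reloaded.vision_config.image_size == 512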
transformers/src/transformers/models/sam/configuration_sam.py/0
{ "file_path": "transformers/src/transformers/models/sam/configuration_sam.py", "repo_id": "transformers", "token_count": 5599 }
465
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audio/Text processor class for SeamlessM4T """ from ...processing_utils import ProcessorMixin class SeamlessM4TProcessor(ProcessorMixin): r""" Constructs a SeamlessM4T processor which wraps a SeamlessM4T feature extractor and a SeamlessM4T tokenizer into a single processor. [`SeamlessM4TProcessor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`SeamlessM4TTokenizerFast`]. See the [`~SeamlessM4TProcessor.__call__`] and [`~SeamlessM4TProcessor.decode`] for more information. Args: feature_extractor ([`SeamlessM4TFeatureExtractor`]): The audio processor is a required input. tokenizer ([`SeamlessM4TTokenizerFast`]): The tokenizer is a required input. """ feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = ("SeamlessM4TTokenizer", "SeamlessM4TTokenizerFast") def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, text=None, audios=None, src_lang=None, tgt_lang=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `text` and `kwargs` arguments to SeamlessM4TTokenizerFast's [`~SeamlessM4TTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the audio(s), this method forwards the `audios` and `kwrags` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the docstring of the above two methods for more information. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audios (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels, and T the sample length of the audio. src_lang (`str`, *optional*): The language code of the input texts/audios. If not specified, the last `src_lang` specified will be used. tgt_lang (`str`, *optional*): The code of the target language. If not specified, the last `tgt_lang` specified will be used. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the tokenizer. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). 
            - **input_features** -- Audio input features to be fed to a model. Returned when `audios` is not
              `None`.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        elif text is not None and audios is not None:
            raise ValueError(
                "Text and audios are mutually exclusive when passed to `SeamlessM4T`. Specify one or the other."
            )
        elif text is not None:
            if tgt_lang is not None:
                self.tokenizer.tgt_lang = tgt_lang
            if src_lang is not None:
                self.tokenizer.src_lang = src_lang
            encoding = self.tokenizer(text, **kwargs)

            return encoding
        else:
            encoding = self.feature_extractor(audios, sampling_rate=sampling_rate, **kwargs)

            return encoding


__all__ = ["SeamlessM4TProcessor"]
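# Usage sketch: text and audio are mutually exclusive inputs, so the processor is called once per
# modality. It assumes the public "facebook/hf-seamless-m4t-medium" checkpoint and SeamlessM4T's
# three-letter language codes; the waveform is random noise, used only to show the call signature.
if __name__ == "__main__":
    import numpy as np

    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")

    # Text branch: forwarded to the tokenizer, with optional source/target language codes.
    text_inputs = processor(text="Hello, my dog is cute.", src_lang="eng", tgt_lang="fra", return_tensors="pt")

    # Audio branch: forwarded to the feature extractor together with the sampling rate.
    waveform = np.random.randn(16000).astype(np.float32)  # ~1 second of dummy mono audio at 16 kHz
    audio_inputs = processor(audios=waveform, sampling_rate=16000, return_tensors="pt")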
transformers/src/transformers/models/seamless_m4t/processing_seamless_m4t.py/0
{ "file_path": "transformers/src/transformers/models/seamless_m4t/processing_seamless_m4t.py", "repo_id": "transformers", "token_count": 1969 }
466
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/segformer/modular_segformer.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_segformer.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( BaseImageProcessorFast, DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, is_torch_tensor, ) from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available, ) if is_torch_available(): import torch if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F elif is_torchvision_available(): from torchvision.transforms import functional as F class SegformerFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): r""" do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. """ do_reduce_labels: Optional[bool] @auto_docstring class SegformerImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_DEFAULT_MEAN image_std = IMAGENET_DEFAULT_STD size = {"height": 512, "width": 512} default_to_square = True crop_size = None do_resize = True do_center_crop = None do_rescale = True do_normalize = True do_reduce_labels = False valid_kwargs = SegformerFastImageProcessorKwargs rescale_factor = 1 / 255 def __init__(self, **kwargs: Unpack[SegformerFastImageProcessorKwargs]): super().__init__(**kwargs) def reduce_label(self, labels: list["torch.Tensor"]): for idx in range(len(labels)): label = labels[idx] label = torch.where(label == 0, torch.tensor(255, dtype=label.dtype), label) label = label - 1 label = torch.where(label == 254, torch.tensor(255, dtype=label.dtype), label) labels[idx] = label return label @auto_docstring def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, **kwargs: Unpack[SegformerFastImageProcessorKwargs], ) -> BatchFeature: r""" segmentation_maps (`ImageInput`, *optional*): The segmentation maps to preprocess. 
""" return super().preprocess(images, segmentation_maps, **kwargs) def _preprocess_image_like_inputs( self, images: ImageInput, segmentation_maps: Optional[ImageInput], do_convert_rgb: bool, input_data_format: ChannelDimension, device: Optional[Union[str, "torch.device"]] = None, **kwargs: Unpack[SegformerFastImageProcessorKwargs], ) -> BatchFeature: """ Preprocess image-like inputs. """ images = self._prepare_image_like_inputs( images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device ) images_kwargs = kwargs.copy() images_kwargs["do_reduce_labels"] = False batch_feature = self._preprocess(images, **images_kwargs) if segmentation_maps is not None: processed_segmentation_maps = self._prepare_image_like_inputs( images=segmentation_maps, expected_ndims=2, do_convert_rgb=False, input_data_format=ChannelDimension.FIRST, ) segmentation_maps_kwargs = kwargs.copy() segmentation_maps_kwargs.update( { "do_normalize": False, "do_rescale": False, # Nearest interpolation is used for segmentation maps instead of BILINEAR. "interpolation": F.InterpolationMode.NEAREST_EXACT if is_torchvision_v2_available() else F.InterpolationMode.NEAREST, } ) processed_segmentation_maps = self._preprocess( images=processed_segmentation_maps, **segmentation_maps_kwargs ).pixel_values batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64) return batch_feature def _preprocess( self, images: list["torch.Tensor"], do_reduce_labels: bool, interpolation: Optional["F.InterpolationMode"], do_resize: bool, do_rescale: bool, do_normalize: bool, size: SizeDict, rescale_factor: float, image_mean: Union[float, list[float]], image_std: Union[float, list[float]], disable_grouping: bool, return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: # Return type can be list if return_tensors=None if do_reduce_labels: images = self.reduce_label(images) # Apply reduction if needed # Group images by size for batched resizing resized_images = images if do_resize: grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): resized_stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation) resized_images_grouped[shape] = resized_stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing (rescale/normalize) # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) # Stack images into a single tensor if return_tensors is set processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None): """ Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. 
Args: outputs ([`SegformerForSemanticSegmentation`]): Raw outputs of the model. target_sizes (`list[Tuple]` of length `batch_size`, *optional*): List of tuples corresponding to the requested final size (height, width) of each prediction. If unset, predictions will not be resized. Returns: semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. """ # TODO: add support for other frameworks logits = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(target_sizes): target_sizes = target_sizes.numpy() semantic_segmentation = [] for idx in range(len(logits)): resized_logits = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = logits.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation __all__ = ["SegformerImageProcessorFast"]
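A minimal usage sketch for the fast processor defined above, assuming a `transformers` release that ships `SegformerImageProcessorFast`; the dummy image, the 150-class map, and their sizes are illustrative only, not taken from the file:

```python
# Hedged usage sketch for the processor above; dummy tensors stand in for real images.
import torch
from transformers import SegformerImageProcessorFast

processor = SegformerImageProcessorFast()  # defaults: resize to 512x512, rescale by 1/255, normalize
images = [torch.randint(0, 256, (3, 640, 480), dtype=torch.uint8)]  # one dummy RGB image
maps = [torch.randint(0, 150, (640, 480), dtype=torch.uint8)]       # one dummy segmentation map

batch = processor.preprocess(images, segmentation_maps=maps, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 512, 512])
print(batch["labels"].shape)        # torch.Size([1, 512, 512]), cast to torch.int64
```

Note that the segmentation maps are resized with nearest-neighbour interpolation and are never rescaled or normalized, as enforced in `_preprocess_image_like_inputs` above.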
transformers/src/transformers/models/segformer/image_processing_segformer_fast.py/0
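`post_process_semantic_segmentation` only needs an object exposing `.logits`, so it can be exercised in isolation. A hedged sketch with random logits (the batch size, label count, and resolutions are illustrative; SegFormer logits typically come out at a fraction of the input resolution):

```python
# Hedged sketch of the post-processing step defined in the file above, using random logits.
import torch
from types import SimpleNamespace
from transformers import SegformerImageProcessorFast

processor = SegformerImageProcessorFast()
outputs = SimpleNamespace(logits=torch.randn(2, 150, 128, 128))  # (batch, num_labels, h, w)
maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512), (640, 480)])
print(maps[0].shape, maps[1].shape)  # torch.Size([512, 512]) torch.Size([640, 480])
```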
{ "file_path": "transformers/src/transformers/models/segformer/image_processing_segformer_fast.py", "repo_id": "transformers", "token_count": 4491 }
467
# coding=utf-8 # Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SEW-D model configuration""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class SEWDConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SEWDModel`]. It is used to instantiate a SEW-D model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW-D [asapp/sew-d-tiny-100k](https://huggingface.co/asapp/sew-d-tiny-100k) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the SEW-D model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SEWD`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. squeeze_factor (`int`, *optional*, defaults to 2): Sequence length downsampling factor after the encoder and upsampling factor after the transformer. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). position_buckets (`int`, *optional*, defaults to 256): The maximum size of relative position embeddings. share_att_key (`bool`, *optional*, defaults to `True`): Whether to share attention key with c2p and p2c. relative_attention (`bool`, *optional*, defaults to `True`): Whether to use relative position encoding. pos_att_type (`tuple[str]`, *optional*, defaults to `("p2c", "c2p")`): The type of relative position attention, it can be a combination of `("p2c", "c2p")`, e.g. `("p2c")`, `("p2c", "c2p")`, `("p2c", "c2p")`. norm_rel_ebd (`str`, *optional*, defaults to `"layer_norm"`): Whether to use layer norm in relative embedding (`"layer_norm"` if yes) hidden_act (`str` or `function`, *optional*, defaults to `"gelu_python"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, `"gelu_python"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): Deprecated. Not used by the model and will be removed in a future version. 
activation_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`SEWDForCTC`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-7): The epsilon used by the layer normalization layers in the transformer encoder. feature_layer_norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon used by the layer normalization after the feature encoder. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the feature encoder. feat_extract_activation (`str, `optional`, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://huggingface.co/papers/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. 
If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2),: The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks'' mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0),: The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks'' diversity_loss_weight (`int`, *optional*, defaults to 0.1): The weight of the codebook diversity loss component. ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`SEWDForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`SEWDForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`Wav2Vec2ForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. 
Example: ```python >>> from transformers import SEWDConfig, SEWDModel >>> # Initializing a SEW-D asapp/sew-d-tiny-100k style configuration >>> configuration = SEWDConfig() >>> # Initializing a model (with random weights) from the asapp/sew-d-tiny-100k style configuration >>> model = SEWDModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "sew-d" def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.squeeze_factor = squeeze_factor self.max_position_embeddings = max_position_embeddings self.position_buckets = position_buckets self.share_att_key = share_att_key self.relative_attention = relative_attention self.norm_rel_ebd = norm_rel_ebd self.pos_att_type = list(pos_att_type) self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self._hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layer_norm_eps = layer_norm_eps self.feature_layer_norm_eps = feature_layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. " "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, " f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) " f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." 
) # fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # sequence classification self.use_weighted_layer_sum = use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) def to_dict(self): """ Serializes this instance to a Python dictionary. """ output = super().to_dict() output["hidden_dropout"] = output.pop("_hidden_dropout") return output __all__ = ["SEWDConfig"]
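A short sketch of the attributes derived by the constructor above, using only the default values shown in the signature; the printed numbers follow directly from the `conv_*` tuples:

```python
# Hedged sketch: derived attributes of the default SEWDConfig defined above.
from transformers import SEWDConfig

config = SEWDConfig()
print(config.num_feat_extract_layers)  # 13, one per entry of conv_dim / conv_stride / conv_kernel
# inputs_to_logits_ratio is the product of the conv strides:
# 5 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 = 320 input samples per encoder frame
print(config.inputs_to_logits_ratio)   # 320
# hidden_dropout is deprecated: it is stored as `_hidden_dropout` but serialized back
# under its public name by the custom to_dict() above
print(config.to_dict()["hidden_dropout"])  # 0.1
```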
transformers/src/transformers/models/sew_d/configuration_sew_d.py/0
{ "file_path": "transformers/src/transformers/models/sew_d/configuration_sew_d.py", "repo_id": "transformers", "token_count": 6396 }
468
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Speech2Text """ import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class Speech2TextProcessor(ProcessorMixin): r""" Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor. [`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and [`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more information. Args: feature_extractor (`Speech2TextFeatureExtractor`): An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Speech2TextTokenizer`): An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "Speech2TextFeatureExtractor" tokenizer_class = "Speech2TextTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor's [`~Speech2TextFeatureExtractor.__call__`] and returns its output. If used in the context [`~Speech2TextProcessor.as_target_processor`] this method forwards all its arguments to Speech2TextTokenizer's [`~Speech2TextTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.") audio = kwargs.pop("raw_speech") else: audio = kwargs.pop("audio", None) sampling_rate = kwargs.pop("sampling_rate", None) text = kwargs.pop("text", None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." 
) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.feature_extractor self._in_target_context_manager = False __all__ = ["Speech2TextProcessor"]
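A minimal sketch of the combined audio-and-text call handled by `__call__` above. The checkpoint name and the silent clip are illustrative; loading the tokenizer additionally requires `sentencepiece` to be installed:

```python
# Hedged usage sketch; the checkpoint and the audio clip are illustrative only.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.zeros(16000, dtype=np.float32)  # one second of 16 kHz audio

# Audio and text can be passed in the same call; the tokenized transcript lands in `labels`.
inputs = processor(audio=audio, sampling_rate=16000, text="a transcript", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_features', 'labels']
```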
transformers/src/transformers/models/speech_to_text/processing_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text/processing_speech_to_text.py", "repo_id": "transformers", "token_count": 1575 }
469
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch SuperGlue model.""" import math from dataclasses import dataclass from typing import Optional, Union import torch from torch import nn from transformers import PreTrainedModel from transformers.models.superglue.configuration_superglue import SuperGlueConfig from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging from ..auto import AutoModelForKeypointDetection logger = logging.get_logger(__name__) def concat_pairs(tensor_tuple0: tuple[torch.Tensor], tensor_tuple1: tuple[torch.Tensor]) -> tuple[torch.Tensor]: """ Concatenate two tuples of tensors pairwise Args: tensor_tuple0 (`tuple[torch.Tensor]`): Tuple of tensors. tensor_tuple1 (`tuple[torch.Tensor]`): Tuple of tensors. Returns: (`tuple[torch.Tensor]`): Tuple of concatenated tensors. """ return tuple(torch.cat([tensor0, tensor1]) for tensor0, tensor1 in zip(tensor_tuple0, tensor_tuple1)) def normalize_keypoints(keypoints: torch.Tensor, height: int, width: int) -> torch.Tensor: """ Normalize keypoints locations based on image image_shape Args: keypoints (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`): Keypoints locations in (x, y) format. height (`int`): Image height. width (`int`): Image width. Returns: Normalized keypoints locations of shape (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`). """ size = torch.tensor([width, height], device=keypoints.device, dtype=keypoints.dtype)[None] center = size / 2 scaling = size.max(1, keepdim=True).values * 0.7 return (keypoints - center[:, None, :]) / scaling[:, None, :] def log_sinkhorn_iterations( log_cost_matrix: torch.Tensor, log_source_distribution: torch.Tensor, log_target_distribution: torch.Tensor, num_iterations: int, ) -> torch.Tensor: """ Perform Sinkhorn Normalization in Log-space for stability Args: log_cost_matrix (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`): Logarithm of the cost matrix. log_source_distribution (`torch.Tensor` of shape `(batch_size, num_rows)`): Logarithm of the source distribution. log_target_distribution (`torch.Tensor` of shape `(batch_size, num_columns)`): Logarithm of the target distribution. Returns: log_cost_matrix (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`): Logarithm of the optimal transport matrix. 
""" log_u_scaling = torch.zeros_like(log_source_distribution) log_v_scaling = torch.zeros_like(log_target_distribution) for _ in range(num_iterations): log_u_scaling = log_source_distribution - torch.logsumexp(log_cost_matrix + log_v_scaling.unsqueeze(1), dim=2) log_v_scaling = log_target_distribution - torch.logsumexp(log_cost_matrix + log_u_scaling.unsqueeze(2), dim=1) return log_cost_matrix + log_u_scaling.unsqueeze(2) + log_v_scaling.unsqueeze(1) def log_optimal_transport(scores: torch.Tensor, reg_param: torch.Tensor, iterations: int) -> torch.Tensor: """ Perform Differentiable Optimal Transport in Log-space for stability Args: scores: (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`): Cost matrix. reg_param: (`torch.Tensor` of shape `(batch_size, 1, 1)`): Regularization parameter. iterations: (`int`): Number of Sinkhorn iterations. Returns: log_optimal_transport_matrix: (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`): Logarithm of the optimal transport matrix. """ batch_size, num_rows, num_columns = scores.shape one_tensor = scores.new_tensor(1) num_rows_tensor, num_columns_tensor = (num_rows * one_tensor).to(scores), (num_columns * one_tensor).to(scores) source_reg_param = reg_param.expand(batch_size, num_rows, 1) target_reg_param = reg_param.expand(batch_size, 1, num_columns) reg_param = reg_param.expand(batch_size, 1, 1) couplings = torch.cat([torch.cat([scores, source_reg_param], -1), torch.cat([target_reg_param, reg_param], -1)], 1) log_normalization = -(num_rows_tensor + num_columns_tensor).log() log_source_distribution = torch.cat( [log_normalization.expand(num_rows), num_columns_tensor.log()[None] + log_normalization] ) log_target_distribution = torch.cat( [log_normalization.expand(num_columns), num_rows_tensor.log()[None] + log_normalization] ) log_source_distribution, log_target_distribution = ( log_source_distribution[None].expand(batch_size, -1), log_target_distribution[None].expand(batch_size, -1), ) log_optimal_transport_matrix = log_sinkhorn_iterations( couplings, log_source_distribution, log_target_distribution, num_iterations=iterations ) log_optimal_transport_matrix = log_optimal_transport_matrix - log_normalization # multiply probabilities by M+N return log_optimal_transport_matrix def arange_like(x, dim: int) -> torch.Tensor: return x.new_ones(x.shape[dim]).cumsum(0) - 1 @dataclass @auto_docstring( custom_intro=""" Base class for outputs of keypoint matching models. Due to the nature of keypoint detection and matching, the number of keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of images, the maximum number of matches is set as the dimension of the matches and matching scores. The mask tensor is used to indicate which values in the keypoints, matches and matching_scores tensors are keypoint matching information. """ ) class KeypointMatchingOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*): Loss computed during training. matches (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`): Index of keypoint matched in the other image. matching_scores (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`): Scores of predicted matches. keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`): Absolute (x, y) coordinates of predicted keypoints in a given image. mask (`torch.IntTensor` of shape `(batch_size, num_keypoints)`): Mask indicating which values in matches and matching_scores are keypoint matching information. 
hidden_states (`tuple[torch.FloatTensor, ...]`, *optional*): Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, 2, num_channels, num_keypoints)`, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`) attentions (`tuple[torch.FloatTensor, ...]`, *optional*): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, 2, num_heads, num_keypoints, num_keypoints)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) """ loss: Optional[torch.FloatTensor] = None matches: Optional[torch.FloatTensor] = None matching_scores: Optional[torch.FloatTensor] = None keypoints: Optional[torch.FloatTensor] = None mask: Optional[torch.IntTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None class SuperGlueMultiLayerPerceptron(nn.Module): def __init__(self, config: SuperGlueConfig, in_channels: int, out_channels: int) -> None: super().__init__() self.linear = nn.Linear(in_channels, out_channels) self.batch_norm = nn.BatchNorm1d(out_channels) self.activation = nn.ReLU() def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.linear(hidden_state) hidden_state = hidden_state.transpose(-1, -2) hidden_state = self.batch_norm(hidden_state) hidden_state = hidden_state.transpose(-1, -2) hidden_state = self.activation(hidden_state) return hidden_state class SuperGlueKeypointEncoder(nn.Module): def __init__(self, config: SuperGlueConfig) -> None: super().__init__() layer_sizes = config.keypoint_encoder_sizes hidden_size = config.hidden_size # 3 here consists of 2 for the (x, y) coordinates and 1 for the score of the keypoint encoder_channels = [3] + layer_sizes + [hidden_size] layers = [ SuperGlueMultiLayerPerceptron(config, encoder_channels[i - 1], encoder_channels[i]) for i in range(1, len(encoder_channels) - 1) ] layers.append(nn.Linear(encoder_channels[-2], encoder_channels[-1])) self.encoder = nn.ModuleList(layers) def forward( self, keypoints: torch.Tensor, scores: torch.Tensor, output_hidden_states: Optional[bool] = False, ) -> tuple[torch.Tensor, Optional[tuple[torch.Tensor]]]: scores = scores.unsqueeze(2) hidden_state = torch.cat([keypoints, scores], dim=2) all_hidden_states = () if output_hidden_states else None for layer in self.encoder: hidden_state = layer(hidden_state) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) return hidden_state, all_hidden_states class SuperGlueSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or 
self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None current_states = encoder_hidden_states if is_cross_attention else hidden_states attention_mask = encoder_attention_mask if is_cross_attention else attention_mask batch_size = hidden_states.shape[0] key_layer = ( self.key(current_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(current_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in SuperGlueModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (None,) return outputs class SuperGlueSelfOutput(nn.Module): def __init__(self, config: SuperGlueConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor, *args) -> torch.Tensor: hidden_states = self.dense(hidden_states) return hidden_states SUPERGLUE_SELF_ATTENTION_CLASSES = { "eager": SuperGlueSelfAttention, } class SuperGlueAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = SUPERGLUE_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type, ) self.output = SuperGlueSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class SuperGlueAttentionalPropagation(nn.Module): def __init__(self, config: SuperGlueConfig) -> None: super().__init__() hidden_size = config.hidden_size self.attention = SuperGlueAttention(config) mlp_channels = [hidden_size * 2, hidden_size * 2, hidden_size] layers = [ SuperGlueMultiLayerPerceptron(config, mlp_channels[i - 1], mlp_channels[i]) for i in range(1, len(mlp_channels) - 1) ] layers.append(nn.Linear(mlp_channels[-2], mlp_channels[-1])) self.mlp = nn.ModuleList(layers) def forward( self, descriptors: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, ) -> tuple[torch.Tensor, Optional[tuple[torch.Tensor]], Optional[tuple[torch.Tensor]]]: attention_outputs = 
self.attention( descriptors, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) output = attention_outputs[0] attention = attention_outputs[1:] hidden_state = torch.cat([descriptors, output], dim=2) all_hidden_states = () if output_hidden_states else None for layer in self.mlp: hidden_state = layer(hidden_state) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) return hidden_state, all_hidden_states, attention class SuperGlueAttentionalGNN(nn.Module): def __init__(self, config: SuperGlueConfig) -> None: super().__init__() self.hidden_size = config.hidden_size self.layers_types = config.gnn_layers_types self.layers = nn.ModuleList([SuperGlueAttentionalPropagation(config) for _ in range(len(self.layers_types))]) def forward( self, descriptors: torch.Tensor, mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: Optional[bool] = False, ) -> tuple[torch.Tensor, Optional[tuple], Optional[tuple]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None batch_size, num_keypoints, _ = descriptors.shape if output_hidden_states: all_hidden_states = all_hidden_states + (descriptors,) for gnn_layer, layer_type in zip(self.layers, self.layers_types): encoder_hidden_states = None encoder_attention_mask = None if layer_type == "cross": encoder_hidden_states = ( descriptors.reshape(-1, 2, num_keypoints, self.hidden_size) .flip(1) .reshape(batch_size, num_keypoints, self.hidden_size) ) encoder_attention_mask = ( mask.reshape(-1, 2, 1, 1, num_keypoints).flip(1).reshape(batch_size, 1, 1, num_keypoints) if mask is not None else None ) gnn_outputs = gnn_layer( descriptors, attention_mask=mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, ) delta = gnn_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + gnn_outputs[1] if output_attentions: all_attentions = all_attentions + gnn_outputs[2] descriptors = descriptors + delta return descriptors, all_hidden_states, all_attentions class SuperGlueFinalProjection(nn.Module): def __init__(self, config: SuperGlueConfig) -> None: super().__init__() hidden_size = config.hidden_size self.final_proj = nn.Linear(hidden_size, hidden_size, bias=True) def forward(self, descriptors: torch.Tensor) -> torch.Tensor: return self.final_proj(descriptors) @auto_docstring class SuperGluePreTrainedModel(PreTrainedModel): config: SuperGlueConfig base_model_prefix = "superglue" main_input_name = "pixel_values" def _init_weights(self, module: nn.Module) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.BatchNorm1d): module.bias.data.zero_() module.weight.data.fill_(1.0) if hasattr(module, "bin_score"): module.bin_score.data.fill_(1.0) @auto_docstring( custom_intro=""" SuperGlue model taking images as inputs and outputting the matching of them. 
""" ) class SuperGlueForKeypointMatching(SuperGluePreTrainedModel): """SuperGlue feature matching middle-end Given two sets of keypoints and locations, we determine the correspondences by: 1. Keypoint Encoding (normalization + visual feature and location fusion) 2. Graph Neural Network with multiple self and cross-attention layers 3. Final projection layer 4. Optimal Transport Layer (a differentiable Hungarian matching algorithm) 5. Thresholding matrix based on mutual exclusivity and a match_threshold The correspondence ids use -1 to indicate non-matching points. Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural Networks. In CVPR, 2020. https://huggingface.co/papers/1911.11763 """ def __init__(self, config: SuperGlueConfig) -> None: super().__init__(config) self.keypoint_detector = AutoModelForKeypointDetection.from_config(config.keypoint_detector_config) self.keypoint_encoder = SuperGlueKeypointEncoder(config) self.gnn = SuperGlueAttentionalGNN(config) self.final_projection = SuperGlueFinalProjection(config) bin_score = torch.nn.Parameter(torch.tensor(1.0)) self.register_parameter("bin_score", bin_score) self.post_init() def _match_image_pair( self, keypoints: torch.Tensor, descriptors: torch.Tensor, scores: torch.Tensor, height: int, width: int, mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> tuple[torch.Tensor, torch.Tensor, tuple, tuple]: """ Perform keypoint matching between two images. Args: keypoints (`torch.Tensor` of shape `(batch_size, 2, num_keypoints, 2)`): Keypoints detected in the pair of image. descriptors (`torch.Tensor` of shape `(batch_size, 2, descriptor_dim, num_keypoints)`): Descriptors of the keypoints detected in the image pair. scores (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`): Confidence scores of the keypoints detected in the image pair. height (`int`): Image height. width (`int`): Image width. mask (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`, *optional*): Mask indicating which values in the keypoints, matches and matching_scores tensors are keypoint matching information. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors. Default to `config.output_attentions`. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. Default to `config.output_hidden_states`. Returns: matches (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`): For each image pair, for each keypoint in image0, the index of the keypoint in image1 that was matched with. And for each keypoint in image1, the index of the keypoint in image0 that was matched with. matching_scores (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`): Scores of predicted matches for each image pair all_hidden_states (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(1, 2, num_keypoints, num_channels)`. all_attentions (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for each layer) of shape `(1, 2, num_heads, num_keypoints, num_keypoints)`. 
""" all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if keypoints.shape[2] == 0: # no keypoints shape = keypoints.shape[:-1] return ( keypoints.new_full(shape, -1, dtype=torch.int), keypoints.new_zeros(shape), all_hidden_states, all_attentions, ) batch_size, _, num_keypoints, _ = keypoints.shape # (batch_size, 2, num_keypoints, 2) -> (batch_size * 2, num_keypoints, 2) keypoints = keypoints.reshape(batch_size * 2, num_keypoints, 2) descriptors = descriptors.reshape(batch_size * 2, num_keypoints, self.config.hidden_size) scores = scores.reshape(batch_size * 2, num_keypoints) mask = mask.reshape(batch_size * 2, num_keypoints) if mask is not None else None # Keypoint normalization keypoints = normalize_keypoints(keypoints, height, width) encoded_keypoints = self.keypoint_encoder(keypoints, scores, output_hidden_states=output_hidden_states) last_hidden_state = encoded_keypoints[0] # Keypoint MLP encoder. descriptors = descriptors + last_hidden_state if mask is not None: input_shape = descriptors.size() extended_attention_mask = self.get_extended_attention_mask(mask, input_shape) else: extended_attention_mask = torch.ones((batch_size, num_keypoints), device=keypoints.device) # Multi-layer Transformer network. gnn_outputs = self.gnn( descriptors, mask=extended_attention_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, ) descriptors = gnn_outputs[0] # Final MLP projection. projected_descriptors = self.final_projection(descriptors) # (batch_size * 2, num_keypoints, descriptor_dim) -> (batch_size, 2, num_keypoints, descriptor_dim) final_descriptors = projected_descriptors.reshape(batch_size, 2, num_keypoints, self.config.hidden_size) final_descriptors0 = final_descriptors[:, 0] final_descriptors1 = final_descriptors[:, 1] # Compute matching descriptor distance. scores = final_descriptors0 @ final_descriptors1.transpose(1, 2) scores = scores / self.config.hidden_size**0.5 if mask is not None: mask = mask.reshape(batch_size, 2, num_keypoints) mask0 = mask[:, 0].unsqueeze(2) mask1 = mask[:, 1].unsqueeze(1) mask = torch.logical_and(mask0, mask1) scores = scores.masked_fill(mask == 0, torch.finfo(scores.dtype).min) # Run the optimal transport. scores = log_optimal_transport(scores, self.bin_score, iterations=self.config.sinkhorn_iterations) # Get the matches with score above "match_threshold". 
max0 = scores[:, :-1, :-1].max(2) max1 = scores[:, :-1, :-1].max(1) indices0 = max0.indices indices1 = max1.indices mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1) zero = scores.new_tensor(0) matching_scores0 = torch.where(mutual0, max0.values.exp(), zero) matching_scores0 = torch.where(matching_scores0 > self.config.matching_threshold, matching_scores0, zero) matching_scores1 = torch.where(mutual1, matching_scores0.gather(1, indices1), zero) valid0 = mutual0 & (matching_scores0 > zero) valid1 = mutual1 & valid0.gather(1, indices1) matches0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) matches1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) matches = torch.cat([matches0, matches1], dim=1).reshape(batch_size, 2, -1) matching_scores = torch.cat([matching_scores0, matching_scores1], dim=1).reshape(batch_size, 2, -1) if output_hidden_states: all_hidden_states = all_hidden_states + encoded_keypoints[1] all_hidden_states = all_hidden_states + gnn_outputs[1] all_hidden_states = all_hidden_states + (projected_descriptors,) all_hidden_states = tuple( x.reshape(batch_size, 2, num_keypoints, -1).transpose(-1, -2) for x in all_hidden_states ) if output_attentions: all_attentions = all_attentions + gnn_outputs[2] all_attentions = tuple(x.reshape(batch_size, 2, -1, num_keypoints, num_keypoints) for x in all_attentions) return ( matches, matching_scores, all_hidden_states, all_attentions, ) @auto_docstring def forward( self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, KeypointMatchingOutput]: r""" Examples: ```python >>> from transformers import AutoImageProcessor, AutoModel >>> import torch >>> from PIL import Image >>> import requests >>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_78916675_4568141288.jpg?raw=true" >>> image1 = Image.open(requests.get(url, stream=True).raw) >>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_19481797_2295892421.jpg?raw=true" >>> image2 = Image.open(requests.get(url, stream=True).raw) >>> images = [image1, image2] >>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superglue_outdoor") >>> model = AutoModel.from_pretrained("magic-leap-community/superglue_outdoor") >>> with torch.no_grad(): >>> inputs = processor(images, return_tensors="pt") >>> outputs = model(**inputs) ```""" loss = None if labels is not None: raise ValueError("SuperGlue is not trainable, no labels should be provided.") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values.ndim != 5 or pixel_values.size(1) != 2: raise ValueError("Input must be a 5D tensor of shape (batch_size, 2, num_channels, height, width)") batch_size, _, channels, height, width = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * 2, channels, height, width) keypoint_detections = self.keypoint_detector(pixel_values) keypoints, scores, descriptors, mask = keypoint_detections[:4] keypoints 
= keypoints.reshape(batch_size, 2, -1, 2).to(pixel_values) scores = scores.reshape(batch_size, 2, -1).to(pixel_values) descriptors = descriptors.reshape(batch_size, 2, -1, self.config.hidden_size).to(pixel_values) mask = mask.reshape(batch_size, 2, -1) absolute_keypoints = keypoints.clone() absolute_keypoints[:, :, :, 0] = absolute_keypoints[:, :, :, 0] * width absolute_keypoints[:, :, :, 1] = absolute_keypoints[:, :, :, 1] * height matches, matching_scores, hidden_states, attentions = self._match_image_pair( absolute_keypoints, descriptors, scores, height, width, mask=mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) if not return_dict: return tuple( v for v in [loss, matches, matching_scores, keypoints, mask, hidden_states, attentions] if v is not None ) return KeypointMatchingOutput( loss=loss, matches=matches, matching_scores=matching_scores, keypoints=keypoints, mask=mask, hidden_states=hidden_states, attentions=attentions, ) __all__ = ["SuperGluePreTrainedModel", "SuperGlueForKeypointMatching"]
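The final matching step in `_match_image_pair` above keeps a pair only when both keypoints pick each other as their best candidate. A toy sketch of that mutual check, with the dustbin row/column and the matching threshold left out for brevity:

```python
# Toy illustration of the mutual nearest-neighbour check used above (dustbin and threshold omitted).
import torch

scores = torch.tensor([[[0.8, 0.1, 0.1],
                        [0.2, 0.7, 0.1],
                        [0.3, 0.3, 0.4]]]).log()
max0, max1 = scores.max(2), scores.max(1)
indices0, indices1 = max0.indices, max1.indices  # best partner for each keypoint, in each direction
mutual0 = torch.arange(scores.shape[1])[None] == indices1.gather(1, indices0)
matches0 = torch.where(mutual0, indices0, torch.full_like(indices0, -1))
print(matches0)  # tensor([[0, 1, 2]]) -> all three keypoints are mutually matched in this toy case
```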
transformers/src/transformers/models/superglue/modeling_superglue.py/0
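For reference, `normalize_keypoints` in the file above centres pixel coordinates on the image midpoint and divides by 0.7 times the longer side, so coordinates end up roughly in [-0.71, 0.71]. A worked example for a 640x480 image, reproducing the same arithmetic:

```python
# Worked example of the keypoint normalization defined in the file above (640x480 image).
import torch

keypoints = torch.tensor([[[0.0, 0.0], [320.0, 240.0], [640.0, 480.0]]])  # (x, y) pixel coordinates
size = torch.tensor([[640.0, 480.0]])
center = size / 2                                 # (320, 240)
scaling = size.max(1, keepdim=True).values * 0.7  # 0.7 * 640 = 448
print((keypoints - center[:, None, :]) / scaling[:, None, :])
# tensor([[[-0.7143, -0.5357],
#          [ 0.0000,  0.0000],
#          [ 0.7143,  0.5357]]])
```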
{ "file_path": "transformers/src/transformers/models/superglue/modeling_superglue.py", "repo_id": "transformers", "token_count": 15245 }
470
# coding=utf-8 # Copyright 2022 SwitchTransformers Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch SwitchTransformers model.""" import copy import math import warnings from typing import Optional, Union import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging, ) from ...utils.deprecation import deprecate_kwarg from .configuration_switch_transformers import SwitchTransformersConfig if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### def router_z_loss_func(router_logits: torch.Tensor) -> float: r""" Compute the router z-loss implemented in PyTorch. The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://huggingface.co/papers/2202.08906). It encourages router logits to remain small in an effort to improve stability. Args: router_logits (`float`): Input logits of shape [batch_size, sequence_length, num_experts] Returns: Scalar router z-loss. """ num_groups, tokens_per_group, _ = router_logits.shape log_z = torch.logsumexp(router_logits, dim=-1) z_loss = log_z**2 return torch.sum(z_loss) / (num_groups * tokens_per_group) def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: router_probs (`torch.Tensor`): Probability assigned to each expert per token. Shape: [batch_size, seqeunce_length, num_experts]. expert_indices (`torch.Tensor`): Indices tensor of shape [batch_size, seqeunce_length] identifying the selected expert for a given token. Returns: The auxiliary loss. 
""" num_experts = router_probs.shape[-1] # cast the expert indices to int64, otherwise one-hot encoding will fail if expert_indices.dtype != torch.int64: expert_indices = expert_indices.to(torch.int64) if len(expert_indices.shape) == 2: expert_indices = expert_indices.unsqueeze(2) expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts) # For a given token, determine if it was routed to a given expert. expert_mask = torch.max(expert_mask, axis=-2).values # cast to float32 otherwise mean will fail expert_mask = expert_mask.to(torch.float32) tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2) router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2) return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2) class SwitchTransformersTop1Router(nn.Module): """ Router using tokens choose top-1 experts assignment. This router uses the same mechanism as in Switch Transformer (https://huggingface.co/papers/2101.03961) and V-MoE (https://huggingface.co/papers/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. """ def __init__(self, config: SwitchTransformersConfig): super().__init__() self.num_experts = config.num_experts self.expert_capacity = config.expert_capacity self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) self.jitter_noise = config.router_jitter_noise self.ignore_padding_tokens = config.router_ignore_padding_tokens self.dtype = getattr(torch, config.router_dtype) def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: r""" Computes router probabilities from input hidden states. Args: hidden_states (`torch.Tensor`): (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. Returns: router_probabilities (`torch.Tensor`): Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each token and expert. Used for routing tokens to experts. router_logits (`torch.Tensor`): Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. This is used later for computing router z-loss. """ # float32 is used to ensure stability. See the discussion of "selective precision" in # https://huggingface.co/papers/2101.03961. # We also store the previous dtype to cast back the output to the previous dtype self.input_dtype = hidden_states.dtype hidden_states = hidden_states.to(self.dtype) if self.training and self.jitter_noise > 0: # Multiply the token inputs by the uniform distribution - adding some noise hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) # Shape: [num_groups, tokens_per_group, num_experts] self._cast_classifier() router_logits = self.classifier(hidden_states) # Apply Softmax and cast back to the original `dtype` router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype) return router_probabilities, router_logits def _cast_classifier(self): r""" `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an instance of the `Linear8bitLt` class by checking special attributes. 
""" if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")): self.classifier = self.classifier.to(self.dtype) def forward(self, hidden_states: torch.Tensor) -> tuple: r""" Generic forward function for every Router class. Each Router expects to have the same input hidden states (`hidden_states`) corresponding to the hidden states for each token, the `expert_capacity` corresponding to the number of tokens the Router will send to each expert, some Routers can send up to few tokens to each expert. Each Router works as the following: it expects the hidden states for each token, gets the `router_probs` and `router_logits` from the `router_weights`. This will assign for each token, the raw probability to be assigned to an expert. Then each Router class will have to define its own `_compute_routing_instructions`. Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. Returns: tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs and the router logits. The router probabilities and logits are required to compute the loss. """ router_probs, router_logits = self._compute_router_probabilities(hidden_states) expert_index = torch.argmax(router_probs, dim=-1) expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts) # Mask tokens outside expert capacity. Sum over each sequence token_priority = torch.cumsum(expert_index, dim=-2) # mask if the token routed to to the expert will overflow expert_capacity_mask = token_priority <= self.expert_capacity expert_index = expert_index * expert_capacity_mask router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1) return expert_index, router_probs, router_logits # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->SwitchTransformers class SwitchTransformersLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the SwitchTransformers style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # SwitchTransformers uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->SwitchTransformers class SwitchTransformersDenseActDense(nn.Module): def __init__(self, config: SwitchTransformersConfig): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class SwitchTransformersSparseMLP(nn.Module): r""" Implementation of the Switch Transformers Sparse MLP module. """ def __init__(self, config: SwitchTransformersConfig, expert_class: nn.Module = SwitchTransformersDenseActDense): super().__init__() # Step 1: Get the correct router according to its class self.router = SwitchTransformersTop1Router(config) # Step 2: Get the experts self.experts = nn.ModuleDict() for idx in range(config.num_experts): self.experts[f"expert_{idx}"] = expert_class(config) def forward(self, hidden_states): r""" Hold on, this will be slightly tricky to understand In the correct order, a MoE layer does the following: 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)` and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor). 2- Dispatch the tokens to its associated experts. We do a classic for loop over the experts and assign for each expert the corresponding hidden states. """ # Step 1: Get the router_mask from the router as well as the probabilities router_mask, router_probs, router_logits = self.router(hidden_states) expert_index = torch.argmax(router_mask, dim=-1) # If a token gets dropped, we just set it to zero such that it does not get updated. next_states = torch.zeros(hidden_states.shape, device=hidden_states.device, dtype=hidden_states.dtype) router_mask = router_mask.bool() batch_size, seq_len, num_experts = router_mask.shape idx_mask = router_mask.reshape(batch_size * seq_len, num_experts).sum(dim=0) idx_mask = torch.nonzero(idx_mask, as_tuple=True)[ 0 ].tolist() # length: number of "activated" expert / value: index for idx in idx_mask: next_states[router_mask[:, :, idx]] = getattr(self.experts, f"expert_{idx}")( hidden_states[router_mask[:, :, idx]] ) hidden_states = router_probs * next_states return hidden_states, (router_logits, expert_index) class SwitchTransformersLayerFF(nn.Module): r""" Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module. 
    Parameters:
        config ([`SwitchTransformersConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
        is_sparse (`bool`):
            Whether the MLP layer is a `Sparse` layer (contains a Mixture of Experts) or not
    """

    def __init__(self, config: SwitchTransformersConfig, is_sparse=False):
        super().__init__()
        self.is_sparse = is_sparse

        # Check if it is a sparse layer, if not then it is a dense layer
        if not self.is_sparse:
            self.mlp = SwitchTransformersDenseActDense(config)
        else:
            self.mlp = SwitchTransformersSparseMLP(config)

        self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states, output_router_logits):
        forwarded_states = self.layer_norm(hidden_states)
        forwarded_states = self.mlp(forwarded_states)

        if isinstance(forwarded_states, tuple):
            forwarded_states, router_tuple = forwarded_states
        else:
            router_tuple = None

        output = hidden_states + self.dropout(forwarded_states)

        if output_router_logits and router_tuple is not None:
            output = (output, router_tuple)

        return output


# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->SwitchTransformers
class SwitchTransformersAttention(nn.Module):
    def __init__(
        self,
        config: SwitchTransformersConfig,
        has_relative_attention_bias=False,
        layer_idx: Optional[int] = None,
    ):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.dropout = config.dropout_rate
        self.inner_dim = self.n_heads * self.key_value_proj_dim
        self.layer_idx = layer_idx
        if layer_idx is None and self.is_decoder:
            logger.warning_once(
                f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
                "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
) # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder) batch_size, seq_length = hidden_states.shape[:2] # if key_value_states are provided this layer is used as a cross-attention layer for the decoder is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) # Check is encoder-decoder model is being used. 
Otherwise we'll get `DynamicCache` if past_key_values is not None and isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past) real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias( real_seq_length, key_length, device=scores.device, cache_position=cache_position ) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, : key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, -1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->SwitchTransformers class SwitchTransformersLayerSelfAttention(nn.Module): def __init__(self, config, 
has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.SelfAttention = SwitchTransformersAttention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->SwitchTransformers class SwitchTransformersLayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int] = None): super().__init__() self.EncDecAttention = SwitchTransformersAttention( config, has_relative_attention_bias=False, layer_idx=layer_idx ) self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class SwitchTransformersBlock(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, is_sparse=False, layer_idx: Optional[int] = None): super().__init__() self.is_decoder = config.is_decoder self.is_sparse = is_sparse self.layer = nn.ModuleList() self.layer.append( SwitchTransformersLayerSelfAttention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) ) if self.is_decoder: self.layer.append(SwitchTransformersLayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(SwitchTransformersLayerFF(config, is_sparse=self.is_sparse)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, output_router_logits=True, return_dict=True, cache_position=None, ): self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, 
layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[1:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states, output_router_logits) if isinstance(hidden_states, tuple): hidden_states, router_tuple = hidden_states else: router_tuple = (torch.zeros((1,), device=hidden_states.device, dtype=torch.int64),) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) return ( outputs + attention_outputs + (router_tuple,) ) # hidden-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights), (router_tuple) @auto_docstring class SwitchTransformersPreTrainedModel(PreTrainedModel): config: SwitchTransformersConfig base_model_prefix = "switch_transformers" supports_gradient_checkpointing = True _can_compile_fullgraph = False _no_split_modules = ["SwitchTransformersBlock"] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, SwitchTransformersLayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance( module, (SwitchTransformersModel, SwitchTransformersForConditionalGeneration, SwitchTransformersEncoderModel), ): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, SwitchTransformersDenseActDense): # Mesh TensorFlow FF 
initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, SwitchTransformersAttention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) elif isinstance(module, SwitchTransformersSparseMLP): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1) for idx in range(self.config.num_experts): module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In SwitchTransformers it is usually set" " to the pad_token_id. See SwitchTransformers docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. 
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class SwitchTransformersStack(SwitchTransformersPreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.is_decoder = config.is_decoder sparse_step = config.decoder_sparse_step if self.is_decoder else config.encoder_sparse_step config.num_layers = config.num_decoder_layers if self.is_decoder else config.num_layers self.block = nn.ModuleList() for i in range(config.num_layers): is_sparse = (i % sparse_step == 1 or sparse_step == 1) if sparse_step > 0 else False self.block.append( SwitchTransformersBlock( config, has_relative_attention_bias=bool(i == 0), is_sparse=is_sparse, layer_idx=i ) ) self.final_layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.device_map = None self.gradient_checkpointing = False def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, output_router_logits=True, return_dict=None, cache_position=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) else: past_key_values = DynamicCache() elif not self.is_decoder: # do not pass cache object down the line for encoder stack # it messes indexing later in decoder-stack because cache object is modified in-place past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past cache mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions, ) else: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_router_probs = () if output_router_logits else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position, ) router_probs = layer_outputs[-1] layer_outputs 
= layer_outputs[:-1] hidden_states = layer_outputs[0] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) if output_router_logits: all_router_probs = all_router_probs + (router_probs,) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions, all_router_probs, ] if v is not None ) return MoEModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, router_probs=all_router_probs, ) # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). 
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
""" @auto_docstring class SwitchTransformersModel(SwitchTransformersPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = SwitchTransformersStack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False self.decoder = SwitchTransformersStack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.device_map = None def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.FloatTensor], Seq2SeqMoEModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [SWITCH_TRANSFORMERS Training](./switch_transformers#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
            [What are decoder input IDs?](../glossary#decoder-input-ids)

            SWITCH_TRANSFORMERS uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            To know more on how to prepare `decoder_input_ids` for pretraining take a look at
            [SWITCH_TRANSFORMERS Training](./switch_transformers#training).
        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, SwitchTransformersModel

        >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
        >>> model = SwitchTransformersModel.from_pretrained("google/switch-base-8")

        >>> input_ids = tokenizer(
        ...     "Studies have been shown that owning a dog is good for you", return_tensors="pt"
        ... ).input_ids  # Batch size 1
        >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids  # Batch size 1

        >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for SwitchTransformersModel.
        >>> # This is not needed for torch's SwitchTransformersForConditionalGeneration as it does this internally using labels arg.
        >>> decoder_input_ids = model._shift_right(decoder_input_ids)

        >>> # forward pass
        >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
        if head_mask is not None and decoder_head_mask is None:
            if self.config.num_layers == self.config.num_decoder_layers:
                warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
                decoder_head_mask = head_mask

        if (
            output_router_logits
            and self.config.num_sparse_encoder_layers == 0
            and self.config.num_sparse_decoder_layers == 0
        ):
            raise ValueError(
                "You asked to return `output_router_logits` but the transformer is dense, and does "
                " not contain any sparse MLP Layers.
Set `output_router_logits = False` and restart" ) # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, MoEModelOutput): encoder_outputs = MoEModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqMoEModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_probs, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs, ) @auto_docstring( custom_intro=""" SWITCH_TRANSFORMERS Model with a `language modeling` head on top. 
""" ) class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedModel, GenerationMixin): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = SwitchTransformersStack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = SwitchTransformersStack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.router_z_loss_coef = config.router_z_loss_coef self.router_aux_loss_coef = config.router_aux_loss_coef # Initialize weights and apply final processing self.post_init() # Model parallel self.device_map = None def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = True, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.FloatTensor], Seq2SeqMoEOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [SWITCH_TRANSFORMERS Training](./switch_transformers#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) SWITCH_TRANSFORMERS uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS Training](./switch_transformers#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8") >>> model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> # . To, let’s say you have a dog. 
To summarize: >>> # Since the model has been trained on MLM, this will output gibberish ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, MoEModelOutput): encoder_outputs = MoEModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None encoder_z_loss = None encoder_aux_loss = None decoder_z_loss = None decoder_aux_loss = None if output_router_logits: # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder if self.encoder.config.encoder_sparse_step > 1: encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_outputs[-1]) encoder_z_loss = router_z_loss_func(encoder_router_logits) encoder_router_probs = nn.Softmax(dim=-1)(encoder_router_logits) encoder_aux_loss = load_balancing_loss_func(encoder_router_probs, encoder_expert_indexes) else: encoder_z_loss = 0 encoder_aux_loss = 0 if self.decoder.config.decoder_sparse_step > 1: decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_outputs[-1]) decoder_z_loss = router_z_loss_func(decoder_router_logits) decoder_router_probs = nn.Softmax(dim=-1)(decoder_router_logits) decoder_aux_loss = load_balancing_loss_func(decoder_router_probs, decoder_expert_indexes) else: decoder_z_loss = 0 decoder_aux_loss = 0 if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct 
device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if output_router_logits: z_loss = self.router_z_loss_coef * (encoder_z_loss + decoder_z_loss) aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss) loss = loss + z_loss + aux_loss if not return_dict: output = (lm_logits,) if output_router_logits: output += (encoder_z_loss, encoder_aux_loss, decoder_z_loss, decoder_aux_loss) output += (*decoder_outputs[1:], *encoder_outputs) return ((loss,) + output) if loss is not None else output return Seq2SeqMoEOutput( loss=loss, logits=lm_logits, encoder_z_loss=encoder_z_loss, encoder_aux_loss=encoder_aux_loss, decoder_z_loss=decoder_z_loss, decoder_aux_loss=decoder_aux_loss, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_probs, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs, ) def _unpack_router_logits(self, router_outputs): total_router_logits = [] total_expert_indexes = [] for router_output in router_outputs: if len(router_output[0].shape) > 1: router_logits, expert_indexes = router_output total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) @auto_docstring( custom_intro=""" The bare SWITCH_TRANSFORMERS Model transformer outputting encoder's raw hidden-states without any specific head """ ) class SwitchTransformersEncoderModel(SwitchTransformersPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight"] def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = SwitchTransformersStack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.device_map = None def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = True, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.FloatTensor], MoEModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [SWITCH_TRANSFORMERS Training](./switch_transformers#training). Example: ```python >>> from transformers import AutoTokenizer, SwitchTransformersEncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8") >>> model = SwitchTransformersEncoderModel.from_pretrained("google/switch-base-8") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) return encoder_outputs __all__ = [ "SwitchTransformersEncoderModel", "SwitchTransformersForConditionalGeneration", "SwitchTransformersModel", "SwitchTransformersPreTrainedModel", "SwitchTransformersTop1Router", "SwitchTransformersSparseMLP", ]
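The `forward` pass above folds the MoE router penalties into the training objective: when `output_router_logits=True`, the cross-entropy loss is augmented with `router_z_loss_coef * (encoder_z_loss + decoder_z_loss)` and `router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)`. A minimal fine-tuning sketch of that behaviour, using the same checkpoint as the docstring example; the sentence pair and the optimizer hyperparameters are illustrative assumptions, not part of the file above.

```python
import torch
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8")
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)  # illustrative hyperparameter

inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")
labels = tokenizer("Das Haus ist wunderbar.", return_tensors="pt").input_ids

# With output_router_logits=True the returned loss already contains the router
# z-loss and the auxiliary load-balancing loss on top of the cross-entropy term,
# exactly as combined in the forward pass above.
outputs = model(**inputs, labels=labels, output_router_logits=True)
outputs.loss.backward()
optimizer.step()
```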
transformers/src/transformers/models/switch_transformers/modeling_switch_transformers.py/0
{ "file_path": "transformers/src/transformers/models/switch_transformers/modeling_switch_transformers.py", "repo_id": "transformers", "token_count": 36440 }
471
# coding=utf-8 # Copyright 2024 the Fast authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch TextNet model.""" from typing import Any, Optional, Union import torch import torch.nn as nn from torch import Tensor from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.activations import ACT2CLS from transformers.modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from transformers.models.textnet.configuration_textnet import TextNetConfig from transformers.utils import logging from transformers.utils.backbone_utils import BackboneMixin from ...utils import auto_docstring logger = logging.get_logger(__name__) class TextNetConvLayer(nn.Module): def __init__(self, config: TextNetConfig): super().__init__() self.kernel_size = config.stem_kernel_size self.stride = config.stem_stride self.activation_function = config.stem_act_func padding = ( (config.kernel_size[0] // 2, config.kernel_size[1] // 2) if isinstance(config.stem_kernel_size, tuple) else config.stem_kernel_size // 2 ) self.conv = nn.Conv2d( config.stem_num_channels, config.stem_out_channels, kernel_size=config.stem_kernel_size, stride=config.stem_stride, padding=padding, bias=False, ) self.batch_norm = nn.BatchNorm2d(config.stem_out_channels, config.batch_norm_eps) self.activation = nn.Identity() if self.activation_function is not None: self.activation = ACT2CLS[self.activation_function]() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.batch_norm(hidden_states) return self.activation(hidden_states) class TextNetRepConvLayer(nn.Module): r""" This layer supports re-parameterization by combining multiple convolutional branches (e.g., main convolution, vertical, horizontal, and identity branches) during training. At inference time, these branches can be collapsed into a single convolution for efficiency, as per the re-parameterization paradigm. The "Rep" in the name stands for "re-parameterization" (introduced by RepVGG). 
""" def __init__(self, config: TextNetConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int): super().__init__() self.num_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride padding = ((kernel_size[0] - 1) // 2, (kernel_size[1] - 1) // 2) self.activation_function = nn.ReLU() self.main_conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, ) self.main_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps) vertical_padding = ((kernel_size[0] - 1) // 2, 0) horizontal_padding = (0, (kernel_size[1] - 1) // 2) if kernel_size[1] != 1: self.vertical_conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=(kernel_size[0], 1), stride=stride, padding=vertical_padding, bias=False, ) self.vertical_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps) else: self.vertical_conv, self.vertical_batch_norm = None, None if kernel_size[0] != 1: self.horizontal_conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=(1, kernel_size[1]), stride=stride, padding=horizontal_padding, bias=False, ) self.horizontal_batch_norm = nn.BatchNorm2d(num_features=out_channels, eps=config.batch_norm_eps) else: self.horizontal_conv, self.horizontal_batch_norm = None, None self.rbr_identity = ( nn.BatchNorm2d(num_features=in_channels, eps=config.batch_norm_eps) if out_channels == in_channels and stride == 1 else None ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: main_outputs = self.main_conv(hidden_states) main_outputs = self.main_batch_norm(main_outputs) # applies a convolution with a vertical kernel if self.vertical_conv is not None: vertical_outputs = self.vertical_conv(hidden_states) vertical_outputs = self.vertical_batch_norm(vertical_outputs) main_outputs = main_outputs + vertical_outputs # applies a convolution with a horizontal kernel if self.horizontal_conv is not None: horizontal_outputs = self.horizontal_conv(hidden_states) horizontal_outputs = self.horizontal_batch_norm(horizontal_outputs) main_outputs = main_outputs + horizontal_outputs if self.rbr_identity is not None: id_out = self.rbr_identity(hidden_states) main_outputs = main_outputs + id_out return self.activation_function(main_outputs) class TextNetStage(nn.Module): def __init__(self, config: TextNetConfig, depth: int): super().__init__() kernel_size = config.conv_layer_kernel_sizes[depth] stride = config.conv_layer_strides[depth] num_layers = len(kernel_size) stage_in_channel_size = config.hidden_sizes[depth] stage_out_channel_size = config.hidden_sizes[depth + 1] in_channels = [stage_in_channel_size] + [stage_out_channel_size] * (num_layers - 1) out_channels = [stage_out_channel_size] * num_layers stage = [] for stage_config in zip(in_channels, out_channels, kernel_size, stride): stage.append(TextNetRepConvLayer(config, *stage_config)) self.stage = nn.ModuleList(stage) def forward(self, hidden_state): for block in self.stage: hidden_state = block(hidden_state) return hidden_state class TextNetEncoder(nn.Module): def __init__(self, config: TextNetConfig): super().__init__() stages = [] num_stages = len(config.conv_layer_kernel_sizes) for stage_ix in range(num_stages): stages.append(TextNetStage(config, stage_ix)) self.stages = nn.ModuleList(stages) def forward( self, hidden_state: torch.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) 
-> BaseModelOutputWithNoAttention: hidden_states = [hidden_state] for stage in self.stages: hidden_state = stage(hidden_state) hidden_states.append(hidden_state) if not return_dict: output = (hidden_state,) return output + (hidden_states,) if output_hidden_states else output return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) @auto_docstring class TextNetPreTrainedModel(PreTrainedModel): config: TextNetConfig base_model_prefix = "textnet" main_input_name = "pixel_values" def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.BatchNorm2d): module.weight.data.fill_(1.0) if module.bias is not None: module.bias.data.zero_() @auto_docstring class TextNetModel(TextNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.stem = TextNetConvLayer(config) self.encoder = TextNetEncoder(config) self.pooler = nn.AdaptiveAvgPool2d((2, 2)) self.post_init() @auto_docstring def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> Union[tuple[Any, list[Any]], tuple[Any], BaseModelOutputWithPoolingAndNoAttention]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) hidden_state = self.stem(pixel_values) encoder_outputs = self.encoder( hidden_state, output_hidden_states=output_hidden_states, return_dict=return_dict ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) if not return_dict: output = (last_hidden_state, pooled_output) return output + (encoder_outputs[1],) if output_hidden_states else output return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs[1] if output_hidden_states else None, ) @auto_docstring( custom_intro=""" TextNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """ ) class TextNetForImageClassification(TextNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.textnet = TextNetModel(config) self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) self.flatten = nn.Flatten() self.fc = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() # classification head self.classifier = nn.ModuleList([self.avg_pool, self.flatten]) # initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Examples: ```python >>> import torch >>> import requests >>> from transformers import TextNetForImageClassification, TextNetImageProcessor >>> from PIL import Image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = TextNetImageProcessor.from_pretrained("czczup/textnet-base") >>> model = TextNetForImageClassification.from_pretrained("czczup/textnet-base") >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> outputs.logits.shape torch.Size([1, 2]) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.textnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = outputs[0] for layer in self.classifier: last_hidden_state = layer(last_hidden_state) logits = self.fc(last_hidden_state) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) @auto_docstring( custom_intro=""" TextNet backbone, to be used with frameworks like DETR and MaskFormer. 
""" ) class TextNetBackbone(TextNetPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.textnet = TextNetModel(config) self.num_features = config.hidden_sizes # initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None ) -> Union[tuple[tuple], BackboneOutput]: r""" Examples: ```python >>> import torch >>> import requests >>> from PIL import Image >>> from transformers import AutoImageProcessor, AutoBackbone >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("czczup/textnet-base") >>> model = AutoBackbone.from_pretrained("czczup/textnet-base") >>> inputs = processor(image, return_tensors="pt") >>> with torch.no_grad(): >>> outputs = model(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.textnet(pixel_values, output_hidden_states=True, return_dict=return_dict) hidden_states = outputs.hidden_states if return_dict else outputs[2] feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: hidden_states = outputs.hidden_states if return_dict else outputs[2] output += (hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) __all__ = ["TextNetBackbone", "TextNetModel", "TextNetPreTrainedModel", "TextNetForImageClassification"]
transformers/src/transformers/models/textnet/modeling_textnet.py/0
{ "file_path": "transformers/src/transformers/models/textnet/modeling_textnet.py", "repo_id": "transformers", "token_count": 7047 }
472
# coding=utf-8 # Copyright 2024 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VideoLlava model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class VideoLlavaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VideoLlavaForConditionalGeneration`]. It is used to instantiate an VideoLlava model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the like LanguageBind/Video-LLaVA-7B-hf. e.g. [LanguageBind/Video-LLaVA-7B-hf](https://huggingface.co/LanguageBind/Video-LLaVA-7B-hf) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`VideoLlavaVisionConfig`, *optional*): Custom vision config or dict. Defaults to `CLIPVisionConfig` if not indicated. text_config (`Union[AutoConfig, dict]`, *optional*): The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`. Defaults to `LlamaConfig` if not indicated. image_token_index (`int`, *optional*, defaults to 32000): The image token index to encode the image prompt. video_token_index (`int`, *optional*, defaults to 32001): The video token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`): The feature selection strategy used to select the vision feature from the CLIP backbone. Can be either "full" to select all features or "default" to select features without `CLS`. vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. image_seq_length (`int`, *optional*, defaults to 256): Sequence length of one image embedding. video_seq_length (`int`, *optional*, defaults to 2056): Sequence length of one video embedding. multimodal_projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector. 
Example: ```python >>> from transformers import VideoLlavaForConditionalGeneration, VideoLlavaConfig, CLIPVisionConfig, LlamaConfig >>> # Initializing a CLIP-vision config >>> vision_config = CLIPVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> # Initializing a VideoLlava video_llava-1.5-7b style configuration >>> configuration = VideoLlavaConfig(vision_config, text_config) >>> # Initializing a model from the video_llava-1.5-7b style configuration >>> model = VideoLlavaForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "video_llava" attribute_map = { "image_token_id": "image_token_index", "video_token_id": "video_token_index", } sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} def __init__( self, vision_config=None, text_config=None, image_token_index=32000, video_token_index=32001, projector_hidden_act="gelu", vision_feature_select_strategy="default", vision_feature_layer=-2, image_seq_length=256, video_seq_length=2056, multimodal_projector_bias=True, **kwargs, ): self.image_token_index = image_token_index self.video_token_index = video_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.image_seq_length = image_seq_length self.video_seq_length = video_seq_length self.multimodal_projector_bias = multimodal_projector_bias self.vision_config = vision_config if isinstance(self.vision_config, dict): if "model_type" not in vision_config: vision_config["model_type"] = "clip_vision_model" logger.warning("Key=`model_type` not found in vision config, setting it to `clip_vision_model`") self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: self.vision_config = CONFIG_MAPPING["clip_vision_model"]( intermediate_size=4096, hidden_size=1024, patch_size=14, image_size=224, num_hidden_layers=24, num_attention_heads=16, vocab_size=32000, projection_dim=768, ) if isinstance(text_config, dict): if "model_type" not in text_config: text_config["model_type"] = "llama" logger.warning("Key=`model_type` not found in text config, setting it to `llama`") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["llama"]() self.text_config = text_config super().__init__(**kwargs) __all__ = ["VideoLlavaConfig"]
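As the `__init__` above shows, `VideoLlavaConfig` fills in a CLIP vision tower and a Llama text backbone when no sub-configs are given, and infers a missing `model_type` on dict inputs (logging a warning). A short sketch of both paths; the dict values are illustrative assumptions.

```python
from transformers import VideoLlavaConfig

# No sub-configs: a default CLIP vision config (hidden_size=1024, patch_size=14, ...)
# and a default LlamaConfig are created.
config = VideoLlavaConfig()
print(config.vision_config.model_type, config.text_config.model_type)
# clip_vision_model llama

# Dict sub-configs: a missing "model_type" falls back to "clip_vision_model" / "llama",
# and the remaining keys override the corresponding defaults.
config = VideoLlavaConfig(
    vision_config={"image_size": 336, "patch_size": 14},
    text_config={"model_type": "llama", "hidden_size": 2048, "num_hidden_layers": 4},
)
```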
transformers/src/transformers/models/video_llava/configuration_video_llava.py/0
{ "file_path": "transformers/src/transformers/models/video_llava/configuration_video_llava.py", "repo_id": "transformers", "token_count": 2427 }
473
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flax VisionTextDualEncoder model.""" from typing import Optional import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.traverse_util import flatten_dict, unflatten_dict from ...modeling_flax_utils import FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring from ...utils import add_start_docstrings, logging from ..auto.configuration_auto import AutoConfig from ..auto.modeling_flax_auto import FLAX_MODEL_MAPPING, FlaxAutoModel from ..clip.modeling_flax_clip import FlaxCLIPOutput, FlaxCLIPVisionModel from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "VisionTextDualEncoderConfig" VISION_TEXT_DUAL_ENCODER_START_DOCSTRING = r""" This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the [`~FlaxAutoModel.from_pretrained`] method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling. In [LiT: Zero-Shot Transfer with Locked-image Text Tuning](https://huggingface.co/papers/2111.07991) it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvement on new zero-shot vision tasks such as image classification or retrieval. After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information). This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`VisionTextDualEncoderConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ VISION_TEXT_DUAL_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using an image processor (e.g. if you use ViT as the encoder, you should use [`AutoImageProcessor`]). See [`ViTImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" class FlaxVisionTextDualEncoderModule(nn.Module): config: VisionTextDualEncoderConfig dtype: jnp.dtype = jnp.float32 def setup(self): vision_config = self.config.vision_config text_config = self.config.text_config self.vision_embed_dim = vision_config.hidden_size self.text_embed_dim = text_config.hidden_size self.projection_dim = self.config.projection_dim vision_module = FLAX_MODEL_MAPPING.get(self.config.vision_config.__class__, FlaxCLIPVisionModel).module_class text_module = FLAX_MODEL_MAPPING[self.config.text_config.__class__].module_class self.vision_model = vision_module(vision_config, dtype=self.dtype) self.text_model = text_module(text_config, dtype=self.dtype) self.visual_projection = nn.Dense( self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False, ) self.text_projection = nn.Dense( self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False, ) self.logit_scale = self.param( "logit_scale", lambda _, shape: jnp.ones(shape) * self.config.logit_scale_init_value, [] ) def __call__( self, input_ids=None, pixel_values=None, attention_mask=None, position_ids=None, token_type_ids=None, deterministic: bool = True, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True) text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True) # cosine similarity as logits logit_scale = jnp.exp(self.logit_scale) logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale logits_per_image = logits_per_text.T if not return_dict: return (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return FlaxCLIPOutput( logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) @add_start_docstrings(VISION_TEXT_DUAL_ENCODER_START_DOCSTRING) class FlaxVisionTextDualEncoderModel(FlaxPreTrainedModel): config_class = VisionTextDualEncoderConfig module_class = FlaxVisionTextDualEncoderModule def __init__( self, config: VisionTextDualEncoderConfig, input_shape: Optional[tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): if not _do_init: raise ValueError( "`FlaxVisionTextDualEncoderModel` cannot be created without initializing, `_do_init` must be `True`." 
) if input_shape is None: input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3)) module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: # init input tensor input_ids = jnp.zeros(input_shape[0], dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0]) token_type_ids = jnp.ones_like(input_ids) attention_mask = jnp.ones_like(input_ids) pixel_values = jax.random.normal(rng, input_shape[1]) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids, token_type_ids)[ "params" ] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def __call__( self, input_ids, pixel_values, attention_mask=None, position_ids=None, token_type_ids=None, params: Optional[dict] = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(pixel_values, dtype=jnp.float32), jnp.array(attention_mask, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, ) def get_text_features( self, input_ids, attention_mask=None, position_ids=None, token_type_ids=None, params: Optional[dict] = None, dropout_rng: jax.random.PRNGKey = None, train=False, ): r""" Args: input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) Returns: text_features (`jnp.ndarray` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of text model. 
""" if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _get_features(module, input_ids, attention_mask, position_ids, token_type_ids, deterministic): text_outputs = module.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, deterministic=deterministic, ) pooled_output = text_outputs[1] text_features = module.text_projection(pooled_output) return text_features return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), not train, method=_get_features, rngs=rngs, ) def get_image_features( self, pixel_values, params: Optional[dict] = None, dropout_rng: jax.random.PRNGKey = None, train=False ): r""" Args: pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`ImageFeatureExtractionMixin`]. See [`ImageFeatureExtractionMixin.__call__`] for details. Returns: image_features (`jnp.ndarray` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of vision model. """ # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _get_features(module, pixel_values, deterministic): vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic) pooled_output = vision_outputs[1] # pooled_output image_features = module.visual_projection(pooled_output) return image_features return self.module.apply( {"params": params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, method=_get_features, rngs=rngs, ) @classmethod def from_vision_text_pretrained( cls, vision_model_name_or_path: Optional[str] = None, text_model_name_or_path: Optional[str] = None, *model_args, **kwargs, ) -> FlaxPreTrainedModel: """ Params: vision_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the vision model. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch checkpoint folder* (e.g, `./pt_model`). In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the PyTorch checkpoint in a Flax model using the provided conversion scripts and loading the Flax model afterwards. text_model_name_or_path (`str`, *optional*): Information necessary to initiate the text model. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch checkpoint folder* (e.g, `./pt_model`). 
In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the PyTorch checkpoint in a Flax model using the provided conversion scripts and loading the Flax model afterwards. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the text configuration, use the prefix *text_* for each configuration parameter. - To update the vision configuration, use the prefix *vision_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import FlaxVisionTextDualEncoderModel >>> # initialize a model from pretrained ViT and BERT models. Note that the projection layers will be randomly initialized. >>> model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( ... "google/vit-base-patch16-224", "google-bert/bert-base-uncased" ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./vit-bert") >>> # load fine-tuned model >>> model = FlaxVisionTextDualEncoderModel.from_pretrained("./vit-bert") ```""" kwargs_vision = { argument[len("vision_") :]: value for argument, value in kwargs.items() if argument.startswith("vision_") } kwargs_text = { argument[len("text_") :]: value for argument, value in kwargs.items() if argument.startswith("text_") } # remove text, vision kwargs from kwargs for key in kwargs_vision: del kwargs["vision_" + key] for key in kwargs_text: del kwargs["text_" + key] # Load and initialize the text and vision model vision_model = kwargs_vision.pop("model", None) if vision_model is None: if vision_model_name_or_path is None: raise ValueError( "If `vision_model` is not defined as an argument, a `vision_model_name_or_path` has to be defined" ) if "config" not in kwargs_vision: vision_config = AutoConfig.from_pretrained(vision_model_name_or_path) if vision_config.model_type == "clip": kwargs_vision["config"] = vision_config.vision_config vision_model = FlaxCLIPVisionModel.from_pretrained( vision_model_name_or_path, *model_args, **kwargs_vision ) else: kwargs_vision["config"] = vision_config vision_model = FlaxAutoModel.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision) text_model = kwargs_text.pop("model", None) if text_model is None: if text_model_name_or_path is None: raise ValueError( "If `text_model` is not defined as an argument, a `text_model_name_or_path` has to be defined" ) if "config" not in kwargs_text: text_config = AutoConfig.from_pretrained(text_model_name_or_path) kwargs_text["config"] = text_config text_model = FlaxAutoModel.from_pretrained(text_model_name_or_path, *model_args, **kwargs_text) # instantiate config with corresponding kwargs dtype = kwargs.pop("dtype", jnp.float32) config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_model.config, text_model.config, **kwargs) # init model model = cls(config, *model_args, dtype=dtype, **kwargs) model.params["vision_model"] = vision_model.params model.params["text_model"] = text_model.params # the projection layers are always newly initialized when loading the model # using 
pre-trained vision and text model. logger.warning( "The projection layer and logit scale weights `[('visual_projection', 'kernel'), ('text_projection'," " 'kernel'), ('logit_scale',)]` are newly initialized. You should probably TRAIN this model on a" " down-stream task to be able to use it for predictions and inference." ) return model VISION_TEXT_DUAL_ENCODER_MODEL_DOCSTRING = r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> import jax >>> from transformers import ( ... FlaxVisionTextDualEncoderModel, ... VisionTextDualEncoderProcessor, ... AutoImageProcessor, ... AutoTokenizer, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") >>> processor = VisionTextDualEncoderProcessor(image_processor, tokenizer) >>> model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( ... "google/vit-base-patch16-224", "google-bert/bert-base-uncased" ... ) >>> # contrastive training >>> urls = [ ... "http://images.cocodataset.org/val2017/000000039769.jpg", ... "https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg", ... ] >>> images = [Image.open(requests.get(url, stream=True).raw) for url in urls] >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=images, return_tensors="np", padding=True ... ) >>> outputs = model( ... input_ids=inputs.input_ids, ... attention_mask=inputs.attention_mask, ... pixel_values=inputs.pixel_values, ... ) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> # save and load from pretrained >>> model.save_pretrained("vit-bert") >>> model = FlaxVisionTextDualEncoderModel.from_pretrained("vit-bert") >>> # inference >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities ``` """ overwrite_call_docstring( FlaxVisionTextDualEncoderModel, VISION_TEXT_DUAL_ENCODER_INPUTS_DOCSTRING + VISION_TEXT_DUAL_ENCODER_MODEL_DOCSTRING, ) append_replace_return_docstrings( FlaxVisionTextDualEncoderModel, output_type=FlaxCLIPOutput, config_class=_CONFIG_FOR_DOC ) __all__ = ["FlaxVisionTextDualEncoderModel"]
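The module above produces image-text similarity logits but defines no loss, and the warning it logs notes that the freshly initialized projection layers still need contrastive training. A minimal sketch of the symmetric cross-entropy commonly used for such dual encoders (CLIP-style); `optax` and the `outputs` variable from the docstring example are assumptions, not part of this file.

```python
import jax.numpy as jnp
import optax  # assumed available; any softmax cross-entropy implementation works


def clip_contrastive_loss(logits_per_text: jnp.ndarray) -> jnp.ndarray:
    # With one matching image per caption, the i-th text pairs with the i-th image,
    # so the integer targets are simply the diagonal indices.
    targets = jnp.arange(logits_per_text.shape[0])
    text_loss = optax.softmax_cross_entropy_with_integer_labels(logits_per_text, targets)
    image_loss = optax.softmax_cross_entropy_with_integer_labels(logits_per_text.T, targets)
    return (text_loss.mean() + image_loss.mean()) / 2.0


# e.g. loss = clip_contrastive_loss(outputs.logits_per_text)
```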
transformers/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py/0
{ "file_path": "transformers/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 11079 }
474
# coding=utf-8 # Copyright 2021 Google AI, Ross Wightman, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 ViT model.""" from __future__ import annotations import collections.abc import math import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_vit import ViTConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ViTConfig" # Base docstring _CHECKPOINT_FOR_DOC = "google/vit-base-patch16-224-in21k" _EXPECTED_OUTPUT_SHAPE = [1, 197, 768] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "google/vit-base-patch16-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat" class TFViTEmbeddings(keras.layers.Layer): """ Construct the CLS token, position and patch embeddings. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.patch_embeddings = TFViTPatchEmbeddings(config, name="patch_embeddings") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def build(self, input_shape=None): num_patches = self.patch_embeddings.num_patches self.cls_token = self.add_weight( shape=(1, 1, self.config.hidden_size), initializer=get_initializer(self.config.initializer_range), trainable=True, name="cls_token", ) self.position_embeddings = self.add_weight( shape=(1, num_patches + 1, self.config.hidden_size), initializer=get_initializer(self.config.initializer_range), trainable=True, name="position_embeddings", ) if self.built: return self.built = True if getattr(self, "patch_embeddings", None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build(None) def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
Source: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ batch_size, seq_len, dim = shape_list(embeddings) num_patches = seq_len - 1 _, num_positions, _ = shape_list(self.position_embeddings) num_positions -= 1 if num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] h0 = height // self.config.patch_size w0 = width // self.config.patch_size patch_pos_embed = tf.image.resize( images=tf.reshape( patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) ), size=(h0, w0), method="bicubic", ) shape = shape_list(patch_pos_embed) assert h0 == shape[-3] and w0 == shape[-2] patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim)) return tf.concat(values=(class_pos_embed, patch_pos_embed), axis=1) def call( self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False ) -> tf.Tensor: batch_size, num_channels, height, width = shape_list(pixel_values) embeddings = self.patch_embeddings( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, training=training ) # add the [CLS] token to the embedded patch tokens cls_tokens = tf.repeat(self.cls_token, repeats=batch_size, axis=0) embeddings = tf.concat((cls_tokens, embeddings), axis=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings, training=training) return embeddings # Based on timm implementation, which can be found here: # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py class TFViTPatchEmbeddings(keras.layers.Layer): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.num_channels = num_channels self.config = config self.projection = keras.layers.Conv2D( filters=hidden_size, kernel_size=patch_size, strides=patch_size, padding="valid", data_format="channels_last", use_bias=True, kernel_initializer=get_initializer(self.config.initializer_range), bias_initializer="zeros", name="projection", ) def call( self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False ) -> tf.Tensor: batch_size, num_channels, height, width = shape_list(pixel_values) if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) if not interpolate_pos_encoding: if tf.executing_eagerly(): if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) projection = self.projection(pixel_values) # Change the 2D spatial dimensions to a single temporal dimension. # shape = (batch_size, num_patches, out_channels=embed_dim) num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0]) embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1)) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) class TFViTSelfAttention(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) mixed_key_layer = self.key(inputs=hidden_states) mixed_value_layer = self.value(inputs=hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. 
# (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) class TFViTSelfOutput(keras.layers.Layer): """ The residual connection is defined in TFViTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFViTAttention(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFViTSelfAttention(config, name="attention") self.dense_output = TFViTSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: 
with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFViTIntermediate(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFViTOutput(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = hidden_states + input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFViTLayer(keras.layers.Layer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.attention = TFViTAttention(config, name="attention") self.intermediate = TFViTIntermediate(config, name="intermediate") self.vit_output = TFViTOutput(config, name="output") self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before") self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after") self.config = config def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> tuple[tf.Tensor]: attention_outputs = self.attention( # in ViT, layernorm is applied before self-attention input_tensor=self.layernorm_before(inputs=hidden_states), head_mask=head_mask, output_attentions=output_attentions, training=training, ) attention_output = attention_outputs[0] # first residual connection hidden_states = attention_output + hidden_states # in ViT, layernorm is also applied after self-attention layer_output = self.layernorm_after(inputs=hidden_states) intermediate_output = self.intermediate(hidden_states=layer_output) # second residual connection is done here layer_output = self.vit_output( hidden_states=intermediate_output, input_tensor=hidden_states, training=training ) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with 
tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "vit_output", None) is not None: with tf.name_scope(self.vit_output.name): self.vit_output.build(None) if getattr(self, "layernorm_before", None) is not None: with tf.name_scope(self.layernorm_before.name): self.layernorm_before.build([None, None, self.config.hidden_size]) if getattr(self, "layernorm_after", None) is not None: with tf.name_scope(self.layernorm_after.name): self.layernorm_after.build([None, None, self.config.hidden_size]) class TFViTEncoder(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.layer = [TFViTLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> TFBaseModelOutput | tuple[tf.Tensor]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states=hidden_states, head_mask=head_mask[i], output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFViTMainLayer(keras.layers.Layer): config_class = ViTConfig def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFViTEmbeddings(config, name="embeddings") self.encoder = TFViTEncoder(config, name="encoder") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") self.pooler = TFViTPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, interpolate_pos_encoding: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, training=training, ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(inputs=sequence_output) pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.config.hidden_size]) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFViTPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ViTConfig base_model_prefix = "vit" main_input_name = "pixel_values" VIT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`ViTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ VIT_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]` ``dict[str, tf.Tensor]` or `dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. interpolate_pos_encoding (`bool`, *optional*): Whether to interpolate the pre-trained position encodings. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare ViT Model transformer outputting raw hidden-states without any specific head on top.", VIT_START_DOCSTRING, ) class TFViTModel(TFViTPreTrainedModel): def __init__(self, config: ViTConfig, *inputs, add_pooling_layer=True, **kwargs): super().__init__(config, *inputs, **kwargs) self.vit = TFViTMainLayer(config, add_pooling_layer=add_pooling_layer, name="vit") @unpack_inputs @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, interpolate_pos_encoding: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor]: outputs = self.vit( pixel_values=pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vit", None) is not None: with tf.name_scope(self.vit.name): self.vit.build(None) class TFViTPooler(keras.layers.Layer): def __init__(self, config: ViTConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.pooler_output_size, kernel_initializer=get_initializer(config.initializer_range), activation=config.pooler_act, name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. <Tip> Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained position embeddings to the higher resolution. 
</Tip> """, VIT_START_DOCSTRING, ) class TFViTForImageClassification(TFViTPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: ViTConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.vit = TFViTMainLayer(config, add_pooling_layer=False, name="vit") # Classifier head self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def call( self, pixel_values: TFModelInputType | None = None, head_mask: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, interpolate_pos_encoding: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> TFSequenceClassifierOutput | tuple[tf.Tensor]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.vit( pixel_values=pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(inputs=sequence_output[:, 0, :]) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "vit", None) is not None: with tf.name_scope(self.vit.name): self.vit.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) __all__ = ["TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel"]
transformers/src/transformers/models/vit/modeling_tf_vit.py/0
{ "file_path": "transformers/src/transformers/models/vit/modeling_tf_vit.py", "repo_id": "transformers", "token_count": 15713 }
475
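A quick inference sketch for the TF ViT classes defined in the file above. The checkpoint name and sample image URL are illustrative assumptions (a commonly used ImageNet-finetuned ViT checkpoint), not something stated in this file; the second call also exercises the `interpolate_pos_encoding=True` path that the classification head's docstring describes for higher-resolution inputs.

```python
# Minimal TF ViT usage sketch; checkpoint name, image URL and resolution are assumptions.
import requests
import tensorflow as tf
from PIL import Image

from transformers import AutoImageProcessor, TFViTForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # assumed sample image
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")  # assumed checkpoint
model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)
predicted_class = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class])

# Higher-resolution input: the pre-trained position embeddings are bicubically
# resized by TFViTEmbeddings.interpolate_pos_encoding, as implemented above.
hires_inputs = processor(images=image, size={"height": 480, "width": 480}, return_tensors="tf")
hires_outputs = model(**hires_inputs, interpolate_pos_encoding=True)
```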
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VitMatte checkpoints from the original repository. URL: https://github.com/hustvl/ViTMatte """ import argparse import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import VitDetConfig, VitMatteConfig, VitMatteForImageMatting, VitMatteImageProcessor def get_config(model_name): hidden_size = 384 if "small" in model_name else 768 num_attention_heads = 6 if "small" in model_name else 12 backbone_config = VitDetConfig( num_channels=4, image_size=512, pretrain_image_size=224, patch_size=16, hidden_size=hidden_size, num_attention_heads=num_attention_heads, use_absolute_position_embeddings=True, use_relative_position_embeddings=True, window_size=14, # 2, 5, 8, 11 for global attention window_block_indices=[0, 1, 3, 4, 6, 7, 9, 10], residual_block_indices=[2, 5, 8, 11], out_features=["stage12"], ) return VitMatteConfig(backbone_config=backbone_config, hidden_size=hidden_size) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config): rename_keys = [] # fmt: off # stem rename_keys.append(("backbone.pos_embed", "backbone.embeddings.position_embeddings")) rename_keys.append(("backbone.patch_embed.proj.weight", "backbone.embeddings.projection.weight")) rename_keys.append(("backbone.patch_embed.proj.bias", "backbone.embeddings.projection.bias")) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def convert_vitmatte_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): config = get_config(model_name) # load original state dict model_name_to_filename = { "vitmatte-small-composition-1k": "ViTMatte_S_Com.pth", "vitmatte-base-composition-1k": "ViTMatte_B_Com.pth", "vitmatte-small-distinctions-646": "ViTMatte_S_DIS.pth", "vitmatte-base-distinctions-646": "ViTMatte_B_DIS.pth", } filename = model_name_to_filename[model_name] filepath = hf_hub_download(repo_id="nielsr/vitmatte-checkpoints", filename=filename, repo_type="model") state_dict = torch.load(filepath, map_location="cpu", weights_only=True) # rename keys for key in state_dict.copy(): val = state_dict.pop(key) if "backbone.blocks" in key: key = key.replace("backbone.blocks", "backbone.encoder.layer") if "attn" in key: key = key.replace("attn", "attention") if "fusion_blks" in key: key = key.replace("fusion_blks", "fusion_blocks") if "bn" in key: key = key.replace("bn", "batch_norm") state_dict[key] = val # rename keys rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) # create model processor = VitMatteImageProcessor() model = VitMatteForImageMatting(config) model.eval() # load state dict model.load_state_dict(state_dict) # verify on dummy image + trimap url = "https://github.com/hustvl/ViTMatte/blob/main/demo/bulb_rgb.png?raw=true" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") url = 
"https://github.com/hustvl/ViTMatte/blob/main/demo/bulb_trimap.png?raw=true" trimap = Image.open(requests.get(url, stream=True).raw) pixel_values = processor(images=image, trimaps=trimap.convert("L"), return_tensors="pt").pixel_values with torch.no_grad(): alphas = model(pixel_values).alphas if model_name == "vitmatte-small-composition-1k": expected_slice = torch.tensor([[0.9977, 0.9987, 0.9990], [0.9980, 0.9998, 0.9998], [0.9983, 0.9998, 0.9998]]) elif model_name == "vitmatte-base-composition-1k": expected_slice = torch.tensor([[0.9972, 0.9971, 0.9981], [0.9948, 0.9987, 0.9994], [0.9963, 0.9992, 0.9995]]) elif model_name == "vitmatte-small-distinctions-646": expected_slice = torch.tensor([[0.9880, 0.9970, 0.9972], [0.9960, 0.9996, 0.9997], [0.9963, 0.9996, 0.9997]]) elif model_name == "vitmatte-base-distinctions-646": expected_slice = torch.tensor([[0.9963, 0.9998, 0.9999], [0.9995, 1.0000, 1.0000], [0.9992, 0.9999, 1.0000]]) assert torch.allclose(alphas[0, 0, :3, :3], expected_slice, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor for {model_name} to hub") model.push_to_hub(f"hustvl/{model_name}") processor.push_to_hub(f"hustvl/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="vitmatte-small-composition-1k", type=str, choices=[ "vitmatte-small-composition-1k", "vitmatte-base-composition-1k", "vitmatte-small-distinctions-646", "vitmatte-base-distinctions-646", ], help="Name of the VitMatte model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_vitmatte_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/vitmatte/convert_vitmatte_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/vitmatte/convert_vitmatte_to_hf.py", "repo_id": "transformers", "token_count": 2679 }
476
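The conversion script above is driven by argparse, so it is normally invoked from the command line; once converted, the checkpoint can be used for matting inference. A hedged sketch follows: the hub repo id mirrors the `push_to_hub` target in the script (`hustvl/<model_name>`), and the image/trimap URLs are the same demo assets the script uses for verification.

```python
# The script above is typically run as, e.g.:
#   python convert_vitmatte_to_hf.py --model_name vitmatte-small-composition-1k \
#       --pytorch_dump_folder_path ./vitmatte-small-composition-1k --push_to_hub
#
# Inference sketch with a converted checkpoint (repo id inferred from push_to_hub).
import requests
import torch
from PIL import Image

from transformers import VitMatteForImageMatting, VitMatteImageProcessor

repo_id = "hustvl/vitmatte-small-composition-1k"
processor = VitMatteImageProcessor.from_pretrained(repo_id)
model = VitMatteForImageMatting.from_pretrained(repo_id)

image = Image.open(
    requests.get("https://github.com/hustvl/ViTMatte/blob/main/demo/bulb_rgb.png?raw=true", stream=True).raw
).convert("RGB")
trimap = Image.open(
    requests.get("https://github.com/hustvl/ViTMatte/blob/main/demo/bulb_trimap.png?raw=true", stream=True).raw
).convert("L")

inputs = processor(images=image, trimaps=trimap, return_tensors="pt")
with torch.no_grad():
    alphas = model(**inputs).alphas  # predicted alpha matte, shape (batch, 1, height, width)
```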
# coding=utf-8 # Copyright 2023 The Kakao Enterprise Authors, the MMS-TTS Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for VITS.""" import json import os import re from typing import Any, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_phonemizer_available, is_uroman_available, logging if is_phonemizer_available(): import phonemizer if is_uroman_available(): import uroman as ur logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"} def has_non_roman_characters(input_string): # Find any character outside the ASCII range non_roman_pattern = re.compile(r"[^\x00-\x7F]") # Search the input string for non-Roman characters match = non_roman_pattern.search(input_string) has_non_roman = match is not None return has_non_roman class VitsTokenizer(PreTrainedTokenizer): """ Construct a VITS tokenizer. Also supports MMS-TTS. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. language (`str`, *optional*): Language identifier. add_blank (`bool`, *optional*, defaults to `True`): Whether to insert token id 0 in between the other tokens. normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the input text by removing all casing and punctuation. phonemize (`bool`, *optional*, defaults to `True`): Whether to convert the input text into phonemes. is_uroman (`bool`, *optional*, defaults to `False`): Whether the `uroman` Romanizer needs to be applied to the input text prior to tokenizing. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, pad_token="<pad>", unk_token="<unk>", language=None, add_blank=True, normalize=True, phonemize=True, is_uroman=False, **kwargs, ) -> None: with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.language = language self.add_blank = add_blank self.normalize = normalize self.phonemize = phonemize self.is_uroman = is_uroman super().__init__( pad_token=pad_token, unk_token=unk_token, language=language, add_blank=add_blank, normalize=normalize, phonemize=phonemize, is_uroman=is_uroman, **kwargs, ) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def normalize_text(self, input_string): """Lowercase the input string, respecting any special token ids that may be part or entirely upper-cased.""" all_vocabulary = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys()) filtered_text = "" i = 0 while i < len(input_string): found_match = False for word in all_vocabulary: if input_string[i : i + len(word)] == word: filtered_text += word i += len(word) found_match = True break if not found_match: filtered_text += input_string[i].lower() i += 1 return filtered_text def _preprocess_char(self, text): """Special treatment of characters in certain languages""" if self.language == "ron": text = text.replace("ț", "ţ") return text def prepare_for_tokenization( self, text: str, is_split_into_words: bool = False, normalize: Optional[bool] = None, **kwargs ) -> tuple[str, dict[str, Any]]: """ Performs any necessary transformations before tokenization. This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the `kwargs` at the end of the encoding process to be sure all the arguments have been used. Args: text (`str`): The text to prepare. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. normalize (`bool`, *optional*, defaults to `None`): Whether or not to apply punctuation and casing normalization to the text inputs. Typically, VITS is trained on lower-cased and un-punctuated text. Hence, normalization is used to ensure that the input text consists only of lower-case characters. kwargs (`dict[str, Any]`, *optional*): Keyword arguments to use for the tokenization. Returns: `tuple[str, dict[str, Any]]`: The prepared text and the unused kwargs. """ normalize = normalize if normalize is not None else self.normalize if normalize: # normalise for casing text = self.normalize_text(text) filtered_text = self._preprocess_char(text) if has_non_roman_characters(filtered_text) and self.is_uroman: if not is_uroman_available(): logger.warning( "Text to the tokenizer contains non-Roman characters. 
To apply the `uroman` pre-processing " "step automatically, ensure the `uroman` Romanizer is installed with: `pip install uroman` " "Note `uroman` requires python version >= 3.10" "Otherwise, apply the Romanizer manually as per the instructions: https://github.com/isi-nlp/uroman" ) else: uroman = ur.Uroman() filtered_text = uroman.romanize_string(filtered_text) if self.phonemize: if not is_phonemizer_available(): raise ImportError("Please install the `phonemizer` Python package to use this tokenizer.") filtered_text = phonemizer.phonemize( filtered_text, language="en-us", backend="espeak", strip=True, preserve_punctuation=True, with_stress=True, ) filtered_text = re.sub(r"\s+", " ", filtered_text) elif normalize: # strip any chars outside of the vocab (punctuation) filtered_text = "".join(list(filter(lambda char: char in self.encoder, filtered_text))).strip() return filtered_text, kwargs def _tokenize(self, text: str) -> list[str]: """Tokenize a string by inserting the `<pad>` token at the boundary between adjacent characters.""" tokens = list(text) if self.add_blank: interspersed = [self._convert_id_to_token(0)] * (len(tokens) * 2 + 1) interspersed[1::2] = tokens tokens = interspersed return tokens def convert_tokens_to_string(self, tokens: list[str]) -> str: if self.add_blank and len(tokens) > 1: tokens = tokens[1::2] return "".join(tokens) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Union[tuple[str], None]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) __all__ = ["VitsTokenizer"]
transformers/src/transformers/models/vits/tokenization_vits.py/0
{ "file_path": "transformers/src/transformers/models/vits/tokenization_vits.py", "repo_id": "transformers", "token_count": 3967 }
477
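A short usage sketch for the `VitsTokenizer` defined above. The checkpoint name is an assumption (an MMS-TTS English model, which this tokenizer also supports); whether `normalize`, `phonemize` and `is_uroman` are active depends on that checkpoint's tokenizer config, and phonemization additionally requires the `phonemizer` package.

```python
# VitsTokenizer usage sketch; the checkpoint name is an assumed MMS-TTS model.
from transformers import VitsTokenizer

tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng")

encoding = tokenizer("Hello, this is a test.", return_tensors="pt")

# With add_blank=True (the default), _tokenize intersperses token id 0 between the
# character tokens, so the encoded sequence is roughly twice the input length.
print(encoding.input_ids.shape)

# decode drops the interspersed blanks again via convert_tokens_to_string.
print(tokenizer.decode(encoding.input_ids[0]))
```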
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...utils.generic import check_model_inputs from ..auto import AutoModel, AutoModelForCausalLM from ..qwen2_audio.modeling_qwen2_audio import ( Qwen2AudioAttention, Qwen2AudioEncoder, Qwen2AudioEncoderLayer, Qwen2AudioPreTrainedModel, ) from .configuration_voxtral import VoxtralConfig class VoxtralAttention(Qwen2AudioAttention): pass class VoxtralEncoderLayer(Qwen2AudioEncoderLayer): pass class VoxtralPreTrainedModel(Qwen2AudioPreTrainedModel): _supports_flex_attn = True _supports_cache_class = True _supports_attention_backend = True _can_compile_fullgraph = True _supports_attention_backend = True _no_split_modules = None # TODO: @eustlb, I would really prefer to use WhisperEncoder but it's messing with modular @auto_docstring( custom_intro=""" The Voxtral encoder, which is a Whisper encoder. """ ) class VoxtralEncoder(Qwen2AudioEncoder): _can_record_outputs = { "attentions": VoxtralAttention, "hidden_states": VoxtralEncoderLayer, } @check_model_inputs def forward( self, input_features, attention_mask=None, **kwargs: Unpack[TransformersKwargs], ): r""" Args: input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] attention_mask (`torch.Tensor`)`, *optional*): Voxtral does not support masking of the `input_features`, this argument is preserved for compatibility, but it is not used. By default the silence in the input log mel spectrogram are ignored. """ expected_seq_length = self.config.max_source_positions * self.conv1.stride[0] * self.conv2.stride[0] if input_features.shape[-1] != expected_seq_length: raise ValueError( f"Qwen2Audio expects the mel input features to be of length {expected_seq_length}, but found {input_features.shape[-1]}. Make sure to pad the input mel features to {expected_seq_length}." 
) input_features = input_features.to(dtype=self.conv1.weight.dtype, device=self.conv1.weight.device) inputs_embeds = nn.functional.gelu(self.conv1(input_features)) inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds)) inputs_embeds = inputs_embeds.permute(0, 2, 1) embed_pos = self.embed_positions.weight hidden_states = (inputs_embeds + embed_pos).to(inputs_embeds.dtype) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) for idx, encoder_layer in enumerate(self.layers): layer_outputs = encoder_layer( hidden_states, attention_mask=attention_mask, layer_head_mask=None, ) hidden_states = layer_outputs[0] hidden_states = self.layer_norm(hidden_states) return BaseModelOutput( last_hidden_state=hidden_states, ) class VoxtralMultiModalProjector(nn.Module): def __init__(self, config: VoxtralConfig): super().__init__() self.linear_1 = nn.Linear(config.audio_config.intermediate_size, config.text_config.hidden_size, bias=False) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=False) def forward(self, audio_features): hidden_states = self.linear_1(audio_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states @auto_docstring( custom_intro=""" The Voxtral model, which consists of Whisper encoder, a multi-modal projector and a LLama language model. """ ) class VoxtralForConditionalGeneration(VoxtralPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} _keep_in_fp32_modules_strict = ["embed_positions"] def __init__(self, config): super().__init__(config) self.vocab_size = config.text_config.vocab_size self.audio_tower = AutoModel.from_config(config.audio_config) self.language_model = AutoModelForCausalLM.from_config(config.text_config) self.multi_modal_projector = VoxtralMultiModalProjector(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.set_decoder(decoder) def get_decoder(self): return self.language_model.get_decoder() def get_audio_embeds(self, input_features: torch.FloatTensor): """ This method is used to get the audio embeddings from input features (a log mel spectrogram), meaning inferring the audio encoder and the multi-modal projector. Args: input_features (`torch.FloatTensor`): Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] Returns: `torch.FloatTensor`: The audio embeddings. 
""" audio_outputs = self.audio_tower(input_features) audio_hidden_states = audio_outputs.last_hidden_state audio_hidden_states = audio_hidden_states.reshape(-1, self.config.audio_config.intermediate_size) audio_embeds = self.multi_modal_projector(audio_hidden_states) return audio_embeds @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, input_features: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithPast: r""" Example: ```python >>> from transformers import VoxtralForConditionalGeneration, AutoProcessor >>> import torch >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> repo_id = "mistralai/Voxtral-Mini-3B-2507" >>> processor = AutoProcessor.from_pretrained(repo_id) >>> model = VoxtralForConditionalGeneration.from_pretrained(repo_id, dtype=torch.bfloat16, device_map=device) >>> conversation = [ { "role": "user", "content": [ { "type": "audio", "url": "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/dude_where_is_my_car.wav", }, {"type": "text", "text": "What can you tell me about this audio?"}, ], } ] >>> inputs = processor.apply_chat_template(conversation) >>> inputs = inputs.to(device, dtype=torch.bfloat16) >>> outputs = model.generate(**inputs, max_new_tokens=30) >>> processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True) ["This audio is a humorous conversation between two friends, likely in English, where one of them is trying to figure out what the other's tattoo says."] ```""" if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if input_features is not None: audio_embeds = self.get_audio_embeds(input_features) # replace text-audio token placeholders with audio embeddings audio_token_mask = input_ids == self.config.audio_token_id inputs_embeds[audio_token_mask] = audio_embeds outputs: BaseModelOutputWithPast = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) return outputs def prepare_inputs_for_generation(self, *args, **kwargs): # Overwritten -- we should not pass input_features when we are in cached decoding stage input_features = kwargs.pop("input_features", None) cache_position = kwargs.get("cache_position") model_inputs = super().prepare_inputs_for_generation(*args, **kwargs) if cache_position is not None and cache_position[0] == 0: # input_features should only be passed when we are not in cached decoding stage model_inputs["input_features"] = input_features return model_inputs __all__ = ["VoxtralPreTrainedModel", "VoxtralEncoder", "VoxtralForConditionalGeneration"]
transformers/src/transformers/models/voxtral/modular_voxtral.py/0
{ "file_path": "transformers/src/transformers/models/voxtral/modular_voxtral.py", "repo_id": "transformers", "token_count": 4703 }
478
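The `forward` method above already carries a full generation example in its docstring; the less obvious part is how the projected audio embeddings are spliced into the text sequence. Below is a toy, self-contained illustration of that placeholder-replacement step, with made-up tensor sizes and token id and no checkpoint loaded.

```python
# Toy illustration of the masking trick in VoxtralForConditionalGeneration.forward:
# positions whose input id equals the audio placeholder id are overwritten with the
# projected audio embeddings. All sizes and ids here are hypothetical.
import torch

hidden_size = 8
audio_token_id = 99  # hypothetical placeholder id (config.audio_token_id in the real model)

# one sequence of 6 tokens, 3 of which are audio placeholders
input_ids = torch.tensor([[1, 99, 99, 99, 2, 3]])
inputs_embeds = torch.zeros(1, 6, hidden_size)

# stand-in for get_audio_embeds(input_features): one embedding per placeholder position
audio_embeds = torch.randn(3, hidden_size)

audio_token_mask = input_ids == audio_token_id   # (batch, seq_len) boolean mask
inputs_embeds[audio_token_mask] = audio_embeds   # scatter audio embeddings in place

assert inputs_embeds[0, 1:4].equal(audio_embeds)
```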
import math from typing import Optional, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...integrations.fsdp import is_fsdp_managed_module from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ..wav2vec2.modeling_wav2vec2 import Wav2Vec2FeedForward, Wav2Vec2ForSequenceClassification, Wav2Vec2Model from ..wav2vec2_conformer.modeling_wav2vec2_conformer import ( Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForXVector, Wav2Vec2ConformerRelPositionalEmbedding, Wav2Vec2ConformerRotaryPositionalEmbedding, Wav2Vec2ConformerSelfAttention, ) from .configuration_wav2vec2_bert import Wav2Vec2BertConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # Copied from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2._compute_new_attention_mask def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor): """ Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that stops at the corresponding element in `seq_lens`. Args: hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`): The sequences to mask, where `*` is any number of sequence-specific dimensions including none. seq_lens (`torch.Tensor` of shape `(batch)`: Each element represents the length of the sequence at the same index in `hidden_states` Returns: `torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)` """ batch_size, mask_seq_len = hidden_states.shape[:2] indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1) bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len) mask = hidden_states.new_ones((batch_size, mask_seq_len)) mask = mask.masked_fill(bool_mask, 0) return mask class Wav2Vec2BertRotaryPositionalEmbedding(Wav2Vec2ConformerRotaryPositionalEmbedding, nn.Module): def __init__(self, config): nn.Module.__init__(self) dim = config.hidden_size // config.num_attention_heads base = config.rotary_embedding_base inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) # Ignore copy self.register_buffer("inv_freq", inv_freq, persistent=False) self.cached_sequence_length = None self.cached_rotary_positional_embedding = None class Wav2Vec2BertRelPositionalEmbedding(Wav2Vec2ConformerRelPositionalEmbedding): pass class Wav2Vec2BertFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps) self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states class Wav2Vec2BertFeedForward(Wav2Vec2FeedForward, nn.Module): def __init__(self, config, act_fn=None, hidden_size=None): 
nn.Module.__init__(self) act_fn = act_fn if act_fn is not None else config.hidden_act hidden_size = hidden_size if hidden_size is not None else config.hidden_size self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn self.output_dense = nn.Linear(config.intermediate_size, hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) class Wav2Vec2BertConvolutionModule(nn.Module): """Convolution block used in the conformer block""" def __init__(self, config): super().__init__() if (config.conv_depthwise_kernel_size - 1) % 2 == 1: raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding") self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pointwise_conv1 = nn.Conv1d( config.hidden_size, 2 * config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False, ) self.glu = nn.GLU(dim=1) self.depthwise_conv = nn.Conv1d( config.hidden_size, config.hidden_size, config.conv_depthwise_kernel_size, stride=1, padding=0, groups=config.hidden_size, bias=False, ) self.depthwise_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.activation = ACT2FN[config.hidden_act] self.pointwise_conv2 = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=1, stride=1, padding=0, bias=False, ) self.dropout = nn.Dropout(config.conformer_conv_dropout) def forward(self, hidden_states, attention_mask=None): hidden_states = self.layer_norm(hidden_states) # Ensure that we do not leak padded positions in depthwise convolution if attention mask is passed. # Put 0 where necessary if attention_mask is not None: hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # exchange the temporal dimension and the feature dimension hidden_states = hidden_states.transpose(1, 2) # GLU mechanism # => (batch, 2*channel, dim) hidden_states = self.pointwise_conv1(hidden_states) # => (batch, channel, dim) hidden_states = self.glu(hidden_states) # Pad the sequence entirely on the left because of causal convolution. hidden_states = torch.nn.functional.pad(hidden_states, (self.depthwise_conv.kernel_size[0] - 1, 0)) # 1D Depthwise Conv hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.depthwise_layer_norm(hidden_states.transpose(1, 2)).transpose(1, 2) hidden_states = self.activation(hidden_states) hidden_states = self.pointwise_conv2(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class Wav2Vec2BertSelfAttention(Wav2Vec2ConformerSelfAttention, nn.Module): """Construct an Wav2Vec2BertSelfAttention object. Can be enhanced with rotary or relative position embeddings. 
""" def __init__(self, config, is_adapter_attention=False): nn.Module.__init__(self) hidden_size = config.hidden_size if not is_adapter_attention else config.output_hidden_size self.head_size = hidden_size // config.num_attention_heads self.num_heads = config.num_attention_heads self.position_embeddings_type = config.position_embeddings_type if not is_adapter_attention else None self.linear_q = nn.Linear(hidden_size, hidden_size) self.linear_k = nn.Linear(hidden_size, hidden_size) self.linear_v = nn.Linear(hidden_size, hidden_size) self.linear_out = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(p=config.attention_dropout) if self.position_embeddings_type == "relative": # linear transformation for positional encoding self.linear_pos = nn.Linear(hidden_size, hidden_size, bias=False) # these two learnable bias are used in matrix c and matrix d # as described in https://huggingface.co/papers/1901.02860 Section 3.3 self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size)) self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size)) if self.position_embeddings_type == "relative_key": self.left_max_position_embeddings = config.left_max_position_embeddings self.right_max_position_embeddings = config.right_max_position_embeddings num_positions = self.left_max_position_embeddings + self.right_max_position_embeddings + 1 self.distance_embedding = nn.Embedding(num_positions, self.head_size) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, relative_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: # self-attention mechanism batch_size, sequence_length, hidden_size = hidden_states.size() # make sure query/key states can be != value states query_key_states = hidden_states value_states = hidden_states if self.position_embeddings_type == "rotary": if relative_position_embeddings is None: raise ValueError( "`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'" ) query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings) # project query_key_states and value_states query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size) # => (batch, head, time1, d_k) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) if self.position_embeddings_type == "relative": if relative_position_embeddings is None: raise ValueError( "`relative_position_embeddings` has to be defined when `self.position_embeddings_type ==" " 'relative'" ) # apply relative_position_embeddings to qk scores # as proposed in Transformer_XL: https://huggingface.co/papers/1901.02860 scores = self._apply_relative_embeddings( query=query, key=key, relative_position_embeddings=relative_position_embeddings ) else: scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size) if self.position_embeddings_type == "relative_key": query_length, key_length = query.shape[2], key.shape[2] position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_r - 
position_ids_l distance = torch.clamp(distance, -self.left_max_position_embeddings, self.right_max_position_embeddings) positional_embedding = self.distance_embedding(distance + self.left_max_position_embeddings) positional_embedding = positional_embedding.to(dtype=query.dtype) # fp16 compatibility relative_position_attn_weights = torch.einsum("bhld,lrd->bhlr", query, positional_embedding) scores = scores + (relative_position_attn_weights / math.sqrt(self.head_size)) # apply attention_mask if necessary if attention_mask is not None: scores = scores + attention_mask # => (batch, head, time1, time2) probs = torch.softmax(scores, dim=-1) probs = self.dropout(probs) # => (batch, head, time1, d_k) hidden_states = torch.matmul(probs, value) # => (batch, time1, hidden_size) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size) hidden_states = self.linear_out(hidden_states) return hidden_states, probs class Wav2Vec2BertEncoderLayer(GradientCheckpointingLayer): """Conformer block based on https://huggingface.co/papers/2005.08100.""" def __init__(self, config): super().__init__() embed_dim = config.hidden_size dropout = config.attention_dropout # Feed-forward 1 self.ffn1_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn1 = Wav2Vec2BertFeedForward(config) # Self-Attention self.self_attn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.self_attn_dropout = nn.Dropout(dropout) self.self_attn = Wav2Vec2BertSelfAttention(config) # Conformer Convolution self.conv_module = Wav2Vec2BertConvolutionModule(config) # Feed-forward 2 self.ffn2_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn2 = Wav2Vec2BertFeedForward(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states, attention_mask: Optional[torch.Tensor] = None, relative_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, conv_attention_mask: Optional[torch.Tensor] = None, ): hidden_states = hidden_states # 1. Feed-Forward 1 layer residual = hidden_states hidden_states = self.ffn1_layer_norm(hidden_states) hidden_states = self.ffn1(hidden_states) hidden_states = hidden_states * 0.5 + residual residual = hidden_states # 2. Self-Attention layer hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weigts = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, ) hidden_states = self.self_attn_dropout(hidden_states) hidden_states = hidden_states + residual # 3. Convolutional Layer residual = hidden_states hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask) hidden_states = residual + hidden_states # 4. 
Feed-Forward 2 Layer residual = hidden_states hidden_states = self.ffn2_layer_norm(hidden_states) hidden_states = self.ffn2(hidden_states) hidden_states = hidden_states * 0.5 + residual hidden_states = self.final_layer_norm(hidden_states) return hidden_states, attn_weigts class Wav2Vec2BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.position_embeddings_type == "relative": self.embed_positions = Wav2Vec2BertRelPositionalEmbedding(config) elif config.position_embeddings_type == "rotary": self.embed_positions = Wav2Vec2BertRotaryPositionalEmbedding(config) else: self.embed_positions = None self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([Wav2Vec2BertEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None conv_attention_mask = attention_mask if attention_mask is not None: # make sure padded tokens output 0 hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) hidden_states = self.dropout(hidden_states) if self.embed_positions is not None: relative_position_embeddings = self.embed_positions(hidden_states) else: relative_position_embeddings = None synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.layerdrop if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync layer_outputs = layer( hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, conv_attention_mask=conv_attention_mask, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class Wav2Vec2BertAdapter(nn.Module): def __init__(self, config): super().__init__() # feature dim might need to be down-projected if config.output_hidden_size != config.hidden_size: self.proj = nn.Linear(config.hidden_size, config.output_hidden_size) self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size, eps=config.layer_norm_eps) else: self.proj = self.proj_layer_norm = None self.layers = nn.ModuleList(Wav2Vec2BertAdapterLayer(config) for _ in range(config.num_adapter_layers)) self.layerdrop = config.layerdrop self.kernel_size = config.adapter_kernel_size self.stride = config.adapter_stride 
def _compute_sub_sample_lengths_from_attention_mask(self, seq_lens): if seq_lens is None: return seq_lens pad = self.kernel_size // 2 seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1 return seq_lens.floor() def forward(self, hidden_states, attention_mask=None): # down project hidden_states if necessary if self.proj is not None and self.proj_layer_norm is not None: hidden_states = self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) sub_sampled_lengths = None if attention_mask is not None: sub_sampled_lengths = (attention_mask.size(1) - (1 - attention_mask.int()).sum(1)).to(hidden_states.device) for layer in self.layers: layerdrop_prob = torch.rand([]) sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(sub_sampled_lengths) if not self.training or (layerdrop_prob > self.layerdrop): hidden_states = layer( hidden_states, attention_mask=attention_mask, sub_sampled_lengths=sub_sampled_lengths ) return hidden_states class Wav2Vec2BertAdapterLayer(nn.Module): def __init__(self, config): super().__init__() embed_dim = config.output_hidden_size dropout = config.conformer_conv_dropout self.kernel_size = config.adapter_kernel_size self.stride = config.adapter_stride # 1. residual convolution self.residual_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.residual_conv = nn.Conv1d( embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2, ) self.activation = nn.GLU(dim=1) # Self-Attention self.self_attn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.self_attn_conv = nn.Conv1d( embed_dim, 2 * embed_dim, self.kernel_size, stride=self.stride, padding=self.stride // 2, ) self.self_attn = Wav2Vec2BertSelfAttention(config, is_adapter_attention=True) self.self_attn_dropout = nn.Dropout(dropout) # Feed-forward self.ffn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.ffn = Wav2Vec2BertFeedForward(config, act_fn=config.adapter_act, hidden_size=embed_dim) def forward( self, hidden_states, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, sub_sampled_lengths: Optional[torch.Tensor] = None, ): residual = self.residual_layer_norm(hidden_states) # Apply pooling to the residual to match the sequence length of the # multi-head attention output. # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) residual = residual.transpose(1, 2) residual = self.residual_conv(residual) residual = self.activation(residual) # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) residual = residual.transpose(1, 2) hidden_states = self.self_attn_layer_norm(hidden_states) # Apply pooling before feeding to the multihead-attention layer. # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.self_attn_conv(hidden_states) hidden_states = self.activation(hidden_states) # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) hidden_states = hidden_states.transpose(1, 2) if attention_mask is not None: attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths) attention_mask = _prepare_4d_attention_mask( attention_mask, hidden_states.dtype, ) # The rest of the computation is identical to a vanilla Transformer # encoder layer. 
hidden_states, attn_weights = self.self_attn( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = self.self_attn_dropout(hidden_states) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.ffn_layer_norm(hidden_states) hidden_states = self.ffn(hidden_states) + residual return hidden_states @auto_docstring class Wav2Vec2BertPreTrainedModel(PreTrainedModel): config: Wav2Vec2BertConfig base_model_prefix = "wav2vec2_bert" main_input_name = "input_features" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Wav2Vec2BertSelfAttention): if hasattr(module, "pos_bias_u"): nn.init.xavier_uniform_(module.pos_bias_u) if hasattr(module, "pos_bias_v"): nn.init.xavier_uniform_(module.pos_bias_v) elif isinstance(module, Wav2Vec2BertFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) elif isinstance(module, Wav2Vec2BertModel): if hasattr(module, "masked_spec_embed"): module.masked_spec_embed.data.uniform_() elif isinstance( module, (Wav2Vec2BertForSequenceClassification, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForXVector), ): if hasattr(module, "layer_weights"): module.layer_weights.data.fill_(1.0 / (self.config.num_hidden_layers + 1)) elif isinstance(module, AMSoftmaxLoss): # noqa: F821 module.weight.data.normal_() # Ignore copy def _get_feat_extract_output_lengths( self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride, padding): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length + 2 * padding - kernel_size, stride, rounding_mode="floor") + 1 if add_adapter: padding = self.config.adapter_kernel_size // 2 for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length( input_lengths, self.config.adapter_kernel_size, self.config.adapter_stride, padding ) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. 
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) output_lengths = output_lengths.to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask Wav2Vec2BertBaseModelOutput = Wav2Vec2BaseModelOutput class Wav2Vec2BertModel(Wav2Vec2Model, Wav2Vec2BertPreTrainedModel): def __init__(self, config: Wav2Vec2BertConfig): Wav2Vec2BertPreTrainedModel.__init__(config) self.config = config self.feature_projection = Wav2Vec2BertFeatureProjection(config) # model only needs masking vector if mask prob is > 0.0 if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) self.encoder = Wav2Vec2BertEncoder(config) self.adapter = Wav2Vec2BertAdapter(config) if config.add_adapter else None self.intermediate_ffn = None if config.use_intermediate_ffn_before_adapter: self.intermediate_ffn = Wav2Vec2BertFeedForward(config, act_fn="relu") # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): raise AttributeError("Not needed for Wav2Vec2Bert") def freeze_feature_encoder(self): raise AttributeError("Not needed for Wav2Vec2Bert") def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, Wav2Vec2BertBaseModelOutput]: r""" mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states, extract_features = self.feature_projection(input_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.intermediate_ffn: expanded_hidden_states = self.intermediate_ffn(hidden_states) hidden_states = hidden_states + 0.5 * expanded_hidden_states if self.adapter is not None: hidden_states = self.adapter(hidden_states, attention_mask=attention_mask) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BertBaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class Wav2Vec2BertForCTC(Wav2Vec2ConformerForCTC): def __init__(self, config, target_lang: Optional[str] = None): r""" target_lang (`str`, *optional*): Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechSatForCTC`] with adapters. Uses 'eng' by default. """ super().__init__(config) def freeze_feature_encoder(self): raise AttributeError("Not needed for Wav2Vec2Bert") def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. 
""" if labels is not None and labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones(input_features.shape[:2], device=input_features.device, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum([-1])).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) class Wav2Vec2BertForSequenceClassification(Wav2Vec2ForSequenceClassification): def __init__(self, config): super().__init__(config) def freeze_feature_extractor(self): raise AttributeError("Not needed for Wav2Vec2Bert") def freeze_feature_encoder(self): raise AttributeError("Not needed for Wav2Vec2Bert") def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wav2vec2_bert.parameters(): param.requires_grad = False def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class Wav2Vec2BertForAudioFrameClassification(Wav2Vec2ConformerForAudioFrameClassification): def __init__(self, config): super().__init__(config) def freeze_feature_encoder(self): raise AttributeError("Not needed for Wav2Vec2Bert") def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class Wav2Vec2BertForXVector(Wav2Vec2ConformerForXVector): def __init__(self, config): super().__init__(config) def freeze_feature_encoder(self): raise AttributeError("Not needed for Wav2Vec2Bert") def forward( self, input_features: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[tuple, XVectorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wav2vec2_bert( input_features, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "Wav2Vec2BertForAudioFrameClassification", "Wav2Vec2BertForCTC", "Wav2Vec2BertForSequenceClassification", "Wav2Vec2BertForXVector", "Wav2Vec2BertModel", "Wav2Vec2BertPreTrainedModel", ]
transformers/src/transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py", "repo_id": "transformers", "token_count": 19773 }
479
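The adapter stack above repeatedly shrinks the time axis with strided 1D convolutions, and `_compute_sub_sample_lengths_from_attention_mask` / `_get_feat_extract_output_lengths` re-derive the per-example sequence lengths with the standard Conv1d output-length rule, floor((L_in + 2*padding - kernel_size) / stride) + 1. The following minimal sketch only illustrates that rule in isolation; the function and variable names are hypothetical and it is not part of the file above.

import torch

def conv_out_length(input_lengths: torch.Tensor, kernel_size: int, stride: int, padding: int) -> torch.Tensor:
    # Standard 1D convolution output-length formula (see the torch.nn.Conv1d docs):
    # L_out = floor((L_in + 2 * padding - kernel_size) / stride) + 1
    return torch.div(input_lengths + 2 * padding - kernel_size, stride, rounding_mode="floor") + 1

# Hypothetical per-example frame counts before the adapter, sub-sampled by two
# adapter layers with kernel_size=3, stride=2 (values are illustrative only).
lengths = torch.tensor([160, 96, 312])
for _ in range(2):
    lengths = conv_out_length(lengths, kernel_size=3, stride=2, padding=3 // 2)
print(lengths)  # lengths after sub-sampling, used to rebuild the attention mask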
import math from typing import Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...integrations.fsdp import is_fsdp_managed_module from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, Wav2Vec2BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import logging from ..wav2vec2.modeling_wav2vec2 import ( Wav2Vec2FeatureProjection, Wav2Vec2FeedForward, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForSequenceClassification, Wav2Vec2ForXVector, Wav2Vec2Model, Wav2Vec2PositionalConvEmbedding, Wav2Vec2PreTrainedModel, ) from .configuration_wavlm import WavLMConfig logger = logging.get_logger(__name__) class WavLMPositionalConvEmbedding(Wav2Vec2PositionalConvEmbedding): pass class WavLMFeatureProjection(Wav2Vec2FeatureProjection): pass class WavLMAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, num_buckets: int = 320, max_distance: int = 800, has_relative_position_bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.out_proj = nn.Linear(embed_dim, embed_dim) self.num_buckets = num_buckets self.max_distance = max_distance self.gru_rel_pos_const = nn.Parameter(torch.ones(1, self.num_heads, 1, 1)) self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8) if has_relative_position_bias: self.rel_attn_embed = nn.Embedding(self.num_buckets, self.num_heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_bias: Optional[torch.Tensor] = None, output_attentions: bool = False, index=0, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Attention layer with relative attention""" bsz, tgt_len, _ = hidden_states.size() # first pass of attention layer creates position bias if position_bias is None: position_bias = self.compute_bias(tgt_len, tgt_len) position_bias = ( position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, tgt_len) ) # Compute relative position bias: # 1) get reshape hidden_states gated_hidden_states = hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, -1)) gated_hidden_states = gated_hidden_states.permute(0, 2, 1, 3) # 2) project hidden states relative_position_proj = self.gru_rel_pos_linear(gated_hidden_states) relative_position_proj = relative_position_proj.view(gated_hidden_states.shape[:-1] + (2, 4)).sum(-1) # 3) compute gate for position bias from projected hidden states gate_a, gate_b = torch.sigmoid(relative_position_proj).chunk(2, dim=-1) gate_output = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0 # 4) apply gate to position bias to compute gated position_bias gated_position_bias = gate_output.view(bsz * self.num_heads, -1, 1) * position_bias gated_position_bias = gated_position_bias.view((-1, tgt_len, tgt_len)) attn_output, attn_weights = self.torch_multi_head_self_attention( hidden_states, 
attention_mask, gated_position_bias, output_attentions ) return attn_output, attn_weights, position_bias def torch_multi_head_self_attention( self, hidden_states: torch.FloatTensor, attention_mask: Union[torch.LongTensor, torch.BoolTensor], gated_position_bias: torch.FloatTensor, output_attentions: bool, ) -> (torch.FloatTensor, torch.FloatTensor): """simple wrapper around torch's multi_head_attention_forward function""" # self-attention assumes q = k = v query = key = value = hidden_states.transpose(0, 1) key_padding_mask = attention_mask.ne(1) if attention_mask is not None else None # disable bias and add_zero_attn bias_k = bias_v = None add_zero_attn = False # PyTorch 1.3.0 has F.multi_head_attention_forward defined # so no problem with backwards compatibility attn_output, attn_weights = F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), bias_k, bias_v, add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, output_attentions, gated_position_bias, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) # [Seq_Len, Batch Size, ...] -> [Batch Size, Seq_Len, ...] attn_output = attn_output.transpose(0, 1) if attn_weights is not None: # IMPORTANT: Attention weights are averaged weights # here which should not be the case. This is an open issue # on PyTorch: https://github.com/pytorch/pytorch/issues/32590 attn_weights = attn_weights[:, None].broadcast_to( attn_weights.shape[:1] + (self.num_heads,) + attn_weights.shape[1:] ) return attn_output, attn_weights def compute_bias(self, query_length: int, key_length: int) -> torch.FloatTensor: context_position = torch.arange(query_length, dtype=torch.long)[:, None] memory_position = torch.arange(key_length, dtype=torch.long)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_positions_bucket(relative_position) relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device) values = self.rel_attn_embed(relative_position_bucket) values = values.permute([2, 0, 1]) return values def _relative_positions_bucket(self, relative_positions: torch.FloatTensor) -> torch.FloatTensor: num_buckets = self.num_buckets // 2 relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets relative_positions = torch.abs(relative_positions) max_exact = num_buckets // 2 is_small = relative_positions < max_exact relative_positions_if_large = torch.log(relative_positions.float() / max_exact) relative_positions_if_large = relative_positions_if_large / math.log(self.max_distance / max_exact) relative_positions_if_large = relative_positions_if_large * (num_buckets - max_exact) relative_position_if_large = (max_exact + relative_positions_if_large).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_positions, relative_position_if_large) return relative_buckets class WavLMFeedForward(Wav2Vec2FeedForward): pass class WavLMEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True): super().__init__() self.attention = WavLMAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, 
num_buckets=config.num_buckets, max_distance=config.max_bucket_distance, has_relative_position_bias=has_relative_position_bias, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = WavLMFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0): attn_residual = hidden_states hidden_states, attn_weights, position_bias = self.attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, index=index, ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states, position_bias) if output_attentions: outputs += (attn_weights,) return outputs class WavLMEncoderLayerStableLayerNorm(GradientCheckpointingLayer): def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True): super().__init__() self.attention = WavLMAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, num_buckets=config.num_buckets, max_distance=config.max_bucket_distance, has_relative_position_bias=has_relative_position_bias, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = WavLMFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, position_bias = self.attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) outputs = (hidden_states, position_bias) if output_attentions: outputs += (attn_weights,) return outputs class WavLMEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = WavLMPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [WavLMEncoderLayer(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or 
is_fsdp_managed_module(self) position_bias = None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop) if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync layer_outputs = layer( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, index=i, ) hidden_states, position_bias = layer_outputs[:2] if skip_the_layer: layer_outputs = (None, None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class WavLMEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = WavLMPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [ WavLMEncoderLayerStableLayerNorm(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers) ] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens are not attended to expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) position_bias = None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop) if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, position_bias=position_bias, ) hidden_states, position_bias = layer_outputs[:2] if skip_the_layer: layer_outputs = (None, None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions 
) class WavLMGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information. """ def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible" f" by `config.num_codevector_groups` {self.num_groups} " "for concatenation." ) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True) codevector_probs = codevector_probs.type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity class WavLMPreTrainedModel(PreTrainedModel, Wav2Vec2PreTrainedModel): config: WavLMConfig base_model_prefix = "wavlm" main_input_name = "input_values" supports_gradient_checkpointing = True _supports_flash_attn = False _supports_sdpa = False _supports_flex_attn = False def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, WavLMGumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, WavLMPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, WavLMFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) 
nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_adapters(self): raise AttributeError("Not needed for WavLM") def init_adapter_layers(self): raise AttributeError("Not needed for WavLM") def load_adapter(self): raise AttributeError("Not needed for WavLM") WavLMBaseModelOutput = Wav2Vec2BaseModelOutput class WavLMModel(Wav2Vec2Model): pass class WavLMForCTC(Wav2Vec2ForCTC): pass class WavLMForSequenceClassification(Wav2Vec2ForSequenceClassification): pass class WavLMForAudioFrameClassification(Wav2Vec2ForAudioFrameClassification): pass class WavLMForXVector(Wav2Vec2ForXVector): pass __all__ = [ "WavLMForAudioFrameClassification", "WavLMForCTC", "WavLMForSequenceClassification", "WavLMForXVector", "WavLMModel", "WavLMPreTrainedModel", ]
transformers/src/transformers/models/wavlm/modular_wavlm.py/0
{ "file_path": "transformers/src/transformers/models/wavlm/modular_wavlm.py", "repo_id": "transformers", "token_count": 10450 }
480
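WavLMAttention above derives its relative-position bias from bucketed offsets (`compute_bias` and `_relative_positions_bucket`): small offsets each get their own bucket, while larger offsets share logarithmically spaced buckets up to `max_distance`. The standalone sketch below reproduces that bucketing idea under simplified, hypothetical names; it is a sketch of the scheme, not the library's implementation.

import math

import torch

def relative_position_bucket(relative_positions: torch.Tensor, num_buckets: int = 320, max_distance: int = 800) -> torch.Tensor:
    # Half the buckets are reserved for positive (forward-looking) offsets.
    num_buckets = num_buckets // 2
    buckets = (relative_positions > 0).long() * num_buckets
    rel = relative_positions.abs()
    # Offsets below max_exact map one-to-one to buckets; larger offsets are log-spaced.
    max_exact = num_buckets // 2
    is_small = rel < max_exact
    rel_if_large = max_exact + (
        torch.log(rel.float().clamp(min=1) / max_exact)  # clamp avoids log(0); those entries are overridden below
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).long()
    rel_if_large = torch.min(rel_if_large, torch.full_like(rel_if_large, num_buckets - 1))
    return buckets + torch.where(is_small, rel, rel_if_large)

# Toy usage: bucket ids for a length-6 sequence; in WavLM such ids index an
# nn.Embedding of shape (num_buckets, num_heads) to form the additive attention bias.
context = torch.arange(6)[:, None]
memory = torch.arange(6)[None, :]
print(relative_position_bucket(memory - context))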
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch X-CLIP model.""" import copy from dataclasses import dataclass from typing import Any, Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils import ( ModelOutput, auto_docstring, can_return_tuple, logging, torch_int, ) from .configuration_x_clip import XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig logger = logging.get_logger(__name__) # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->x_clip def x_clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass @auto_docstring class XCLIPOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for video-text similarity. logits_per_video (`torch.FloatTensor` of shape `(video_batch_size, text_batch_size)`): The scaled dot product scores between `video_embeds` and `text_embeds`. This represents the video-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, video_batch_size)`): The scaled dot product scores between `text_embeds` and `video_embeds`. This represents the text-video similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`XCLIPTextModel`]. video_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The video embeddings obtained by applying the projection layer to the pooled output of [`XCLIPVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`XCLIPTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`XCLIPVisionModel`]. mit_output (`BaseModelOutputWithPooling`): The output of `XCLIPMultiframeIntegrationTransformer` (MIT for short). 
""" loss: Optional[torch.FloatTensor] = None logits_per_video: Optional[torch.FloatTensor] = None logits_per_text: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None video_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None mit_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output", "mit_output"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->XCLIP class XCLIPVisionEmbeddings(nn.Module): def __init__(self, config: XCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." 
) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->XCLIP class XCLIPTextEmbeddings(nn.Module): def __init__(self, config: XCLIPTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] max_position_embedding = self.position_embedding.weight.shape[0] if seq_length > max_position_embedding: raise ValueError( f"Sequence length must be less than max_position_embeddings (got `sequence length`: " f"{seq_length} and max_position_embeddings: {max_position_embedding}" ) if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings # Copied from transformers.models.siglip.modeling_siglip.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class XCLIPAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.is_causal = False self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" batch_size, seq_length, embed_dim = hidden_states.shape queries = self.q_proj(hidden_states) keys = self.k_proj(hidden_states) values = self.v_proj(hidden_states) queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2) # CLIP text model uses both `causal_attention_mask` and `attention_mask` # in case FA2 kernel is called, `is_causal` should be inferred from `causal_attention_mask` if self.config._attn_implementation != "flash_attention_2": if attention_mask is not None and causal_attention_mask is not None: attention_mask = attention_mask + causal_attention_mask elif causal_attention_mask is not None: attention_mask = causal_attention_mask else: self.is_causal = causal_attention_mask is not None attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout, ) attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights class XCLIPMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer with AltCLIP->XCLIP class XCLIPEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: XCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = XCLIPAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = XCLIPMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->XCLIP class XCLIPDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class XCLIPVisionEncoderLayer(GradientCheckpointingLayer): """ This corresponds to the `CrossFramelAttentionBlock` class in the original implementation. """ def __init__(self, config: XCLIPConfig): super().__init__() self.num_frames = config.num_frames self.embed_dim = config.hidden_size self.message_fc = nn.Linear(self.embed_dim, self.embed_dim) self.message_ln = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.message_attn = XCLIPAttention(config) self.drop_path = XCLIPDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() self.self_attn = XCLIPAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = XCLIPMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" batch_time, seq_length, hidden_size = hidden_states.size() batch_size = batch_time // self.num_frames msg_token = self.message_fc(hidden_states[:, 0, :]) msg_token = msg_token.view(batch_size, self.num_frames, hidden_size) msg_token = msg_token + self.drop_path(self.message_attn(self.message_ln(msg_token))[0]) # add dummy sequence dimension msg_token = msg_token.view(-1, 1, hidden_size) hidden_states = torch.cat([hidden_states, msg_token], dim=1) residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states hidden_states = hidden_states[:, :seq_length, :] residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs @auto_docstring class XCLIPPreTrainedModel(PreTrainedModel): config: XCLIPConfig base_model_prefix = "x_clip" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, XCLIPTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, XCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, XCLIPAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, XCLIPMLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, XCLIPModel): factor = self.config.initializer_factor nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * factor, ) nn.init.normal_(module.prompts_visual_projection, mean=0.0, std=module.vision_embed_dim**-0.5 * factor) elif isinstance(module, XCLIPMultiframeIntegrationTransformer): nn.init.normal_(module.position_embedding, std=self.config.initializer_factor) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor) if module.bias is not None: module.bias.data.zero_() # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->XCLIP class XCLIPEncoder(nn.Module): """ Transformer 
encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`XCLIPEncoderLayer`]. Args: config: XCLIPConfig """ def __init__(self, config: XCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([XCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class XCLIPTextTransformer(nn.Module): def __init__(self, config: XCLIPTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = XCLIPTextEmbeddings(config) self.encoder = XCLIPEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify either input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) # X_CLIP's text model uses causal mask, prepare it here. 
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = _create_4d_causal_attention_mask( input_shape, hidden_states.dtype, device=hidden_states.device ) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class XCLIPTextModel(XCLIPPreTrainedModel): config: XCLIPTextConfig def __init__(self, config: XCLIPTextConfig): super().__init__(config) self.text_model = XCLIPTextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: r""" Examples: ```python >>> from transformers import AutoTokenizer, XCLIPTextModel >>> model = XCLIPTextModel.from_pretrained("microsoft/xclip-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/xclip-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class XCLIPVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`XCLIPVisionEncoderLayer`]. 
Args: config: XCLIPConfig """ def __init__(self, config: XCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([XCLIPVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class XCLIPVisionTransformer(nn.Module): """ This corresponds to the `CrossFrameCommunicationTransformer` class in the original implementation. 
""" def __init__(self, config: XCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = XCLIPVisionEmbeddings(config) self.pre_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = XCLIPVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @auto_docstring def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layernorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class XCLIPVisionModel(XCLIPPreTrainedModel): config: XCLIPVisionConfig main_input_name = "pixel_values" def __init__(self, config: XCLIPVisionConfig): super().__init__(config) self.vision_model = XCLIPVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: r""" Examples: ```python >>> import av >>> import torch >>> import numpy as np >>> from transformers import AutoProcessor, XCLIPVisionModel >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... 
indices (`list[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample 16 frames >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> processor = AutoProcessor.from_pretrained("microsoft/xclip-base-patch32") >>> model = XCLIPVisionModel.from_pretrained("microsoft/xclip-base-patch32") >>> pixel_values = processor(videos=list(video), return_tensors="pt").pixel_values >>> batch_size, num_frames, num_channels, height, width = pixel_values.shape >>> pixel_values = pixel_values.reshape(-1, num_channels, height, width) >>> outputs = model(pixel_values) >>> last_hidden_state = outputs.last_hidden_state ```""" return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class XCLIPMultiframeIntegrationTransformer(nn.Module): """ This corresponds to the `MultiframeIntegrationTransformer` class in the original implementation. """ def __init__(self, config: XCLIPVisionConfig): super().__init__() self.position_embedding = nn.Parameter(torch.empty(1, config.num_frames, config.hidden_size)) self.encoder = XCLIPEncoder(config) def forward( self, hidden_states, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: residual = hidden_states # add position embeddings hidden_states = hidden_states + self.position_embedding encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = last_hidden_state.type(hidden_states.dtype) + residual pooled_output = last_hidden_state.mean(dim=1, keepdim=False) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class XCLIPCrossAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.num_heads = config.prompt_num_attention_heads dim = config.projection_dim head_dim = dim // self.num_heads self.scale = head_dim**-0.5 self.q_proj = nn.Linear(dim, dim, bias=False) self.k_proj = nn.Linear(dim, dim, bias=False) self.v_proj = nn.Linear(dim, dim, bias=False) self.attn_drop = nn.Dropout(config.prompt_attention_dropout) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(config.prompt_projection_dropout) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, queries, keys, values): """Input shape: Batch x Time x Channel""" batch_size, query_seq_len, 
hidden_size = queries.shape batch_size, key_seq_len, hidden_size = keys.shape queries = ( self.q_proj(queries) .reshape(batch_size, query_seq_len, self.num_heads, hidden_size // self.num_heads) .permute(0, 2, 1, 3) ) keys = ( self.k_proj(keys) .reshape(batch_size, key_seq_len, self.num_heads, hidden_size // self.num_heads) .permute(0, 2, 1, 3) ) values = ( self.v_proj(values) .reshape(batch_size, key_seq_len, self.num_heads, hidden_size // self.num_heads) .permute(0, 2, 1, 3) ) attn = (queries @ keys.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ values).transpose(1, 2).reshape(batch_size, query_seq_len, hidden_size) x = self.proj(x) x = self.proj_drop(x) return x class PromptGeneratorLayer(nn.Module): def __init__(self, config): super().__init__() embed_dim = config.projection_dim self.cross_attn = XCLIPCrossAttention(config) self.norm1 = nn.LayerNorm(embed_dim, eps=config.text_config.layer_norm_eps) self.norm3 = nn.LayerNorm(embed_dim, eps=config.text_config.layer_norm_eps) self.mlp = nn.Sequential( nn.Linear(embed_dim, embed_dim * 4), ACT2FN[config.prompt_hidden_act], nn.Dropout(config.prompt_attention_dropout), nn.Linear(embed_dim * 4, embed_dim), ) def forward(self, x, visual): x = x + self.cross_attn(self.norm1(x), visual, visual) x = x + self.mlp(self.norm3(x)) return x class XCLIPPromptGenerator(nn.Module): """This corresponds to the `VideoSpecificPrompt` class in the original implementation.""" def __init__(self, config): super().__init__() embed_dim = config.projection_dim self.layernorm = nn.LayerNorm(embed_dim, eps=config.vision_config.layer_norm_eps) self.decoder = nn.ModuleList([PromptGeneratorLayer(config) for _ in range(config.prompt_layers)]) self.alpha = nn.Parameter(torch.ones(embed_dim) * config.prompt_alpha) def forward(self, text, visual): visual = self.layernorm(visual) for layer in self.decoder: text = layer(text, visual) return self.alpha * text @auto_docstring class XCLIPModel(XCLIPPreTrainedModel): config: XCLIPConfig def __init__(self, config: XCLIPConfig): super().__init__(config) if not isinstance(config.text_config, XCLIPTextConfig): raise TypeError( "config.text_config is expected to be of type XCLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, XCLIPVisionConfig): raise TypeError( "config.vision_config is expected to be of type XCLIPVisionConfig but is of type" f" {type(config.vision_config)}." 
) text_config = config.text_config vision_config = config.vision_config # The module using it is not a PreTrainedModel subclass so we need this text_config._attn_implementation = config._attn_implementation # The module using it is not a PreTrainedModel subclass so we need this vision_config._attn_implementation = config._attn_implementation self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = XCLIPTextTransformer(text_config) self.vision_model = XCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.prompts_visual_layernorm = nn.LayerNorm(self.vision_embed_dim, eps=config.vision_config.layer_norm_eps) self.prompts_visual_projection = nn.Parameter(torch.randn(self.vision_embed_dim, self.projection_dim)) mit_config = copy.copy(vision_config) mit_config.hidden_size = vision_config.mit_hidden_size mit_config.intermediate_size = vision_config.mit_intermediate_size mit_config.num_hidden_layers = vision_config.mit_num_hidden_layers mit_config.num_attention_heads = vision_config.mit_num_attention_heads self.mit = XCLIPMultiframeIntegrationTransformer(mit_config) self.prompts_generator = XCLIPPromptGenerator(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`XCLIPTextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, AutoModel >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/xclip-base-patch32") >>> model = AutoModel.from_pretrained("microsoft/xclip-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" # Use X_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) return text_embeds @auto_docstring def get_video_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: video_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The video embeddings obtained by applying the projection layer to the pooled output of [`XCLIPVisionModel`] and [`XCLIPMultiframeIntegrationTransformer`]. Examples: ```python >>> import av >>> import torch >>> import numpy as np >>> from transformers import AutoProcessor, AutoModel >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`list[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample 8 frames >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> processor = AutoProcessor.from_pretrained("microsoft/xclip-base-patch32") >>> model = AutoModel.from_pretrained("microsoft/xclip-base-patch32") >>> inputs = processor(videos=list(video), return_tensors="pt") >>> video_features = model.get_video_features(**inputs) ```""" # Use X_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_frames, num_channels, height, width = pixel_values.shape pixel_values = pixel_values.reshape(-1, num_channels, height, width) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) video_embeds = vision_outputs[1] video_embeds = self.visual_projection(video_embeds) cls_features = video_embeds.view(batch_size, num_frames, -1) mit_outputs = self.mit( cls_features, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) video_embeds = mit_outputs[1] return video_embeds @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, XCLIPOutput]: r""" return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> import av >>> import torch >>> import numpy as np >>> from transformers import AutoProcessor, AutoModel >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`list[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... 
) >>> container = av.open(file_path) >>> # sample 8 frames >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> processor = AutoProcessor.from_pretrained("microsoft/xclip-base-patch32") >>> model = AutoModel.from_pretrained("microsoft/xclip-base-patch32") >>> inputs = processor( ... text=["playing sports", "eating spaghetti", "go shopping"], ... videos=list(video), ... return_tensors="pt", ... padding=True, ... ) >>> # forward pass >>> with torch.no_grad(): ... outputs = model(**inputs) >>> logits_per_video = outputs.logits_per_video # this is the video-text similarity score >>> probs = logits_per_video.softmax(dim=1) # we can take the softmax to get the label probabilities >>> print(probs) tensor([[1.9496e-04, 9.9960e-01, 2.0825e-04]]) ```""" # Use X_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_frames, num_channels, height, width = pixel_values.shape pixel_values = pixel_values.reshape(-1, num_channels, height, width) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) video_embeds = vision_outputs[1] video_embeds = self.visual_projection(video_embeds) cls_features = video_embeds.view(batch_size, num_frames, -1) mit_outputs = self.mit( cls_features, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) video_embeds = mit_outputs[1] img_features = vision_outputs[0][:, 1:, :] img_features = self.prompts_visual_layernorm(img_features) img_features = img_features @ self.prompts_visual_projection img_features = img_features.view(batch_size, num_frames, -1, video_embeds.shape[-1]) img_features = img_features.mean(dim=1, keepdim=False) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) text_embeds = text_embeds.unsqueeze(0).expand(batch_size, -1, -1) text_embeds = text_embeds + self.prompts_generator(text_embeds, img_features) # normalized features video_embeds = video_embeds / video_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_video = torch.einsum("bd,bkd->bk", video_embeds, logit_scale * text_embeds) logits_per_text = logits_per_video.T loss = None if return_loss: loss = x_clip_loss(logits_per_text) if not return_dict: output = (logits_per_video, logits_per_text, text_embeds, video_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return XCLIPOutput( loss=loss, logits_per_video=logits_per_video, logits_per_text=logits_per_text, text_embeds=text_embeds, video_embeds=video_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, 
mit_output=mit_outputs, ) __all__ = ["XCLIPModel", "XCLIPPreTrainedModel", "XCLIPTextModel", "XCLIPVisionModel"]
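

if __name__ == "__main__":
    # Minimal sketch (toy shapes assumed, not part of the model API above):
    # `drop_path` implements stochastic depth per sample. During training each
    # sample in the batch is either zeroed out entirely or rescaled by
    # 1 / keep_prob, so the expected value of the residual branch is preserved.
    import torch

    x = torch.ones(4, 3, 8)  # (batch, seq_len, hidden_size)
    out = drop_path(x, drop_prob=0.5, training=True)
    # Each of the 4 samples is either all zeros or all 2.0 (= 1 / 0.5).
    print(out[:, 0, 0])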
transformers/src/transformers/models/x_clip/modeling_x_clip.py/0
{ "file_path": "transformers/src/transformers/models/x_clip/modeling_x_clip.py", "repo_id": "transformers", "token_count": 28211 }
481
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OpenAI GPT checkpoint.""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path): # Load checkpoint chkpt = torch.load(xlm_checkpoint_path, map_location="cpu", weights_only=True) state_dict = chkpt["model"] # We have the base model one level deeper than the original XLM repository two_levels_state_dict = {} for k, v in state_dict.items(): if "pred_layer" in k: two_levels_state_dict[k] = v else: two_levels_state_dict["transformer." + k] = v config = chkpt["params"] config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))} vocab = chkpt["dico_word2id"] vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()} # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"] print(f"Save PyTorch model to {pytorch_weights_dump_path}") torch.save(two_levels_state_dict, pytorch_weights_dump_path) print(f"Save configuration file to {pytorch_config_dump_path}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(json.dumps(config, indent=2) + "\n") print(f"Save vocab file to {pytorch_config_dump_path}") with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f: f.write(json.dumps(vocab, indent=2) + "\n") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
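

# Example invocation (the paths below are placeholders, not files shipped with
# the repository); the dump folder will then contain the PyTorch weights, the
# configuration JSON and the vocabulary file expected by the XLM tokenizer:
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path /path/to/original_xlm_checkpoint.pth \
#       --pytorch_dump_folder_path /path/to/output_dir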
transformers/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1112 }
482
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """XLNet configuration""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class XLNetConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`XLNetModel`] or a [`TFXLNetModel`]. It is used to instantiate a XLNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [xlnet/xlnet-large-cased](https://huggingface.co/xlnet/xlnet-large-cased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the XLNet model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`XLNetModel`] or [`TFXLNetModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. n_layer (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. d_inner (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. ff_activation (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. untie_r (`bool`, *optional*, defaults to `True`): Whether or not to untie relative position biases attn_type (`str`, *optional*, defaults to `"bi"`): The attention type used by the model. Set `"bi"` for XLNet, `"uni"` for Transformer-XL. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. mem_len (`int` or `None`, *optional*): The number of tokens to cache. The key/value pairs that have already been pre-computed in a previous forward pass won't be re-computed. See the [quickstart](https://huggingface.co/transformers/quickstart.html#using-the-past) for more information. reuse_len (`int`, *optional*): The number of tokens in the current batch to be cached and reused in the future. 
bi_data (`bool`, *optional*, defaults to `False`): Whether or not to use bidirectional input pipeline. Usually set to `True` during pretraining and `False` during finetuning. clamp_len (`int`, *optional*, defaults to -1): Clamp all relative distances larger than clamp_len. Setting this attribute to -1 means no clamping. same_length (`bool`, *optional*, defaults to `False`): Whether or not to use the same attention length for each token. summary_type (`str`, *optional*, defaults to "last"): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Has to be one of the following options: - `"last"`: Take the last token hidden state (like XLNet). - `"first"`: Take the first token hidden state (like BERT). - `"mean"`: Take the mean of all tokens hidden states. - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - `"attn"`: Not implemented now, use multi-head attention. summary_use_proj (`bool`, *optional*, defaults to `True`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Whether or not to add a projection after the vector extraction. summary_activation (`str`, *optional*): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation. summary_proj_to_labels (`boo`, *optional*, defaults to `True`): Used in the sequence classification and multiple choice models. Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes. summary_last_dropout (`float`, *optional*, defaults to 0.1): Used in the sequence classification and multiple choice models. The dropout ratio to be used after the projection and activation. start_n_top (`int`, *optional*, defaults to 5): Used in the SQuAD evaluation script. end_n_top (`int`, *optional*, defaults to 5): Used in the SQuAD evaluation script. use_mems_eval (`bool`, *optional*, defaults to `True`): Whether or not the model should make use of the recurrent memory mechanism in evaluation mode. use_mems_train (`bool`, *optional*, defaults to `False`): Whether or not the model should make use of the recurrent memory mechanism in train mode. <Tip> For pretraining, it is recommended to set `use_mems_train` to `True`. For fine-tuning, it is recommended to set `use_mems_train` to `False` as discussed [here](https://github.com/zihangdai/xlnet/issues/41#issuecomment-505102587). If `use_mems_train` is set to `True`, one has to make sure that the train batches are correctly pre-processed, *e.g.* `batch_1 = [[This line is], [This is the]]` and `batch_2 = [[ the first line], [ second line]]` and that all batches are of equal size. 
</Tip> Examples: ```python >>> from transformers import XLNetConfig, XLNetModel >>> # Initializing a XLNet configuration >>> configuration = XLNetConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = XLNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "xlnet" keys_to_ignore_at_inference = ["mems"] attribute_map = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ): """Constructs XLNetConfig.""" self.vocab_size = vocab_size self.d_model = d_model self.n_layer = n_layer self.n_head = n_head if d_model % n_head != 0: raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0") if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" ) self.d_head = d_model // n_head self.ff_activation = ff_activation self.d_inner = d_inner self.untie_r = untie_r self.attn_type = attn_type self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.dropout = dropout self.mem_len = mem_len self.reuse_len = reuse_len self.bi_data = bi_data self.clamp_len = clamp_len self.same_length = same_length self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_last_dropout = summary_last_dropout self.start_n_top = start_n_top self.end_n_top = end_n_top self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.eos_token_id = eos_token_id if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`" " instead.", FutureWarning, ) use_mems_eval = kwargs["use_cache"] self.use_mems_eval = use_mems_eval self.use_mems_train = use_mems_train super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @property def max_position_embeddings(self): logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.") return -1 @max_position_embeddings.setter def max_position_embeddings(self, value): # Message copied from Transformer-XL documentation raise NotImplementedError( f"The model {self.model_type} is one of the few models that has no sequence length limit." ) __all__ = ["XLNetConfig"]
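

if __name__ == "__main__":
    # Minimal sketch (values match the defaults of the signature above):
    # `d_head` is derived from `d_model // n_head` rather than stored
    # independently, and the legacy names in `attribute_map` ("hidden_size",
    # "num_attention_heads", "num_hidden_layers") resolve to the
    # XLNet-specific attributes.
    config = XLNetConfig(d_model=1024, n_head=16, n_layer=24)
    assert config.d_head == 64  # 1024 // 16
    assert config.hidden_size == config.d_model
    assert config.num_attention_heads == config.n_head
    assert config.num_hidden_layers == config.n_layer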
transformers/src/transformers/models/xlnet/configuration_xlnet.py/0
{ "file_path": "transformers/src/transformers/models/xlnet/configuration_xlnet.py", "repo_id": "transformers", "token_count": 4350 }
483
# Copyright 2023 The HuggingFace Team. All rights reserved. import datetime import platform import subprocess from typing import Optional, Union import numpy as np def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array: """ Helper function to read an audio file through ffmpeg. """ ar = f"{sampling_rate}" ac = "1" format_for_conversion = "f32le" ffmpeg_command = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process: output_stream = ffmpeg_process.communicate(bpayload) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error out_bytes = output_stream[0] audio = np.frombuffer(out_bytes, np.float32) if audio.shape[0] == 0: raise ValueError( "Soundfile is either not in the correct format or is malformed. Ensure that the soundfile has " "a valid audio file extension (e.g. wav, flac or mp3) and is not corrupted. If reading from a remote " "URL, ensure that the URL is the full address to **download** the audio file." ) return audio def ffmpeg_microphone( sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le", ffmpeg_input_device: Optional[str] = None, ffmpeg_additional_args: Optional[list[str]] = None, ): """ Helper function to read audio from a microphone using ffmpeg. The default input device will be used unless another input device is specified using the `ffmpeg_input_device` argument. Uses 'alsa' on Linux, 'avfoundation' on MacOS and 'dshow' on Windows. Arguments: sampling_rate (`int`): The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to avoid resampling later. chunk_length_s (`float` or `int`): The length of the maximum chunk of audio to be sent returned. format_for_conversion (`str`, defaults to `f32le`): The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`, `s16le` could also be used. ffmpeg_input_device (`str`, *optional*): The identifier of the input device to be used by ffmpeg (i.e. ffmpeg's '-i' argument). If unset, the default input device will be used. See `https://www.ffmpeg.org/ffmpeg-devices.html#Input-Devices` for how to specify and list input devices. ffmpeg_additional_args (`list[str]`, *optional*): Additional arguments to pass to ffmpeg, can include arguments like -nostdin for running as a background process. For example, to pass -nostdin to the ffmpeg process, pass in ["-nostdin"]. If passing in flags with multiple arguments, use the following convention (eg ["flag", "arg1", "arg2]). Returns: A generator yielding audio chunks of `chunk_length_s` seconds as `bytes` objects of length `int(round(sampling_rate * chunk_length_s)) * size_of_sample`. """ ar = f"{sampling_rate}" ac = "1" if format_for_conversion == "s16le": size_of_sample = 2 elif format_for_conversion == "f32le": size_of_sample = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`") system = platform.system() if system == "Linux": format_ = "alsa" input_ = ffmpeg_input_device or "default" elif system == "Darwin": format_ = "avfoundation" input_ = ffmpeg_input_device or ":default" elif system == "Windows": format_ = "dshow" input_ = ffmpeg_input_device or _get_microphone_name() ffmpeg_additional_args = [] if ffmpeg_additional_args is None else ffmpeg_additional_args ffmpeg_command = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] ffmpeg_command.extend(ffmpeg_additional_args) chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample iterator = _ffmpeg_stream(ffmpeg_command, chunk_len) for item in iterator: yield item def ffmpeg_microphone_live( sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[tuple[float, float], float]] = None, format_for_conversion: str = "f32le", ffmpeg_input_device: Optional[str] = None, ffmpeg_additional_args: Optional[list[str]] = None, ): """ Helper function to read audio from a microphone using ffmpeg. This will output `partial` overlapping chunks starting from `stream_chunk_s` (if it is defined) until `chunk_length_s` is reached. It will make use of striding to avoid errors on the "sides" of the various chunks. The default input device will be used unless another input device is specified using the `ffmpeg_input_device` argument. Uses 'alsa' on Linux, 'avfoundation' on MacOS and 'dshow' on Windows. Arguments: sampling_rate (`int`): The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to avoid resampling later. chunk_length_s (`float` or `int`): The length of the maximum chunk of audio to be sent returned. This includes the eventual striding. stream_chunk_s (`float` or `int`): The length of the minimal temporary audio to be returned. stride_length_s (`float` or `int` or `(float, float)`, *optional*): The length of the striding to be used. Stride is used to provide context to a model on the (left, right) of an audio sample but without using that part to actually make the prediction. Setting this does not change the length of the chunk. format_for_conversion (`str`, *optional*, defaults to `f32le`): The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`, `s16le` could also be used. ffmpeg_input_device (`str`, *optional*): The identifier of the input device to be used by ffmpeg (i.e. ffmpeg's '-i' argument). If unset, the default input device will be used. See `https://www.ffmpeg.org/ffmpeg-devices.html#Input-Devices` for how to specify and list input devices. ffmpeg_additional_args (`list[str]`, *optional*): Additional arguments to pass to ffmpeg, can include arguments like -nostdin for running as a background process. For example, to pass -nostdin to the ffmpeg process, pass in ["-nostdin"]. If passing in flags with multiple arguments, use the following convention (eg ["flag", "arg1", "arg2]). Return: A generator yielding dictionaries of the following form `{"sampling_rate": int, "raw": np.array(), "partial" bool}` With optionally a `"stride" (int, int)` key if `stride_length_s` is defined. `stride` and `raw` are all expressed in `samples`, and `partial` is a boolean saying if the current yield item is a whole chunk, or a partial temporary result to be later replaced by another larger chunk. 
""" if stream_chunk_s is not None: chunk_s = stream_chunk_s else: chunk_s = chunk_length_s microphone = ffmpeg_microphone( sampling_rate, chunk_s, format_for_conversion=format_for_conversion, ffmpeg_input_device=ffmpeg_input_device, ffmpeg_additional_args=[] if ffmpeg_additional_args is None else ffmpeg_additional_args, ) if format_for_conversion == "s16le": dtype = np.int16 size_of_sample = 2 elif format_for_conversion == "f32le": dtype = np.float32 size_of_sample = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`") if stride_length_s is None: stride_length_s = chunk_length_s / 6 chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample if isinstance(stride_length_s, (int, float)): stride_length_s = [stride_length_s, stride_length_s] stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample audio_time = datetime.datetime.now() delta = datetime.timedelta(seconds=chunk_s) for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True): # Put everything back in numpy scale item["raw"] = np.frombuffer(item["raw"], dtype=dtype) item["stride"] = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) item["sampling_rate"] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def chunk_bytes_iter(iterator, chunk_len: int, stride: tuple[int, int], stream: bool = False): """ Reads raw bytes from an iterator and does chunks of length `chunk_len`. Optionally adds `stride` to each chunks to get overlaps. `stream` is used to return partial results even if a full `chunk_len` is not yet available. """ acc = b"" stride_left, stride_right = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" ) _stride_left = 0 for raw in iterator: acc += raw if stream and len(acc) < chunk_len: stride = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(acc) >= chunk_len: # We are flushing the accumulator stride = (_stride_left, stride_right) item = {"raw": acc[:chunk_len], "stride": stride} if stream: item["partial"] = False yield item _stride_left = stride_left acc = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(acc) > stride_left: item = {"raw": acc, "stride": (_stride_left, 0)} if stream: item["partial"] = False yield item def _ffmpeg_stream(ffmpeg_command, buflen: int): """ Internal function to create the generator of data through ffmpeg """ bufsize = 2**24 # 16Mo try: with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process: while True: raw = ffmpeg_process.stdout.read(buflen) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error def _get_microphone_name(): """ Retrieve the microphone name in Windows . 
""" command = ["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", ""] try: ffmpeg_devices = subprocess.run(command, text=True, stderr=subprocess.PIPE, encoding="utf-8") microphone_lines = [line for line in ffmpeg_devices.stderr.splitlines() if "(audio)" in line] if microphone_lines: microphone_name = microphone_lines[0].split('"')[1] print(f"Using microphone: {microphone_name}") return f"audio={microphone_name}" except FileNotFoundError: print("ffmpeg was not found. Please install it or make sure it is in your system PATH.") return "default"
transformers/src/transformers/pipelines/audio_utils.py/0
{ "file_path": "transformers/src/transformers/pipelines/audio_utils.py", "repo_id": "transformers", "token_count": 4989 }
484
import inspect import types import warnings from collections.abc import Iterable from typing import TYPE_CHECKING, Optional, Union import numpy as np from ..data import SquadExample, SquadFeatures, squad_convert_examples_to_features from ..modelcard import ModelCard from ..tokenization_utils import PreTrainedTokenizer from ..utils import ( PaddingStrategy, add_end_docstrings, is_tf_available, is_tokenizers_available, is_torch_available, logging, ) from .base import ArgumentHandler, ChunkPipeline, build_pipeline_init_args logger = logging.get_logger(__name__) if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel if is_tokenizers_available(): import tokenizers if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES Dataset = None if is_torch_available(): import torch from torch.utils.data import Dataset from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES def decode_spans( start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray ) -> tuple: """ Take the output of any `ModelForQuestionAnswering` and will generate probabilities for each span to be the actual answer. In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or answer end position being before the starting position. The method supports output the k-best answer through the topk argument. Args: start (`np.ndarray`): Individual start probabilities for each token. end (`np.ndarray`): Individual end probabilities for each token. topk (`int`): Indicates how many possible answer span(s) to extract from the model output. max_answer_len (`int`): Maximum size of the answer to extract from the model's output. undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer """ # Ensure we have batch axis if start.ndim == 1: start = start[None] if end.ndim == 1: end = end[None] # Compute the score of each tuple(start, end) to be the real answer outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1)) # Remove candidate with end < start and end - start > max_answer_len candidates = np.tril(np.triu(outer), max_answer_len - 1) # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA) scores_flat = candidates.flatten() if topk == 1: idx_sort = [np.argmax(scores_flat)] elif len(scores_flat) < topk: idx_sort = np.argsort(-scores_flat) else: idx = np.argpartition(-scores_flat, topk)[0:topk] idx_sort = idx[np.argsort(-scores_flat[idx])] starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:] desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero()) starts = starts[desired_spans] ends = ends[desired_spans] scores = candidates[0, starts, ends] return starts, ends, scores def select_starts_ends( start, end, p_mask, attention_mask, min_null_score=1000000, top_k=1, handle_impossible_answer=False, max_answer_len=15, ): """ Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses `decode_spans()` to generate probabilities for each span to be the actual answer. Args: start (`np.ndarray`): Individual start logits for each token. end (`np.ndarray`): Individual end logits for each token. 
p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer attention_mask (`np.ndarray`): The attention mask generated by the tokenizer min_null_score(`float`): The minimum null (empty) answer score seen so far. topk (`int`): Indicates how many possible answer span(s) to extract from the model output. handle_impossible_answer(`bool`): Whether to allow null (empty) answers max_answer_len (`int`): Maximum size of the answer to extract from the model's output. """ # Ensure padded tokens & question tokens cannot belong to the set of candidate answers. undesired_tokens = np.abs(np.array(p_mask) - 1) if attention_mask is not None: undesired_tokens = undesired_tokens & attention_mask # Generate mask undesired_tokens_mask = undesired_tokens == 0.0 # Make sure non-context indexes in the tensor cannot contribute to the softmax start = np.where(undesired_tokens_mask, -10000.0, start) end = np.where(undesired_tokens_mask, -10000.0, end) # Normalize logits and spans to retrieve the answer start = np.exp(start - start.max(axis=-1, keepdims=True)) start = start / start.sum() end = np.exp(end - end.max(axis=-1, keepdims=True)) end = end / end.sum() if handle_impossible_answer: min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item()) # Mask CLS start[0, 0] = end[0, 0] = 0.0 starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens) return starts, ends, scores, min_null_score class QuestionAnsweringArgumentHandler(ArgumentHandler): """ QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to internal [`SquadExample`]. QuestionAnsweringArgumentHandler manages all the possible to create a [`SquadExample`] from the command-line supplied arguments. """ _load_processor = False _load_image_processor = False _load_feature_extractor = False _load_tokenizer = True def normalize(self, item): if isinstance(item, SquadExample): return item elif isinstance(item, dict): for k in ["question", "context"]: if k not in item: raise KeyError("You need to provide a dictionary with keys {question:..., context:...}") elif item[k] is None: raise ValueError(f"`{k}` cannot be None") elif isinstance(item[k], str) and len(item[k]) == 0: raise ValueError(f"`{k}` cannot be empty") return QuestionAnsweringPipeline.create_sample(**item) raise ValueError(f"{item} argument needs to be of type (SquadExample, dict)") def __call__(self, *args, **kwargs): # Detect where the actual inputs are if args is not None and len(args) > 0: if len(args) == 1: inputs = args[0] elif len(args) == 2 and {type(el) for el in args} == {str}: inputs = [{"question": args[0], "context": args[1]}] else: inputs = list(args) # Generic compatibility with sklearn and Keras # Batched data elif "X" in kwargs: warnings.warn( "Passing the `X` argument to the pipeline is deprecated and will be removed in v5. Inputs should be passed using the `question` and `context` keyword arguments instead.", FutureWarning, ) inputs = kwargs["X"] elif "data" in kwargs: warnings.warn( "Passing the `data` argument to the pipeline is deprecated and will be removed in v5. 
Inputs should be passed using the `question` and `context` keyword arguments instead.", FutureWarning, ) inputs = kwargs["data"] elif "question" in kwargs and "context" in kwargs: if isinstance(kwargs["question"], list) and isinstance(kwargs["context"], str): inputs = [{"question": Q, "context": kwargs["context"]} for Q in kwargs["question"]] elif isinstance(kwargs["question"], list) and isinstance(kwargs["context"], list): if len(kwargs["question"]) != len(kwargs["context"]): raise ValueError("Questions and contexts don't have the same lengths") inputs = [{"question": Q, "context": C} for Q, C in zip(kwargs["question"], kwargs["context"])] elif isinstance(kwargs["question"], str) and isinstance(kwargs["context"], str): inputs = [{"question": kwargs["question"], "context": kwargs["context"]}] else: raise ValueError("Arguments can't be understood") else: raise ValueError(f"Unknown arguments {kwargs}") # When user is sending a generator we need to trust it's a valid example generator_types = (types.GeneratorType, Dataset) if Dataset is not None else (types.GeneratorType,) if isinstance(inputs, generator_types): return inputs # Normalize inputs if isinstance(inputs, dict): inputs = [inputs] elif isinstance(inputs, Iterable): # Copy to avoid overriding arguments inputs = list(inputs) else: raise ValueError(f"Invalid arguments {kwargs}") for i, item in enumerate(inputs): inputs[i] = self.normalize(item) return inputs @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) class QuestionAnsweringPipeline(ChunkPipeline): """ Question Answering pipeline using any `ModelForQuestionAnswering`. See the [question answering examples](../task_summary#question-answering) for more information. Example: ```python >>> from transformers import pipeline >>> oracle = pipeline(model="deepset/roberta-base-squad2") >>> oracle(question="Where do I live?", context="My name is Wolfgang and I live in Berlin") {'score': 0.9191, 'start': 34, 'end': 40, 'answer': 'Berlin'} ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"question-answering"`. The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=question-answering). """ default_input_names = "question,context" handle_impossible_answer = False def __init__( self, model: Union["PreTrainedModel", "TFPreTrainedModel"], tokenizer: PreTrainedTokenizer, modelcard: Optional[ModelCard] = None, framework: Optional[str] = None, task: str = "", **kwargs, ): super().__init__( model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs, ) self._args_parser = QuestionAnsweringArgumentHandler() self.check_model_type( TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) @staticmethod def create_sample( question: Union[str, list[str]], context: Union[str, list[str]] ) -> Union[SquadExample, list[SquadExample]]: """ QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulate all the logic for converting question(s) and context(s) to [`SquadExample`]. We currently support extractive question answering. Arguments: question (`str` or `list[str]`): The question(s) asked. 
context (`str` or `list[str]`): The context(s) in which we will look for the answer. Returns: One or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context. """ if isinstance(question, list): return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)] else: return SquadExample(None, question, context, None, None, None) def _sanitize_parameters( self, padding=None, topk=None, top_k=None, doc_stride=None, max_answer_len=None, max_seq_len=None, max_question_len=None, handle_impossible_answer=None, align_to_words=None, **kwargs, ): # Set defaults values preprocess_params = {} if padding is not None: preprocess_params["padding"] = padding if doc_stride is not None: preprocess_params["doc_stride"] = doc_stride if max_question_len is not None: preprocess_params["max_question_len"] = max_question_len if max_seq_len is not None: preprocess_params["max_seq_len"] = max_seq_len postprocess_params = {} if topk is not None and top_k is None: warnings.warn("topk parameter is deprecated, use top_k instead", UserWarning) top_k = topk if top_k is not None: if top_k < 1: raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") postprocess_params["top_k"] = top_k if max_answer_len is not None: if max_answer_len < 1: raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") postprocess_params["max_answer_len"] = max_answer_len if handle_impossible_answer is not None: postprocess_params["handle_impossible_answer"] = handle_impossible_answer if align_to_words is not None: postprocess_params["align_to_words"] = align_to_words return preprocess_params, {}, postprocess_params def __call__(self, *args, **kwargs): """ Answer the question(s) given as inputs by using the context(s). Args: question (`str` or `list[str]`): One or several question(s) (must be used in conjunction with the `context` argument). context (`str` or `list[str]`): One or several context(s) associated with the question(s) (must be used in conjunction with the `question` argument). top_k (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context. doc_stride (`int`, *optional*, defaults to 128): If the context is too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. max_answer_len (`int`, *optional*, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered). max_seq_len (`int`, *optional*, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. max_question_len (`int`, *optional*, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. align_to_words (`bool`, *optional*, defaults to `True`): Attempts to align the answer to real words. Improves quality on space separated languages. Might hurt on non-space-separated languages (like Japanese or Chinese) Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **score** (`float`) -- The probability associated to the answer. 
- **start** (`int`) -- The character start index of the answer (in the tokenized version of the input). - **end** (`int`) -- The character end index of the answer (in the tokenized version of the input). - **answer** (`str`) -- The answer to the question. """ # Convert inputs to features if args: warnings.warn( "Passing a list of SQuAD examples to the pipeline is deprecated and will be removed in v5. Inputs should be passed using the `question` and `context` keyword arguments instead.", FutureWarning, ) examples = self._args_parser(*args, **kwargs) if isinstance(examples, (list, tuple)) and len(examples) == 1: return super().__call__(examples[0], **kwargs) return super().__call__(examples, **kwargs) def preprocess(self, example, padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None): # XXX: This is special, args_parser will not handle anything generator or dataset like # For those we expect user to send a simple valid example either directly as a SquadExample or simple dict. # So we still need a little sanitation here. if isinstance(example, dict): example = SquadExample(None, example["question"], example["context"], None, None, None) if max_seq_len is None: max_seq_len = min(self.tokenizer.model_max_length, 384) if doc_stride is None: doc_stride = min(max_seq_len // 2, 128) if doc_stride > max_seq_len: raise ValueError(f"`doc_stride` ({doc_stride}) is larger than `max_seq_len` ({max_seq_len})") if not self.tokenizer.is_fast: features = squad_convert_examples_to_features( examples=[example], tokenizer=self.tokenizer, max_seq_length=max_seq_len, doc_stride=doc_stride, max_query_length=max_question_len, padding_strategy=PaddingStrategy.MAX_LENGTH, is_training=False, tqdm_enabled=False, ) else: # Define the side we want to truncate / pad and the text/pair sorting question_first = self.tokenizer.padding_side == "right" encoded_inputs = self.tokenizer( text=example.question_text if question_first else example.context_text, text_pair=example.context_text if question_first else example.question_text, padding=padding, truncation="only_second" if question_first else "only_first", max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, ) # When the input is too long, it's converted in a batch of inputs with overflowing tokens # and a stride of overlap between the inputs. If a batch of inputs is given, a special output # "overflow_to_sample_mapping" indicate which member of the encoded batch belong to which original batch sample. # Here we tokenize examples one-by-one so we don't need to use "overflow_to_sample_mapping". # "num_span" is the number of output samples generated from the overflowing tokens. 
num_spans = len(encoded_inputs["input_ids"]) # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) p_mask = [ [tok != 1 if question_first else 0 for tok in encoded_inputs.sequence_ids(span_id)] for span_id in range(num_spans) ] features = [] for span_idx in range(num_spans): input_ids_span_idx = encoded_inputs["input_ids"][span_idx] attention_mask_span_idx = ( encoded_inputs["attention_mask"][span_idx] if "attention_mask" in encoded_inputs else None ) token_type_ids_span_idx = ( encoded_inputs["token_type_ids"][span_idx] if "token_type_ids" in encoded_inputs else None ) # keep the cls_token unmasked (some models use it to indicate unanswerable questions) if self.tokenizer.cls_token_id is not None: cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] for cls_index in cls_indices: p_mask[span_idx][cls_index] = 0 submask = p_mask[span_idx] features.append( SquadFeatures( input_ids=input_ids_span_idx, attention_mask=attention_mask_span_idx, token_type_ids=token_type_ids_span_idx, p_mask=submask, encoding=encoded_inputs[span_idx], # We don't use the rest of the values - and actually # for Fast tokenizer we could totally avoid using SquadFeatures and SquadExample cls_index=None, token_to_orig_map={}, example_index=0, unique_id=0, paragraph_len=0, token_is_max_context=0, tokens=[], start_position=0, end_position=0, is_impossible=False, qas_id=None, ) ) for i, feature in enumerate(features): fw_args = {} others = {} model_input_names = self.tokenizer.model_input_names + ["p_mask", "token_type_ids"] for k, v in feature.__dict__.items(): if k in model_input_names: if self.framework == "tf": tensor = tf.constant(v) if tensor.dtype == tf.int64: tensor = tf.cast(tensor, tf.int32) fw_args[k] = tf.expand_dims(tensor, 0) elif self.framework == "pt": tensor = torch.tensor(v) if tensor.dtype == torch.int32: tensor = tensor.long() fw_args[k] = tensor.unsqueeze(0) else: others[k] = v is_last = i == len(features) - 1 yield {"example": example, "is_last": is_last, **fw_args, **others} def _forward(self, inputs): example = inputs["example"] model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names} # `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported model_forward = self.model.forward if self.framework == "pt" else self.model.call if "use_cache" in inspect.signature(model_forward).parameters: model_inputs["use_cache"] = False output = self.model(**model_inputs) if isinstance(output, dict): return {"start": output["start_logits"], "end": output["end_logits"], "example": example, **inputs} else: start, end = output[:2] return {"start": start, "end": end, "example": example, **inputs} def postprocess( self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, align_to_words=True, ): min_null_score = 1000000 # large and positive answers = [] for output in model_outputs: if self.framework == "pt" and output["start"].dtype == torch.bfloat16: start_ = output["start"].to(torch.float32) end_ = output["end"].to(torch.float32) else: start_ = output["start"] end_ = output["end"] example = output["example"] p_mask = output["p_mask"] attention_mask = ( output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None ) pre_topk = ( top_k * 2 + 10 if align_to_words else top_k ) # Some candidates may be deleted if we align to words starts, ends, 
scores, min_null_score = select_starts_ends( start_, end_, p_mask, attention_mask, min_null_score, pre_topk, handle_impossible_answer, max_answer_len, ) if not self.tokenizer.is_fast: char_to_word = np.array(example.char_to_word_offset) # Convert the answer (tokens) back to the original text # Score: score from the model # Start: Index of the first character of the answer in the context string # End: Index of the character following the last character of the answer in the context string # Answer: Plain text of the answer for s, e, score in zip(starts, ends, scores): token_to_orig_map = output["token_to_orig_map"] answers.append( { "score": score.item(), "start": np.where(char_to_word == token_to_orig_map[s])[0][0].item(), "end": np.where(char_to_word == token_to_orig_map[e])[0][-1].item(), "answer": " ".join(example.doc_tokens[token_to_orig_map[s] : token_to_orig_map[e] + 1]), } ) else: # Convert the answer (tokens) back to the original text # Score: score from the model # Start: Index of the first character of the answer in the context string # End: Index of the character following the last character of the answer in the context string # Answer: Plain text of the answer question_first = self.tokenizer.padding_side == "right" enc = output["encoding"] # Encoding was *not* padded, input_ids *might*. # It doesn't make a difference unless we're padding on # the left hand side, since now we have different offsets # everywhere. if self.tokenizer.padding_side == "left": offset = (output["input_ids"] == self.tokenizer.pad_token_id).numpy().sum() else: offset = 0 # Sometimes the max probability token is in the middle of a word so: # - we start by finding the right word containing the token with `token_to_word` # - then we convert this word in a character span with `word_to_chars` sequence_index = 1 if question_first else 0 for s, e, score in zip(starts, ends, scores): s = s - offset e = e - offset start_index, end_index = self.get_indices(enc, s, e, sequence_index, align_to_words) target_answer = example.context_text[start_index:end_index] answer = self.get_answer(answers, target_answer) if answer: answer["score"] += score.item() else: answers.append( { "score": score.item(), "start": start_index, "end": end_index, "answer": example.context_text[start_index:end_index], } ) if handle_impossible_answer: answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""}) answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k] if len(answers) == 1: return answers[0] return answers def get_answer(self, answers: list[dict], target: str) -> Optional[dict]: for answer in answers: if answer["answer"].lower() == target.lower(): return answer return None def get_indices( self, enc: "tokenizers.Encoding", s: int, e: int, sequence_index: int, align_to_words: bool ) -> tuple[int, int]: if align_to_words: try: start_word = enc.token_to_word(s) end_word = enc.token_to_word(e) start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0] end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1] except Exception: # Some tokenizers don't really handle words. Keep to offsets then. start_index = enc.offsets[s][0] end_index = enc.offsets[e][1] else: start_index = enc.offsets[s][0] end_index = enc.offsets[e][1] return start_index, end_index def span_to_answer(self, text: str, start: int, end: int) -> dict[str, Union[str, int]]: """ When decoding from token probabilities, this method maps token indexes to actual word in the initial context. 
Args: text (`str`): The actual context to extract the answer from. start (`int`): The answer starting token index. end (`int`): The answer end token index. Returns: Dictionary like `{'answer': str, 'start': int, 'end': int}` """ words = [] token_idx = char_start_idx = char_end_idx = chars_idx = 0 for i, word in enumerate(text.split(" ")): token = self.tokenizer.tokenize(word) # Append words if they are in the span if start <= token_idx <= end: if token_idx == start: char_start_idx = chars_idx if token_idx == end: char_end_idx = chars_idx + len(word) words += [word] # Stop if we went over the end of the answer if token_idx > end: break # Append the subtokenization length to the running index token_idx += len(token) chars_idx += len(word) + 1 # Join text with spaces return { "answer": " ".join(words), "start": max(0, char_start_idx), "end": min(len(text), char_end_idx), }
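# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It mirrors the
# class docstring above: "deepset/roberta-base-squad2" is just one question
# answering checkpoint, and downloading it requires network access.
if __name__ == "__main__":
    from transformers import pipeline

    qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
    context = "My name is Wolfgang and I live in Berlin."

    # A single question returns one dict with score / start / end / answer.
    print(qa(question="Where do I live?", context=context))

    # A list of questions against the same context returns one result per question;
    # top_k asks postprocess() for the two best spans for each of them.
    print(qa(question=["Where do I live?", "What is my name?"], context=context, top_k=2))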
transformers/src/transformers/pipelines/question_answering.py/0
{ "file_path": "transformers/src/transformers/pipelines/question_answering.py", "repo_id": "transformers", "token_count": 13992 }
485
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Any from ..integrations import prepare_for_hqq_linear from ..utils import is_accelerate_available, is_hqq_available, is_torch_available, logging from .base import HfQuantizer from .quantizers_utils import get_module_from_name if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel if is_accelerate_available(): from accelerate.hooks import remove_hook_from_module if is_torch_available(): import torch logger = logging.get_logger(__name__) # Finds the parent of a node module named "name" def find_parent(model, name): module_tree = name.split(".")[:-1] parent = model for m in module_tree: parent = parent._modules[m] return parent class HqqHfQuantizer(HfQuantizer): """ HQQ quantizer base HF class. nn.Linear modules are first tagged with quant_config in _process_model_before_weight_loading(). The actual quantization and offloading to the GPU is done in check_quantized_param(). """ use_keep_in_fp32_modules = False requires_parameters_quantization = True requires_calibration = False required_packages = ["hqq"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.dtype = None self.using_multi_gpu = False def validate_environment(self, *args, **kwargs): if not (is_hqq_available()): raise ImportError( "A valid HQQ version (>=0.2.1) is not available. Please follow the instructions to install it: `https://github.com/mobiusml/hqq/`." ) if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) if self.dtype is None: if "dtype" in kwargs: self.dtype = kwargs["dtype"] else: self.dtype = torch.float32 logger.info("Setting dtype to torch.float32 as the default value since it was not specified.") device_map = kwargs.get("device_map") if isinstance(device_map, dict): if "cpu" in device_map.values() or "disk" in device_map.values(): raise ValueError( "You are attempting to use an HQQ model with a device_map that contains a CPU or disk device." " This is not supported. Please remove the CPU or disk device from the device_map." 
) else: self.using_multi_gpu = len(set(device_map.values())) > 1 def update_missing_keys( self, model: "PreTrainedModel", missing_keys: list[str], prefix: str, **kwargs ) -> list[str]: if self.pre_quantized: return [key for key in missing_keys if ("weight" not in key)] else: return missing_keys # Adds missing keys for HQQLinear modules that are loaded but the model with initialized with torch.nn.Linear def update_expected_keys( self, model: "PreTrainedModel", expected_keys: list[str], loaded_keys: list[str] ) -> list[str]: if not self.pre_quantized: return expected_keys # Collects all quantizable (linear) layers def _find_hqq_quantizable_layers(model, layers): for name, module in model.named_children(): if isinstance(module, (torch.nn.Linear)): layers.add(module.name) _find_hqq_quantizable_layers(module, layers) new_keys = set(expected_keys) if is_hqq_available(): from hqq.core.quantize import HQQLinear # Name modules for name, module in model.named_modules(): module.name = name # valid modules are Linear layers that have HQQLinear state_dict. We ignore skip_modules and any layers with Linear state_dict() params _valid_modules = set() _find_hqq_quantizable_layers(model, _valid_modules) # Remove skipped modules _skipped_modules = set() for _module in _valid_modules: for _skip_module in model.config.quantization_config["skip_modules"]: if _skip_module in _module: _skipped_modules.add(_module) _valid_modules -= _skipped_modules # Append new expected layers based on _ref_keys _ref_keys = HQQLinear( linear_layer=None, quant_config=None, compute_dtype=torch.float16, device="cpu", del_orig=False, ).state_dict_keys() - {"bias"} # Clean-up _rm_keys = set() for key in new_keys: if any(_module in key for _module in _valid_modules): _rm_keys.add(key) new_keys -= _rm_keys # At this point, new_keys contains all the keys of the layers that are NOT HQQLinear or torch.nn.Linear # Re-populate Linear/HQQLinear for _module in _valid_modules: if _module + ".weight" in loaded_keys: new_keys.add(_module + ".weight") else: new_keys.update({_module + "." + _ref_key for _ref_key in _ref_keys}) if _module + ".bias" in loaded_keys: new_keys.add(_module + ".bias") return list(new_keys) def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: dict[str, Any], **kwargs, ) -> bool: if is_hqq_available(): from hqq.core.quantize import HQQLinear module, tensor_name = get_module_from_name(model, param_name) if self.pre_quantized: return (isinstance(module, (torch.nn.Linear, HQQLinear))) and tensor_name != "weight" else: return ( isinstance(module, torch.nn.Linear) and tensor_name == "weight" # bias doesn't need to be quantized, we use this as a workaround to avoid loading bias into HQQLinear assuming it was loaded # in the state_dict directly with the weight because hqq overwrote load_state_dict for this layer or (isinstance(module, HQQLinear) and tensor_name == "bias") ) def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: dict[str, Any], unexpected_keys: list[str], ): """ Each nn.Linear layer is processed here. We first check if the corresponding module state_dict contains already HQQ quantized parameters. If not, we create a temp linear layer with the module state_dict params and use it for quantization """ if is_hqq_available(): from hqq.core.quantize import HQQLinear # TODO: This is a compatibility hack. 
HQQ-quantized linear layers do not have a `weight` attribute, # but some models attempt to access `weight.dtype` during the forward pass. To prevent runtime errors, # we patch HQQLinear with a dummy `weight` property that returns an empty tensor with the correct dtype and device. @property def weight(_self: HQQLinear): return torch.empty(0, dtype=_self.compute_dtype, device=_self.device) HQQLinear.weight = weight module, tensor_name = get_module_from_name(model, param_name) layer_name = ".".join(param_name.split(".")[:-1]) parent_module = find_parent(model, layer_name) node = layer_name.split(".")[-1] if tensor_name == "bias": # this should already be set return # set module state_dict module_state_dict = {} for k, v in state_dict.items(): if layer_name + "." in k: module_state_dict[k.split(".")[-1]] = v if unexpected_keys is not None and k in unexpected_keys: unexpected_keys.remove(k) if self.pre_quantized: if isinstance(module, HQQLinear): return else: hqq_layer = HQQLinear( linear_layer=None, quant_config=None, compute_dtype=self.dtype, device=target_device, del_orig=False, ) hqq_layer.load_state_dict(module_state_dict) if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor): hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias) if self.using_multi_gpu: hqq_layer = self._patch_layer_for_multigpu(hqq_layer) setattr(parent_module, node, hqq_layer) # cleanup del module.__dict__, module torch.cuda.empty_cache() return # Step 1: populate module with weight/bias from module state dict for key, tensor in module_state_dict.items(): setattr(module, key, torch.nn.Parameter(tensor)) # Step 2: Replace module with either HQQLinear or move it to device. We do this via setattr on the parent as doing on it on the module # directly doesn't work. quant_config = model.config.quantization_config["quant_config"] skip_modules = model.config.quantization_config["skip_modules"] module_tag = ".".join(module.name.split(".")[-2:]) module_quant_config = None if "weight_quant_params" in quant_config: module_quant_config = quant_config elif module_tag in quant_config: module_quant_config = quant_config[module_tag] for skip_module in skip_modules: if skip_module in module.name: module_quant_config = None break if module_quant_config is not None: hqq_layer = HQQLinear( module, quant_config=module_quant_config, compute_dtype=self.dtype, device=target_device, del_orig=True, ) if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor): hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias) if self.using_multi_gpu: hqq_layer = self._patch_layer_for_multigpu(hqq_layer) setattr(parent_module, node, hqq_layer) else: module = module.to(dtype=self.dtype, device=target_device) setattr(parent_module, node, module) torch.cuda.empty_cache() # Remove accelerate hook and uses a simpler forward pass. Otherwise, this breaks with multi-gpu def _patch_layer_for_multigpu(self, hqq_layer): hqq_layer = remove_hook_from_module(hqq_layer) def forward_with_device(self, x): out = torch.matmul(x.to(self.device), self.dequantize().t()) if self.bias is not None: out += self.bias return out hqq_layer.forward = lambda x: forward_with_device(hqq_layer, x) return hqq_layer def _process_model_before_weight_loading( self, model: "PreTrainedModel", **kwargs, ): # Add the corresponding quant_config to each valid module. This allows us to do the actual nn.Linear -> HQQLinear conversion in create_quantized_param(). 
# prepare_for_hqq_linear() also sets the right quantization config inside the model (model.config.quantization_config) and the layers (hqq_layer.quant_config) model = prepare_for_hqq_linear(model, quantization_config=self.quantization_config) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): model.is_hqq_quantized = True model.is_hqq_serializable = self.is_serializable() return model def is_serializable(self, safe_serialization=None): return True @property def is_trainable(self) -> bool: return True
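# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# `hqq` package (>= 0.2.1) is installed and a CUDA device is available; the
# "facebook/opt-125m" checkpoint and the 4-bit / group_size=64 settings are
# arbitrary example values.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, HqqConfig

    quant_config = HqqConfig(nbits=4, group_size=64)

    # Passing `quantization_config` selects HqqHfQuantizer automatically:
    # nn.Linear modules are tagged in _process_model_before_weight_loading()
    # and converted to HQQLinear in create_quantized_param() while the
    # checkpoint weights are being loaded.
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m",
        dtype=torch.float16,
        device_map="cuda",
        quantization_config=quant_config,
    )
    print(model)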
transformers/src/transformers/quantizers/quantizer_hqq.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_hqq.py", "repo_id": "transformers", "token_count": 5741 }
486
# Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for python tokenizers. For fast tokenizers (provided by HuggingFace's tokenizers library) see tokenization_utils_fast.py """ import bisect import itertools import re import unicodedata from collections import OrderedDict from typing import Any, Optional, Union, overload from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) from .utils import PaddingStrategy, TensorType, add_end_docstrings, logging logger = logging.get_logger(__name__) # Slow tokenizers are saved in a vocabulary plus three separated files SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" ADDED_TOKENS_FILE = "added_tokens.json" TOKENIZER_CONFIG_FILE = "tokenizer_config.json" class Trie: """ Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass Loose reference https://en.wikipedia.org/wiki/Trie """ def __init__(self, *args): self.data = {} self._tokens = set() self._termination_char = "" self.update(*args) def update(self, *args): """ Updates the Trie with new tokens provided as arguments. Args: *args: Variable number of words to be added to the Trie. """ for token in tuple(*args): self.add(token) def add(self, word: str): """ Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation. The special key `""` in `self._termination_char` is used to represent termination. This function is idempotent, adding twice the same word will leave the trie unchanged Example: ```python >>> trie = Trie() >>> trie.add("Hello 友達") >>> trie.data {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} >>> trie.add("Hello") >>> trie.data {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} ``` """ if not word: # Prevent empty string return self._tokens.add(word) ref = self.data for char in word: ref[char] = ref.setdefault(char, {}) ref = ref[char] ref[self._termination_char] = 1 def split(self, text: str) -> list[str]: """ Will look for the words added to the trie within `text`. Output is the original string split along the boundaries of the words found. This trie will match the longest possible word first ! Example: ```python >>> trie = Trie() >>> trie.split("[CLS] This is a extra_id_100") ["[CLS] This is a extra_id_100"] >>> trie.add("[CLS]") >>> trie.add("extra_id_1") >>> trie.add("extra_id_100") >>> trie.split("[CLS] This is a extra_id_100") ["[CLS]", " This is a ", "extra_id_100"] ``` """ # indexes are counted left of the chars index. # "hello", index 0, is left of h, index 1 is between h and e. # index 5 is right of the "o". 
# States are going to capture every possible start (indexes as above) # as keys, and have as values, a pointer to the position in the trie # where we're at. This is a partial match for now. # This enables to keep track of multiple matches while we're iterating # the string # If the trie contains, "blowing", and "lower" and we encounter the # string "blower", we need to split into ["b", "lower"]. # This is where we need to keep track of multiple possible starts. states = OrderedDict() # This will contain every indices where we need # to cut. # We force to cut at offset 0 and len(text) (added later) offsets = [0] # This is used by the lookahead which needs to skip over # some text where the full match exceeded the place in the initial # for loop skip = 0 # Main loop, Giving this algorithm O(n) complexity for current, current_char in enumerate(text): if skip and current < skip: # Prevents the lookahead for matching twice # like extra_id_100 and id_100 continue # This will track every state # that stop matching, we need to stop tracking them. # If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then # fail on "b", we need to remove 0 from the valid states. to_remove = set() # Whenever we found a match, we need to drop everything # this is a greedy algorithm, it will match on the first found token reset = False # In this case, we already have partial matches (But unfinished) for start, trie_pointer in states.items(): if "" in trie_pointer: # This is a final match, we need to reset and # store the results in `offsets`. # Lookahead to match longest first # Important in case of extra_id_1 vs extra_id_100 # Here we are also actively looking for other earlier partial # matches # "[CLS]", "L", we need to match CLS even if L is special for lookstart, looktrie_pointer in states.items(): if lookstart > start: # This partial match is later, we can stop looking break elif lookstart < start: # This partial match is earlier, the trie pointer # was already updated, so index is + 1 lookahead_index = current + 1 end = current + 1 else: # Here lookstart == start and # looktrie_pointer == trie_pointer # It wasn't updated yet so indices are current ones lookahead_index = current end = current next_char = text[lookahead_index] if lookahead_index < len(text) else None if "" in looktrie_pointer: start = lookstart end = lookahead_index skip = lookahead_index while next_char in looktrie_pointer: looktrie_pointer = looktrie_pointer[next_char] lookahead_index += 1 if "" in looktrie_pointer: start = lookstart end = lookahead_index skip = lookahead_index if lookahead_index == len(text): # End of string break next_char = text[lookahead_index] # End lookahead # Storing and resetting offsets.append(start) offsets.append(end) reset = True break elif current_char in trie_pointer: # The current character being looked at has a match within the trie # update the pointer (it will be stored back into states later). trie_pointer = trie_pointer[current_char] # Storing back the new pointer into the states. # Partial matches got longer by one. states[start] = trie_pointer else: # The new character has not match in the trie, we need # to stop keeping track of this partial match. # We can't do it directly within the loop because of how # python iteration works to_remove.add(start) # Either clearing the full start (we found a real match) # Or clearing only the partial matches that didn't work. 
if reset: states = {} else: for start in to_remove: del states[start] # If this character is a starting character within the trie # start keeping track of this partial match. if current >= skip and current_char in self.data: states[current] = self.data[current_char] # We have a cut at the end with states. for start, trie_pointer in states.items(): if "" in trie_pointer: # This is a final match, we need to reset and # store the results in `offsets`. end = len(text) offsets.append(start) offsets.append(end) # Longest cut is always the one with lower start so the first # item so we need to break. break return self.cut_text(text, offsets) def cut_text(self, text, offsets): # We have all the offsets now, we just need to do the actual splitting. # We need to eventually add the first part of the string and the eventual # last part. offsets.append(len(text)) tokens = [] start = 0 for end in offsets: if start > end: logger.error( "There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it" " anyway." ) continue elif start == end: # This might happen if there's a match at index 0 # we're also preventing zero-width cuts in case of two # consecutive matches continue tokens.append(text[start:end]) start = end return tokens class ExtensionsTrie(Trie): def __init__(self, *args): super().__init__(*args) def extensions(self, prefix: str): """ Generates all extensions of a given prefix token in the Trie. Example: ```python >>> trie = Trie() >>> trie.add("apple") >>> trie.add("app") >>> trie.add("application") >>> trie.extensions("app") ['app', 'apple', 'application'] ``` """ prefix_node = self._get_node(prefix) ret = self._collect_tokens(prefix_node) return [prefix + token for token in ret] def _get_node(self, token: str) -> dict: """ Retrieves the node corresponding to the given token in the Trie. Args: token (str): The token for which the corresponding node needs to be retrieved. Returns: dict: The node in the Trie corresponding to the given token. """ node = self.data for char in token: if char not in node: break node = node[char] return node def _collect_tokens(self, node: dict) -> list: """ Generates all tokens in the Trie starting from a given node. Args: node (dict): The node in the Trie from which tokens need to be generated. Returns: list: List of tokens generated from the given node. """ tokens = [self._termination_char] if self._termination_char in node else [] for token, subtrie_head in node.items(): if token != self._termination_char: subtokens = self._collect_tokens(subtrie_head) tokens.extend([token + subtoken for subtoken in subtokens]) return tokens def _is_whitespace(char): """Checks whether `char` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `char` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `char` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. 
# Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def _is_end_of_word(text): """Checks whether the last character in text is one of a punctuation, control or whitespace character.""" last_char = text[-1] return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char)) def _is_start_of_word(text): """Checks whether the first character in text is one of a punctuation, control or whitespace character.""" first_char = text[0] return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char)) def _insert_one_token_to_ordered_list(token_list: list[str], new_token: str): """ Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted. """ insertion_idx = bisect.bisect_left(token_list, new_token) # Checks if new_token is already in the ordered token_list if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token: # new_token is in token_list, don't add return else: token_list.insert(insertion_idx, new_token) @add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class PreTrainedTokenizer(PreTrainedTokenizerBase): """ Base class for all slow tokenizers. Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`]. Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary. This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). """ def __init__(self, **kwargs): # 1. Init the parent class self.tokens_trie = Trie() # 2. init `_added_tokens_decoder` if child class did not if not hasattr(self, "_added_tokens_decoder"): self._added_tokens_decoder: dict[int, AddedToken] = {} # 3. if a `added_tokens_decoder` is passed, we are loading from a saved tokenizer, we overwrite self._added_tokens_decoder.update(kwargs.pop("added_tokens_decoder", {})) self._added_tokens_encoder: dict[str, int] = {k.content: v for v, k in self._added_tokens_decoder.items()} # 4 init the parent class super().__init__(**kwargs) # 4. If some of the special tokens are not part of the vocab, we add them, at the end. # the order of addition is the same as self.SPECIAL_TOKENS_ATTRIBUTES following `tokenizers` self._add_tokens( [token for token in self.all_special_tokens_extended if token not in self._added_tokens_encoder], special_tokens=True, ) self._decode_use_source_tokenizer = False @property def is_fast(self) -> bool: return False @property def vocab_size(self) -> int: """ `int`: Size of the base vocabulary (without the added tokens). """ raise NotImplementedError @property def added_tokens_encoder(self) -> dict[str, int]: """ Returns the sorted mapping from string to index. The added tokens encoder is cached for performance optimisation in `self._added_tokens_encoder` for the slow tokenizers. 
""" return {k.content: v for v, k in sorted(self._added_tokens_decoder.items(), key=lambda item: item[0])} @property def added_tokens_decoder(self) -> dict[int, AddedToken]: """ Returns the added tokens in the vocabulary as a dictionary of index to AddedToken. Returns: `dict[str, int]`: The added tokens. """ return dict(sorted(self._added_tokens_decoder.items(), key=lambda item: item[0])) @added_tokens_decoder.setter def added_tokens_decoder(self, value: dict[int, Union[AddedToken, str]]) -> dict[int, AddedToken]: # Always raise an error if string because users should define the behavior for index, token in value.items(): if not isinstance(token, (str, AddedToken)) or not isinstance(index, int): raise TypeError( f"The provided `added_tokens_decoder` has an element of type {index.__class__, token.__class__}, should be a dict of {int, Union[AddedToken, str]}" ) self._added_tokens_decoder[index] = AddedToken(token) if isinstance(token, str) else token self._added_tokens_encoder[str(token)] = index self._update_total_vocab_size() def get_added_vocab(self) -> dict[str, int]: """ Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from the fast call because for now we always add the tokens even if they are already in the vocabulary. This is something we should change. Returns: `dict[str, int]`: The added tokens. """ return self._added_tokens_encoder def __len__(self): """ Size of the full vocabulary with the added tokens. """ return self.total_vocab_size def _update_total_vocab_size(self): """ Update the size of the full vocabulary with the added tokens. Counts the `keys` and not the `values` because otherwise if there is a hole in the vocab, we will add tokenizers at a wrong index. This operation is slow and is only updated when adding tokens. """ self.total_vocab_size = len(self.get_vocab()) def _add_tokens(self, new_tokens: Union[list[str], list[AddedToken]], special_tokens: bool = False) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary. Special tokens are sometimes already in the vocab which is why they have to be handled specifically. Args: new_tokens (`list[str]`or `list[tokenizers.AddedToken]`): Token(s) to add in vocabulary. A token is counted as added if it's not already in the vocabulary (tested by checking if the tokenizer assign the index of the `unk_token` to them). If a token is part of the vocabulary then we simply mark this token as an `AddedToken` which allows to control the stripping and normalization of this token. This is NOT possible in `tokenizers`. special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the tokens should be added as special tokens. Returns: `int`: The number of tokens actually added to the vocabulary. Examples: ```python # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") model = BertModel.from_pretrained("google-bert/bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) print("We have added", num_added_toks, "tokens") # Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) ```""" added_tokens = 0 if new_tokens is None: return added_tokens # TODO this is fairly slow to improve! 
current_vocab = self.get_vocab().copy() new_idx = len(current_vocab) # only call this once, len gives the last index + 1 for token in new_tokens: if not isinstance(token, (str, AddedToken)): raise TypeError(f"Token {token} is not a string but a {type(token)}.") if str(token) == "": continue if isinstance(token, str): if token in self._added_tokens_encoder: continue else: # very important for fast and slow equivalence! is_special = token in self.all_special_tokens or special_tokens token = AddedToken( token, rstrip=False, lstrip=False, normalized=not is_special, special=is_special ) elif special_tokens: # doing token.special=True changes the normalization! will fix in rust # this is important and the only reason why the AddedTokens in each class are normalized by default token.__setstate__({"special": True, "normalized": token.normalized}) if token in self._added_tokens_decoder: continue if not token.special and token.normalized and getattr(self, "do_lower_case", False): # Normalize if requested token.content = token.content.lower() if token.content not in current_vocab: token_index = new_idx + added_tokens current_vocab[token.content] = token_index added_tokens += 1 else: token_index = current_vocab[token.content] if token.special and str(token) not in self.all_special_tokens: self._special_tokens_map["additional_special_tokens"].append(token) # the setter automatically updates the reverse map self._added_tokens_decoder[token_index] = token self._added_tokens_encoder[token.content] = token_index if self.verbose: logger.info(f"Adding {token} to the vocabulary") self._update_trie() self._update_total_vocab_size() return added_tokens def _update_trie(self, unique_no_split_tokens: Optional[str] = []): for token in self._added_tokens_decoder.values(): if token.content not in self.tokens_trie._tokens: self.tokens_trie.add(token.content) for token in unique_no_split_tokens: if token not in self.tokens_trie._tokens: self.tokens_trie.add(token) def num_special_tokens_to_add(self, pair: bool = False) -> int: """ Returns the number of added tokens when encoding a sequence with special tokens. <Tip> This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put this inside your training loop. </Tip> Args: pair (`bool`, *optional*, defaults to `False`): Whether the number of added tokens should be computed in the case of a sequence pair or a single sequence. Returns: `int`: Number of special tokens added to sequences. """ token_ids_0 = [] token_ids_1 = [] return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None)) def tokenize(self, text: TextInput, **kwargs) -> list[str]: """ Converts a string into a sequence of tokens, using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Takes care of added tokens. Args: text (`str`): The sequence to be encoded. **kwargs (additional keyword arguments): Passed along to the model-specific `prepare_for_tokenization` preprocessing method. Returns: `list[str]`: The list of tokens. """ split_special_tokens = kwargs.pop("split_special_tokens", self.split_special_tokens) text, kwargs = self.prepare_for_tokenization(text, **kwargs) if kwargs: logger.warning(f"Keyword arguments {kwargs} not recognized.") if hasattr(self, "do_lower_case") and self.do_lower_case: # convert non-special tokens to lowercase. Might be super slow as well? 
escaped_special_toks = [re.escape(s_tok) for s_tok in (self.all_special_tokens)] escaped_special_toks += [ re.escape(s_tok.content) for s_tok in (self._added_tokens_decoder.values()) if not s_tok.special and s_tok.normalized ] pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)" text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text) if split_special_tokens: no_split_token = [] tokens = [text] else: no_split_token = self._added_tokens_encoder.keys() # don't split on any of the added tokens # "This is something<special_token_1> else" tokens = self.tokens_trie.split(text) # ["This is something", "<special_token_1>", " else"] for i, token in enumerate(tokens): if token in no_split_token: tok_extended = self._added_tokens_decoder.get(self._added_tokens_encoder[token], None) left = tokens[i - 1] if i > 0 else None right = tokens[i + 1] if i < len(tokens) - 1 else None if isinstance(tok_extended, AddedToken): if tok_extended.rstrip and right: # A bit counter-intuitive but we strip the left of the string # since tok_extended.rstrip means the special token is eating all white spaces on its right tokens[i + 1] = right.lstrip() # Strip white spaces on the left if tok_extended.lstrip and left: tokens[i - 1] = left.rstrip() # Opposite here if tok_extended.single_word and left and left[-1] != " ": tokens[i - 1] += token tokens[i] = "" elif tok_extended.single_word and right and right[0] != " ": tokens[i + 1] = token + tokens[i + 1] tokens[i] = "" else: raise ValueError( f"{tok_extended} cannot be tokenized because it was not properly added" f" to the tokenizer. This means that it is not an `AddedToken` but a {type(tok_extended)}" ) # ["This is something", "<special_token_1>", "else"] tokenized_text = [] for token in tokens: # Need to skip eventual empty (fully stripped) tokens if not token: continue if token in no_split_token: tokenized_text.append(token) else: tokenized_text.extend(self._tokenize(token)) # ["This", " is", " something", "<special_token_1>", "else"] return tokenized_text def _tokenize(self, text, **kwargs): """ Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Do NOT take care of added tokens. """ raise NotImplementedError def convert_tokens_to_ids(self, tokens: Union[str, list[str]]) -> Union[int, list[int]]: """ Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary. Args: tokens (`str` or `list[str]`): One or several token(s) to convert to token id(s). Returns: `int` or `list[int]`: The token id or list of token ids. 
""" if tokens is None: return None if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in self._added_tokens_encoder: return self._added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError def _encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: if is_split_into_words: raise ValueError( f"Input {text} is not valid. Should be a string or a list/tuple of strings when" " `is_split_into_words=True`." ) else: raise ValueError( f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of" " integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) first_ids = get_input_ids(text) second_ids = get_input_ids(text_pair) if text_pair is not None else None return self.prepare_for_model( first_ids, pair_ids=second_ids, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair], list[EncodedInput], list[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError( "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) input_ids = [] for ids_or_pair_ids in batch_text_or_text_pairs: if ( not isinstance(ids_or_pair_ids, (list, tuple)) or is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)) ): ids, pair_ids = ids_or_pair_ids, None else: ids, pair_ids = ids_or_pair_ids first_ids = get_input_ids(ids) second_ids = get_input_ids(pair_ids) if pair_ids is not None else None input_ids.append((first_ids, second_ids)) batch_outputs = self._batch_prepare_for_model( input_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, split_special_tokens=split_special_tokens, ) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_ids_pairs: list[Union[PreTokenizedInputPair, tuple[list[int], None]]], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. 
It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens Args: batch_ids_pairs: list of tokenized input ids or input ids pairs """ batch_outputs = {} for first_ids, second_ids in batch_ids_pairs: outputs = self.prepare_for_model( first_ids, second_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward padding_side=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, split_special_tokens=split_special_tokens, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def prepare_for_tokenization( self, text: str, is_split_into_words: bool = False, **kwargs ) -> tuple[str, dict[str, Any]]: """ Performs any necessary transformations before tokenization. This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the `kwargs` at the end of the encoding process to be sure all the arguments have been used. Args: text (`str`): The text to prepare. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. kwargs (`dict[str, Any]`, *optional*): Keyword arguments to use for the tokenization. Returns: `tuple[str, dict[str, Any]]`: The prepared text and the unused kwargs. """ return (text, kwargs) def get_special_tokens_mask( self, token_ids_0: list, token_ids_1: Optional[list] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`list[int]`): List of ids of the first sequence. token_ids_1 (`list[int]`, *optional*): List of ids of the second sequence. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0)) @overload def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str: ... @overload def convert_ids_to_tokens(self, ids: list[int], skip_special_tokens: bool = False) -> list[str]: ... def convert_ids_to_tokens( self, ids: Union[int, list[int]], skip_special_tokens: bool = False ) -> Union[str, list[str]]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (`int` or `list[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. Returns: `str` or `list[str]`: The decoded token(s). """ if isinstance(ids, int): if ids in self._added_tokens_decoder: return self._added_tokens_decoder[ids].content else: return self._convert_id_to_token(ids) tokens = [] for index in ids: index = int(index) if skip_special_tokens and index in self.all_special_ids: continue if index in self._added_tokens_decoder: tokens.append(self._added_tokens_decoder[index].content) else: tokens.append(self._convert_id_to_token(index)) return tokens def _convert_id_to_token(self, index: int) -> str: raise NotImplementedError def convert_tokens_to_string(self, tokens: list[str]) -> str: return " ".join(tokens) def _decode( self, token_ids: Union[int, list[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # If given is a single id, prevents splitting the string in upcoming loop if isinstance(filtered_tokens, str): filtered_tokens = [filtered_tokens] legacy_added_tokens = set(self._added_tokens_encoder.keys()) - set(self.all_special_tokens) | { token for token in self.additional_special_tokens if self.convert_tokens_to_ids(token) >= self.vocab_size } # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] # TODO @ArthurZ in version 5, special tokens should be handled in convert_tokens_to_string, while _convert_tokens_to_string for token in filtered_tokens: if skip_special_tokens and token in self.all_special_tokens: continue if token in legacy_added_tokens: if current_sub_text: string = self.convert_tokens_to_string(current_sub_text) if len(string) > 0: sub_texts.append(string) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) if spaces_between_special_tokens: text = " ".join(sub_texts) else: text = "".join(sub_texts) clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text
transformers/src/transformers/tokenization_utils.py/0
{ "file_path": "transformers/src/transformers/tokenization_utils.py", "repo_id": "transformers", "token_count": 21779 }
487
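The added-token machinery in the file above (`_add_tokens`, `_update_trie`, `tokenize`, `convert_tokens_to_ids`) can be exercised end to end with a few lines. The sketch below is illustrative rather than part of the source: it assumes network access to download the `google-bert/bert-base-uncased` checkpoint already referenced in the file's docstring, and the exact ids printed depend on that vocabulary.

```python
# Minimal sketch of the slow-tokenizer added-token path: add_tokens() routes through
# _add_tokens(), _update_trie() registers the new string in the trie, and tokenize()
# keeps it unsplit. Outputs shown in comments are approximate.
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

num_added = tokenizer.add_tokens(["<special_token_1>"])
print(num_added, len(tokenizer))  # 1, original vocab size + 1

# The trie splits the raw text around the added token before word-piece tokenization
print(tokenizer.tokenize("This is something<special_token_1> else"))
# roughly: ['this', 'is', 'something', '<special_token_1>', 'else']

# convert_tokens_to_ids() consults _added_tokens_encoder before the base vocabulary
print(tokenizer.convert_tokens_to_ids("<special_token_1>"))
```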
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json import re import types from contextlib import contextmanager from datetime import datetime from functools import lru_cache from inspect import isfunction from typing import ( Any, Callable, Literal, Optional, Union, get_args, get_origin, get_type_hints, ) from packaging import version from . import logging from .import_utils import is_jinja_available, is_torch_available, is_vision_available logger = logging.get_logger(__name__) if is_jinja_available(): import jinja2 from jinja2.ext import Extension from jinja2.sandbox import ImmutableSandboxedEnvironment else: jinja2 = None if is_vision_available(): from PIL.Image import Image if is_torch_available(): from torch import Tensor BASIC_TYPES = (int, float, str, bool, Any, type(None), ...) # Extracts the initial segment of the docstring, containing the function description description_re = re.compile(r"^(.*?)[\n\s]*(Args:|Returns:|Raises:|\Z)", re.DOTALL) # Extracts the Args: block from the docstring args_re = re.compile(r"\n\s*Args:\n\s*(.*?)[\n\s]*(Returns:|Raises:|\Z)", re.DOTALL) # Splits the Args: block into individual arguments args_split_re = re.compile( r""" (?:^|\n) # Match the start of the args block, or a newline \s*(\w+):\s* # Capture the argument name and strip spacing (.*?)\s* # Capture the argument description, which can span multiple lines, and strip trailing spacing (?=\n\s*\w+:|\Z) # Stop when you hit the next argument or the end of the block """, re.DOTALL | re.VERBOSE, ) # Extracts the Returns: block from the docstring, if present. Note that most chat templates ignore the return type/doc! 
returns_re = re.compile(r"\n\s*Returns:\n\s*(.*?)[\n\s]*(Raises:|\Z)", re.DOTALL) class TypeHintParsingException(Exception): """Exception raised for errors in parsing type hints to generate JSON schemas""" pass class DocstringParsingException(Exception): """Exception raised for errors in parsing docstrings to generate JSON schemas""" pass def _get_json_schema_type(param_type: type) -> dict[str, str]: type_mapping = { int: {"type": "integer"}, float: {"type": "number"}, str: {"type": "string"}, bool: {"type": "boolean"}, type(None): {"type": "null"}, Any: {}, } if is_vision_available(): type_mapping[Image] = {"type": "image"} if is_torch_available(): type_mapping[Tensor] = {"type": "audio"} return type_mapping.get(param_type, {"type": "object"}) def _parse_type_hint(hint: str) -> dict: origin = get_origin(hint) args = get_args(hint) if origin is None: try: return _get_json_schema_type(hint) except KeyError: raise TypeHintParsingException( "Couldn't parse this type hint, likely due to a custom class or object: ", hint ) elif origin is Union or (hasattr(types, "UnionType") and origin is types.UnionType): # Recurse into each of the subtypes in the Union, except None, which is handled separately at the end subtypes = [_parse_type_hint(t) for t in args if t is not type(None)] if len(subtypes) == 1: # A single non-null type can be expressed directly return_dict = subtypes[0] elif all(isinstance(subtype["type"], str) for subtype in subtypes): # A union of basic types can be expressed as a list in the schema return_dict = {"type": sorted([subtype["type"] for subtype in subtypes])} else: # A union of more complex types requires "anyOf" return_dict = {"anyOf": subtypes} if type(None) in args: return_dict["nullable"] = True return return_dict elif origin is Literal and len(args) > 0: LITERAL_TYPES = (int, float, str, bool, type(None)) args_types = [] for arg in args: if type(arg) not in LITERAL_TYPES: raise TypeHintParsingException("Only the valid python literals can be listed in typing.Literal.") arg_type = _get_json_schema_type(type(arg)).get("type") if arg_type is not None and arg_type not in args_types: args_types.append(arg_type) return { "type": args_types.pop() if len(args_types) == 1 else list(args_types), "enum": list(args), } elif origin is list: if not args: return {"type": "array"} else: # Lists can only have a single type argument, so recurse into it return {"type": "array", "items": _parse_type_hint(args[0])} elif origin is tuple: if not args: return {"type": "array"} if len(args) == 1: raise TypeHintParsingException( f"The type hint {str(hint).replace('typing.', '')} is a Tuple with a single element, which " "we do not automatically convert to JSON schema as it is rarely necessary. If this input can contain " "more than one element, we recommend " "using a list[] type instead, or if it really is a single element, remove the tuple[] wrapper and just " "pass the element directly." ) if ... in args: raise TypeHintParsingException( "Conversion of '...' is not supported in Tuple type hints. " "Use list[] types for variable-length" " inputs instead." 
) return {"type": "array", "prefixItems": [_parse_type_hint(t) for t in args]} elif origin is dict: # The JSON equivalent to a dict is 'object', which mandates that all keys are strings # However, we can specify the type of the dict values with "additionalProperties" out = {"type": "object"} if len(args) == 2: out["additionalProperties"] = _parse_type_hint(args[1]) return out raise TypeHintParsingException("Couldn't parse this type hint, likely due to a custom class or object: ", hint) def _convert_type_hints_to_json_schema(func: Callable) -> dict: type_hints = get_type_hints(func) signature = inspect.signature(func) required = [] for param_name, param in signature.parameters.items(): if param.annotation == inspect.Parameter.empty: raise TypeHintParsingException(f"Argument {param.name} is missing a type hint in function {func.__name__}") if param.default == inspect.Parameter.empty: required.append(param_name) properties = {} for param_name, param_type in type_hints.items(): properties[param_name] = _parse_type_hint(param_type) schema = {"type": "object", "properties": properties} if required: schema["required"] = required return schema def parse_google_format_docstring(docstring: str) -> tuple[Optional[str], Optional[dict], Optional[str]]: """ Parses a Google-style docstring to extract the function description, argument descriptions, and return description. Args: docstring (str): The docstring to parse. Returns: The function description, arguments, and return description. """ # Extract the sections description_match = description_re.search(docstring) args_match = args_re.search(docstring) returns_match = returns_re.search(docstring) # Clean and store the sections description = description_match.group(1).strip() if description_match else None docstring_args = args_match.group(1).strip() if args_match else None returns = returns_match.group(1).strip() if returns_match else None # Parsing the arguments into a dictionary if docstring_args is not None: docstring_args = "\n".join([line for line in docstring_args.split("\n") if line.strip()]) # Remove blank lines matches = args_split_re.findall(docstring_args) args_dict = {match[0]: re.sub(r"\s*\n+\s*", " ", match[1].strip()) for match in matches} else: args_dict = {} return description, args_dict, returns def get_json_schema(func: Callable) -> dict: """ This function generates a JSON schema for a given function, based on its docstring and type hints. This is mostly used for passing lists of tools to a chat template. The JSON schema contains the name and description of the function, as well as the names, types and descriptions for each of its arguments. `get_json_schema()` requires that the function has a docstring, and that each argument has a description in the docstring, in the standard Google docstring format shown below. It also requires that all the function arguments have a valid Python type hint. Although it is not required, a `Returns` block can also be added, which will be included in the schema. This is optional because most chat templates ignore the return value of the function. Args: func: The function to generate a JSON schema for. Returns: A dictionary containing the JSON schema for the function. 
Examples: ```python >>> def multiply(x: float, y: float): >>> ''' >>> A function that multiplies two numbers >>> >>> Args: >>> x: The first number to multiply >>> y: The second number to multiply >>> ''' >>> return x * y >>> >>> print(get_json_schema(multiply)) { "name": "multiply", "description": "A function that multiplies two numbers", "parameters": { "type": "object", "properties": { "x": {"type": "number", "description": "The first number to multiply"}, "y": {"type": "number", "description": "The second number to multiply"} }, "required": ["x", "y"] } } ``` The general use for these schemas is that they are used to generate tool descriptions for chat templates that support them, like so: ```python >>> from transformers import AutoTokenizer >>> from transformers.utils import get_json_schema >>> >>> def multiply(x: float, y: float): >>> ''' >>> A function that multiplies two numbers >>> >>> Args: >>> x: The first number to multiply >>> y: The second number to multiply >>> return x * y >>> ''' >>> >>> multiply_schema = get_json_schema(multiply) >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01") >>> messages = [{"role": "user", "content": "What is 179 x 4571?"}] >>> formatted_chat = tokenizer.apply_chat_template( >>> messages, >>> tools=[multiply_schema], >>> chat_template="tool_use", >>> return_dict=True, >>> return_tensors="pt", >>> add_generation_prompt=True >>> ) >>> # The formatted chat can now be passed to model.generate() ``` Each argument description can also have an optional `(choices: ...)` block at the end, such as `(choices: ["tea", "coffee"])`, which will be parsed into an `enum` field in the schema. Note that this will only be parsed correctly if it is at the end of the line: ```python >>> def drink_beverage(beverage: str): >>> ''' >>> A function that drinks a beverage >>> >>> Args: >>> beverage: The beverage to drink (choices: ["tea", "coffee"]) >>> ''' >>> pass >>> >>> print(get_json_schema(drink_beverage)) ``` { 'name': 'drink_beverage', 'description': 'A function that drinks a beverage', 'parameters': { 'type': 'object', 'properties': { 'beverage': { 'type': 'string', 'enum': ['tea', 'coffee'], 'description': 'The beverage to drink' } }, 'required': ['beverage'] } } """ doc = inspect.getdoc(func) if not doc: raise DocstringParsingException( f"Cannot generate JSON schema for {func.__name__} because it has no docstring!" 
) doc = doc.strip() main_doc, param_descriptions, return_doc = parse_google_format_docstring(doc) json_schema = _convert_type_hints_to_json_schema(func) if (return_dict := json_schema["properties"].pop("return", None)) is not None: if return_doc is not None: # We allow a missing return docstring since most templates ignore it return_dict["description"] = return_doc for arg, schema in json_schema["properties"].items(): if arg not in param_descriptions: raise DocstringParsingException( f"Cannot generate JSON schema for {func.__name__} because the docstring has no description for the argument '{arg}'" ) desc = param_descriptions[arg] enum_choices = re.search(r"\(choices:\s*(.*?)\)\s*$", desc, flags=re.IGNORECASE) if enum_choices: schema["enum"] = [c.strip() for c in json.loads(enum_choices.group(1))] desc = enum_choices.string[: enum_choices.start()].strip() schema["description"] = desc output = {"name": func.__name__, "description": main_doc, "parameters": json_schema} if return_dict is not None: output["return"] = return_dict return {"type": "function", "function": output} def _render_with_assistant_indices( compiled_template, messages, tools, documents, add_generation_prompt, **template_kwargs ): rendered_blocks = [] generation_indices = [] with compiled_template.environment.activate_tracker(rendered_blocks, generation_indices): for block in compiled_template.generate( messages=messages, tools=tools, documents=documents, add_generation_prompt=add_generation_prompt, **template_kwargs, ): rendered_blocks.append(block) rendered_chat = "".join(rendered_blocks) return rendered_chat, generation_indices @lru_cache def _compile_jinja_template(chat_template): if not is_jinja_available(): raise ImportError( "apply_chat_template requires jinja2 to be installed. Please install it using `pip install jinja2`." ) class AssistantTracker(Extension): # This extension is used to track the indices of assistant-generated tokens in the rendered chat tags = {"generation"} def __init__(self, environment: ImmutableSandboxedEnvironment): # The class is only initiated by jinja. super().__init__(environment) environment.extend(activate_tracker=self.activate_tracker) self._rendered_blocks = None self._generation_indices = None def parse(self, parser: jinja2.parser.Parser) -> jinja2.nodes.CallBlock: lineno = next(parser.stream).lineno body = parser.parse_statements(["name:endgeneration"], drop_needle=True) return jinja2.nodes.CallBlock(self.call_method("_generation_support"), [], [], body).set_lineno(lineno) @jinja2.pass_eval_context def _generation_support(self, context: jinja2.nodes.EvalContext, caller: jinja2.runtime.Macro) -> str: rv = caller() if self.is_active(): # Only track generation indices if the tracker is active start_index = len("".join(self._rendered_blocks)) end_index = start_index + len(rv) self._generation_indices.append((start_index, end_index)) return rv def is_active(self) -> bool: return self._rendered_blocks or self._generation_indices @contextmanager def activate_tracker(self, rendered_blocks: list[int], generation_indices: list[int]): try: if self.is_active(): raise ValueError("AssistantTracker should not be reused before closed") self._rendered_blocks = rendered_blocks self._generation_indices = generation_indices yield finally: self._rendered_blocks = None self._generation_indices = None if version.parse(jinja2.__version__) < version.parse("3.1.0"): raise ImportError( f"apply_chat_template requires jinja2>=3.1.0 to be installed. Your version is {jinja2.__version__}." 
) def raise_exception(message): raise jinja2.exceptions.TemplateError(message) def tojson(x, ensure_ascii=False, indent=None, separators=None, sort_keys=False): # We override the built-in tojson filter because Jinja's default filter escapes HTML characters # We also expose some options like custom indents and separators return json.dumps(x, ensure_ascii=ensure_ascii, indent=indent, separators=separators, sort_keys=sort_keys) def strftime_now(format): return datetime.now().strftime(format) jinja_env = ImmutableSandboxedEnvironment( trim_blocks=True, lstrip_blocks=True, extensions=[AssistantTracker, jinja2.ext.loopcontrols] ) jinja_env.filters["tojson"] = tojson jinja_env.globals["raise_exception"] = raise_exception jinja_env.globals["strftime_now"] = strftime_now return jinja_env.from_string(chat_template) def render_jinja_template( conversations: list[list[dict[str, str]]], tools: Optional[list[Union[dict, Callable]]] = None, documents: Optional[list[dict[str, str]]] = None, chat_template: Optional[str] = None, return_assistant_tokens_mask: Optional[bool] = False, continue_final_message: Optional[bool] = False, add_generation_prompt: Optional[bool] = False, **kwargs, ) -> str: if return_assistant_tokens_mask and not re.search(r"\{\%-?\s*generation\s*-?\%\}", chat_template): logger.warning_once( "return_assistant_tokens_mask==True but chat template does not contain `{% generation %}` keyword." ) # Compilation function uses a cache to avoid recompiling the same template compiled_template = _compile_jinja_template(chat_template) # We accept either JSON schemas or functions for tools. If we get functions, we convert them to schemas if tools is not None: tool_schemas = [] for tool in tools: if isinstance(tool, dict): tool_schemas.append(tool) elif isfunction(tool): tool_schemas.append(get_json_schema(tool)) else: raise ValueError( "Tools should either be a JSON schema, or a callable function with type hints " "and a docstring suitable for auto-conversion to a schema." ) else: tool_schemas = None if documents is not None: for document in documents: if not isinstance(document, dict): raise TypeError("Documents should be a list of dicts with 'title' and 'text' keys!") rendered = [] all_generation_indices = [] for chat in conversations: if hasattr(chat, "messages"): # Indicates it's a Conversation object chat = chat.messages if return_assistant_tokens_mask: rendered_chat, generation_indices = _render_with_assistant_indices( compiled_template=compiled_template, messages=chat, tools=tool_schemas, documents=documents, add_generation_prompt=add_generation_prompt, **kwargs, ) all_generation_indices.append(generation_indices) else: rendered_chat = compiled_template.render( messages=chat, tools=tool_schemas, documents=documents, add_generation_prompt=add_generation_prompt, **kwargs, ) if continue_final_message: final_message = chat[-1]["content"] if isinstance(final_message, (list, tuple)): for content_block in reversed(final_message): if "text" in content_block: # Pick the last text block in the message (the first one we hit while iterating in reverse) final_message = content_block["text"] break else: raise ValueError( "continue_final_message is set but we could not find any text to continuein the final message!" ) if final_message.strip() not in rendered_chat: raise ValueError( "continue_final_message is set but the final message does not appear in the chat after " "applying the chat template! This can happen if the chat template deletes portions of " "the final message. 
Please verify the chat template and final message in your chat to " "ensure they are compatible." ) final_msg_loc = rendered_chat.rindex(final_message.strip()) if rendered_chat[final_msg_loc : final_msg_loc + len(final_message.lstrip())] == final_message: # The template preserves spacing or the message doesn't have trailing spacing, so things are simple rendered_chat = rendered_chat[: final_msg_loc + len(final_message.lstrip())] else: # The message has trailing spacing that was trimmed, so we must be more cautious rendered_chat = rendered_chat[: final_msg_loc + len(final_message.strip())] rendered.append(rendered_chat) return rendered, all_generation_indices
transformers/src/transformers/utils/chat_template_utils.py/0
{ "file_path": "transformers/src/transformers/utils/chat_template_utils.py", "repo_id": "transformers", "token_count": 9085 }
488
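A short sketch of how the utilities above turn a plain Python function into a tool schema. The `search_web` function below is hypothetical; the import path follows the example in `get_json_schema`'s own docstring, and the comments restate what `_parse_type_hint` and `_convert_type_hints_to_json_schema` do with the annotations.

```python
# Hypothetical tool function used only to illustrate the schema generation above.
from typing import Optional

from transformers.utils import get_json_schema


def search_web(query: str, max_results: Optional[int] = None):
    """
    Search the web for a query.

    Args:
        query: The text to search for
        max_results: Maximum number of results to return
    """
    pass


schema = get_json_schema(search_web)

# _parse_type_hint maps Optional[int] to {"type": "integer", "nullable": True},
# and only "query" (no default value) lands in the "required" list.
print(schema["function"]["parameters"]["properties"]["max_results"])
print(schema["function"]["parameters"]["required"])  # ['query']
```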
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class PreTrainedTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"])
transformers/src/transformers/utils/dummy_tokenizers_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_tokenizers_objects.py", "repo_id": "transformers", "token_count": 104 }
489
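The autogenerated dummy module above exists so that importing `transformers` without the `tokenizers` backend still succeeds, with the failure deferred to instantiation. A tiny illustrative sketch of the same pattern, using a made-up class name:

```python
# Illustrative only: mirrors the autogenerated dummy above. Instantiating the class
# raises a descriptive ImportError when the "tokenizers" package is not installed;
# with the backend present, requires_backends() is a no-op.
from transformers.utils import DummyObject, requires_backends


class FakeFastTokenizer(metaclass=DummyObject):
    _backends = ["tokenizers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["tokenizers"])
```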
# Copyright 2023 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class BetterTransformerIntegrationTest(unittest.TestCase): # refer to the full test suite in Optimum library: # https://github.com/huggingface/optimum/tree/main/tests/bettertransformer def test_transform_and_reverse(self): r""" Classic tests to simply check if the conversion has been successful. """ model_id = "hf-internal-testing/tiny-random-t5" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForSeq2SeqLM.from_pretrained(model_id) inp = tokenizer("This is me", return_tensors="pt") model = model.to_bettertransformer() self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) output = model.generate(**inp) model = model.reverse_bettertransformer() self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname) self.assertFalse( any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()) ) output_from_pretrained = model_reloaded.generate(**inp) torch.testing.assert_close(output, output_from_pretrained) def test_error_save_pretrained(self): r""" The save_pretrained method should raise a ValueError if the model is in BetterTransformer mode. All should be good if the model is reversed. """ model_id = "hf-internal-testing/tiny-random-t5" model = AutoModelForSeq2SeqLM.from_pretrained(model_id) model = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(ValueError): model.save_pretrained(tmpdirname) model = model.reverse_bettertransformer() model.save_pretrained(tmpdirname)
transformers/tests/bettertransformer/test_integration.py/0
{ "file_path": "transformers/tests/bettertransformer/test_integration.py", "repo_id": "transformers", "token_count": 1106 }
490
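Condensed from the test above, a hedged usage sketch of the BetterTransformer round-trip. It assumes `optimum` is installed and reuses the same tiny test checkpoint; the output directory name is made up.

```python
# Sketch of the transform-and-reverse flow exercised by test_transform_and_reverse.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "hf-internal-testing/tiny-random-t5"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

model = model.to_bettertransformer()        # swap in BetterTransformer modules
out = model.generate(**tokenizer("This is me", return_tensors="pt"))

model = model.reverse_bettertransformer()   # required before save_pretrained()
model.save_pretrained("t5-tiny-roundtrip")  # hypothetical output directory
print(tokenizer.batch_decode(out, skip_special_tokens=True))
```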
# Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import unittest from transformers import AutoTokenizer, is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( ConfidenceCriteria, EosTokenCriteria, MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList, StopStringCriteria, validate_stopping_criteria, ) @require_torch class StoppingCriteriaTestCase(unittest.TestCase): def _get_tensors(self, length): batch_size = 3 vocab_size = 250 input_ids = ids_tensor((batch_size, length), vocab_size) scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return input_ids, scores def test_list_criteria(self): input_ids, scores = self._get_tensors(5) criteria = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1), ] ) self.assertFalse(all(criteria(input_ids, scores))) input_ids, scores = self._get_tensors(9) self.assertFalse(all(criteria(input_ids, scores))) input_ids, scores = self._get_tensors(10) self.assertTrue(all(criteria(input_ids, scores))) def test_max_length_criteria(self): criteria = MaxLengthCriteria(max_length=10) input_ids, scores = self._get_tensors(5) self.assertFalse(all(criteria(input_ids, scores))) input_ids, scores = self._get_tensors(9) self.assertFalse(all(criteria(input_ids, scores))) input_ids, scores = self._get_tensors(10) self.assertTrue(all(criteria(input_ids, scores))) def test_max_time_criteria(self): input_ids, scores = self._get_tensors(5) criteria = MaxTimeCriteria(max_time=0.1) self.assertFalse(all(criteria(input_ids, scores))) criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2) self.assertTrue(all(criteria(input_ids, scores))) def test_eos_token_criteria(self): criteria = EosTokenCriteria(eos_token_id=0) input_ids, scores = self._get_tensors(5) input_ids[:, -1] = 0 self.assertTrue(all(criteria(input_ids, scores))) input_ids, scores = self._get_tensors(5) input_ids[:2, -1] = 0 input_ids[2, -1] = 1 self.assertListEqual(criteria(input_ids, scores).tolist(), [True, True, False]) input_ids, scores = self._get_tensors(5) input_ids[:, -1] = 1 self.assertListEqual(criteria(input_ids, scores).tolist(), [False, False, False]) def test_confidence_criteria(self): criteria = ConfidenceCriteria(assistant_confidence_threshold=0.5) vocab_size = 250 length = 5 input_ids = ids_tensor((1, length), vocab_size) scores = (torch.randn((1, vocab_size)),) # Simulate high confidence by setting the probability of the last token to be high scores[0][0, input_ids[0, -1]] = 10.0 # Logits before softmax self.assertFalse(criteria(input_ids, scores)) # Simulate low confidence by setting the probability of the last token to be low scores[0][0, input_ids[0, -1]] = -10.0 # Logits before softmax self.assertTrue(criteria(input_ids, scores)) def test_validate_stopping_criteria(self): 
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10) with self.assertWarns(UserWarning): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11) stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11) self.assertEqual(len(stopping_criteria), 1) def test_stop_string_criteria(self): true_strings = [ "<|im_start|><|im_end|>", "<|im_start|><|im_end|<|im_end|>", ">><|im_start|>>stop", "stop", "e nd", ] false_strings = [ "<|im_start|><|im_end|", "<|im_start|><|im_end|<|im_end|", "<|im_end|><|im_start|>", "<|im_end|<>stop<|im_end|", "end", "en d", "eNd", "<|im_end|", "|im_end|>", "s", ] stop_strings = ["<|im_end|>", "stop", "e nd"] # Use a tokenizer that won't actually have special tokens for these tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tokenizer.pad_token_id = tokenizer.eos_token_id tokenizer.padding_side = "left" true_input_ids = tokenizer(true_strings, return_tensors="pt", padding="longest", add_special_tokens=False) false_input_ids = tokenizer(false_strings, return_tensors="pt", padding="longest", add_special_tokens=False) scores = None criteria = StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings) for i in range(len(true_strings)): self.assertTrue(criteria(true_input_ids["input_ids"][i : i + 1], scores)) for i in range(len(false_strings)): self.assertFalse(criteria(false_input_ids["input_ids"][i : i + 1], scores)) # Now try it with a tokenizer where those are actually special tokens tokenizer = AutoTokenizer.from_pretrained("cognitivecomputations/dolphin-2.5-mixtral-8x7b") tokenizer.padding_side = "left" true_input_ids = tokenizer(true_strings, return_tensors="pt", padding="longest", add_special_tokens=False) false_input_ids = tokenizer(false_strings, return_tensors="pt", padding="longest", add_special_tokens=False) criteria = StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings) for i in range(len(true_strings)): self.assertTrue(criteria(true_input_ids["input_ids"][i : i + 1], scores)) for i in range(len(false_strings)): self.assertFalse(criteria(false_input_ids["input_ids"][i : i + 1], scores)) def test_stop_string_criteria_vocab_size_mismatch(self): """Test that StopStringCriteria handles tokens above len(tokenizer) correctly.""" tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") # Create input_ids with tokens above len(tokenizer) input_ids = torch.tensor([[len(tokenizer) + 1024, 1, 2]], device=torch_device) scores = None criteria = StopStringCriteria(tokenizer=tokenizer, stop_strings=["test"]) # This should not raise an error and should return False since no stop string is matched self.assertFalse(criteria(input_ids, scores)) def test_stop_string_matching_positions(self): stop_string = "stop" token_list = ["last", "top", "topper", "s", "p"] token_indices = list(range(len(token_list))) all_token_valid_positions, all_token_end_overlaps = StopStringCriteria._stop_string_get_matching_positions( token_list=token_list, token_indices=token_indices, stop_strings=[stop_string] ) valid_positions = { token_list[idx]: positions for idx, positions in all_token_valid_positions[stop_string].items() } end_overlaps = {token_list[idx]: overlaps for idx, overlaps in all_token_end_overlaps[stop_string].items()} self.assertEqual(valid_positions, {"s": [3], "last": [2]}) self.assertEqual(end_overlaps, {"top": [3], "topper": [3], "p": [1]}) def test_stop_string_embedding_vecs(self): stop_string = "stop" token_list = ["last", "top", "topper", "s", "p"] token_indices = 
list(range(len(token_list))) embedding_vec, max_valid_positions, max_valid_end_lens = StopStringCriteria._stop_string_create_embedding_vec( token_list=token_list, token_indices=token_indices, stop_strings=[stop_string] ) # Positions inside the stop string where the token matches (excluding end overlaps) valid_positions = embedding_vec[:, 0].tolist() self.assertEqual(valid_positions, [2, -1, -1, 3, -1, -1]) # Overlap lengths between end of stop string and start of token end_overlaps = embedding_vec[:, 1].tolist() self.assertEqual(end_overlaps, [-1, 3, 3, -1, 1, -1]) # Length of each token token_lengths = embedding_vec[:-1, 2].tolist() self.assertEqual(token_lengths, [len(token) for token in token_list]) def test_single_letter_stop_string(self): true_strings = ["a", "baa", "abc"] # "abc" is a single token false_strings = ["abbbbbbb", "b"] # "abbbbbbb" is split into multiple tokens stop_strings = ["a"] tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tokenizer.pad_token_id = tokenizer.eos_token_id tokenizer.padding_side = "left" true_input_ids = tokenizer(true_strings, return_tensors="pt", padding="longest", add_special_tokens=False) false_input_ids = tokenizer(false_strings, return_tensors="pt", padding="longest", add_special_tokens=False) scores = None criteria = StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings) for input_ids in true_input_ids["input_ids"]: self.assertTrue(criteria(input_ids.unsqueeze(0), scores)) for input_ids in false_input_ids["input_ids"]: self.assertFalse(criteria(input_ids.unsqueeze(0), scores)) def test_criterias_per_row(self): text = "They completed the challenging puzzle, revealing the hidden image at the end" stop_strings = ["end"] tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tokenizer.pad_token_id = tokenizer.eos_token_id inputs = tokenizer(text, return_tensors="pt", add_special_tokens=False) scores = None criteria = StoppingCriteriaList( [ MaxLengthCriteria(max_length=20), StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings), ] ) # trigger stopping when at least one criteria is satisfied, one value per batch self.assertTrue(criteria(inputs["input_ids"], scores)) # return False when neither is satisfied self.assertFalse(criteria(inputs["input_ids"][:, :-1], scores)) def test_criterias_per_row_batched(self): text = [ "They completed the challenging puzzle, revealing the hidden image at the end", "Today a dragon flew over France", "The aroma of freshly baked pizza filled the kitchen", ] stop_strings = ["end"] tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tokenizer.pad_token_id = tokenizer.eos_token_id tokenizer.padding_side = "left" inputs = tokenizer(text, return_tensors="pt", padding="longest", add_special_tokens=False) scores = None criteria = StoppingCriteriaList( [ MaxLengthCriteria(max_length=20), StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings), ] ) # trigger stopping when at least one criteria is satisfied self.assertListEqual(criteria(inputs["input_ids"], scores).tolist(), [True, False, False]) # False when neither is satisfied self.assertListEqual(criteria(inputs["input_ids"][:, :-1], scores).tolist(), [False, False, False])
transformers/tests/generation/test_stopping_criteria.py/0
{ "file_path": "transformers/tests/generation/test_stopping_criteria.py", "repo_id": "transformers", "token_count": 5122 }
491
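The criteria exercised above are normally consumed by `generate()`. Below is a hedged sketch that reuses the `openai-community/gpt2` checkpoint from the tests; the generated text itself will vary from run to run.

```python
# Sketch of plugging stopping criteria into generation: generate() evaluates the
# list after every step and stops a row as soon as any criterion returns True.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList, StopStringCriteria

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("They completed the challenging puzzle,", return_tensors="pt")
criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=40),
        StopStringCriteria(tokenizer=tokenizer, stop_strings=["end"]),
    ]
)

out = model.generate(**inputs, stopping_criteria=criteria, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```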
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Arcee model.""" import unittest from pytest import mark from transformers import AutoTokenizer, is_torch_available from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_accelerator, slow, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import ( ArceeConfig, ArceeForCausalLM, ArceeForQuestionAnswering, ArceeForSequenceClassification, ArceeForTokenClassification, ArceeModel, ) from transformers.models.arcee.modeling_arcee import ArceeRotaryEmbedding class ArceeModelTester(CausalLMModelTester): if is_torch_available(): config_class = ArceeConfig base_model_class = ArceeModel causal_lm_class = ArceeForCausalLM sequence_class = ArceeForSequenceClassification token_class = ArceeForTokenClassification @require_torch class ArceeModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = ( ( ArceeModel, ArceeForCausalLM, ArceeForSequenceClassification, ArceeForQuestionAnswering, ArceeForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ArceeModel, "text-classification": ArceeForSequenceClassification, "text-generation": ArceeForCausalLM, "zero-shot": ArceeForSequenceClassification, "question-answering": ArceeForQuestionAnswering, "token-classification": ArceeForTokenClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False model_tester_class = ArceeModelTester rotary_embedding_layer = ArceeRotaryEmbedding # Enables RoPE tests if set # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile_for_training` _torch_compile_train_cls = ArceeForCausalLM if is_torch_available() else None def test_arcee_mlp_uses_relu_squared(self): """Test that ArceeMLP uses ReLU² activation instead of SiLU.""" config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.hidden_act = "relu2" # Ensure we're using relu2 activation model = ArceeModel(config) # Check that the MLP layers use the correct activation mlp = model.layers[0].mlp # Test with a simple input x = torch.randn(1, 10, config.hidden_size) up_output = mlp.up_proj(x) # Verify ReLU² activation: x * relu(x) expected_activation = up_output * torch.relu(up_output) actual_activation = mlp.act_fn(up_output) self.assertTrue(torch.allclose(expected_activation, actual_activation, atol=1e-5)) @require_torch_accelerator class ArceeIntegrationTest(unittest.TestCase): def tearDown(self): import gc gc.collect() torch.cuda.empty_cache() @slow def test_model_from_pretrained(self): # This test would be enabled once a pretrained model is available # For now, we just test that the model can be instantiated config = ArceeConfig() model = 
ArceeForCausalLM(config) self.assertIsInstance(model, ArceeForCausalLM) @mark.skip(reason="Model is not currently public - will update test post release") @slow def test_model_generation(self): EXPECTED_TEXT_COMPLETION = ( """Once upon a time,In a village there was a farmer who had three sons. The farmer was very old and he""" ) prompt = "Once upon a time" tokenizer = AutoTokenizer.from_pretrained("arcee-ai/model-id") model = ArceeForCausalLM.from_pretrained("arcee-ai/model-id", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=20) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @mark.skip(reason="Model is not currently public - will update test post release") @slow @require_flash_attn @mark.flash_attn_test def test_model_generation_flash_attn(self): EXPECTED_TEXT_COMPLETION = ( " the food, the people, and the overall experience. I would definitely recommend this place to others." ) prompt = "This is a nice place. " * 1024 + "I really enjoy the scenery," tokenizer = AutoTokenizer.from_pretrained("arcee-ai/model-id") model = ArceeForCausalLM.from_pretrained( "arcee-ai/model-id", device_map="auto", attn_implementation="flash_attention_2", dtype="auto" ) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=20) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text[len(prompt) :])
transformers/tests/models/arcee/test_modeling_arcee.py/0
{ "file_path": "transformers/tests/models/arcee/test_modeling_arcee.py", "repo_id": "transformers", "token_count": 2429 }
492
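A standalone restatement of the activation checked in `test_arcee_mlp_uses_relu_squared` above: `relu2(x) = x * relu(x)`, which equals `relu(x) ** 2` elementwise. Purely illustrative; only `torch` is required.

```python
# ReLU-squared activation as asserted in the Arcee MLP test: zero for non-positive
# inputs, x squared for positive inputs.
import torch


def relu2(x: torch.Tensor) -> torch.Tensor:
    return x * torch.relu(x)


x = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
print(relu2(x))            # tensor([0.0000, 0.0000, 0.0000, 0.2500, 4.0000])
print(torch.relu(x) ** 2)  # identical values for every input
```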