[
  {
    "path": ".gitignore",
    "content": "models/\nwandb/\n*.backup.*\nsrc/*\n# Created by https://www.toptal.com/developers/gitignore/api/python,macos,jupyternotebooks,visualstudiocode\n# Edit at https://www.toptal.com/developers/gitignore?templates=python,macos,jupyternotebooks,visualstudiocode\n\n### JupyterNotebooks ###\n# gitignore template for Jupyter Notebooks\n# website: http://jupyter.org/\n\n.ipynb_checkpoints\n*/.ipynb_checkpoints/*\n\n# IPython\nprofile_default/\nipython_config.py\n\n# Remove previous ipynb_checkpoints\n#   git rm -r .ipynb_checkpoints/\n\n### macOS ###\n# General\n.DS_Store\n.AppleDouble\n.LSOverride\n\n# Icon must end with two \\r\nIcon\n\n\n# Thumbnails\n._*\n\n# Files that might appear in the root of a volume\n.DocumentRevisions-V100\n.fseventsd\n.Spotlight-V100\n.TemporaryItems\n.Trashes\n.VolumeIcon.icns\n.com.apple.timemachine.donotpresent\n\n# Directories potentially created on remote AFP share\n.AppleDB\n.AppleDesktop\nNetwork Trash Folder\nTemporary Items\n.apdisk\n\n### macOS Patch ###\n# iCloud generated files\n*.icloud\n\n### Python ###\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n\n# IPython\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# poetry\n#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\n#   commonly ignored for libraries.\n#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\n#poetry.lock\n\n# pdm\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.\n#pdm.lock\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\n#   in version control.\n#   https://pdm.fming.dev/#use-with-ide\n.pdm.toml\n\n# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/\n\n# PyCharm\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\n#.idea/\n\n### Python Patch ###\n# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration\npoetry.toml\n\n# ruff\n.ruff_cache/\n\n# LSP config files\npyrightconfig.json\n\n### VisualStudioCode ###\n.vscode/*\n!.vscode/settings.json\n!.vscode/tasks.json\n!.vscode/launch.json\n!.vscode/extensions.json\n!.vscode/*.code-snippets\n\n# Local History for Visual Studio Code\n.history/\n\n# Built Visual Studio Code Extensions\n*.vsix\n\n### VisualStudioCode Patch ###\n# Ignore all local history of files\n.history\n.ionide\n\n# End of https://www.toptal.com/developers/gitignore/api/python,macos,jupyternotebooks,visualstudiocode\n\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2024 Junbum Lee\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# InfiniTransformer\n\nUnofficial PyTorch/🤗Transformers implementation of Leave No Context Behind: Efficient Infinite Context Transformers with Infini-attention,\nwith Llama3 and Gemma model supported. (Llama 2 and 1 is also supported)\n\n- Paper Link: https://arxiv.org/abs/2404.07143\n\n## Two types of Implementation for Infini-Attention\n\n**Type I. Infini Attention in Model-wise, Trainer-wise**\n\n- Overrides modeling and config python files.\n- Full edit, Not compatible with basic HF trainer.\n- Need custom training code\n- Memory usage is **much lower** than SDPA(default) attention\n  - can train Gemma-2B with 32768 seq len(2048*16) on 2x H100 80G (with AdamW optimizer, No gradient checkpointing)\n  - can train Llama-3-8B with 1M seq len(2048*512) on 2x H100 80G (with Adafactor optimizer, no grad checkpointing)\n- Can train 'infinite' context -- check `train.gemma.infini.noclm.1Mseq.sh` with 1x H100 80G (with AdamW optimizer, No gradient checkpointing)\n\n**Type II. Infini Attention in Attention-Layer only**\n\n- Overrides modeling python file only, especially Attention layer only.\n- Minimal edit, fully compatible with HF(Trainer, etc)\n- Memory usage is ~eq with SDPA(default) attention\n  - can train Gemma-2B with 8192 seq len(128*64) on 2x H100 80G (with Adafactor Optimizer + Gradient Checkpointing)\n\n## How to use Type I. Infini Attention in Model-wise, Trainer-wise.\n\n### 1. Clone this repository\n\n```bash\ngit clone https://github.com/Beomi/InfiniTransformer\n```\n\n### 2. Install dependencies\n\n> We need to install the latest version(`b109257f4f`) of 🤗Transformers from the source code.\n\n```bash\npip install -r requirements.txt\npip install -e git+https://github.com/huggingface/transformers.git@b109257f4f#egg=transformers\n# or just pip install transformers\n```\n\n### 3. Run the example(Inference, simple forward/backward test)\n\n```bash\npython test_basic.infini.py\n```\n\n### 4. Train with your data\n\nTrain Llama-3 1M seq len with 2K segment size, with [MiniPile Dataset](https://huggingface.co/datasets/JeanKaddour/minipile)\n\n```bash\n./train.llama.infini.noclm.1Mseq.sh\n```\n\nor\n\nTrain Gemma-2B 32K seq len with 2K segment size, with [WikiText2 Dataset](https://huggingface.co/datasets/wikitext)\n\n```bash\n./train.gemma.infini.noclm.sh\n```\n\nor\n\nTrain Gemma-2B  1M seq len with 2K segment size, with [MiniPile Dataset](https://huggingface.co/datasets/JeanKaddour/minipile)\n\n```bash\n./train.gemma.infini.noclm.1Mseq.sh\n```\n\n## How to use Type II. Infini Attention in Attention-Layer only\n\n### 1. Clone this repository\n\n```bash\ngit clone https://github.com/Beomi/InfiniTransformer\n```\n\n### 2. Install dependencies\n\n> We need to install the latest version(`b109257f4f`) of 🤗Transformers from the source code.\n\n```bash\npip install -r requirements.txt\npip install -e git+https://github.com/huggingface/transformers.git@b109257f4f#egg=transformers\n```\n\n### 3. Remove original `modeling_gemma.py`, make a symbolic link with new `modeling_gemma.py`\n\n```bash\npython test_basic.infini.py\n```\n\n### 4. Run the example(Inference, simple forward/backward test)\n\n```bash\npython test_basic.py\n```\n\n### 5. 
```bash\npython test_train.small.gemma.py\n```\n\n<img width=\"808\" alt=\"image\" src=\"https://github.com/Beomi/InfiniTransformer/assets/11323660/c3cb7b1e-531c-4652-a5de-fcf36b1c03bc\">\n\nThe example code uses wikitext-2-raw-v1 from https://huggingface.co/datasets/wikitext\n\nHere's the test wandb log: https://api.wandb.ai/links/beomi2/1rsqrkfn\n\n### 6. Inference\n\n```bash\npython test_basic.trained.py\n```\n\n**Sample Generation w/ 1-epoch Trained Model on WikiText2**\n\nInput:\n\n> This work introduces an efficient method to scale Transformer-based\n\nOutput1:\n\n> models for denoising , denoising denoising , and deep denoising of images of the U2 EPK model , using a coefficient that is a function of the depth of the image resolution . The paper experiments with image denoising by Turbo @-@ based filtering , denoising by generative adversarial networks , and video denoising by denoising each of the three elements of the video ( color of the pixels / frames ) . The results are considered fair . The video is not discussed . The paper is not considering an actual application in an industrial context ,  line is probably a 1 . It is built in the Nohmi…\n\nOutput2:\n\n> vision models across platforms using a custom architecture optimized for both vision ( 3D / 2D ) and vision and language . In other words , a single model can run on different types of devices , a feature that is critical for the development of general @-@ purpose and large-scale AI ( see also : The One @-@ Model @-@ for @-@ All @-@ Things @-@ AI Problem ) . The model is the first to reach a global scale ( 200 GPU + ) on a single GPU using the Transformer and its variants . The model can run at the end of 1967 . He had his family relocated to a house in a nearby neighborhood , where they lived for five years , before returning to their primary residence in St. Petersburg . Later comments of 1968 made by his fellow musician Bruce Hornsby made it clear that he had gone through a lot , both personally and professionally .\n"
  },
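  {
    "path": "examples/train_segmentwise_sketch.py",
    "content": "\"\"\"Illustrative sketch (hypothetical file, not an actual entry point of this repo) of the\nType I segment-wise training loop described in README.md; the real entry points are the\ntrain.*.sh scripts. It assumes the infini_gemma GemmaForCausalLM accepts `memory`,\n`norm_term`, and `labels` keyword arguments and returns updated `memory`/`norm_term` on\nits output, as inference.gemma.infini.py does for generation.\n\"\"\"\nimport torch\n\nfrom infini_gemma import GemmaForCausalLM\n\nSEGMENT_SIZE = 2048  # should match config.segment_size\n\n\ndef train_step(model: GemmaForCausalLM, optimizer, input_ids: torch.Tensor) -> float:\n    \"\"\"Run one optimizer step over a long sequence, one segment at a time.\"\"\"\n    model.train()\n    memory, norm_term = None, None\n    losses = []\n    num_segments = input_ids.size(1) // SEGMENT_SIZE\n    for i in range(num_segments):\n        segment = input_ids[:, i * SEGMENT_SIZE : (i + 1) * SEGMENT_SIZE].to(model.device)\n        outputs = model(\n            input_ids=segment,\n            labels=segment,\n            memory=memory,\n            norm_term=norm_term,\n        )\n        # Carry the compressive memory and its normalization term into the next segment.\n        memory, norm_term = outputs.memory, outputs.norm_term\n        losses.append(outputs.loss)\n    # For simplicity this backpropagates through all segments at once; the repo's own\n    # training scripts are more memory-efficient about this.\n    loss = torch.stack(losses).mean()\n    loss.backward()\n    optimizer.step()\n    optimizer.zero_grad()\n    return loss.item()\n"
  },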
  {
    "path": "gpu_mem_track.py",
    "content": "import gc\nimport datetime\nimport inspect\n\nimport torch\nimport numpy as np\n\ndtype_memory_size_dict = {\n    torch.float64: 64 / 8,\n    torch.double: 64 / 8,\n    torch.float32: 32 / 8,\n    torch.float: 32 / 8,\n    torch.float16: 16 / 8,\n    torch.half: 16 / 8,\n    torch.int64: 64 / 8,\n    torch.long: 64 / 8,\n    torch.int32: 32 / 8,\n    torch.int: 32 / 8,\n    torch.int16: 16 / 8,\n    torch.short: 16 / 8,\n    torch.uint8: 8 / 8,\n    torch.int8: 8 / 8,\n}\n# compatibility of torch1.0\nif getattr(torch, \"bfloat16\", None) is not None:\n    dtype_memory_size_dict[torch.bfloat16] = 16/8\nif getattr(torch, \"bool\", None) is not None:\n    dtype_memory_size_dict[torch.bool] = 8/8 # pytorch use 1 byte for a bool, see https://github.com/pytorch/pytorch/issues/41571\n\ndef get_mem_space(x):\n    try:\n        ret = dtype_memory_size_dict[x]\n    except KeyError:\n        print(f\"dtype {x} is not supported!\")\n    return ret\n\nclass MemTracker(object):\n    \"\"\"\n    Class used to track pytorch memory usage\n    Arguments:\n        detail(bool, default True): whether the function shows the detail gpu memory usage\n        path(str): where to save log file\n        verbose(bool, default False): whether show the trivial exception\n        device(int): GPU number, default is 0\n    \"\"\"\n    def __init__(self, detail=True, path='', verbose=False, device=0):\n        self.print_detail = detail\n        self.last_tensor_sizes = set()\n        self.gpu_profile_fn = path + f'{datetime.datetime.now():%d-%b-%y-%H:%M:%S}-gpu_mem_track.txt'\n        self.verbose = verbose\n        self.begin = True\n        self.device = device\n\n    def get_tensors(self):\n        for obj in gc.get_objects():\n            try:\n                if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n                    tensor = obj\n                else:\n                    continue\n                if tensor.is_cuda:\n                    yield tensor\n            except Exception as e:\n                if self.verbose:\n                    print('A trivial exception occured: {}'.format(e))\n\n    def get_tensor_usage(self):\n        sizes = [np.prod(np.array(tensor.size())) * get_mem_space(tensor.dtype) for tensor in self.get_tensors()]\n        return np.sum(sizes) / 1024**2\n\n    def get_allocate_usage(self):\n        return torch.cuda.memory_allocated() / 1024**2\n\n    def clear_cache(self):\n        gc.collect()\n        torch.cuda.empty_cache()\n\n    def print_all_gpu_tensor(self, file=None):\n        for x in self.get_tensors():\n            print(x.size(), x.dtype, np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2, file=file)\n\n    def track(self):\n        \"\"\"\n        Track the GPU memory usage\n        \"\"\"\n        frameinfo = inspect.stack()[1]\n        where_str = frameinfo.filename + ' line ' + str(frameinfo.lineno) + ': ' + frameinfo.function\n\n        with open(self.gpu_profile_fn, 'a+') as f:\n\n            if self.begin:\n                f.write(f\"GPU Memory Track | {datetime.datetime.now():%d-%b-%y-%H:%M:%S} |\"\n                        f\" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb\"\n                        f\" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\\n\\n\")\n                self.begin = False\n\n            if self.print_detail is True:\n                ts_list = [(tensor.size(), tensor.dtype) for tensor in self.get_tensors()]\n                new_tensor_sizes = {(type(x),\n       
                             tuple(x.size()),\n                                    ts_list.count((x.size(), x.dtype)),\n                                    np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2,\n                                    x.dtype) for x in self.get_tensors()}\n                for t, s, n, m, data_type in new_tensor_sizes - self.last_tensor_sizes:\n                    f.write(f'+ | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\\n')\n                for t, s, n, m, data_type in self.last_tensor_sizes - new_tensor_sizes:\n                    f.write(f'- | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\\n')\n\n                self.last_tensor_sizes = new_tensor_sizes\n\n            f.write(f\"\\nAt {where_str:<50}\"\n                    f\" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb\"\n                    f\" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\\n\\n\")\n"
  },
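  {
    "path": "examples/gpu_mem_track_usage.py",
    "content": "\"\"\"Hypothetical usage example for gpu_mem_track.MemTracker (not part of the original repo).\n\ninference.gemma.infini.py carries the same calls commented out: create one tracker and call\ntrack() around the code whose GPU memory you want logged to the *-gpu_mem_track.txt file.\n\"\"\"\nimport torch\n\nfrom gpu_mem_track import MemTracker\n\ngpu_tracker = MemTracker(detail=True, device=0)\n\ngpu_tracker.track()  # baseline snapshot\nx = torch.randn(1024, 1024, device=\"cuda\")\ngpu_tracker.track()  # logs the newly allocated tensor and total allocated memory\n\ndel x\ngpu_tracker.clear_cache()\ngpu_tracker.track()  # snapshot after freeing the tensor and emptying the CUDA cache\n"
  },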
  {
    "path": "inference.gemma.infini.py",
    "content": "import os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nimport torch\nfrom transformers import AutoTokenizer\nfrom infini_gemma import GemmaForCausalLM\n\n# from gpu_mem_track import MemTracker\n\n# gpu_tracker = MemTracker()\n\n\ndef generate_text_with_stateful_segments(\n    model,\n    tokenizer,\n    prompt_text,\n    max_length=300,\n    segment_length=2048,\n    temperature=1.0,\n):\n    # gpu_tracker.track()\n    model.eval()\n    # gpu_tracker.track()\n\n    # Encode the prompt text\n    encoded_input = tokenizer(prompt_text, return_tensors=\"pt\")\n    input_ids = encoded_input[\"input_ids\"]\n\n    original_length = len(input_ids[0])  # Get the original length of the prompt\n    print(\"Original seq len:\", original_length)\n\n    # Initialize memory and norm_term\n    memory, norm_term = None, None\n\n    # Manage long initial prompts by processing them in segments\n    if input_ids.size(1) > segment_length:\n        # gpu_tracker.track()\n        print(\"Processing prompt in segments\")\n        num_segments = input_ids.size(1) // segment_length\n        for i in range(num_segments):\n            segment = input_ids[:, i * segment_length : (i + 1) * segment_length]\n            # gpu_tracker.track()\n            outputs = model(\n                input_ids=segment.to(model.device), memory=memory, norm_term=norm_term\n            )\n            # gpu_tracker.track()\n            memory = outputs.memory\n            norm_term = outputs.norm_term\n            # gpu_tracker.track()\n        # Handle leftover tokens\n        # leftover = input_ids.size(1) % segment_length\n        # if leftover > 0:\n        #     segment = input_ids[:, -leftover:]\n        #     outputs = model(input_ids=segment.to(model.device), memory=memory, norm_term=norm_term)\n        #     memory = outputs.memory\n        #     norm_term = outputs.norm_term\n        print(\"Prompt/Segments processed, starting generation\")\n    else:\n        print(\"Short, single-segment prompt, start generation now.\")\n    # Initialize the generation with the full prompt or the last processed segment\n    generated_sequence = input_ids\n    print(\"Target seq len:\", original_length + max_length)\n    while generated_sequence.size(1) < original_length + max_length:\n        # print(\"generated_sequence.size(1):\", generated_sequence.size(1))\n        past = None\n        # if generated_sequence.size(1) over segment_length, re-compute memory and norm_term\n        if generated_sequence.size(1) % segment_length == 0:\n            input_segment = generated_sequence[:, -segment_length:]\n            # gpu_tracker.track()\n            outputs = model(\n                input_ids=input_segment.to(model.device),\n                memory=memory,\n                norm_term=norm_term,\n            )\n            # gpu_tracker.track()\n            # Update memory and norm_term for the next, new segment\n            memory = outputs.memory\n            norm_term = outputs.norm_term\n\n            # gpu_tracker.track()\n            next_token_logits = outputs.logits[:, -1, :]\n            scaled_logits = next_token_logits / temperature\n            probs = torch.nn.functional.softmax(scaled_logits, dim=-1)\n            next_token = torch.multinomial(probs, num_samples=1).detach()\n\n            # Append to the generated sequence\n            generated_sequence = torch.cat(\n                (generated_sequence, next_token.to(\"cpu\")), dim=1\n            )\n            # gpu_tracker.track()\n        else:\n            
leftover = generated_sequence.size(1) % segment_length\n            input_segment = generated_sequence[:, -leftover:]  # Use the last segment\n\n            # gpu_tracker.track()\n            outputs = model(\n                input_ids=input_segment.to(model.device),\n                memory=memory,\n                norm_term=norm_term,\n                no_memory_update=True,\n                use_cache=True,\n                past_key_values=past,\n            )\n            past = outputs.past_key_values\n            # gpu_tracker.track()\n\n            # Obtain the last token predictions and sample\n            next_token_logits = outputs.logits[:, -1, :]\n            scaled_logits = next_token_logits / temperature\n            probs = torch.nn.functional.softmax(scaled_logits, dim=-1)\n            next_token = torch.multinomial(probs, num_samples=1).detach()\n\n            # Append to the generated sequence\n            generated_sequence = torch.cat(\n                (generated_sequence, next_token.to(\"cpu\")), dim=1\n            )\n            # gpu_tracker.track()\n\n        # # Break the loop if we reach max_length\n        # if generated_sequence.size(1) >= max_length:\n        #     break\n\n    # Decode the generated tokens to text\n    generated_text = tokenizer.decode(generated_sequence[0], skip_special_tokens=True)\n\n    return generated_text.replace(prompt_text, \"\")\n\n\n# Load the model and tokenizer\nmodel_path = \"./models/gemma-2b-infini-noclm-minipile/step_3000\"\ntokenizer = AutoTokenizer.from_pretrained(model_path)\nmodel = GemmaForCausalLM.from_pretrained(\n    model_path,\n    torch_dtype=torch.bfloat16,\n    device_map={\"\": 0},\n)\nprint(model)\nprint(model.dtype)\n\n\n# # Sample prompt\nprompt_text = \"Once upon a time\"\n\n# Generate text\nwith torch.no_grad():\n    generated_text = generate_text_with_stateful_segments(\n        model, tokenizer, prompt_text, max_length=512, temperature=0.8\n    )\n    print(\"Short-Generated(512) Text: \\n\", generated_text)\n\n    print(\"-\" * 40)\n\n# Sample prompt\nprompt_text = \"Once upon a time\"\n\n# Generate text\nwith torch.no_grad():\n    generated_text = generate_text_with_stateful_segments(\n        model, tokenizer, prompt_text, max_length=3000, temperature=0.8\n    )\n    print(\"Long-Generated(3000) Text: \\n\", generated_text)\n\nprint(\"-\" * 40)\n\nlong_text = \"\"\"Alice was beginning to get very tired of sitting by her sister on the bank, and of having nothing to do: once or twice she had peeped into the book her sister was reading, but it had no pictures or conversations in it, “and what is the use of a book,” thought Alice “without pictures or conversations?”\n\nSo she was considering in her own mind (as well as she could, for the hot day made her feel very sleepy and stupid), whether the pleasure of making a daisy-chain would be worth the trouble of getting up and picking the daisies, when suddenly a White Rabbit with pink eyes ran close by her.\n\nThere was nothing so very remarkable in that; nor did Alice think it so very much out of the way to hear the Rabbit say to itself, “Oh dear! Oh dear! 
I shall be late!” (when she thought it over afterwards, it occurred to her that she ought to have wondered at this, but at the time it all seemed quite natural); but when the Rabbit actually took a watch out of its waistcoat-pocket, and looked at it, and then hurried on, Alice started to her feet, for it flashed across her mind that she had never before seen a rabbit with either a waistcoat-pocket, or a watch to take out of it, and burning with curiosity, she ran across the field after it, and fortunately was just in time to see it pop down a large rabbit-hole under the hedge.\n\nIn another moment down went Alice after it, never once considering how in the world she was to get out again.\n\nThe rabbit-hole went straight on like a tunnel for some way, and then dipped suddenly down, so suddenly that Alice had not a moment to think about stopping herself before she found herself falling down a very deep well.\n\nEither the well was very deep, or she fell very slowly, for she had plenty of time as she went down to look about her and to wonder what was going to happen next. First, she tried to look down and make out what she was coming to, but it was too dark to see anything; then she looked at the sides of the well, and noticed that they were filled with cupboards and book-shelves; here and there she saw maps and pictures hung upon pegs. She took down a jar from one of the shelves as she passed; it was labelled “ORANGE MARMALADE”, but to her great disappointment it was empty: she did not like to drop the jar for fear of killing somebody underneath, so managed to put it into one of the cupboards as she fell past it.\n\n“Well!” thought Alice to herself, “after such a fall as this, I shall think nothing of tumbling down stairs! How brave they’ll all think me at home! Why, I wouldn’t say anything about it, even if I fell off the top of the house!” (Which was very likely true.)\n\nDown, down, down. Would the fall never come to an end? “I wonder how many miles I’ve fallen by this time?” she said aloud. “I must be getting somewhere near the centre of the earth. Let me see: that would be four thousand miles down, I think—” (for, you see, Alice had learnt several things of this sort in her lessons in the schoolroom, and though this was not a very good opportunity for showing off her knowledge, as there was no one to listen to her, still it was good practice to say it over) “—yes, that’s about the right distance—but then I wonder what Latitude or Longitude I’ve got to?” (Alice had no idea what Latitude was, or Longitude either, but thought they were nice grand words to say.)\n\nPresently she began again. “I wonder if I shall fall right through the earth! How funny it’ll seem to come out among the people that walk with their heads downward! The Antipathies, I think—” (she was rather glad there was no one listening, this time, as it didn’t sound at all the right word) “—but I shall have to ask them what the name of the country is, you know. Please, Ma’am, is this New Zealand or Australia?” (and she tried to curtsey as she spoke—fancy curtseying as you’re falling through the air! Do you think you could manage it?) “And what an ignorant little girl she’ll think me for asking! No, it’ll never do to ask: perhaps I shall see it written up somewhere.”\n\nDown, down, down. There was nothing else to do, so Alice soon began talking again. “Dinah’ll miss me very much to-night, I should think!” (Dinah was the cat.) “I hope they’ll remember her saucer of milk at tea-time. Dinah my dear! 
I wish you were down here with me! There are no mice in the air, I’m afraid, but you might catch a bat, and that’s very like a mouse, you know. But do cats eat bats, I wonder?” And here Alice began to get rather sleepy, and went on saying to herself, in a dreamy sort of way, “Do cats eat bats? Do cats eat bats?” and sometimes, “Do bats eat cats?” for, you see, as she couldn’t answer either question, it didn’t much matter which way she put it. She felt that she was dozing off, and had just begun to dream that she was walking hand in hand with Dinah, and saying to her very earnestly, “Now, Dinah, tell me the truth: did you ever eat a bat?” when suddenly, thump! thump! down she came upon a heap of sticks and dry leaves, and the fall was over.\n\nAlice was not a bit hurt, and she jumped up on to her feet in a moment: she looked up, but it was all dark overhead; before her was another long passage, and the White Rabbit was still in sight, hurrying down it. There was not a moment to be lost: away went Alice like the wind, and was just in time to hear it say, as it turned a corner, “Oh my ears and whiskers, how late it’s getting!” She was close behind it when she turned the corner, but the Rabbit was no longer to be seen: she found herself in a long, low hall, which was lit up by a row of lamps hanging from the roof.\n\nThere were doors all round the hall, but they were all locked; and when Alice had been all the way down one side and up the other, trying every door, she walked sadly down the middle, wondering how she was ever to get out again.\n\nSuddenly she came upon a little three-legged table, all made of solid glass; there was nothing on it except a tiny golden key, and Alice’s first thought was that it might belong to one of the doors of the hall; but, alas! either the locks were too large, or the key was too small, but at any rate it would not open any of them. However, on the second time round, she came upon a low curtain she had not noticed before, and behind it was a little door about fifteen inches high: she tried the little golden key in the lock, and to her great delight it fitted!\n\nAlice opened the door and found that it led into a small passage, not much larger than a rat-hole: she knelt down and looked along the passage into the loveliest garden you ever saw. How she longed to get out of that dark hall, and wander about among those beds of bright flowers and those cool fountains, but she could not even get her head through the doorway; “and even if my head would go through,” thought poor Alice, “it would be of very little use without my shoulders. Oh, how I wish I could shut up like a telescope! I think I could, if I only knew how to begin.” For, you see, so many out-of-the-way things had happened lately, that Alice had begun to think that very few things indeed were really impossible.\n\nThere seemed to be no use in waiting by the little door, so she went back to the table, half hoping she might find another key on it, or at any rate a book of rules for shutting people up like telescopes: this time she found a little bottle on it, (“which certainly was not here before,” said Alice,) and round the neck of the bottle was a paper label, with the words “DRINK ME,” beautifully printed on it in large letters.\n\nIt was all very well to say “Drink me,” but the wise little Alice was not going to do that in a hurry. 
“No, I’ll look first,” she said, “and see whether it’s marked ‘poison’ or not”; for she had read several nice little histories about children who had got burnt, and eaten up by wild beasts and other unpleasant things, all because they would not remember the simple rules their friends had taught them: such as, that a red-hot poker will burn you if you hold it too long; and that if you cut your finger very deeply with a knife, it usually bleeds; and she had never forgotten that, if you drink much from a bottle marked “poison,” it is almost certain to disagree with you, sooner or later.\n\nHowever, this bottle was not marked “poison,” so Alice ventured to taste it, and finding it very nice, (it had, in fact, a sort of mixed flavour of cherry-tart, custard, pine-apple, roast turkey, toffee, and hot buttered toast,) she very soon finished it off.\n\n“What a curious feeling!” said Alice; “I must be shutting up like a telescope.”\n\nAnd so it was indeed: she was now only ten inches high, and her face brightened up at the thought that she was now the right size for going through the little door into that lovely garden. First, however, she waited for a few minutes to see if she was going to shrink any further: she felt a little nervous about this; “for it might end, you know,” said Alice to herself, “in my going out altogether, like a candle. I wonder what I should be like then?” And she tried to fancy what the flame of a candle is like after the candle is blown out, for she could not remember ever having seen such a thing.\n\nAfter a while, finding that nothing more happened, she decided on going into the garden at once; but, alas for poor Alice! when she got to the door, she found she had forgotten the little golden key, and when she went back to the table for it, she found she could not possibly reach it: she could see it quite plainly through the glass, and she tried her best to climb up one of the legs of the table, but it was too slippery; and when she had tired herself out with trying, the poor little thing sat down and cried.\n\n“Come, there’s no use in crying like that!” said Alice to herself, rather sharply; “I advise you to leave off this minute!” She generally gave herself very good advice, (though she very seldom followed it), and sometimes she scolded herself so severely as to bring tears into her eyes; and once she remembered trying to box her own ears for having cheated herself in a game of croquet she was playing against herself, for this curious child was very fond of pretending to be two people. “But it’s no use now,” thought poor Alice, “to pretend to be two people! Why, there’s hardly enough of me left to make one respectable person!”\n\nSoon her eye fell on a little glass box that was lying under the table: she opened it, and found in it a very small cake, on which the words “EAT ME” were beautifully marked in currants. “Well, I’ll eat it,” said Alice, “and if it makes me grow larger, I can reach the key; and if it makes me grow smaller, I can creep under the door; so either way I’ll get into the garden, and I don’t care which happens!”\n\nShe ate a little bit, and said anxiously to herself, “Which way? 
Which way?”, holding her hand on the top of her head to feel which way it was growing, and she was quite surprised to find that she remained the same size: to be sure, this generally happens when one eats cake, but Alice had got so much into the way of expecting nothing but out-of-the-way things to happen, that it seemed quite dull and stupid for life to go on in the common way.\n\nSo she set to work, and very soon finished off the cake.\n\n“Curiouser and curiouser!” cried Alice (she was so much surprised, that for the moment she quite forgot how to speak good English); “now I’m opening out like the largest telescope that ever was! Good-bye, feet!” (for when she looked down at her feet, they seemed to be almost out of sight, they were getting so far off). “Oh, my poor little feet, I wonder who will put on your shoes and stockings for you now, dears? I’m sure I shan’t be able! I shall be a great deal too far off to trouble myself about you: you must manage the best way you can;—but I must be kind to them,” thought Alice, “or perhaps they won’t walk the way I want to go! Let me see: I’ll give them a new pair of boots every Christmas.”\n\nAnd she went on planning to herself how she would manage it. “They must go by the carrier,” she thought; “and how funny it’ll seem, sending presents to one’s own feet! And how odd the directions will look!\n\n     Alice’s Right Foot, Esq.,\n       Hearthrug,\n         near the Fender,\n           (with Alice’s love).\nOh dear, what nonsense I’m talking!”\n\nJust then her head struck against the roof of the hall: in fact she was now more than nine feet high, and she at once took up the little golden key and hurried off to the garden door.\n\nPoor Alice! It was as much as she could do, lying down on one side, to look through into the garden with one eye; but to get through was more hopeless than ever: she sat down and began to cry again.\n\n“You ought to be ashamed of yourself,” said Alice, “a great girl like you,” (she might well say this), “to go on crying in this way! Stop this moment, I tell you!” But she went on all the same, shedding gallons of tears, until there was a large pool all round her, about four inches deep and reaching half down the hall.\n\nAfter a time she heard a little pattering of feet in the distance, and she hastily dried her eyes to see what was coming. It was the White Rabbit returning, splendidly dressed, with a pair of white kid gloves in one hand and a large fan in the other: he came trotting along in a great hurry, muttering to himself as he came, “Oh! the Duchess, the Duchess! Oh! won’t she be savage if I’ve kept her waiting!” Alice felt so desperate that she was ready to ask help of any one; so, when the Rabbit came near her, she began, in a low, timid voice, “If you please, sir—” The Rabbit started violently, dropped the white kid gloves and the fan, and skurried away into the darkness as hard as he could go.\n\nAlice took up the fan and gloves, and, as the hall was very hot, she kept fanning herself all the time she went on talking: “Dear, dear! How queer everything is to-day! And yesterday things went on just as usual. I wonder if I’ve been changed in the night? Let me think: was I the same when I got up this morning? I almost think I can remember feeling a little different. But if I’m not the same, the next question is, Who in the world am I? 
Ah, that’s the great puzzle!” And she began thinking over all the children she knew that were of the same age as herself, to see if she could have been changed for any of them.\n\n“I’m sure I’m not Ada,” she said, “for her hair goes in such long ringlets, and mine doesn’t go in ringlets at all; and I’m sure I can’t be Mabel, for I know all sorts of things, and she, oh! she knows such a very little! Besides, she’s she, and I’m I, and—oh dear, how puzzling it all is! I’ll try if I know all the things I used to know. Let me see: four times five is twelve, and four times six is thirteen, and four times seven is—oh dear! I shall never get to twenty at that rate! However, the Multiplication Table doesn’t signify: let’s try Geography. London is the capital of Paris, and Paris is the capital of Rome, and Rome—no, that’s all wrong, I’m certain! I must have been changed for Mabel! I’ll try and say ‘How doth the little—’” and she crossed her hands on her lap as if she were saying lessons, and began to repeat it, but her voice sounded hoarse and strange, and the words did not come the same as they used to do:—\n\n“How doth the little crocodile\n    Improve his shining tail,\nAnd pour the waters of the Nile\n    On every golden scale!\n\n“How cheerfully he seems to grin,\n    How neatly spread his claws,\nAnd welcome little fishes in\n    With gently smiling jaws!”\n\n“I’m sure those are not the right words,” said poor Alice, and her eyes filled with tears again as she went on, “I must be Mabel after all, and I shall have to go and live in that poky little house, and have next to no toys to play with, and oh! ever so many lessons to learn! No, I’ve made up my mind about it; if I’m Mabel, I’ll stay down here! It’ll be no use their putting their heads down and saying ‘Come up again, dear!’ I shall only look up and say ‘Who am I then? Tell me that first, and then, if I like being that person, I’ll come up: if not, I’ll stay down here till I’m somebody else’—but, oh dear!” cried Alice, with a sudden burst of tears, “I do wish they would put their heads down! I am so very tired of being all alone here!”\n\nAs she said this she looked down at her hands, and was surprised to see that she had put on one of the Rabbit’s little white kid gloves while she was talking. “How can I have done that?” she thought. “I must be growing small again.” She got up and went to the table to measure herself by it, and found that, as nearly as she could guess, she was now about two feet high, and was going on shrinking rapidly: she soon found out that the cause of this was the fan she was holding, and she dropped it hastily, just in time to avoid shrinking away altogether.\n\n“That was a narrow escape!” said Alice, a good deal frightened at the sudden change, but very glad to find herself still in existence; “and now for the garden!” and she ran with all speed back to the little door: but, alas! the little door was shut again, and the little golden key was lying on the glass table as before, “and things are worse than ever,” thought the poor child, “for I never was so small as this before, never! And I declare it’s too bad, that it is!”\n\nAs she said these words her foot slipped, and in another moment, splash! she was up to her chin in salt water. Her first idea was that she had somehow fallen into the sea, “and in that case I can go back by railway,” she said to herself. 
(Alice had been to the seaside once in her life, and had come to the general conclusion, that wherever you go to on the English coast you find a number of bathing machines in the sea, some children digging in the sand with wooden spades, then a row of lodging houses, and behind them a railway station.) However, she soon made out that she was in the pool of tears which she had wept when she was nine feet high.\n\n“I wish I hadn’t cried so much!” said Alice, as she swam about, trying to find her way out. “I shall be punished for it now, I suppose, by being drowned in my own tears! That will be a queer thing, to be sure! However, everything is queer to-day.”\n\nJust then she heard something splashing about in the pool a little way off, and she swam nearer to make out what it was: at first she thought it must be a walrus or hippopotamus, but then she remembered how small she was now, and she soon made out that it was only a mouse that had slipped in like herself.\n\n“Would it be of any use, now,” thought Alice, “to speak to this mouse? Everything is so out-of-the-way down here, that I should think very likely it can talk: at any rate, there’s no harm in trying.” So she began: “O Mouse, do you know the way out of this pool? I am very tired of swimming about here, O Mouse!” (Alice thought this must be the right way of speaking to a mouse: she had never done such a thing before, but she remembered having seen in her brother’s Latin Grammar, “A mouse—of a mouse—to a mouse—a mouse—O mouse!”) The Mouse looked at her rather inquisitively, and seemed to her to wink with one of its little eyes, but it said nothing.\n\n“Perhaps it doesn’t understand English,” thought Alice; “I daresay it’s a French mouse, come over with William the Conqueror.” (For, with all her knowledge of history, Alice had no very clear notion how long ago anything had happened.) So she began again: “Où est ma chatte?” which was the first sentence in her French lesson-book. The Mouse gave a sudden leap out of the water, and seemed to quiver all over with fright. “Oh, I beg your pardon!” cried Alice hastily, afraid that she had hurt the poor animal’s feelings. “I quite forgot you didn’t like cats.”\n\n“Not like cats!” cried the Mouse, in a shrill, passionate voice. “Would you like cats if you were me?”\n\n“Well, perhaps not,” said Alice in a soothing tone: “don’t be angry about it. And yet I wish I could show you our cat Dinah: I think you’d take a fancy to cats if you could only see her. She is such a dear quiet thing,” Alice went on, half to herself, as she swam lazily about in the pool, “and she sits purring so nicely by the fire, licking her paws and washing her face—and she is such a nice soft thing to nurse—and she’s such a capital one for catching mice—oh, I beg your pardon!” cried Alice again, for this time the Mouse was bristling all over, and she felt certain it must be really offended. “We won’t talk about her any more if you’d rather not.”\n\n“We indeed!” cried the Mouse, who was trembling down to the end of his tail. “As if I would talk on such a subject! Our family always hated cats: nasty, low, vulgar things! Don’t let me hear the name again!”\n\n“I won’t indeed!” said Alice, in a great hurry to change the subject of conversation. “Are you—are you fond—of—of dogs?” The Mouse did not answer, so Alice went on eagerly: “There is such a nice little dog near our house I should like to show you! A little bright-eyed terrier, you know, with oh, such long curly brown hair! 
And it’ll fetch things when you throw them, and it’ll sit up and beg for its dinner, and all sorts of things—I can’t remember half of them—and it belongs to a farmer, you know, and he says it’s so useful, it’s worth a hundred pounds! He says it kills all the rats and—oh dear!” cried Alice in a sorrowful tone, “I’m afraid I’ve offended it again!” For the Mouse was swimming away from her as hard as it could go, and making quite a commotion in the pool as it went.\n\nSo she called softly after it,\n\"\"\".strip()\n\n# Generate text: uses 10GB vram\n# with torch.autograd.profiler.profile(use_cuda=True) as prof:\nwith torch.no_grad():\n    generated_text = generate_text_with_stateful_segments(\n        model, tokenizer, long_text, max_length=1000, temperature=0.8\n    )\n    print(\"Long-Generated(5212+1000) Text: \\n\", generated_text)\n"
  },
  {
    "path": "infini_gemma/__init__.py",
    "content": "from .configuration_infini_gemma import GemmaConfig\nfrom .modeling_infini_gemma import GemmaForCausalLM\n"
  },
  {
    "path": "infini_gemma/configuration_infini_gemma.py",
    "content": "from transformers import GemmaConfig as OriginalGemmaConfig\n\n\nclass GemmaConfig(OriginalGemmaConfig):\n    def __init__(\n        self,\n        vocab_size=256000,\n        hidden_size=3072,\n        intermediate_size=24576,\n        num_hidden_layers=28,\n        num_attention_heads=16,\n        num_key_value_heads=16,\n        head_dim=256,\n        hidden_act=\"gelu_pytorch_tanh\",\n        hidden_activation=None,\n        max_position_embeddings=32768,\n        initializer_range=0.02,\n        rms_norm_eps=0.000001,\n        use_cache=True,\n        pad_token_id=0,\n        eos_token_id=1,\n        bos_token_id=2,\n        tie_word_embeddings=True,\n        rope_theta=10000,\n        attention_bias=False,\n        attention_dropout=0,\n        segment_size=2048,\n        **kwargs\n    ):\n        super().__init__(\n            vocab_size,\n            hidden_size,\n            intermediate_size,\n            num_hidden_layers,\n            num_attention_heads,\n            num_key_value_heads,\n            head_dim,\n            hidden_act,\n            hidden_activation,\n            max_position_embeddings,\n            initializer_range,\n            rms_norm_eps,\n            use_cache,\n            pad_token_id,\n            eos_token_id,\n            bos_token_id,\n            tie_word_embeddings,\n            rope_theta,\n            attention_bias,\n            attention_dropout,\n            **kwargs\n        )\n        self.segment_size = segment_size\n"
  },
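  {
    "path": "examples/config_segment_size_example.py",
    "content": "\"\"\"Hypothetical snippet (not part of the original repo) showing the one field the Infini\nGemmaConfig adds on top of the original 🤗Transformers GemmaConfig: `segment_size`, the\nsegment length used by Infini-attention.\n\"\"\"\nfrom infini_gemma.configuration_infini_gemma import GemmaConfig\n\n# All original Gemma fields keep their defaults; only the Infini-attention segment length\n# is set explicitly here.\nconfig = GemmaConfig(segment_size=2048)\nprint(config.segment_size, config.hidden_size, config.max_position_embeddings)\n"
  },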
  {
    "path": "infini_gemma/modeling_infini_gemma.py",
    "content": "# coding=utf-8\n# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Gemma model, with Infini-Attention.\"\"\"\n\nimport os\nimport math\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom transformers.activations import ACT2FN\nfrom transformers.cache_utils import Cache, DynamicCache, StaticCache\nfrom transformers.modeling_attn_mask_utils import (\n    AttentionMaskConverter,\n    _prepare_4d_causal_attention_mask,\n)\nfrom transformers.modeling_outputs import (\n    ModelOutput,\n    SequenceClassifierOutputWithPast,\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.pytorch_utils import (\n    ALL_LAYERNORM_LAYERS,\n    is_torch_greater_or_equal_than_1_13,\n)\nfrom transformers.utils import (\n    add_start_docstrings,\n    add_start_docstrings_to_model_forward,\n    is_flash_attn_2_available,\n    is_flash_attn_greater_or_equal_2_10,\n    logging,\n    replace_return_docstrings,\n)\nfrom transformers.utils.import_utils import is_torch_fx_available\n\nfrom dataclasses import dataclass\nfrom .configuration_infini_gemma import GemmaConfig\n\nDEBUG = os.environ.get(\"DEBUG\", False)\n\n\ndef debug_print(*args):\n    if DEBUG:\n        print(*args)\n\n\nif is_flash_attn_2_available():\n    from flash_attn import flash_attn_func, flash_attn_varlen_func\n    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa\n\n\n# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.\n# It means that the function will not be traced through and simply appear as a node in the graph.\nif is_torch_fx_available():\n    if not is_torch_greater_or_equal_than_1_13:\n        import torch.fx\n\n    _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"GemmaConfig\"\n\n\n@dataclass\nclass InfiniBaseModelOutputWithPast(ModelOutput):\n    \"\"\"\n    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n    Args:\n        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n            Sequence of hidden-states at the output of the last layer of the model.\n\n            If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n            hidden_size)` is output.\n        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 
optionally if\n            `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,\n            encoder_sequence_length, embed_size_per_head)`.\n\n            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n            input) to speed up sequential decoding.\n        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n            sequence_length)`.\n\n            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n            heads.\n    \"\"\"\n\n    last_hidden_state: torch.FloatTensor = None\n    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n    memory: torch.FloatTensor = None\n    norm_term: torch.FloatTensor = None\n\n\n@dataclass\nclass InfiniCausalLMOutputWithPast(ModelOutput):\n    \"\"\"\n    Base class for causal language model (or autoregressive) outputs.\n\n    Args:\n        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n            Language modeling loss (for next-token prediction).\n        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n            `(batch_size, num_heads, sequence_length, embed_size_per_head)`)\n\n            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see\n            `past_key_values` input) to speed up sequential decoding.\n        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, 
num_heads, sequence_length,\n            sequence_length)`.\n\n            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n            heads.\n    \"\"\"\n\n    loss: Optional[torch.FloatTensor] = None\n    logits: torch.FloatTensor = None\n    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n    memory: torch.FloatTensor = None\n    norm_term: torch.FloatTensor = None\n\n\ndef _get_unpad_data(attention_mask):\n    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)\n    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()\n    max_seqlen_in_batch = seqlens_in_batch.max().item()\n    cu_seqlens = F.pad(\n        torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)\n    )\n    return (\n        indices,\n        cu_seqlens,\n        max_seqlen_in_batch,\n    )\n\n\nclass GemmaRMSNorm(nn.Module):\n    def __init__(self, dim: int, eps: float = 1e-6):\n        super().__init__()\n        self.eps = eps\n        self.weight = nn.Parameter(torch.zeros(dim))\n\n    def _norm(self, x):\n        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)\n\n    def forward(self, x):\n        output = self._norm(x.float())\n        # Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16)\n        # See https://github.com/huggingface/transformers/pull/29402\n        output = output * (1.0 + self.weight.float())\n        return output.type_as(x)\n\n\nALL_LAYERNORM_LAYERS.append(GemmaRMSNorm)\n\n\nclass GemmaRotaryEmbedding(nn.Module):\n    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n        super().__init__()\n\n        self.dim = dim\n        self.max_position_embeddings = max_position_embeddings\n        self.base = base\n        self.register_buffer(\"inv_freq\", None, persistent=False)\n\n    @torch.no_grad()\n    def forward(self, x, position_ids, seq_len=None):\n        # x: [bs, num_attention_heads, seq_len, head_size]\n        if self.inv_freq is None:\n            self.inv_freq = 1.0 / (\n                self.base\n                ** (\n                    torch.arange(\n                        0, self.dim, 2, dtype=torch.int64, device=x.device\n                    ).float()\n                    / self.dim\n                )\n            )\n        inv_freq_expanded = (\n            self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)\n        )\n        position_ids_expanded = position_ids[:, None, :].float()\n        # Force float32 since bfloat16 loses precision on long contexts\n        # See https://github.com/huggingface/transformers/pull/29285\n        device_type = x.device.type\n        device_type = (\n            device_type\n            if isinstance(device_type, str) and device_type != \"mps\"\n            else \"cpu\"\n        )\n        with torch.autocast(device_type=device_type, enabled=False):\n            freqs = (\n                inv_freq_expanded.float() @ position_ids_expanded.float()\n            ).transpose(1, 2)\n            emb = torch.cat((freqs, freqs), dim=-1)\n            cos = emb.cos()\n            sin = emb.sin()\n        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)\n\n\n# Copied from transformers.models.llama.modeling_llama.rotate_half\ndef rotate_half(x):\n    \"\"\"Rotates half the hidden dims of the input.\"\"\"\n    
x1 = x[..., : x.shape[-1] // 2]\n    x2 = x[..., x.shape[-1] // 2 :]\n    return torch.cat((-x2, x1), dim=-1)\n\n\n# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n    \"\"\"Applies Rotary Position Embedding to the query and key tensors.\n\n    Args:\n        q (`torch.Tensor`): The query tensor.\n        k (`torch.Tensor`): The key tensor.\n        cos (`torch.Tensor`): The cosine part of the rotary embedding.\n        sin (`torch.Tensor`): The sine part of the rotary embedding.\n        position_ids (`torch.Tensor`, *optional*):\n            Deprecated and unused.\n        unsqueeze_dim (`int`, *optional*, defaults to 1):\n            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have\n            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\n    Returns:\n        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.\n    \"\"\"\n    cos = cos.unsqueeze(unsqueeze_dim)\n    sin = sin.unsqueeze(unsqueeze_dim)\n    q_embed = (q * cos) + (rotate_half(q) * sin)\n    k_embed = (k * cos) + (rotate_half(k) * sin)\n    return q_embed, k_embed\n\n\nclass GemmaMLP(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.hidden_size = config.hidden_size\n        self.intermediate_size = config.intermediate_size\n        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n        if config.hidden_activation is None:\n            logger.warning_once(\n                \"Gemma's activation function should be approximate GeLU and not exact GeLU.\\n\"\n                \"Changing the activation function to `gelu_pytorch_tanh`.\"\n                f\"if you want to use the legacy `{config.hidden_act}`, \"\n                f\"edit the `model.config` to set `hidden_activation={config.hidden_act}` \"\n                \"  instead of `hidden_act`. See https://github.com/huggingface/transformers/pull/29402 for more details.\"\n            )\n            hidden_activation = \"gelu_pytorch_tanh\"\n        else:\n            hidden_activation = config.hidden_activation\n        self.act_fn = ACT2FN[hidden_activation]\n\n    def forward(self, x):\n        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n\n# Copied from transformers.models.llama.modeling_llama.repeat_kv\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n    \"\"\"\n    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch,\n    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n    \"\"\"\n    batch, num_key_value_heads, slen, head_dim = hidden_states.shape\n    if n_rep == 1:\n        return hidden_states\n    hidden_states = hidden_states[:, :, None, :, :].expand(\n        batch, num_key_value_heads, n_rep, slen, head_dim\n    )\n    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)\n\n\nclass GemmaAttention(nn.Module):\n    \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n    # Ignore copy\n    def __init__(self, config: GemmaConfig, layer_idx: Optional[int] = None):\n        super().__init__()\n        self.config = config\n        self.layer_idx = layer_idx\n        if layer_idx is None:\n            logger.warning_once(\n                f\"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will \"\n                \"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` \"\n                \"when creating this class.\"\n            )\n\n        self.attention_dropout = config.attention_dropout\n        self.hidden_size = config.hidden_size\n        self.num_heads = config.num_attention_heads\n        self.head_dim = config.head_dim\n        self.num_key_value_heads = config.num_key_value_heads\n        self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n        self.max_position_embeddings = config.max_position_embeddings\n        self.rope_theta = config.rope_theta\n        self.is_causal = True\n\n        if self.hidden_size % self.num_heads != 0:\n            raise ValueError(\n                f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n                f\" and `num_heads`: {self.num_heads}).\"\n            )\n\n        self.q_proj = nn.Linear(\n            self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias\n        )\n        self.k_proj = nn.Linear(\n            self.hidden_size,\n            self.num_key_value_heads * self.head_dim,\n            bias=config.attention_bias,\n        )\n        self.v_proj = nn.Linear(\n            self.hidden_size,\n            self.num_key_value_heads * self.head_dim,\n            bias=config.attention_bias,\n        )\n        self.o_proj = nn.Linear(\n            self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias\n        )\n        self.rotary_emb = GemmaRotaryEmbedding(\n            self.head_dim,\n            max_position_embeddings=self.max_position_embeddings,\n            base=self.rope_theta,\n        )\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = 
key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin, None\n        )\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n        attn_weights = torch.matmul(\n            query_states, key_states.transpose(2, 3)\n        ) / math.sqrt(self.head_dim)\n\n        if attention_mask is not None:  # no matter the length, we just slice it\n            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]\n            attn_weights = attn_weights + causal_mask\n\n        # upcast attention to fp32\n        attn_weights = nn.functional.softmax(\n            attn_weights, dim=-1, dtype=torch.float32\n        ).to(query_states.dtype)\n        attn_weights = nn.functional.dropout(\n            attn_weights, p=self.attention_dropout, training=self.training\n        )\n        attn_output = torch.matmul(attn_weights, value_states)\n\n        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n            raise ValueError(\n                f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n                f\" {attn_output.size()}\"\n            )\n\n        attn_output = attn_output.transpose(1, 2).contiguous()\n\n        attn_output = attn_output.view(bsz, q_len, -1)\n        attn_output = self.o_proj(attn_output)\n\n        if not output_attentions:\n            attn_weights = None\n\n        return attn_output, attn_weights, past_key_value\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 with Llama->Gemma\nclass GemmaFlashAttention2(GemmaAttention):\n    \"\"\"\n    Gemma flash attention module. This module inherits from `GemmaAttention` as the weights of the module stays\n    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of\n    flash attention and deal with padding tokens in case the input contains any of them.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.\n        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. 
Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.\n        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).\n        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()\n\n    # Ignore copy\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.LongTensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        output_attentions = False\n\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        # Flash attention requires the input to have the shape\n        # batch_size x seq_length x head_dim x hidden_dim\n        # therefore we just need to keep the original shape\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin, None\n        )\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache\n        # to be able to avoid many of these transpose/reshape/view.\n        query_states = query_states.transpose(1, 2)\n        key_states = key_states.transpose(1, 2)\n        value_states = value_states.transpose(1, 2)\n\n        dropout_rate = self.attention_dropout if self.training else 0.0\n\n        # In PEFT, usually we cast the layer norms in float32 for training stability reasons\n        # therefore the input hidden states gets silently casted in float32. Hence, we need\n        # cast them back in the correct dtype just to be sure everything works as expected.\n        # This might slowdown training & inference so it is recommended to not cast the LayerNorms\n        # in fp32. 
(GemmaRMSNorm handles it correctly)\n\n        input_dtype = query_states.dtype\n        if input_dtype == torch.float32:\n            if torch.is_autocast_enabled():\n                target_dtype = torch.get_autocast_gpu_dtype()\n            # Handle the case where the model is quantized\n            elif hasattr(self.config, \"_pre_quantization_dtype\"):\n                target_dtype = self.config._pre_quantization_dtype\n            else:\n                target_dtype = self.q_proj.weight.dtype\n\n            logger.warning_once(\n                f\"The input hidden states seem to be silently cast to float32; this might be related to\"\n                f\" the fact that you have upcast embedding or layer norm layers to float32. We will cast the input\"\n                f\" back to {target_dtype}.\"\n            )\n\n            query_states = query_states.to(target_dtype)\n            key_states = key_states.to(target_dtype)\n            value_states = value_states.to(target_dtype)\n\n        attn_output = self._flash_attention_forward(\n            query_states,\n            key_states,\n            value_states,\n            attention_mask,\n            q_len,\n            dropout=dropout_rate,\n        )\n\n        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()\n        attn_output = self.o_proj(attn_output)\n\n        if not output_attentions:\n            attn_weights = None\n\n        return attn_output, attn_weights, past_key_value\n\n    def _flash_attention_forward(\n        self,\n        query_states,\n        key_states,\n        value_states,\n        attention_mask,\n        query_length,\n        dropout=0.0,\n        softmax_scale=None,\n    ):\n        \"\"\"\n        Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,\n        the input is first unpadded, the attention scores are computed, and the final attention output is padded back\n        to the original shape.\n\n        Args:\n            query_states (`torch.Tensor`):\n                Input query states to be passed to Flash Attention API\n            key_states (`torch.Tensor`):\n                Input key states to be passed to Flash Attention API\n            value_states (`torch.Tensor`):\n                Input value states to be passed to Flash Attention API\n            attention_mask (`torch.Tensor`):\n                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the\n                position of padding tokens and 1 for the position of non-padding tokens.\n            dropout (`float`):\n                Attention dropout\n            softmax_scale (`float`, *optional*):\n                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)\n        \"\"\"\n        if not self._flash_attn_uses_top_left_mask:\n            causal = self.is_causal\n        else:\n            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. 
For details, please see the comment in GemmaFlashAttention2 __init__.\n            causal = self.is_causal and query_length != 1\n\n        # Contains at least one padding token in the sequence\n        if attention_mask is not None:\n            batch_size = query_states.shape[0]\n            (\n                query_states,\n                key_states,\n                value_states,\n                indices_q,\n                cu_seq_lens,\n                max_seq_lens,\n            ) = self._upad_input(\n                query_states, key_states, value_states, attention_mask, query_length\n            )\n\n            cu_seqlens_q, cu_seqlens_k = cu_seq_lens\n            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens\n\n            attn_output_unpad = flash_attn_varlen_func(\n                query_states,\n                key_states,\n                value_states,\n                cu_seqlens_q=cu_seqlens_q,\n                cu_seqlens_k=cu_seqlens_k,\n                max_seqlen_q=max_seqlen_in_batch_q,\n                max_seqlen_k=max_seqlen_in_batch_k,\n                dropout_p=dropout,\n                softmax_scale=softmax_scale,\n                causal=causal,\n            )\n\n            attn_output = pad_input(\n                attn_output_unpad, indices_q, batch_size, query_length\n            )\n        else:\n            attn_output = flash_attn_func(\n                query_states,\n                key_states,\n                value_states,\n                dropout,\n                softmax_scale=softmax_scale,\n                causal=causal,\n            )\n\n        return attn_output\n\n    def _upad_input(\n        self, query_layer, key_layer, value_layer, attention_mask, query_length\n    ):\n        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)\n        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape\n\n        key_layer = index_first_axis(\n            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),\n            indices_k,\n        )\n        value_layer = index_first_axis(\n            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),\n            indices_k,\n        )\n        if query_length == kv_seq_len:\n            query_layer = index_first_axis(\n                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim),\n                indices_k,\n            )\n            cu_seqlens_q = cu_seqlens_k\n            max_seqlen_in_batch_q = max_seqlen_in_batch_k\n            indices_q = indices_k\n        elif query_length == 1:\n            max_seqlen_in_batch_q = 1\n            cu_seqlens_q = torch.arange(\n                batch_size + 1, dtype=torch.int32, device=query_layer.device\n            )  # There is a memcpy here, that is very bad.\n            indices_q = cu_seqlens_q[:-1]\n            query_layer = query_layer.squeeze(1)\n        else:\n            # The -q_len: slice assumes left padding.\n            attention_mask = attention_mask[:, -query_length:]\n            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(\n                query_layer, attention_mask\n            )\n\n        return (\n            query_layer,\n            key_layer,\n            value_layer,\n            indices_q,\n            (cu_seqlens_q, cu_seqlens_k),\n            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),\n        )\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with 
Llama->Gemma\nclass GemmaSdpaAttention(GemmaAttention):\n    \"\"\"\n    Gemma attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from\n    `GemmaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to\n    SDPA API.\n    \"\"\"\n\n    # Ignore copy\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        if output_attentions:\n            # TODO: Improve this warning with e.g. `model.config.attn_implementation = \"manual\"` once this is implemented.\n            logger.warning_once(\n                \"GemmaModel is using GemmaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, \"\n                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation=\"eager\"` when loading the model.'\n            )\n            return super().forward(\n                hidden_states=hidden_states,\n                attention_mask=attention_mask,\n                position_ids=position_ids,\n                past_key_value=past_key_value,\n                output_attentions=output_attentions,\n                use_cache=use_cache,\n                cache_position=cache_position,\n            )\n\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin, None\n        )\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n        causal_mask = attention_mask\n        if attention_mask is not None:\n            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]\n\n        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,\n        # Reference: 
https://github.com/pytorch/pytorch/issues/112577.\n        if query_states.device.type == \"cuda\" and causal_mask is not None:\n            query_states = query_states.contiguous()\n            key_states = key_states.contiguous()\n            value_states = value_states.contiguous()\n\n        attn_output = torch.nn.functional.scaled_dot_product_attention(\n            query_states,\n            key_states,\n            value_states,\n            attn_mask=causal_mask,\n            dropout_p=self.attention_dropout if self.training else 0.0,\n        )\n\n        attn_output = attn_output.transpose(1, 2).contiguous()\n        attn_output = attn_output.view(bsz, q_len, -1)\n\n        attn_output = self.o_proj(attn_output)\n\n        return attn_output, None, past_key_value\n\n\nclass GemmaInfiniAttention(GemmaAttention):\n    def __init__(\n        self,\n        config: GemmaConfig,\n        layer_idx: Optional[int] = None,\n    ):\n        super().__init__(config, layer_idx)\n\n        # Each head has its own gate\n        # init with -100 to make it close to 0 effect at the beginning\n        self.gate = nn.Parameter(torch.full((1, self.num_heads, 1, 1), -100.0))\n        self.segment_size = config.segment_size\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        memory: Optional[torch.Tensor] = None,\n        norm_term: Optional[torch.Tensor] = None,\n        no_memory_update: bool = False,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        segment = hidden_states  # no need to split in TYPE-2 implementation\n\n        # Pre-allocate tensor for all outputs\n        bsz, _, hidden_dim = hidden_states.size()\n\n        query_states = self.q_proj(segment)\n        key_states = self.k_proj(segment)\n        value_states = self.v_proj(segment)\n\n        # Assuming the presence of batch size and dimension handling as before\n        bsz, q_len, _ = segment.size()  # q_len == self.segment_size\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        # memory and norm_term should use layer_idx to store the memory and norm_term\n        if no_memory_update:\n            memory = {}\n            norm_term = {}\n            memory_output = None\n        else:\n            # Infini Attention memory does not use PE\n            # Memory retrieval and attention calculation per segment\n            memory_output = self._retrieve_from_memory(\n                query_states,\n                memory.get(self.layer_idx, None) if memory is not None else None,\n                norm_term.get(self.layer_idx, None) if norm_term is not None else None,\n            )\n            debug_print(\"Memory Output Shape:\", memory_output.shape)\n\n        # Update memory with current segment's key and value states\n        if no_memory_update:\n            # do not update memory\n            
pass\n        else:\n            updated_memory, updated_norm_term = self._update_memory(\n                key_states,\n                value_states,\n                memory.get(self.layer_idx, None) if memory is not None else None,\n                norm_term.get(self.layer_idx, None) if norm_term is not None else None,\n            )\n            debug_print(\"Updated Memory Shape:\", updated_memory.shape)\n            debug_print(\"Updated Norm Term Shape:\", updated_norm_term.shape)\n            if memory is None and norm_term is None:\n                memory = {}\n                norm_term = {}\n            memory[self.layer_idx] = updated_memory.detach()\n            norm_term[self.layer_idx] = updated_norm_term.detach()\n\n        # Rotary embeddings, set seq_len to q_len as we are processing a segment\n        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=q_len)\n\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states,\n            key_states,\n            cos[:, : min(self.segment_size, q_len), :],\n            sin[:, : min(self.segment_size, q_len), :],\n            None,\n        )\n\n        # Basic cache\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\n                \"sin\": sin,\n                \"cos\": cos,\n                \"cache_position\": cache_position,\n            }\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        # GQA\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n        causal_mask = attention_mask\n        if attention_mask is not None:\n            causal_mask = causal_mask[\n                :, :, : min(self.segment_size, q_len), : key_states.shape[-2]\n            ]  # FIXME: This is wrong, should be [:, :, :, :self.segment_size]\n\n        debug_print(\"causal_mask.shape\", causal_mask.shape)\n        debug_print(\"query_states.shape\", query_states.shape)\n\n        attn_output = torch.nn.functional.scaled_dot_product_attention(\n            query_states,\n            key_states,\n            value_states,\n            attn_mask=causal_mask,\n            dropout_p=self.attention_dropout if self.training else 0.0,\n        )\n\n        if memory_output is None:\n            combined_output = attn_output\n        else:\n            combined_output = (\n                F.sigmoid(self.gate) * memory_output\n                + (1 - F.sigmoid(self.gate)) * attn_output\n            )\n\n        # Prepare output for this segment\n        combined_output = combined_output.transpose(1, 2).contiguous()\n        combined_output = combined_output.view(bsz, q_len, self.hidden_size)\n\n        final_output = self.o_proj(combined_output)\n\n        if no_memory_update:\n            memory = None\n            norm_term = None\n\n        return (\n            final_output,\n            None,\n            None,\n            memory,\n            norm_term,\n        )\n\n    def _retrieve_from_memory(self, query_states, memory, norm_term):\n        # query_states: [batch_size, num_heads, seq_len, head_dim]\n\n        # Check if memory is initialized\n        if memory is None or norm_term is None:\n            
debug_print(\"[Retrieve] No memory or norm term found\")\n            return torch.zeros_like(query_states)\n\n        debug_print(\"[Retrieve] query_states.shape\", query_states.shape)\n        debug_print(\"[Retrieve] self.memory.shape\", memory.shape)\n\n        # Apply ELU activation\n        query_states = F.elu(query_states) + 1  # ELU activation + 1 for stability\n        memory_output = torch.matmul(query_states, memory)\n\n        debug_print(\"[Retrieve] memory_output.shape\", memory_output.shape)\n        debug_print(\"[Retrieve] self.norm_term.shape\", norm_term.shape)\n\n        # Broadcast norm_term to the shape of query_states, then sum across head_dim for normalization\n        norm_term_broadcastable = torch.matmul(\n            query_states,\n            norm_term.transpose(-2, -1),\n        )\n        debug_print(\n            \"[Broadcast] norm_term_broadcastable.shape\", norm_term_broadcastable.shape\n        )\n\n        # Perform division\n        memory_output = memory_output / norm_term_broadcastable\n        return memory_output\n\n    def _update_memory(self, key_states, value_states, memory, norm_term):\n        # key_states: [batch_size, num_heads, seq_len, head_dim]\n        # value_states: [batch_size, num_heads, seq_len, value_dim]\n\n        key_states = F.elu(key_states) + 1  # Apply ELU activation\n\n        if memory is not None:\n            memory = memory + torch.matmul(key_states.transpose(-2, -1), value_states)\n        else:\n            memory = torch.matmul(key_states.transpose(-2, -1), value_states)\n\n        if norm_term is not None:\n            norm_term = norm_term + key_states.sum(\n                dim=2, keepdim=True\n            )  # Update normalization term\n        else:\n            norm_term = key_states.sum(\n                dim=2, keepdim=True\n            )  # Initialize normalization term\n\n        debug_print(\"[Update] self.memory.shape\", memory.shape)\n        debug_print(\"[Update] self.norm_term.shape\", norm_term.shape)\n\n        return memory, norm_term\n\n\n# GEMMA_ATTENTION_CLASSES = {\n#     \"eager\": GemmaInfiniAttention,  # GemmaAttention,\n#     \"flash_attention_2\": GemmaFlashAttention2,\n#     \"sdpa\": GemmaSdpaAttention,\n# }\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer with LLAMA->GEMMA,Llama->Gemma\nclass GemmaDecoderLayer(nn.Module):\n    def __init__(self, config: GemmaConfig, layer_idx: int):\n        super().__init__()\n        self.hidden_size = config.hidden_size\n\n        self.self_attn = GemmaInfiniAttention(  # GEMMA_ATTENTION_CLASSES[config._attn_implementation](\n            config=config, layer_idx=layer_idx\n        )\n\n        self.mlp = GemmaMLP(config)\n        self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n        self.post_attention_layernorm = GemmaRMSNorm(\n            config.hidden_size, eps=config.rms_norm_eps\n        )\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Tuple[torch.Tensor]] = None,\n        output_attentions: Optional[bool] = False,\n        use_cache: Optional[bool] = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        memory: Optional[torch.Tensor] = None,\n        norm_term: Optional[torch.Tensor] = None,\n        no_memory_update: Optional[bool] = False,\n        **kwargs,\n    ) -> Tuple[\n        
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]\n    ]:\n        \"\"\"\n        Args:\n            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n            attention_mask (`torch.FloatTensor`, *optional*):\n                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,\n                query_sequence_length, key_sequence_length)` if default attention is used.\n            output_attentions (`bool`, *optional*):\n                Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n                returned tensors for more detail.\n            use_cache (`bool`, *optional*):\n                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n                (see `past_key_values`).\n            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n        \"\"\"\n        if \"padding_mask\" in kwargs:\n            warnings.warn(\n                \"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.\"\n            )\n\n        residual = hidden_states\n\n        hidden_states = self.input_layernorm(hidden_states)\n\n        # Self Attention\n        _attended = self.self_attn(\n            hidden_states=hidden_states,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_value=past_key_value,\n            output_attentions=output_attentions,\n            use_cache=use_cache,\n            cache_position=cache_position,\n            memory=memory,\n            norm_term=norm_term,\n            no_memory_update=no_memory_update,\n            **kwargs,\n        )\n        hidden_states, self_attn_weights, present_key_value, memory, norm_term = (\n            _attended\n        )\n        hidden_states = residual + hidden_states\n\n        # Fully Connected\n        residual = hidden_states\n        hidden_states = self.post_attention_layernorm(hidden_states)\n        hidden_states = self.mlp(hidden_states)\n        hidden_states = residual + hidden_states\n\n        outputs = (hidden_states,)\n\n        if output_attentions:\n            outputs += (self_attn_weights,)\n\n        if use_cache:\n            outputs += (present_key_value,)\n\n        if memory is not None and norm_term is not None:\n            outputs += (\n                memory,\n                norm_term,\n            )\n\n        return outputs\n\n\nGEMMA_START_DOCSTRING = r\"\"\"\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`GemmaConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. 
Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare Gemma Model outputting raw hidden-states without any specific head on top.\",\n    GEMMA_START_DOCSTRING,\n)\nclass GemmaPreTrainedModel(PreTrainedModel):\n    config_class = GemmaConfig\n    base_model_prefix = \"model\"\n    supports_gradient_checkpointing = True\n    _keep_in_fp32_modules = [\"inv_freq\", \"rotary_emb\", \"cos_cached\", \"sin_cached\"]\n    _no_split_modules = [\"GemmaDecoderLayer\"]\n    _skip_keys_device_placement = [\"past_key_values\", \"causal_mask\"]\n    _supports_flash_attn_2 = True\n    _supports_sdpa = True\n    _supports_cache_class = True\n\n    def _init_weights(self, module):\n        std = self.config.initializer_range\n        if isinstance(module, nn.Linear):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n\n    def _setup_cache(\n        self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None\n    ):\n        if (\n            self.config._attn_implementation == \"flash_attention_2\"\n            and cache_cls == StaticCache\n        ):\n            raise ValueError(\n                \"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` \"\n                \"make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers\"\n            )\n\n        for layer in self.model.layers:\n            weights = layer.self_attn.o_proj.weight\n            layer.self_attn.past_key_value = cache_cls(\n                self.config,\n                max_batch_size,\n                max_cache_len,\n                device=weights.device,\n                dtype=weights.dtype,\n            )\n\n    def _reset_cache(self):\n        for layer in self.model.layers:\n            layer.self_attn.past_key_value = None\n\n\nGEMMA_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n            it.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n            `past_key_values`).\n\n            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n            and modify to your needs. 
See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n            information on the default strategy.\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.n_positions - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n            Two formats are allowed:\n            - a [`~cache_utils.Cache`] instance;\n            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n            cache format.\n\n            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n            legacy cache format will be returned.\n\n            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n            of shape `(batch_size, sequence_length)`.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n            this tensor is not affected by padding. 
It is used to update the cache in the correct position and to infer\n            the complete sequence length.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare Gemma Model outputting raw hidden-states without any specific head on top.\",\n    GEMMA_START_DOCSTRING,\n)\n# Copied from transformers.models.llama.modeling_llama.LlamaModel with LLAMA->GEMMA,Llama->Gemma\nclass GemmaModel(GemmaPreTrainedModel):\n    \"\"\"\n    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`GemmaDecoderLayer`]\n\n    Args:\n        config: GemmaConfig\n    \"\"\"\n\n    def __init__(self, config: GemmaConfig):\n        super().__init__(config)\n        self.padding_idx = config.pad_token_id\n        self.vocab_size = config.vocab_size\n\n        self.embed_tokens = nn.Embedding(\n            config.vocab_size, config.hidden_size, self.padding_idx\n        )\n        self.layers = nn.ModuleList(\n            [\n                GemmaDecoderLayer(config, layer_idx)\n                for layer_idx in range(config.num_hidden_layers)\n            ]\n        )\n        self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n        self.gradient_checkpointing = False\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.embed_tokens = value\n\n    @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)\n    # Ignore copy\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        cache_position: Optional[torch.LongTensor] = None,\n        memory: Optional[torch.Tensor] = None,\n        norm_term: Optional[torch.Tensor] = None,\n        no_memory_update: Optional[bool] = False,\n    ) -> Union[Tuple, InfiniBaseModelOutputWithPast]:\n        output_attentions = (\n            output_attentions\n            if output_attentions is not None\n            else self.config.output_attentions\n        )\n        output_hidden_states = (\n            output_hidden_states\n            if output_hidden_states is not None\n            else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        if (input_ids is None) ^ (inputs_embeds is not None):\n            raise ValueError(\n                \"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one\"\n            )\n\n        if self.gradient_checkpointing and self.training and use_cache:\n            logger.warning_once(\n                \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.\"\n            )\n            use_cache = False\n\n        if inputs_embeds is None:\n            inputs_embeds = self.embed_tokens(input_ids)\n\n        past_seen_tokens = 0\n        if use_cache:  # kept for BC (cache positions)\n            if not isinstance(past_key_values, StaticCache):\n                past_key_values = DynamicCache.from_legacy_cache(past_key_values)\n            past_seen_tokens = past_key_values.get_seq_length()\n\n        if cache_position is None:\n            cache_position = torch.arange(\n                past_seen_tokens,\n                past_seen_tokens + inputs_embeds.shape[1],\n                device=inputs_embeds.device,\n            )\n\n        if position_ids is None:\n            position_ids = cache_position.unsqueeze(0)\n\n        causal_mask = self._update_causal_mask(\n            attention_mask,\n            inputs_embeds,\n            cache_position,\n            past_seen_tokens + inputs_embeds.shape[1],\n        )\n\n        # embed positions\n        hidden_states = inputs_embeds\n\n        # normalized\n        # Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5\n        # See https://github.com/huggingface/transformers/pull/29402\n        normalizer = torch.tensor(\n            self.config.hidden_size**0.5, dtype=hidden_states.dtype\n        )\n        hidden_states = hidden_states * normalizer\n\n        # decoder layers\n        all_hidden_states = () if output_hidden_states else None\n        all_self_attns = () if output_attentions else None\n        next_decoder_cache = None\n\n        for decoder_layer in self.layers:\n            if output_hidden_states:\n                all_hidden_states += (hidden_states,)\n\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(\n                    decoder_layer.__call__,\n                    hidden_states,\n                    causal_mask,\n                    position_ids,\n                    past_key_values,\n                    output_attentions,\n                    use_cache,\n                    cache_position,\n                    memory,  # FIXME?\n                    norm_term,\n                    no_memory_update,\n                )\n            else:\n                layer_outputs = decoder_layer(\n                    hidden_states,\n                    attention_mask=causal_mask,\n                    position_ids=position_ids,\n                    past_key_value=past_key_values,\n                    output_attentions=output_attentions,\n                    use_cache=use_cache,\n                    cache_position=cache_position,\n                    memory=memory,\n                    norm_term=norm_term,\n                    no_memory_update=no_memory_update,\n                )\n\n            hidden_states = layer_outputs[0]\n\n            if use_cache:\n                next_decoder_cache = layer_outputs[2 if output_attentions else 1]\n\n            if output_attentions:\n                all_self_attns += (layer_outputs[1],)\n\n            memory = layer_outputs[-2]\n            norm_term = layer_outputs[-1]\n\n        hidden_states = self.norm(hidden_states)\n\n        # add hidden states from the last decoder layer\n        if output_hidden_states:\n            all_hidden_states += (hidden_states,)\n\n        next_cache = None\n        if use_cache:\n            next_cache = (\n                next_decoder_cache.to_legacy_cache()\n                if 
isinstance(next_decoder_cache, Cache)\n                else next_decoder_cache\n            )\n        if not return_dict:\n            return tuple(\n                v\n                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]\n                if v is not None\n            )\n        return InfiniBaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=next_cache,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attns,\n            memory=memory,\n            norm_term=norm_term,\n        )\n\n    # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static\n    # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.\n    # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using\n    # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114\n    def _update_causal_mask(\n        self, attention_mask, input_tensor, cache_position, current_length\n    ):\n        if self.config._attn_implementation == \"flash_attention_2\":\n            if attention_mask is not None and 0.0 in attention_mask:\n                return attention_mask\n            return None\n\n        dtype, device = input_tensor.dtype, input_tensor.device\n        min_dtype = torch.finfo(dtype).min\n        sequence_length = input_tensor.shape[1]\n        if hasattr(\n            getattr(self.layers[0], \"self_attn\", {}), \"past_key_value\"\n        ):  # static cache\n            target_length = self.config.max_position_embeddings\n        else:  # dynamic cache\n            target_length = (\n                attention_mask.shape[-1]\n                if isinstance(attention_mask, torch.Tensor)\n                else current_length + 1\n            )\n\n        causal_mask = torch.full(\n            (sequence_length, target_length),\n            fill_value=min_dtype,\n            dtype=dtype,\n            device=device,\n        )\n        if sequence_length != 1:\n            causal_mask = torch.triu(causal_mask, diagonal=1)\n        causal_mask *= torch.arange(\n            target_length, device=device\n        ) > cache_position.reshape(-1, 1)\n        causal_mask = causal_mask[None, None, :, :].expand(\n            input_tensor.shape[0], 1, -1, -1\n        )\n        if attention_mask is not None:\n            causal_mask = (\n                causal_mask.clone()\n            )  # copy to contiguous memory for in-place edit\n            if attention_mask.dim() == 2:\n                mask_length = attention_mask.shape[-1]\n                padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[\n                    :, None, None, :\n                ].eq(0.0)\n                causal_mask[..., :mask_length] = causal_mask[\n                    ..., :mask_length\n                ].masked_fill(padding_mask, min_dtype)\n            elif attention_mask.dim() == 4:\n                # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with\n                # cache. 
In that case, the 4D attention mask attends to the newest tokens only.\n                if attention_mask.shape[-2] < cache_position[0] + sequence_length:\n                    offset = cache_position[0]\n                else:\n                    offset = 0\n                mask_shape = attention_mask.shape\n                mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype\n                causal_mask[\n                    : mask_shape[0],\n                    : mask_shape[1],\n                    offset : mask_shape[2] + offset,\n                    : mask_shape[3],\n                ] = mask_slice\n\n        if (\n            self.config._attn_implementation == \"sdpa\"\n            and attention_mask is not None\n            and attention_mask.device.type == \"cuda\"\n        ):\n            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when\n            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.\n            # Details: https://github.com/pytorch/pytorch/issues/110213\n            causal_mask = AttentionMaskConverter._unmask_unattended(\n                causal_mask, min_dtype\n            )\n\n        return causal_mask\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->GEMMA,Llama->Gemma,llama->gemma\nclass GemmaForCausalLM(GemmaPreTrainedModel):\n    _tied_weights_keys = [\"lm_head.weight\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.model = GemmaModel(config)\n        self.vocab_size = config.vocab_size\n        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.model.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.model.embed_tokens = value\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def set_decoder(self, decoder):\n        self.model = decoder\n\n    def get_decoder(self):\n        return self.model\n\n    # Ignore copy\n    @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)\n    @replace_return_docstrings(\n        output_type=InfiniCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC\n    )\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        cache_position: Optional[torch.LongTensor] = None,\n        memory: Optional[torch.Tensor] = None,\n        norm_term: Optional[torch.Tensor] = None,\n        no_memory_update: Optional[bool] = False,\n    ) -> Union[Tuple, InfiniCausalLMOutputWithPast]:\n        r\"\"\"\n        Args:\n            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n                Labels for computing the masked language modeling loss. 
Indices should either be in `[0, ...,\n                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n        Returns:\n\n        Example:\n\n        ```python\n        >>> from transformers import AutoTokenizer, GemmaForCausalLM\n\n        >>> model = GemmaForCausalLM.from_pretrained(\"google/gemma-7b\")\n        >>> tokenizer = AutoTokenizer.from_pretrained(\"google/gemma-7b\")\n\n        >>> prompt = \"What is your favorite condiment?\"\n        >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n        >>> # Generate\n        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n        \"What is your favorite condiment?\"\n        ```\"\"\"\n        output_attentions = (\n            output_attentions\n            if output_attentions is not None\n            else self.config.output_attentions\n        )\n        output_hidden_states = (\n            output_hidden_states\n            if output_hidden_states is not None\n            else self.config.output_hidden_states\n        )\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n        outputs = self.model(\n            input_ids=input_ids,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n            cache_position=cache_position,\n            memory=memory,\n            norm_term=norm_term,\n            no_memory_update=no_memory_update,\n        )\n\n        hidden_states = outputs[0]\n        memory = outputs.memory\n        norm_term = outputs.norm_term\n        logits = self.lm_head(hidden_states)\n        logits = logits.float()\n        loss = None\n        if labels is not None:\n            # Shift so that tokens < n predict n\n            shift_logits = logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss()\n            shift_logits = shift_logits.view(-1, self.config.vocab_size)\n            shift_labels = shift_labels.view(-1)\n            # Enable model parallelism\n            shift_labels = shift_labels.to(shift_logits.device)\n            loss = loss_fct(shift_logits, shift_labels)\n\n        if not return_dict:\n            output = (logits,) + outputs[1:]\n            return (loss,) + output if loss is not None else output\n\n        return InfiniCausalLMOutputWithPast(\n            loss=loss,\n            logits=logits,\n            past_key_values=outputs.past_key_values,\n            hidden_states=outputs.hidden_states,\n            attentions=outputs.attentions,\n            memory=memory,\n            norm_term=norm_term,\n        )\n\n    def prepare_inputs_for_generation(\n        self,\n        input_ids,\n        past_key_values=None,\n        attention_mask=None,\n        inputs_embeds=None,\n        
cache_position=None,\n        **kwargs,\n    ):\n        # With static cache, the `past_key_values` is None\n        # TODO joao: standardize interface for the different Cache classes and remove of this if\n        has_static_cache = False\n        if past_key_values is None:\n            past_key_values = getattr(\n                getattr(self.model.layers[0], \"self_attn\", {}), \"past_key_value\", None\n            )\n            has_static_cache = past_key_values is not None\n\n        past_length = 0\n        if past_key_values is not None:\n            if isinstance(past_key_values, Cache):\n                past_length = (\n                    cache_position[0]\n                    if cache_position is not None\n                    else past_key_values.get_seq_length()\n                )\n                max_cache_length = (\n                    torch.tensor(\n                        past_key_values.get_max_length(), device=input_ids.device\n                    )\n                    if past_key_values.get_max_length() is not None\n                    else None\n                )\n                cache_length = (\n                    past_length\n                    if max_cache_length is None\n                    else torch.min(max_cache_length, past_length)\n                )\n            # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects\n            else:\n                cache_length = past_length = past_key_values[0][0].shape[2]\n                max_cache_length = None\n\n            # Keep only the unprocessed tokens:\n            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where\n            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as\n            # input)\n            if (\n                attention_mask is not None\n                and attention_mask.shape[1] > input_ids.shape[1]\n            ):\n                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]\n            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. 
We can discard\n            # input_ids based on the past_length.\n            elif past_length < input_ids.shape[1]:\n                input_ids = input_ids[:, past_length:]\n            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.\n\n            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.\n            if (\n                max_cache_length is not None\n                and attention_mask is not None\n                and cache_length + input_ids.shape[1] > max_cache_length\n            ):\n                attention_mask = attention_mask[:, -max_cache_length:]\n\n        position_ids = kwargs.get(\"position_ids\", None)\n        if attention_mask is not None and position_ids is None:\n            # create position_ids on the fly for batch generation\n            position_ids = attention_mask.long().cumsum(-1) - 1\n            position_ids.masked_fill_(attention_mask == 0, 1)\n            if past_key_values:\n                position_ids = position_ids[:, -input_ids.shape[1] :]\n\n        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n        if inputs_embeds is not None and past_key_values is None:\n            model_inputs = {\"inputs_embeds\": inputs_embeds}\n        else:\n            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise\n            # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114\n            # TODO: use `next_tokens` directly instead.\n            model_inputs = {\"input_ids\": input_ids.contiguous()}\n\n        input_length = (\n            position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]\n        )\n        if cache_position is None:\n            cache_position = torch.arange(\n                past_length, past_length + input_length, device=input_ids.device\n            )\n        else:\n            cache_position = cache_position[-input_length:]\n\n        if has_static_cache:\n            past_key_values = None\n\n        model_inputs.update(\n            {\n                \"position_ids\": position_ids,\n                \"cache_position\": cache_position,\n                \"past_key_values\": past_key_values,\n                \"use_cache\": kwargs.get(\"use_cache\"),\n                \"attention_mask\": attention_mask,\n            }\n        )\n        return model_inputs\n\n    @staticmethod\n    def _reorder_cache(past_key_values, beam_idx):\n        reordered_past = ()\n        for layer_past in past_key_values:\n            reordered_past += (\n                tuple(\n                    past_state.index_select(0, beam_idx.to(past_state.device))\n                    for past_state in layer_past\n                ),\n            )\n        return reordered_past\n\n\n# Remove: Classifiers\n"
  },
  {
    "path": "infini_llama/__init__.py",
    "content": "from transformers import LlamaConfig  # for convinience\nfrom .modeling_infini_llama import LlamaForCausalLM\n"
  },
  {
    "path": "infini_llama/modeling_infini_llama.py",
    "content": "# coding=utf-8\n# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Llama model, with Infini-Attention.\"\"\"\n\nimport os\nimport math\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom transformers.activations import ACT2FN\nfrom transformers.cache_utils import Cache, DynamicCache, StaticCache\nfrom transformers.modeling_attn_mask_utils import (\n    AttentionMaskConverter,\n    _prepare_4d_causal_attention_mask,\n)\nfrom transformers.modeling_outputs import (\n    ModelOutput,\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.pytorch_utils import (\n    ALL_LAYERNORM_LAYERS,\n    is_torch_greater_or_equal_than_1_13,\n)\nfrom transformers.utils import (\n    add_start_docstrings,\n    add_start_docstrings_to_model_forward,\n    is_flash_attn_2_available,\n    is_flash_attn_greater_or_equal_2_10,\n    logging,\n    replace_return_docstrings,\n)\nfrom transformers.utils.import_utils import is_torch_fx_available\n\nfrom dataclasses import dataclass\nfrom transformers import LlamaConfig\n\nDEBUG = os.environ.get(\"DEBUG\", False)\n\n\ndef debug_print(*args):\n    if DEBUG:\n        print(*args)\n\n\nif is_flash_attn_2_available():\n    from flash_attn import flash_attn_func, flash_attn_varlen_func\n    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa\n\n\n# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.\n# It means that the function will not be traced through and simply appear as a node in the graph.\nif is_torch_fx_available():\n    if not is_torch_greater_or_equal_than_1_13:\n        import torch.fx\n\n    _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"LlamaConfig\"\n\n\n@dataclass\nclass InfiniBaseModelOutputWithPast(ModelOutput):\n    \"\"\"\n    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\n    Args:\n        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n            Sequence of hidden-states at the output of the last layer of the model.\n\n            If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n            hidden_size)` is output.\n        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if\n            
`config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,\n            encoder_sequence_length, embed_size_per_head)`.\n\n            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n            input) to speed up sequential decoding.\n        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n            sequence_length)`.\n\n            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n            heads.\n    \"\"\"\n\n    last_hidden_state: torch.FloatTensor = None\n    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n    memory: torch.FloatTensor = None\n    norm_term: torch.FloatTensor = None\n\n\n@dataclass\nclass InfiniCausalLMOutputWithPast(ModelOutput):\n    \"\"\"\n    Base class for causal language model (or autoregressive) outputs.\n\n    Args:\n        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n            Language modeling loss (for next-token prediction).\n        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n            `(batch_size, num_heads, sequence_length, embed_size_per_head)`)\n\n            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see\n            `past_key_values` input) to speed up sequential decoding.\n        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, 
sequence_length,\n            sequence_length)`.\n\n            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n            heads.\n    \"\"\"\n\n    loss: Optional[torch.FloatTensor] = None\n    logits: torch.FloatTensor = None\n    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n    memory: torch.FloatTensor = None\n    norm_term: torch.FloatTensor = None\n\n\ndef _get_unpad_data(attention_mask):\n    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)\n    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()\n    max_seqlen_in_batch = seqlens_in_batch.max().item()\n    cu_seqlens = F.pad(\n        torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)\n    )\n    return (\n        indices,\n        cu_seqlens,\n        max_seqlen_in_batch,\n    )\n\n\nclass LlamaRMSNorm(nn.Module):\n    def __init__(self, hidden_size, eps=1e-6):\n        \"\"\"\n        LlamaRMSNorm is equivalent to T5LayerNorm\n        \"\"\"\n        super().__init__()\n        self.weight = nn.Parameter(torch.ones(hidden_size))\n        self.variance_epsilon = eps\n\n    def forward(self, hidden_states):\n        input_dtype = hidden_states.dtype\n        hidden_states = hidden_states.to(torch.float32)\n        variance = hidden_states.pow(2).mean(-1, keepdim=True)\n        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)\n        return self.weight * hidden_states.to(input_dtype)\n\n\nALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)\n\n\nclass LlamaRotaryEmbedding(nn.Module):\n    def __init__(\n        self,\n        dim,\n        max_position_embeddings=2048,\n        base=10000,\n        device=None,\n        scaling_factor=1.0,\n    ):\n        super().__init__()\n        self.scaling_factor = scaling_factor\n        self.dim = dim\n        self.max_position_embeddings = max_position_embeddings\n        self.base = base\n        inv_freq = 1.0 / (\n            self.base\n            ** (\n                torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device)\n                / self.dim\n            )\n        )\n        self.register_buffer(\"inv_freq\", inv_freq, persistent=False)\n        # For BC we register cos and sin cached\n        self.max_seq_len_cached = max_position_embeddings\n        t = torch.arange(\n            self.max_seq_len_cached, device=device, dtype=torch.int64\n        ).type_as(self.inv_freq)\n        t = t / self.scaling_factor\n        freqs = torch.outer(t, self.inv_freq)\n        # Different from paper, but it uses a different permutation in order to obtain the same calculation\n        emb = torch.cat((freqs, freqs), dim=-1)\n        self.register_buffer(\n            \"_cos_cached\", emb.cos().to(torch.get_default_dtype()), persistent=False\n        )\n        self.register_buffer(\n            \"_sin_cached\", emb.sin().to(torch.get_default_dtype()), persistent=False\n        )\n\n    @property\n    def sin_cached(self):\n        logger.warning_once(\n            \"The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use \"\n            \"the forward method of RoPE from now on instead. 
It is not used in the `LlamaAttention` class\"\n        )\n        return self._sin_cached\n\n    @property\n    def cos_cached(self):\n        logger.warning_once(\n            \"The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use \"\n            \"the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class\"\n        )\n        return self._cos_cached\n\n    @torch.no_grad()\n    def forward(self, x, position_ids):\n        # x: [bs, num_attention_heads, seq_len, head_size]\n        inv_freq_expanded = (\n            self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)\n        )\n        position_ids_expanded = position_ids[:, None, :].float()\n        # Force float32 since bfloat16 loses precision on long contexts\n        # See https://github.com/huggingface/transformers/pull/29285\n        device_type = x.device.type\n        device_type = (\n            device_type\n            if isinstance(device_type, str) and device_type != \"mps\"\n            else \"cpu\"\n        )\n        with torch.autocast(device_type=device_type, enabled=False):\n            freqs = (\n                inv_freq_expanded.float() @ position_ids_expanded.float()\n            ).transpose(1, 2)\n            emb = torch.cat((freqs, freqs), dim=-1)\n            cos = emb.cos()\n            sin = emb.sin()\n        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)\n\n\nclass LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):\n    \"\"\"LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev\"\"\"\n\n    def forward(self, x, position_ids):\n        # difference to the original RoPE: a scaling factor is aplied to the position ids\n        position_ids = position_ids.float() / self.scaling_factor\n        cos, sin = super().forward(x, position_ids)\n        return cos, sin\n\n\nclass LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):\n    \"\"\"LlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla\"\"\"\n\n    def forward(self, x, position_ids):\n        # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length\n        seq_len = torch.max(position_ids) + 1\n        if seq_len > self.max_position_embeddings:\n            base = self.base * (\n                (self.scaling_factor * seq_len / self.max_position_embeddings)\n                - (self.scaling_factor - 1)\n            ) ** (self.dim / (self.dim - 2))\n            inv_freq = 1.0 / (\n                base\n                ** (\n                    torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device)\n                    / self.dim\n                )\n            )\n            self.register_buffer(\n                \"inv_freq\", inv_freq, persistent=False\n            )  # TODO joao: this may break with compilation\n\n        cos, sin = super().forward(x, position_ids)\n        return cos, sin\n\n\n# Copied from transformers.models.llama.modeling_llama.rotate_half\ndef rotate_half(x):\n    \"\"\"Rotates half the hidden dims of the input.\"\"\"\n    x1 = x[..., : x.shape[-1] // 2]\n    x2 = x[..., x.shape[-1] // 2 :]\n    return torch.cat((-x2, x1), dim=-1)\n\n\n# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n    \"\"\"Applies Rotary Position Embedding to the query and key tensors.\n\n    Args:\n        q (`torch.Tensor`): The query tensor.\n        k (`torch.Tensor`): The key tensor.\n        cos (`torch.Tensor`): The cosine part of the rotary embedding.\n        sin (`torch.Tensor`): The sine part of the rotary embedding.\n        position_ids (`torch.Tensor`, *optional*):\n            Deprecated and unused.\n        unsqueeze_dim (`int`, *optional*, defaults to 1):\n            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\n    Returns:\n        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.\n    \"\"\"\n    cos = cos.unsqueeze(unsqueeze_dim)\n    sin = sin.unsqueeze(unsqueeze_dim)\n    q_embed = (q * cos) + (rotate_half(q) * sin)\n    k_embed = (k * cos) + (rotate_half(k) * sin)\n    return q_embed, k_embed\n\n\nclass LlamaMLP(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.hidden_size = config.hidden_size\n        self.intermediate_size = config.intermediate_size\n        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n        self.act_fn = ACT2FN[config.hidden_act]\n\n    def forward(self, x):\n        if self.config.pretraining_tp > 1:\n            slice = self.intermediate_size // self.config.pretraining_tp\n            gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)\n            up_proj_slices = self.up_proj.weight.split(slice, dim=0)\n            down_proj_slices = self.down_proj.weight.split(slice, dim=1)\n\n            gate_proj = torch.cat(\n                [\n                    F.linear(x, gate_proj_slices[i])\n                    for i in range(self.config.pretraining_tp)\n                ],\n                dim=-1,\n            )\n            up_proj = torch.cat(\n                [\n                    F.linear(x, up_proj_slices[i])\n                    for i in range(self.config.pretraining_tp)\n                ],\n                dim=-1,\n            )\n\n            intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)\n            down_proj = [\n                F.linear(intermediate_states[i], down_proj_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            down_proj = sum(down_proj)\n        else:\n            down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n        return down_proj\n\n\n# Copied from transformers.models.llama.modeling_llama.repeat_kv\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n    \"\"\"\n    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\n    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n    \"\"\"\n    batch, num_key_value_heads, slen, head_dim = hidden_states.shape\n    if n_rep == 1:\n        return hidden_states\n    hidden_states = hidden_states[:, :, None, :, :].expand(\n        batch, num_key_value_heads, n_rep, slen, head_dim\n    )\n    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)\n\n\nclass LlamaAttention(nn.Module):\n    \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n    def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):\n        super().__init__()\n        self.config = config\n        self.layer_idx = layer_idx\n        if layer_idx is None:\n            logger.warning_once(\n                f\"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will \"\n                \"lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` \"\n                \"when creating this class.\"\n            )\n\n        self.attention_dropout = config.attention_dropout\n        self.hidden_size = config.hidden_size\n        self.num_heads = config.num_attention_heads\n        self.head_dim = self.hidden_size // self.num_heads\n        self.num_key_value_heads = config.num_key_value_heads\n        self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n        self.max_position_embeddings = config.max_position_embeddings\n        self.rope_theta = config.rope_theta\n        self.is_causal = True\n\n        if (self.head_dim * self.num_heads) != self.hidden_size:\n            raise ValueError(\n                f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n                f\" and `num_heads`: {self.num_heads}).\"\n            )\n\n        self.q_proj = nn.Linear(\n            self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias\n        )\n        self.k_proj = nn.Linear(\n            self.hidden_size,\n            self.num_key_value_heads * self.head_dim,\n            bias=config.attention_bias,\n        )\n        self.v_proj = nn.Linear(\n            self.hidden_size,\n            self.num_key_value_heads * self.head_dim,\n            bias=config.attention_bias,\n        )\n        self.o_proj = nn.Linear(\n            self.hidden_size, self.hidden_size, bias=config.attention_bias\n        )\n        self._init_rope()\n\n    def _init_rope(self):\n        if self.config.rope_scaling is None:\n            self.rotary_emb = LlamaRotaryEmbedding(\n                self.head_dim,\n                max_position_embeddings=self.max_position_embeddings,\n                base=self.rope_theta,\n            )\n        else:\n            scaling_type = self.config.rope_scaling[\"type\"]\n            scaling_factor = self.config.rope_scaling[\"factor\"]\n            if scaling_type == \"linear\":\n                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(\n                    self.head_dim,\n                    max_position_embeddings=self.max_position_embeddings,\n                    scaling_factor=scaling_factor,\n                    base=self.rope_theta,\n                )\n            elif scaling_type == \"dynamic\":\n                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(\n                    self.head_dim,\n                    max_position_embeddings=self.max_position_embeddings,\n                    scaling_factor=scaling_factor,\n                    base=self.rope_theta,\n                )\n            else:\n                raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        bsz, q_len, _ = hidden_states.size()\n\n        if self.config.pretraining_tp > 1:\n            key_value_slicing = (\n                self.num_key_value_heads * self.head_dim\n            ) // self.config.pretraining_tp\n            query_slices = self.q_proj.weight.split(\n                (self.num_heads * self.head_dim) // 
self.config.pretraining_tp, dim=0\n            )\n            key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)\n            value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)\n\n            query_states = [\n                F.linear(hidden_states, query_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            query_states = torch.cat(query_states, dim=-1)\n\n            key_states = [\n                F.linear(hidden_states, key_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            key_states = torch.cat(key_states, dim=-1)\n\n            value_states = [\n                F.linear(hidden_states, value_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            value_states = torch.cat(value_states, dim=-1)\n\n        else:\n            query_states = self.q_proj(hidden_states)\n            key_states = self.k_proj(hidden_states)\n            value_states = self.v_proj(hidden_states)\n\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n        cos, sin = self.rotary_emb(value_states, position_ids)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin\n        )\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n        attn_weights = torch.matmul(\n            query_states, key_states.transpose(2, 3)\n        ) / math.sqrt(self.head_dim)\n\n        if attention_mask is not None:  # no matter the length, we just slice it\n            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]\n            attn_weights = attn_weights + causal_mask\n\n        # upcast attention to fp32\n        attn_weights = nn.functional.softmax(\n            attn_weights, dim=-1, dtype=torch.float32\n        ).to(query_states.dtype)\n        attn_weights = nn.functional.dropout(\n            attn_weights, p=self.attention_dropout, training=self.training\n        )\n        attn_output = torch.matmul(attn_weights, value_states)\n\n        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n            raise ValueError(\n                f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n                f\" {attn_output.size()}\"\n            )\n\n        attn_output = attn_output.transpose(1, 2).contiguous()\n\n        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n\n        if self.config.pretraining_tp > 1:\n            attn_output = attn_output.split(\n                self.hidden_size // self.config.pretraining_tp, dim=2\n            )\n  
          o_proj_slices = self.o_proj.weight.split(\n                self.hidden_size // self.config.pretraining_tp, dim=1\n            )\n            attn_output = sum(\n                [\n                    F.linear(attn_output[i], o_proj_slices[i])\n                    for i in range(self.config.pretraining_tp)\n                ]\n            )\n        else:\n            attn_output = self.o_proj(attn_output)\n\n        if not output_attentions:\n            attn_weights = None\n\n        return attn_output, attn_weights, past_key_value\n\n\nclass LlamaFlashAttention2(LlamaAttention):\n    \"\"\"\n    Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays\n    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of\n    flash attention and deal with padding tokens in case the input contains any of them.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.\n        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.\n        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).\n        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.LongTensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        output_attentions = False\n\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        # Flash attention requires the input to have the shape\n        # batch_size x seq_length x head_dim x hidden_dim\n        # therefore we just need to keep the original shape\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        cos, sin = self.rotary_emb(value_states, position_ids)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin\n        )\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, 
cache_kwargs\n            )\n\n        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache\n        # to be able to avoid many of these transpose/reshape/view.\n        query_states = query_states.transpose(1, 2)\n        key_states = key_states.transpose(1, 2)\n        value_states = value_states.transpose(1, 2)\n\n        dropout_rate = self.attention_dropout if self.training else 0.0\n\n        # In PEFT, usually we cast the layer norms in float32 for training stability reasons\n        # therefore the input hidden states gets silently casted in float32. Hence, we need\n        # cast them back in the correct dtype just to be sure everything works as expected.\n        # This might slowdown training & inference so it is recommended to not cast the LayerNorms\n        # in fp32. (LlamaRMSNorm handles it correctly)\n\n        input_dtype = query_states.dtype\n        if input_dtype == torch.float32:\n            if torch.is_autocast_enabled():\n                target_dtype = torch.get_autocast_gpu_dtype()\n            # Handle the case where the model is quantized\n            elif hasattr(self.config, \"_pre_quantization_dtype\"):\n                target_dtype = self.config._pre_quantization_dtype\n            else:\n                target_dtype = self.q_proj.weight.dtype\n\n            logger.warning_once(\n                f\"The input hidden states seems to be silently casted in float32, this might be related to\"\n                f\" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in\"\n                f\" {target_dtype}.\"\n            )\n\n            query_states = query_states.to(target_dtype)\n            key_states = key_states.to(target_dtype)\n            value_states = value_states.to(target_dtype)\n\n        attn_output = self._flash_attention_forward(\n            query_states,\n            key_states,\n            value_states,\n            attention_mask,\n            q_len,\n            dropout=dropout_rate,\n        )\n\n        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()\n        attn_output = self.o_proj(attn_output)\n\n        if not output_attentions:\n            attn_weights = None\n\n        return attn_output, attn_weights, past_key_value\n\n    def _flash_attention_forward(\n        self,\n        query_states,\n        key_states,\n        value_states,\n        attention_mask,\n        query_length,\n        dropout=0.0,\n        softmax_scale=None,\n    ):\n        \"\"\"\n        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token\n        first unpad the input, then computes the attention scores and pad the final attention scores.\n\n        Args:\n            query_states (`torch.Tensor`):\n                Input query states to be passed to Flash Attention API\n            key_states (`torch.Tensor`):\n                Input key states to be passed to Flash Attention API\n            value_states (`torch.Tensor`):\n                Input value states to be passed to Flash Attention API\n            attention_mask (`torch.Tensor`):\n                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the\n                position of padding tokens and 1 for the position of non-padding tokens.\n            dropout (`float`):\n                Attention 
dropout\n            softmax_scale (`float`, *optional*):\n                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)\n        \"\"\"\n        if not self._flash_attn_uses_top_left_mask:\n            causal = self.is_causal\n        else:\n            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.\n            causal = self.is_causal and query_length != 1\n\n        # Contains at least one padding token in the sequence\n        if attention_mask is not None:\n            batch_size = query_states.shape[0]\n            (\n                query_states,\n                key_states,\n                value_states,\n                indices_q,\n                cu_seq_lens,\n                max_seq_lens,\n            ) = self._upad_input(\n                query_states, key_states, value_states, attention_mask, query_length\n            )\n\n            cu_seqlens_q, cu_seqlens_k = cu_seq_lens\n            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens\n\n            attn_output_unpad = flash_attn_varlen_func(\n                query_states,\n                key_states,\n                value_states,\n                cu_seqlens_q=cu_seqlens_q,\n                cu_seqlens_k=cu_seqlens_k,\n                max_seqlen_q=max_seqlen_in_batch_q,\n                max_seqlen_k=max_seqlen_in_batch_k,\n                dropout_p=dropout,\n                softmax_scale=softmax_scale,\n                causal=causal,\n            )\n\n            attn_output = pad_input(\n                attn_output_unpad, indices_q, batch_size, query_length\n            )\n        else:\n            attn_output = flash_attn_func(\n                query_states,\n                key_states,\n                value_states,\n                dropout,\n                softmax_scale=softmax_scale,\n                causal=causal,\n            )\n\n        return attn_output\n\n    def _upad_input(\n        self, query_layer, key_layer, value_layer, attention_mask, query_length\n    ):\n        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)\n        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape\n\n        key_layer = index_first_axis(\n            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),\n            indices_k,\n        )\n        value_layer = index_first_axis(\n            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),\n            indices_k,\n        )\n        if query_length == kv_seq_len:\n            query_layer = index_first_axis(\n                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim),\n                indices_k,\n            )\n            cu_seqlens_q = cu_seqlens_k\n            max_seqlen_in_batch_q = max_seqlen_in_batch_k\n            indices_q = indices_k\n        elif query_length == 1:\n            max_seqlen_in_batch_q = 1\n            cu_seqlens_q = torch.arange(\n                batch_size + 1, dtype=torch.int32, device=query_layer.device\n            )  # There is a memcpy here, that is very bad.\n            indices_q = cu_seqlens_q[:-1]\n            query_layer = query_layer.squeeze(1)\n        else:\n            # The -q_len: slice assumes left padding.\n            attention_mask = attention_mask[:, -query_length:]\n            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(\n   
             query_layer, attention_mask\n            )\n\n        return (\n            query_layer,\n            key_layer,\n            value_layer,\n            indices_q,\n            (cu_seqlens_q, cu_seqlens_k),\n            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),\n        )\n\n\nclass LlamaSdpaAttention(LlamaAttention):\n    \"\"\"\n    Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from\n    `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to\n    SDPA API.\n    \"\"\"\n\n    # Adapted from LlamaAttention.forward\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        if output_attentions:\n            # TODO: Improve this warning with e.g. `model.config.attn_implementation = \"manual\"` once this is implemented.\n            logger.warning_once(\n                \"LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, \"\n                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation=\"eager\"` when loading the model.'\n            )\n            return super().forward(\n                hidden_states=hidden_states,\n                attention_mask=attention_mask,\n                position_ids=position_ids,\n                past_key_value=past_key_value,\n                output_attentions=output_attentions,\n                use_cache=use_cache,\n                cache_position=cache_position,\n            )\n\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        cos, sin = self.rotary_emb(value_states, position_ids)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin\n        )\n\n        # In case static cache is used, it is an instance attribute.\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = 
repeat_kv(value_states, self.num_key_value_groups)\n\n        causal_mask = attention_mask\n        if attention_mask is not None:\n            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]\n\n        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,\n        # Reference: https://github.com/pytorch/pytorch/issues/112577.\n        if query_states.device.type == \"cuda\" and causal_mask is not None:\n            query_states = query_states.contiguous()\n            key_states = key_states.contiguous()\n            value_states = value_states.contiguous()\n\n        # In case we are not compiling, we may set `causal_mask` to None, which is required to dispatch to SDPA's Flash Attention 2 backend, rather\n        # relying on the `is_causal` argument.\n        attn_output = torch.nn.functional.scaled_dot_product_attention(\n            query_states,\n            key_states,\n            value_states,\n            attn_mask=causal_mask,\n            dropout_p=self.attention_dropout if self.training else 0.0,\n            is_causal=causal_mask is None and q_len > 1,\n        )\n\n        attn_output = attn_output.transpose(1, 2).contiguous()\n        attn_output = attn_output.view(bsz, q_len, self.hidden_size)\n\n        attn_output = self.o_proj(attn_output)\n\n        return attn_output, None, past_key_value\n\n\nclass LlamaInfiniAttention(LlamaAttention):\n    def __init__(\n        self,\n        config: LlamaConfig,\n        layer_idx: Optional[int] = None,\n    ):\n        super().__init__(config, layer_idx)\n\n        # Each head has its own gate\n        # init with -100 to make it close to 0 effect at the beginning\n        self.gate = nn.Parameter(torch.full((1, self.num_heads, 1, 1), 0.0))\n        # self.segment_size = config.segment_size\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        memory: Optional[dict] = None,\n        norm_term: Optional[dict] = None,\n        no_memory_update: bool = False,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        segment = hidden_states  # no need to split in TYPE-2 implementation\n\n        # Pre-allocate tensor for all outputs\n        bsz, _, hidden_dim = hidden_states.size()\n\n        query_states = self.q_proj(segment)\n        key_states = self.k_proj(segment)\n        value_states = self.v_proj(segment)\n\n        # Assuming the presence of batch size and dimension handling as before\n        bsz, q_len, _ = segment.size()  # q_len == self.segment_size\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        debug_print(\"Query States Shape:\", query_states.shape)\n        debug_print(\"Key States Shape:\", key_states.shape)\n        debug_print(\"Value States Shape:\", value_states.shape)\n\n        # memory and norm_term should 
use layer_idx to store the memory and norm_term\n        if no_memory_update:\n            memory = {}\n            norm_term = {}\n            memory_output = None\n        else:\n            # Infini Attention memory does not use PE\n            # Memory retrieval and attention calculation per segment\n            memory_output = self._retrieve_from_memory(\n                query_states,\n                memory.get(self.layer_idx, None) if memory is not None else None,\n                norm_term.get(self.layer_idx, None) if norm_term is not None else None,\n            )\n            debug_print(\"Memory Output Shape:\", memory_output.shape)\n\n        # Update memory with current segment's key and value states\n        if no_memory_update:\n            # do not update memory\n            pass\n        else:\n            updated_memory, updated_norm_term = self._update_memory(\n                key_states,\n                value_states,\n                memory.get(self.layer_idx, None) if memory is not None else None,\n                norm_term.get(self.layer_idx, None) if norm_term is not None else None,\n            )\n            debug_print(\"Memory Output Shape:\", updated_memory.shape)\n            debug_print(\"Updated Memory Shape:\", updated_norm_term.shape)\n            if memory is None and norm_term is None:\n                memory = {}\n                norm_term = {}\n            memory[self.layer_idx] = updated_memory.detach()\n            norm_term[self.layer_idx] = updated_norm_term.detach()\n\n        # Rotary embeddings, set seq_len to q_len as we are processing a segment\n        cos, sin = self.rotary_emb(value_states, position_ids)\n\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states,\n            key_states,\n            cos,  # cos[:, : min(self.segment_size, q_len), :],\n            sin,  # sin[:, : min(self.segment_size, q_len), :],\n            None,\n        )\n\n        # Basic cache\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\n                \"sin\": sin,\n                \"cos\": cos,\n                \"cache_position\": cache_position,\n            }\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        # GQA\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n        causal_mask = attention_mask\n        if attention_mask is not None:\n            # causal_mask = causal_mask[\n            #     :, :, : min(self.segment_size, q_len), : key_states.shape[-2]\n            # ]  # FIXME: This is wrong, should be [:, :, :, :self.segment_size]\n            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]\n\n        debug_print(\"causal_mask.shape\", causal_mask.shape)\n        debug_print(\"query_states.shape\", query_states.shape)\n\n        attn_output = torch.nn.functional.scaled_dot_product_attention(\n            query_states,\n            key_states,\n            value_states,\n            attn_mask=causal_mask,\n            dropout_p=self.attention_dropout if self.training else 0.0,\n        )\n\n        if memory_output is None:\n            combined_output = attn_output\n        else:\n            combined_output = 
(\n                F.sigmoid(self.gate) * memory_output\n                + (1 - F.sigmoid(self.gate)) * attn_output\n            )\n\n        # Prepare output for this segment\n        combined_output = combined_output.transpose(1, 2).contiguous()\n        combined_output = combined_output.view(bsz, q_len, self.hidden_size)\n\n        final_output = self.o_proj(combined_output)\n\n        if no_memory_update:\n            memory = None\n            norm_term = None\n\n        return (\n            final_output,\n            None,\n            None,\n            memory,\n            norm_term,\n        )\n\n    def _retrieve_from_memory(self, query_states, memory, norm_term):\n        # query_states: [batch_size, num_heads, seq_len, head_dim]\n\n        # Check if memory is initialized\n        if memory is None or norm_term is None:\n            debug_print(\"[Retrieve] No memory or norm term found\")\n            return torch.zeros_like(query_states)\n\n        debug_print(\"[Retrieve] query_states.shape\", query_states.shape)\n        debug_print(\"[Retrieve] self.memory.shape\", memory.shape)\n\n        # Apply ELU activation\n        query_states = F.elu(query_states) + 1  # ELU activation + 1 for stability\n        memory_output = torch.matmul(\n            # GQA\n            query_states,\n            memory.repeat(1, self.num_key_value_groups, 1, 1),\n        )\n\n        debug_print(\"[Retrieve] memory_output.shape\", memory_output.shape)\n        debug_print(\"[Retrieve] self.norm_term.shape\", norm_term.shape)\n\n        # Broadcast norm_term to the shape of query_states, then sum across head_dim for normalization\n        norm_term_broadcastable = torch.matmul(\n            query_states,\n            # GQA\n            norm_term.transpose(-2, -1).repeat(1, self.num_key_value_groups, 1, 1),\n        )\n        debug_print(\n            \"[Broadcast] norm_term_broadcastable.shape\", norm_term_broadcastable.shape\n        )\n\n        # Perform division\n        memory_output = memory_output / norm_term_broadcastable\n        return memory_output\n\n    def _update_memory(self, key_states, value_states, memory, norm_term):\n        # key_states: [batch_size, num_heads, seq_len, head_dim]\n        # value_states: [batch_size, num_heads, seq_len, value_dim]\n\n        key_states = F.elu(key_states) + 1  # Apply ELU activation\n\n        if memory is not None:\n            memory = memory + torch.matmul(key_states.transpose(-2, -1), value_states)\n        else:\n            memory = torch.matmul(key_states.transpose(-2, -1), value_states)\n\n        if norm_term is not None:\n            norm_term = norm_term + key_states.sum(\n                dim=2, keepdim=True\n            )  # Update normalization term\n        else:\n            norm_term = key_states.sum(\n                dim=2, keepdim=True\n            )  # Initialize normalization term\n\n        debug_print(\"[Update] self.memory.shape\", memory.shape)\n        debug_print(\"[Update] self.norm_term.shape\", norm_term.shape)\n\n        return memory, norm_term\n\n\n# LLAMA_ATTENTION_CLASSES = {\n#     \"eager\": LlamaInfiniAttention,  # LlamaAttention,\n#     \"flash_attention_2\": LlamaFlashAttention2,\n#     \"sdpa\": LlamaSdpaAttention,\n# }\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer with LLAMA->LLAMA,Llama->Llama\nclass LlamaDecoderLayer(nn.Module):\n    def __init__(self, config: LlamaConfig, layer_idx: int):\n        super().__init__()\n        self.hidden_size = config.hidden_size\n\n 
       self.self_attn = LlamaInfiniAttention(  # LLAMA_ATTENTION_CLASSES[config._attn_implementation](\n            config=config, layer_idx=layer_idx\n        )\n\n        self.mlp = LlamaMLP(config)\n        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n        self.post_attention_layernorm = LlamaRMSNorm(\n            config.hidden_size, eps=config.rms_norm_eps\n        )\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Tuple[torch.Tensor]] = None,\n        output_attentions: Optional[bool] = False,\n        use_cache: Optional[bool] = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        memory: Optional[dict] = None,\n        norm_term: Optional[dict] = None,\n        no_memory_update: Optional[bool] = False,\n        **kwargs,\n    ) -> Tuple[\n        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]\n    ]:\n        \"\"\"\n        Args:\n            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n            attention_mask (`torch.FloatTensor`, *optional*):\n                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,\n                query_sequence_length, key_sequence_length)` if default attention is used.\n            output_attentions (`bool`, *optional*):\n                Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n                returned tensors for more detail.\n            use_cache (`bool`, *optional*):\n                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n                (see `past_key_values`).\n            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n        \"\"\"\n        if \"padding_mask\" in kwargs:\n            warnings.warn(\n                \"Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`\"\n            )\n\n        residual = hidden_states\n\n        hidden_states = self.input_layernorm(hidden_states)\n\n        # Self Attention\n        _attended = self.self_attn(\n            hidden_states=hidden_states,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_value=past_key_value,\n            output_attentions=output_attentions,\n            use_cache=use_cache,\n            cache_position=cache_position,\n            memory=memory,\n            norm_term=norm_term,\n            no_memory_update=no_memory_update,\n            **kwargs,\n        )\n        hidden_states, self_attn_weights, present_key_value, memory, norm_term = (\n            _attended\n        )\n        hidden_states = residual + hidden_states\n\n        # Fully Connected\n        residual = hidden_states\n        hidden_states = self.post_attention_layernorm(hidden_states)\n        hidden_states = self.mlp(hidden_states)\n        hidden_states = residual + hidden_states\n\n        outputs = (hidden_states,)\n\n        if output_attentions:\n            outputs += (self_attn_weights,)\n\n        if use_cache:\n            outputs += (present_key_value,)\n\n        if memory is not None and norm_term is not None:\n            outputs += (\n                memory,\n                norm_term,\n            )\n\n        return outputs\n\n\nLLAMA_START_DOCSTRING = r\"\"\"\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`LlamaConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. 
Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare Llama Model outputting raw hidden-states without any specific head on top.\",\n    LLAMA_START_DOCSTRING,\n)\nclass LlamaPreTrainedModel(PreTrainedModel):\n    config_class = LlamaConfig\n    base_model_prefix = \"model\"\n    supports_gradient_checkpointing = True\n    # _keep_in_fp32_modules = [\"inv_freq\", \"rotary_emb\", \"cos_cached\", \"sin_cached\"]\n    _no_split_modules = [\"LlamaDecoderLayer\"]\n    _skip_keys_device_placement = [\n        \"past_key_values\",\n    ]  # \"causal_mask\"]\n    _supports_flash_attn_2 = True\n    _supports_sdpa = True\n    _supports_cache_class = True\n\n    def _init_weights(self, module):\n        std = self.config.initializer_range\n        if isinstance(module, nn.Linear):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n\n    def _setup_cache(\n        self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None\n    ):\n        if (\n            self.config._attn_implementation == \"flash_attention_2\"\n            and cache_cls == StaticCache\n        ):\n            raise ValueError(\n                \"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` \"\n                \"make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers\"\n            )\n\n        # for layer in self.model.layers:\n        #     weights = layer.self_attn.o_proj.weight\n        #     layer.self_attn.past_key_value = cache_cls(\n        #         self.config,\n        #         max_batch_size,\n        #         max_cache_len,\n        #         device=weights.device,\n        #         dtype=weights.dtype,\n        #     )\n        for layer in self.model.layers:\n            device = layer.input_layernorm.weight.device\n            if hasattr(self.config, \"_pre_quantization_dtype\"):\n                dtype = self.config._pre_quantization_dtype\n            else:\n                dtype = layer.self_attn.o_proj.weight.dtype\n            layer.self_attn.past_key_value = cache_cls(\n                self.config, max_batch_size, max_cache_len, device=device, dtype=dtype\n            )\n\n    def _reset_cache(self):\n        for layer in self.model.layers:\n            layer.self_attn.past_key_value = None\n\n\nLLAMA_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n            it.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n            `past_key_values`).\n\n            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n            information on the default strategy.\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.n_positions - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n            Two formats are allowed:\n            - a [`~cache_utils.Cache`] instance;\n            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n            cache format.\n\n            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n            legacy cache format will be returned.\n\n            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n            of shape `(batch_size, sequence_length)`.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n            the complete sequence length.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare Llama Model outputting raw hidden-states without any specific head on top.\",\n    LLAMA_START_DOCSTRING,\n)\n# Copied from transformers.models.llama.modeling_llama.LlamaModel with LLAMA->LLAMA,Llama->Llama\nclass LlamaModel(LlamaPreTrainedModel):\n    \"\"\"\n    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]\n\n    Args:\n        config: LlamaConfig\n    \"\"\"\n\n    def __init__(self, config: LlamaConfig):\n        super().__init__(config)\n        self.padding_idx = config.pad_token_id\n        self.vocab_size = config.vocab_size\n\n        self.embed_tokens = nn.Embedding(\n            config.vocab_size, config.hidden_size, self.padding_idx\n        )\n        self.layers = nn.ModuleList(\n            [\n                LlamaDecoderLayer(config, layer_idx)\n                for layer_idx in range(config.num_hidden_layers)\n            ]\n        )\n        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n        self.gradient_checkpointing = False\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.embed_tokens = value\n\n    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n    # Ignore copy\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        cache_position: Optional[torch.LongTensor] = None,\n        memory: Optional[dict] = None,\n        norm_term: Optional[dict] = None,\n        no_memory_update: Optional[bool] = False,\n    ) -> Union[Tuple, InfiniBaseModelOutputWithPast]:\n        output_attentions = (\n            output_attentions\n            if output_attentions is not None\n            else self.config.output_attentions\n        )\n        output_hidden_states = (\n            output_hidden_states\n            if output_hidden_states is not None\n            else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        if (input_ids is None) ^ (inputs_embeds is not None):\n            raise ValueError(\n                \"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one\"\n            )\n\n     
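   # memory and norm_term are threaded through the decoder-layer loop below: each layer\n        # receives the values returned by the previous layer, and the final pair is surfaced\n        # on InfiniBaseModelOutputWithPast.\n     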
   if self.gradient_checkpointing and self.training and use_cache:\n            logger.warning_once(\n                \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.\"\n            )\n            use_cache = False\n\n        if inputs_embeds is None:\n            inputs_embeds = self.embed_tokens(input_ids)\n\n        past_seen_tokens = 0\n        if use_cache:  # kept for BC (cache positions)\n            if not isinstance(past_key_values, StaticCache):\n                past_key_values = DynamicCache.from_legacy_cache(past_key_values)\n                past_seen_tokens = past_key_values.get_seq_length()\n\n        if cache_position is None:\n            cache_position = torch.arange(\n                past_seen_tokens,\n                past_seen_tokens + inputs_embeds.shape[1],\n                device=inputs_embeds.device,\n            )\n\n        if position_ids is None:\n            position_ids = cache_position.unsqueeze(0)\n\n        causal_mask = self._update_causal_mask(\n            attention_mask,\n            inputs_embeds,\n            cache_position,\n            # FIXME: why inputs_embeds needed?\n            past_seen_tokens + inputs_embeds.shape[1],\n        )\n\n        # embed positions\n        hidden_states = inputs_embeds\n\n        # # normalized: No need for Llama(only Gemma)\n        # # Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5\n        # # See https://github.com/huggingface/transformers/pull/29402\n        # normalizer = torch.tensor(\n        #     self.config.hidden_size**0.5, dtype=hidden_states.dtype\n        # )\n        # hidden_states = hidden_states * normalizer\n\n        # decoder layers\n        all_hidden_states = () if output_hidden_states else None\n        all_self_attns = () if output_attentions else None\n        next_decoder_cache = None\n\n        for decoder_layer in self.layers:\n            if output_hidden_states:\n                all_hidden_states += (hidden_states,)\n\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(\n                    decoder_layer.__call__,\n                    hidden_states,\n                    causal_mask,\n                    position_ids,\n                    past_key_values,\n                    output_attentions,\n                    use_cache,\n                    cache_position,\n                    memory,  # FIXME?\n                    norm_term,\n                    no_memory_update,\n                )\n            else:\n                layer_outputs = decoder_layer(\n                    hidden_states,\n                    attention_mask=causal_mask,\n                    position_ids=position_ids,\n                    past_key_value=past_key_values,\n                    output_attentions=output_attentions,\n                    use_cache=use_cache,\n                    cache_position=cache_position,\n                    memory=memory,\n                    norm_term=norm_term,\n                    no_memory_update=no_memory_update,\n                )\n\n            hidden_states = layer_outputs[0]\n\n            if use_cache:\n                next_decoder_cache = layer_outputs[2 if output_attentions else 1]\n\n            if output_attentions:\n                all_self_attns += (layer_outputs[1],)\n\n            memory = layer_outputs[-2]\n            norm_term = layer_outputs[-1]\n\n        hidden_states = self.norm(hidden_states)\n\n        # add 
hidden states from the last decoder layer\n        if output_hidden_states:\n            all_hidden_states += (hidden_states,)\n\n        next_cache = None\n        if use_cache:\n            next_cache = (\n                next_decoder_cache.to_legacy_cache()\n                if isinstance(next_decoder_cache, Cache)\n                else next_decoder_cache\n            )\n        if not return_dict:\n            return tuple(\n                v\n                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]\n                if v is not None\n            )\n        return InfiniBaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=next_cache,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attns,\n            memory=memory,\n            norm_term=norm_term,\n        )\n\n    # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static\n    # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.\n    # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using\n    # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114\n    def _update_causal_mask(\n        self, attention_mask, input_tensor, cache_position, current_length\n    ):\n        if self.config._attn_implementation == \"flash_attention_2\":\n            if attention_mask is not None and 0.0 in attention_mask:\n                return attention_mask\n            return None\n\n        dtype, device = input_tensor.dtype, input_tensor.device\n        min_dtype = torch.finfo(dtype).min\n        sequence_length = input_tensor.shape[1]\n        if hasattr(\n            getattr(self.layers[0], \"self_attn\", {}), \"past_key_value\"\n        ):  # static cache\n            target_length = self.config.max_position_embeddings\n        else:  # dynamic cache\n            target_length = (\n                attention_mask.shape[-1]\n                if isinstance(attention_mask, torch.Tensor)\n                else current_length + 1\n            )\n\n        causal_mask = torch.full(\n            (sequence_length, target_length),\n            fill_value=min_dtype,\n            dtype=dtype,\n            device=device,\n        )\n        if sequence_length != 1:\n            causal_mask = torch.triu(causal_mask, diagonal=1)\n        causal_mask *= torch.arange(\n            target_length, device=device\n        ) > cache_position.reshape(-1, 1)\n        causal_mask = causal_mask[None, None, :, :].expand(\n            input_tensor.shape[0], 1, -1, -1\n        )\n        if attention_mask is not None:\n            causal_mask = (\n                causal_mask.clone()\n            )  # copy to contiguous memory for in-place edit\n            if attention_mask.dim() == 2:\n                mask_length = attention_mask.shape[-1]\n                padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[\n                    :, None, None, :\n                ].eq(0.0)\n                causal_mask[..., :mask_length] = causal_mask[\n                    ..., :mask_length\n                ].masked_fill(padding_mask, min_dtype)\n            elif attention_mask.dim() == 4:\n                # backwards compatibility: we allow passing a 4D attention mask shorter than the input 
length with\n                # cache. In that case, the 4D attention mask attends to the newest tokens only.\n                if attention_mask.shape[-2] < cache_position[0] + sequence_length:\n                    offset = cache_position[0]\n                else:\n                    offset = 0\n                mask_shape = attention_mask.shape\n                mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype\n                causal_mask[\n                    : mask_shape[0],\n                    : mask_shape[1],\n                    offset : mask_shape[2] + offset,\n                    : mask_shape[3],\n                ] = mask_slice\n\n        if (\n            self.config._attn_implementation == \"sdpa\"\n            and attention_mask is not None\n            and attention_mask.device.type == \"cuda\"\n        ):\n            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when\n            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.\n            # Details: https://github.com/pytorch/pytorch/issues/110213\n            causal_mask = AttentionMaskConverter._unmask_unattended(\n                causal_mask, min_dtype\n            )\n\n        return causal_mask\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->LLAMA,Llama->Llama,llama->llama\nclass LlamaForCausalLM(LlamaPreTrainedModel):\n    _tied_weights_keys = [\"lm_head.weight\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.model = LlamaModel(config)\n        self.vocab_size = config.vocab_size\n        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.model.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.model.embed_tokens = value\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def set_decoder(self, decoder):\n        self.model = decoder\n\n    def get_decoder(self):\n        return self.model\n\n    # Ignore copy\n    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n    @replace_return_docstrings(\n        output_type=InfiniCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC\n    )\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        cache_position: Optional[torch.LongTensor] = None,\n        memory: Optional[dict] = None,\n        norm_term: Optional[dict] = None,\n        no_memory_update: Optional[bool] = False,\n    ) -> Union[Tuple, InfiniCausalLMOutputWithPast]:\n        r\"\"\"\n        Args:\n            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n                Labels for computing the masked language modeling loss. 
Indices should either be in `[0, ...,\n                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n        Returns:\n\n        Example:\n\n        ```python\n        >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n        >>> model = LlamaForCausalLM.from_pretrained(\"google/llama-7b\")\n        >>> tokenizer = AutoTokenizer.from_pretrained(\"google/llama-7b\")\n\n        >>> prompt = \"What is your favorite condiment?\"\n        >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n        >>> # Generate\n        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n        \"What is your favorite condiment?\"\n        ```\"\"\"\n        output_attentions = (\n            output_attentions\n            if output_attentions is not None\n            else self.config.output_attentions\n        )\n        output_hidden_states = (\n            output_hidden_states\n            if output_hidden_states is not None\n            else self.config.output_hidden_states\n        )\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n        outputs = self.model(\n            input_ids=input_ids,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n            cache_position=cache_position,\n            memory=memory,\n            norm_term=norm_term,\n            no_memory_update=no_memory_update,\n        )\n\n        hidden_states = outputs[0]\n        memory = outputs.memory\n        norm_term = outputs.norm_term\n        logits = self.lm_head(hidden_states)\n        logits = logits.float()\n        loss = None\n        if labels is not None:\n            # Shift so that tokens < n predict n\n            shift_logits = logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss()\n            shift_logits = shift_logits.view(-1, self.config.vocab_size)\n            shift_labels = shift_labels.view(-1)\n            # Enable model parallelism\n            shift_labels = shift_labels.to(shift_logits.device)\n            loss = loss_fct(shift_logits, shift_labels)\n\n        if not return_dict:\n            output = (logits,) + outputs[1:]\n            return (loss,) + output if loss is not None else output\n\n        return InfiniCausalLMOutputWithPast(\n            loss=loss,\n            logits=logits,\n            past_key_values=outputs.past_key_values,\n            hidden_states=outputs.hidden_states,\n            attentions=outputs.attentions,\n            memory=memory,\n            norm_term=norm_term,\n        )\n\n    def prepare_inputs_for_generation(\n        self,\n        input_ids,\n        past_key_values=None,\n        attention_mask=None,\n        inputs_embeds=None,\n        cache_position=None,\n        
**kwargs,\n    ):\n        # With static cache, the `past_key_values` is None\n        # TODO joao: standardize interface for the different Cache classes and remove of this if\n        has_static_cache = False\n        if past_key_values is None:\n            past_key_values = getattr(\n                getattr(self.model.layers[0], \"self_attn\", {}), \"past_key_value\", None\n            )\n            has_static_cache = past_key_values is not None\n\n        past_length = 0\n        if past_key_values is not None:\n            if isinstance(past_key_values, Cache):\n                past_length = (\n                    cache_position[0]\n                    if cache_position is not None\n                    else past_key_values.get_seq_length()\n                )\n                max_cache_length = (\n                    torch.tensor(\n                        past_key_values.get_max_length(), device=input_ids.device\n                    )\n                    if past_key_values.get_max_length() is not None\n                    else None\n                )\n                cache_length = (\n                    past_length\n                    if max_cache_length is None\n                    else torch.min(max_cache_length, past_length)\n                )\n            # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects\n            else:\n                cache_length = past_length = past_key_values[0][0].shape[2]\n                max_cache_length = None\n\n            # Keep only the unprocessed tokens:\n            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where\n            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as\n            # input)\n            if (\n                attention_mask is not None\n                and attention_mask.shape[1] > input_ids.shape[1]\n            ):\n                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]\n            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. 
We can discard\n            # input_ids based on the past_length.\n            elif past_length < input_ids.shape[1]:\n                input_ids = input_ids[:, past_length:]\n            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.\n\n            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.\n            if (\n                max_cache_length is not None\n                and attention_mask is not None\n                and cache_length + input_ids.shape[1] > max_cache_length\n            ):\n                attention_mask = attention_mask[:, -max_cache_length:]\n\n        position_ids = kwargs.get(\"position_ids\", None)\n        if attention_mask is not None and position_ids is None:\n            # create position_ids on the fly for batch generation\n            position_ids = attention_mask.long().cumsum(-1) - 1\n            position_ids.masked_fill_(attention_mask == 0, 1)\n            if past_key_values:\n                position_ids = position_ids[:, -input_ids.shape[1] :]\n\n        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n        if inputs_embeds is not None and past_key_values is None:\n            model_inputs = {\"inputs_embeds\": inputs_embeds}\n        else:\n            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise\n            # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114\n            # TODO: use `next_tokens` directly instead.\n            model_inputs = {\"input_ids\": input_ids.contiguous()}\n\n        input_length = (\n            position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]\n        )\n        if cache_position is None:\n            cache_position = torch.arange(\n                past_length, past_length + input_length, device=input_ids.device\n            )\n        else:\n            cache_position = cache_position[-input_length:]\n\n        if has_static_cache:\n            past_key_values = None\n\n        model_inputs.update(\n            {\n                \"position_ids\": position_ids,\n                \"cache_position\": cache_position,\n                \"past_key_values\": past_key_values,\n                \"use_cache\": kwargs.get(\"use_cache\"),\n                \"attention_mask\": attention_mask,\n            }\n        )\n        return model_inputs\n\n    @staticmethod\n    def _reorder_cache(past_key_values, beam_idx):\n        reordered_past = ()\n        for layer_past in past_key_values:\n            reordered_past += (\n                tuple(\n                    past_state.index_select(0, beam_idx.to(past_state.device))\n                    for past_state in layer_past\n                ),\n            )\n        return reordered_past\n\n\n# Remove: Classifiers\n"
  },
  {
    "path": "modeling_gemma.py",
    "content": "# coding=utf-8\n# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Gemma model.\"\"\"\n\nimport os\nimport math\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...cache_utils import Cache, DynamicCache, StaticCache\nfrom ...modeling_attn_mask_utils import (\n    AttentionMaskConverter,\n    _prepare_4d_causal_attention_mask,\n)\nfrom ...modeling_outputs import (\n    BaseModelOutputWithPast,\n    CausalLMOutputWithPast,\n    SequenceClassifierOutputWithPast,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13\nfrom ...utils import (\n    add_start_docstrings,\n    add_start_docstrings_to_model_forward,\n    is_flash_attn_2_available,\n    is_flash_attn_greater_or_equal_2_10,\n    logging,\n    replace_return_docstrings,\n)\nfrom ...utils.import_utils import is_torch_fx_available\nfrom .configuration_gemma import GemmaConfig\n\nDEBUG = os.environ.get(\"DEBUG\", False)\n\n\ndef debug_print(*args):\n    if DEBUG:\n        print(*args)\n\n\nif is_flash_attn_2_available():\n    from flash_attn import flash_attn_func, flash_attn_varlen_func\n    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa\n\n\n# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.\n# It means that the function will not be traced through and simply appear as a node in the graph.\nif is_torch_fx_available():\n    if not is_torch_greater_or_equal_than_1_13:\n        import torch.fx\n\n    _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"GemmaConfig\"\n\n\ndef _get_unpad_data(attention_mask):\n    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)\n    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()\n    max_seqlen_in_batch = seqlens_in_batch.max().item()\n    cu_seqlens = F.pad(\n        torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)\n    )\n    return (\n        indices,\n        cu_seqlens,\n        max_seqlen_in_batch,\n    )\n\n\nclass GemmaRMSNorm(nn.Module):\n    def __init__(self, dim: int, eps: float = 1e-6):\n        super().__init__()\n        self.eps = eps\n        self.weight = nn.Parameter(torch.zeros(dim))\n\n    def _norm(self, x):\n        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)\n\n    def forward(self, x):\n        output = self._norm(x.float())\n        # Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16)\n        # See https://github.com/huggingface/transformers/pull/29402\n        output = output * (1.0 + 
self.weight.float())\n        return output.type_as(x)\n\n\nALL_LAYERNORM_LAYERS.append(GemmaRMSNorm)\n\n\nclass GemmaRotaryEmbedding(nn.Module):\n    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n        super().__init__()\n\n        self.dim = dim\n        self.max_position_embeddings = max_position_embeddings\n        self.base = base\n        self.register_buffer(\"inv_freq\", None, persistent=False)\n\n    @torch.no_grad()\n    def forward(self, x, position_ids, seq_len=None):\n        # x: [bs, num_attention_heads, seq_len, head_size]\n        if self.inv_freq is None:\n            self.inv_freq = 1.0 / (\n                self.base\n                ** (\n                    torch.arange(\n                        0, self.dim, 2, dtype=torch.int64, device=x.device\n                    ).float()\n                    / self.dim\n                )\n            )\n        inv_freq_expanded = (\n            self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)\n        )\n        position_ids_expanded = position_ids[:, None, :].float()\n        # Force float32 since bfloat16 loses precision on long contexts\n        # See https://github.com/huggingface/transformers/pull/29285\n        device_type = x.device.type\n        device_type = (\n            device_type\n            if isinstance(device_type, str) and device_type != \"mps\"\n            else \"cpu\"\n        )\n        with torch.autocast(device_type=device_type, enabled=False):\n            freqs = (\n                inv_freq_expanded.float() @ position_ids_expanded.float()\n            ).transpose(1, 2)\n            emb = torch.cat((freqs, freqs), dim=-1)\n            cos = emb.cos()\n            sin = emb.sin()\n        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)\n\n\n# Copied from transformers.models.llama.modeling_llama.rotate_half\ndef rotate_half(x):\n    \"\"\"Rotates half the hidden dims of the input.\"\"\"\n    x1 = x[..., : x.shape[-1] // 2]\n    x2 = x[..., x.shape[-1] // 2 :]\n    return torch.cat((-x2, x1), dim=-1)\n\n\n# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n    \"\"\"Applies Rotary Position Embedding to the query and key tensors.\n\n    Args:\n        q (`torch.Tensor`): The query tensor.\n        k (`torch.Tensor`): The key tensor.\n        cos (`torch.Tensor`): The cosine part of the rotary embedding.\n        sin (`torch.Tensor`): The sine part of the rotary embedding.\n        position_ids (`torch.Tensor`, *optional*):\n            Deprecated and unused.\n        unsqueeze_dim (`int`, *optional*, defaults to 1):\n            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\n    Returns:\n        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.\n    \"\"\"\n    cos = cos.unsqueeze(unsqueeze_dim)\n    sin = sin.unsqueeze(unsqueeze_dim)\n    q_embed = (q * cos) + (rotate_half(q) * sin)\n    k_embed = (k * cos) + (rotate_half(k) * sin)\n    return q_embed, k_embed\n\n\nclass GemmaMLP(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.hidden_size = config.hidden_size\n        self.intermediate_size = config.intermediate_size\n        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n        if config.hidden_activation is None:\n            logger.warning_once(\n                \"Gemma's activation function should be approximate GeLU and not exact GeLU.\\n\"\n                \"Changing the activation function to `gelu_pytorch_tanh`.\"\n                f\"if you want to use the legacy `{config.hidden_act}`, \"\n                f\"edit the `model.config` to set `hidden_activation={config.hidden_act}` \"\n                \"  instead of `hidden_act`. See https://github.com/huggingface/transformers/pull/29402 for more details.\"\n            )\n            hidden_activation = \"gelu_pytorch_tanh\"\n        else:\n            hidden_activation = config.hidden_activation\n        self.act_fn = ACT2FN[hidden_activation]\n\n    def forward(self, x):\n        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n\n# Copied from transformers.models.llama.modeling_llama.repeat_kv\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n    \"\"\"\n    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\n    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n    \"\"\"\n    batch, num_key_value_heads, slen, head_dim = hidden_states.shape\n    if n_rep == 1:\n        return hidden_states\n    hidden_states = hidden_states[:, :, None, :, :].expand(\n        batch, num_key_value_heads, n_rep, slen, head_dim\n    )\n    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)\n\n\nclass GemmaAttention(nn.Module):\n    \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n    # Ignore copy\n    def __init__(self, config: GemmaConfig, layer_idx: Optional[int] = None):\n        super().__init__()\n        self.config = config\n        self.layer_idx = layer_idx\n        if layer_idx is None:\n            logger.warning_once(\n                f\"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will \"\n                \"lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` \"\n                \"when creating this class.\"\n            )\n\n        self.attention_dropout = config.attention_dropout\n        self.hidden_size = config.hidden_size\n        self.num_heads = config.num_attention_heads\n        self.head_dim = config.head_dim\n        self.num_key_value_heads = config.num_key_value_heads\n        self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n        self.max_position_embeddings = config.max_position_embeddings\n        self.rope_theta = config.rope_theta\n        self.is_causal = True\n\n        if self.hidden_size % self.num_heads != 0:\n            raise ValueError(\n                f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n                f\" and `num_heads`: {self.num_heads}).\"\n            )\n\n        self.q_proj = nn.Linear(\n            self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias\n        )\n        self.k_proj = nn.Linear(\n            self.hidden_size,\n            self.num_key_value_heads * self.head_dim,\n            bias=config.attention_bias,\n        )\n        self.v_proj = nn.Linear(\n            self.hidden_size,\n            self.num_key_value_heads * self.head_dim,\n            bias=config.attention_bias,\n        )\n        self.o_proj = nn.Linear(\n            self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias\n        )\n        self.rotary_emb = GemmaRotaryEmbedding(\n            self.head_dim,\n            max_position_embeddings=self.max_position_embeddings,\n            base=self.rope_theta,\n        )\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin, None\n        )\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n    
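    # Grouped-query attention: repeat_kv above duplicated each K/V head num_key_value_groups\n        # times so K/V match the number of query heads; the scores computed next are\n        # softmax(Q K^T / sqrt(head_dim)) with the causal mask added before the softmax.\n    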
    attn_weights = torch.matmul(\n            query_states, key_states.transpose(2, 3)\n        ) / math.sqrt(self.head_dim)\n\n        if attention_mask is not None:  # no matter the length, we just slice it\n            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]\n            attn_weights = attn_weights + causal_mask\n\n        # upcast attention to fp32\n        attn_weights = nn.functional.softmax(\n            attn_weights, dim=-1, dtype=torch.float32\n        ).to(query_states.dtype)\n        attn_weights = nn.functional.dropout(\n            attn_weights, p=self.attention_dropout, training=self.training\n        )\n        attn_output = torch.matmul(attn_weights, value_states)\n\n        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n            raise ValueError(\n                f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n                f\" {attn_output.size()}\"\n            )\n\n        attn_output = attn_output.transpose(1, 2).contiguous()\n\n        attn_output = attn_output.view(bsz, q_len, -1)\n        attn_output = self.o_proj(attn_output)\n\n        if not output_attentions:\n            attn_weights = None\n\n        return attn_output, attn_weights, past_key_value\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 with Llama->Gemma\nclass GemmaFlashAttention2(GemmaAttention):\n    \"\"\"\n    Gemma flash attention module. This module inherits from `GemmaAttention` as the weights of the module stays\n    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of\n    flash attention and deal with padding tokens in case the input contains any of them.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.\n        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. 
Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.\n        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).\n        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()\n\n    # Ignore copy\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.LongTensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        output_attentions = False\n\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        # Flash attention requires the input to have the shape\n        # batch_size x seq_length x head_dim x hidden_dim\n        # therefore we just need to keep the original shape\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin, None\n        )\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache\n        # to be able to avoid many of these transpose/reshape/view.\n        query_states = query_states.transpose(1, 2)\n        key_states = key_states.transpose(1, 2)\n        value_states = value_states.transpose(1, 2)\n\n        dropout_rate = self.attention_dropout if self.training else 0.0\n\n        # In PEFT, usually we cast the layer norms in float32 for training stability reasons\n        # therefore the input hidden states gets silently casted in float32. Hence, we need\n        # cast them back in the correct dtype just to be sure everything works as expected.\n        # This might slowdown training & inference so it is recommended to not cast the LayerNorms\n        # in fp32. 
(GemmaRMSNorm handles it correctly)\n\n        input_dtype = query_states.dtype\n        if input_dtype == torch.float32:\n            if torch.is_autocast_enabled():\n                target_dtype = torch.get_autocast_gpu_dtype()\n            # Handle the case where the model is quantized\n            elif hasattr(self.config, \"_pre_quantization_dtype\"):\n                target_dtype = self.config._pre_quantization_dtype\n            else:\n                target_dtype = self.q_proj.weight.dtype\n\n            logger.warning_once(\n                f\"The input hidden states seems to be silently casted in float32, this might be related to\"\n                f\" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in\"\n                f\" {target_dtype}.\"\n            )\n\n            query_states = query_states.to(target_dtype)\n            key_states = key_states.to(target_dtype)\n            value_states = value_states.to(target_dtype)\n\n        attn_output = self._flash_attention_forward(\n            query_states,\n            key_states,\n            value_states,\n            attention_mask,\n            q_len,\n            dropout=dropout_rate,\n        )\n\n        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()\n        attn_output = self.o_proj(attn_output)\n\n        if not output_attentions:\n            attn_weights = None\n\n        return attn_output, attn_weights, past_key_value\n\n    def _flash_attention_forward(\n        self,\n        query_states,\n        key_states,\n        value_states,\n        attention_mask,\n        query_length,\n        dropout=0.0,\n        softmax_scale=None,\n    ):\n        \"\"\"\n        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token\n        first unpad the input, then computes the attention scores and pad the final attention scores.\n\n        Args:\n            query_states (`torch.Tensor`):\n                Input query states to be passed to Flash Attention API\n            key_states (`torch.Tensor`):\n                Input key states to be passed to Flash Attention API\n            value_states (`torch.Tensor`):\n                Input value states to be passed to Flash Attention API\n            attention_mask (`torch.Tensor`):\n                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the\n                position of padding tokens and 1 for the position of non-padding tokens.\n            dropout (`float`):\n                Attention dropout\n            softmax_scale (`float`, *optional*):\n                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)\n        \"\"\"\n        if not self._flash_attn_uses_top_left_mask:\n            causal = self.is_causal\n        else:\n            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. 
For details, please see the comment in GemmaFlashAttention2 __init__.\n            causal = self.is_causal and query_length != 1\n\n        # Contains at least one padding token in the sequence\n        if attention_mask is not None:\n            batch_size = query_states.shape[0]\n            (\n                query_states,\n                key_states,\n                value_states,\n                indices_q,\n                cu_seq_lens,\n                max_seq_lens,\n            ) = self._upad_input(\n                query_states, key_states, value_states, attention_mask, query_length\n            )\n\n            cu_seqlens_q, cu_seqlens_k = cu_seq_lens\n            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens\n\n            attn_output_unpad = flash_attn_varlen_func(\n                query_states,\n                key_states,\n                value_states,\n                cu_seqlens_q=cu_seqlens_q,\n                cu_seqlens_k=cu_seqlens_k,\n                max_seqlen_q=max_seqlen_in_batch_q,\n                max_seqlen_k=max_seqlen_in_batch_k,\n                dropout_p=dropout,\n                softmax_scale=softmax_scale,\n                causal=causal,\n            )\n\n            attn_output = pad_input(\n                attn_output_unpad, indices_q, batch_size, query_length\n            )\n        else:\n            attn_output = flash_attn_func(\n                query_states,\n                key_states,\n                value_states,\n                dropout,\n                softmax_scale=softmax_scale,\n                causal=causal,\n            )\n\n        return attn_output\n\n    def _upad_input(\n        self, query_layer, key_layer, value_layer, attention_mask, query_length\n    ):\n        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)\n        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape\n\n        key_layer = index_first_axis(\n            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),\n            indices_k,\n        )\n        value_layer = index_first_axis(\n            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),\n            indices_k,\n        )\n        if query_length == kv_seq_len:\n            query_layer = index_first_axis(\n                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim),\n                indices_k,\n            )\n            cu_seqlens_q = cu_seqlens_k\n            max_seqlen_in_batch_q = max_seqlen_in_batch_k\n            indices_q = indices_k\n        elif query_length == 1:\n            max_seqlen_in_batch_q = 1\n            cu_seqlens_q = torch.arange(\n                batch_size + 1, dtype=torch.int32, device=query_layer.device\n            )  # There is a memcpy here, that is very bad.\n            indices_q = cu_seqlens_q[:-1]\n            query_layer = query_layer.squeeze(1)\n        else:\n            # The -q_len: slice assumes left padding.\n            attention_mask = attention_mask[:, -query_length:]\n            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(\n                query_layer, attention_mask\n            )\n\n        return (\n            query_layer,\n            key_layer,\n            value_layer,\n            indices_q,\n            (cu_seqlens_q, cu_seqlens_k),\n            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),\n        )\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with 
Llama->Gemma\nclass GemmaSdpaAttention(GemmaAttention):\n    \"\"\"\n    Gemma attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from\n    `GemmaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to\n    SDPA API.\n    \"\"\"\n\n    # Ignore copy\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        if output_attentions:\n            # TODO: Improve this warning with e.g. `model.config.attn_implementation = \"manual\"` once this is implemented.\n            logger.warning_once(\n                \"GemmaModel is using GemmaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, \"\n                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation=\"eager\"` when loading the model.'\n            )\n            return super().forward(\n                hidden_states=hidden_states,\n                attention_mask=attention_mask,\n                position_ids=position_ids,\n                past_key_value=past_key_value,\n                output_attentions=output_attentions,\n                use_cache=use_cache,\n                cache_position=cache_position,\n            )\n\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin, None\n        )\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n        causal_mask = attention_mask\n        if attention_mask is not None:\n            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]\n\n        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,\n        # Reference: 
https://github.com/pytorch/pytorch/issues/112577.\n        if query_states.device.type == \"cuda\" and causal_mask is not None:\n            query_states = query_states.contiguous()\n            key_states = key_states.contiguous()\n            value_states = value_states.contiguous()\n\n        attn_output = torch.nn.functional.scaled_dot_product_attention(\n            query_states,\n            key_states,\n            value_states,\n            attn_mask=causal_mask,\n            dropout_p=self.attention_dropout if self.training else 0.0,\n        )\n\n        attn_output = attn_output.transpose(1, 2).contiguous()\n        attn_output = attn_output.view(bsz, q_len, -1)\n\n        attn_output = self.o_proj(attn_output)\n\n        return attn_output, None, past_key_value\n\n\nclass GemmaInfiniAttention(GemmaAttention):\n    def __init__(\n        self,\n        config: GemmaConfig,\n        layer_idx: Optional[int] = None,\n    ):\n        super().__init__(config, layer_idx)\n\n        # Each head has its own memory gate\n        # Initialized to -100 so that sigmoid(gate) is ~0 and the memory path has almost no effect at the start of training\n        self.gate = nn.Parameter(torch.full((1, self.num_heads, 1, 1), -100.0))\n        self.segment_size = config.segment_size\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        self.memory = None\n        self.norm_term = None\n\n        total_len = hidden_states.size(1)\n        debug_print(\"total_len\", total_len)\n        segments = torch.tensor_split(\n            hidden_states,\n            list(range(self.segment_size, total_len, self.segment_size)),\n            dim=1,\n        )\n        \n        # Pre-allocate tensor for all outputs\n        bsz, _, hidden_dim = hidden_states.size()\n        final_output = torch.empty(bsz, total_len, hidden_dim, device=hidden_states.device, dtype=hidden_states.dtype)\n\n\n        debug_print(\"len(segments):\", len(segments))\n\n        start_index = 0\n        for segment in segments:\n            # Process each segment\n            query_states = self.q_proj(segment)\n            key_states = self.k_proj(segment)\n            value_states = self.v_proj(segment)\n\n            # Reshape the projections into per-head form, as in the parent attention classes\n            bsz, q_len, _ = segment.size()  # q_len == self.segment_size, except possibly for the last segment\n            query_states = query_states.view(\n                bsz, q_len, self.num_heads, self.head_dim\n            ).transpose(1, 2)\n            key_states = key_states.view(\n                bsz, q_len, self.num_key_value_heads, self.head_dim\n            ).transpose(1, 2)\n            value_states = value_states.view(\n                bsz, q_len, self.num_key_value_heads, self.head_dim\n            ).transpose(1, 2)\n\n            # Memory retrieval and update are done on the pre-RoPE states (no positional embedding)\n            memory_output = self._retrieve_from_memory(query_states)\n            debug_print(\"Memory Output Shape:\", memory_output.shape)\n            # Update memory with current segment's key and value states\n            self._update_memory(key_states, value_states)\n\n            # Rotary embeddings, set seq_len to q_len as we 
are processing a segment\n            cos, sin = self.rotary_emb(value_states, position_ids, seq_len=q_len)\n\n            query_states, key_states = apply_rotary_pos_emb(\n                query_states,\n                key_states,\n                cos[:, : min(self.segment_size, q_len), :],\n                sin[:, : min(self.segment_size, q_len), :],\n                None,\n            )\n\n            # Basic cache\n            past_key_value = getattr(self, \"past_key_value\", past_key_value)\n            if past_key_value is not None:\n                # sin and cos are specific to RoPE models; cache_position needed for the static cache\n                cache_kwargs = {\n                    \"sin\": sin,\n                    \"cos\": cos,\n                    \"cache_position\": cache_position,\n                }\n                key_states, value_states = past_key_value.update(\n                    key_states, value_states, self.layer_idx, cache_kwargs\n                )\n\n            # GQA\n            key_states = repeat_kv(key_states, self.num_key_value_groups)\n            value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n            causal_mask = attention_mask\n            if attention_mask is not None:\n                causal_mask = causal_mask[\n                    :, :, : min(self.segment_size, q_len), : key_states.shape[-2]\n                ]  # FIXME: This is wrong, should be [:, :, :, :self.segment_size]\n\n            debug_print(\"causal_mask.shape\", causal_mask.shape)\n            debug_print(\"query_states.shape\", query_states.shape)\n\n            attn_output = torch.nn.functional.scaled_dot_product_attention(\n                query_states,\n                key_states,\n                value_states,\n                attn_mask=causal_mask,\n                dropout_p=self.attention_dropout if self.training else 0.0,\n            )\n\n            combined_output = (\n                F.sigmoid(self.gate) * memory_output\n                + (1 - F.sigmoid(self.gate)) * attn_output\n            )\n\n            # Prepare output for this segment\n            combined_output = combined_output.transpose(1, 2).contiguous()\n            combined_output = combined_output.view(bsz, q_len, self.hidden_size)\n            \n            segment_output = self.o_proj(combined_output)\n        \n            # Determine the segment size (important for the last segment which might be smaller)\n            current_segment_size = segment.size(1)\n\n            # Fill the corresponding part of the pre-allocated tensor\n            final_output[:, start_index:start_index + current_segment_size, :] = segment_output\n            start_index += current_segment_size\n            \n        return final_output, None, None\n        # Concatenate outputs from all segments\n        # final_output = torch.cat(final_outputs, dim=1)\n        # return final_output, None, None\n\n    def _retrieve_from_memory(self, query_states):\n        # query_states: [batch_size, num_heads, seq_len, head_dim]\n\n        # Check if memory is initialized\n        if self.memory is None or self.norm_term is None:\n            debug_print(\"[Retrieve] No memory or norm term found\")\n            return torch.zeros_like(query_states)\n\n        debug_print(\"[Retrieve] query_states.shape\", query_states.shape)\n        debug_print(\"[Retrieve] self.memory.shape\", self.memory.shape)\n\n        # Apply ELU activation\n        query_states = F.elu(query_states) + 1  # ELU activation + 1 for stability\n      
  memory_output = torch.matmul(query_states, self.memory)\n\n        debug_print(\"[Retrieve] memory_output.shape\", memory_output.shape)\n        debug_print(\"[Retrieve] self.norm_term.shape\", self.norm_term.shape)\n\n        # Broadcast norm_term to the shape of query_states, then sum across head_dim for normalization\n        norm_term_broadcastable = torch.matmul(\n            query_states,\n            self.norm_term.transpose(-2, -1),\n        )\n        debug_print(\n            \"[Broadcast] norm_term_broadcastable.shape\", norm_term_broadcastable.shape\n        )\n\n        # Perform division\n        memory_output = memory_output / norm_term_broadcastable\n        return memory_output\n\n    def _update_memory(self, key_states, value_states):\n        # key_states: [batch_size, num_heads, seq_len, head_dim]\n        # value_states: [batch_size, num_heads, seq_len, value_dim]\n\n        key_states = F.elu(key_states) + 1  # Apply ELU activation\n\n        if self.memory is not None:\n            self.memory = self.memory + torch.matmul(\n                key_states.transpose(-2, -1), value_states\n            )\n        else:\n            self.memory = torch.matmul(key_states.transpose(-2, -1), value_states)\n\n        if self.norm_term is not None:\n            self.norm_term = self.norm_term + key_states.sum(\n                dim=2, keepdim=True\n            )  # Update normalization term\n        else:\n            self.norm_term = key_states.sum(\n                dim=2, keepdim=True\n            )  # Initialize normalization term\n\n        debug_print(\"[Update] self.memory.shape\", self.memory.shape)\n        debug_print(\"[Update] self.norm_term.shape\", self.norm_term.shape)\n\n\nGEMMA_ATTENTION_CLASSES = {\n    \"eager\": GemmaInfiniAttention,  # GemmaAttention,\n    \"flash_attention_2\": GemmaFlashAttention2,\n    \"sdpa\": GemmaSdpaAttention,\n}\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer with LLAMA->GEMMA,Llama->Gemma\nclass GemmaDecoderLayer(nn.Module):\n    def __init__(self, config: GemmaConfig, layer_idx: int):\n        super().__init__()\n        self.hidden_size = config.hidden_size\n\n        self.self_attn = GEMMA_ATTENTION_CLASSES[config._attn_implementation](\n            config=config, layer_idx=layer_idx\n        )\n\n        self.mlp = GemmaMLP(config)\n        self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n        self.post_attention_layernorm = GemmaRMSNorm(\n            config.hidden_size, eps=config.rms_norm_eps\n        )\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Tuple[torch.Tensor]] = None,\n        output_attentions: Optional[bool] = False,\n        use_cache: Optional[bool] = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[\n        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]\n    ]:\n        \"\"\"\n        Args:\n            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n            attention_mask (`torch.FloatTensor`, *optional*):\n                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,\n                query_sequence_length, key_sequence_length)` if default attention is used.\n            output_attentions 
(`bool`, *optional*):\n                Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n                returned tensors for more detail.\n            use_cache (`bool`, *optional*):\n                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n                (see `past_key_values`).\n            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n        \"\"\"\n        if \"padding_mask\" in kwargs:\n            warnings.warn(\n                \"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`\"\n            )\n\n        residual = hidden_states\n\n        hidden_states = self.input_layernorm(hidden_states)\n\n        # Self Attention\n        hidden_states, self_attn_weights, present_key_value = self.self_attn(\n            hidden_states=hidden_states,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_value=past_key_value,\n            output_attentions=output_attentions,\n            use_cache=use_cache,\n            cache_position=cache_position,\n            **kwargs,\n        )\n        hidden_states = residual + hidden_states\n\n        # Fully Connected\n        residual = hidden_states\n        hidden_states = self.post_attention_layernorm(hidden_states)\n        hidden_states = self.mlp(hidden_states)\n        hidden_states = residual + hidden_states\n\n        outputs = (hidden_states,)\n\n        if output_attentions:\n            outputs += (self_attn_weights,)\n\n        if use_cache:\n            outputs += (present_key_value,)\n\n        return outputs\n\n\nGEMMA_START_DOCSTRING = r\"\"\"\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`GemmaConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. 
Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare Gemma Model outputting raw hidden-states without any specific head on top.\",\n    GEMMA_START_DOCSTRING,\n)\nclass GemmaPreTrainedModel(PreTrainedModel):\n    config_class = GemmaConfig\n    base_model_prefix = \"model\"\n    supports_gradient_checkpointing = True\n    _keep_in_fp32_modules = [\"inv_freq\", \"rotary_emb\", \"cos_cached\", \"sin_cached\"]\n    _no_split_modules = [\"GemmaDecoderLayer\"]\n    _skip_keys_device_placement = [\"past_key_values\", \"causal_mask\"]\n    _supports_flash_attn_2 = True\n    _supports_sdpa = True\n    _supports_cache_class = True\n\n    def _init_weights(self, module):\n        std = self.config.initializer_range\n        if isinstance(module, nn.Linear):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n\n    def _setup_cache(\n        self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None\n    ):\n        if (\n            self.config._attn_implementation == \"flash_attention_2\"\n            and cache_cls == StaticCache\n        ):\n            raise ValueError(\n                \"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` \"\n                \"make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers\"\n            )\n\n        for layer in self.model.layers:\n            weights = layer.self_attn.o_proj.weight\n            layer.self_attn.past_key_value = cache_cls(\n                self.config,\n                max_batch_size,\n                max_cache_len,\n                device=weights.device,\n                dtype=weights.dtype,\n            )\n\n    def _reset_cache(self):\n        for layer in self.model.layers:\n            layer.self_attn.past_key_value = None\n\n\nGEMMA_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n            it.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n            `past_key_values`).\n\n            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n            and modify to your needs. 
See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n            information on the default strategy.\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.n_positions - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n            Two formats are allowed:\n            - a [`~cache_utils.Cache`] instance;\n            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n            cache format.\n\n            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n            legacy cache format will be returned.\n\n            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n            of shape `(batch_size, sequence_length)`.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n            this tensor is not affected by padding. 
It is used to update the cache in the correct position and to infer\n            the complete sequence length.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare Gemma Model outputting raw hidden-states without any specific head on top.\",\n    GEMMA_START_DOCSTRING,\n)\n# Copied from transformers.models.llama.modeling_llama.LlamaModel with LLAMA->GEMMA,Llama->Gemma\nclass GemmaModel(GemmaPreTrainedModel):\n    \"\"\"\n    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`GemmaDecoderLayer`]\n\n    Args:\n        config: GemmaConfig\n    \"\"\"\n\n    def __init__(self, config: GemmaConfig):\n        super().__init__(config)\n        self.padding_idx = config.pad_token_id\n        self.vocab_size = config.vocab_size\n\n        self.embed_tokens = nn.Embedding(\n            config.vocab_size, config.hidden_size, self.padding_idx\n        )\n        self.layers = nn.ModuleList(\n            [\n                GemmaDecoderLayer(config, layer_idx)\n                for layer_idx in range(config.num_hidden_layers)\n            ]\n        )\n        self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n        self.gradient_checkpointing = False\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.embed_tokens = value\n\n    @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)\n    # Ignore copy\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        cache_position: Optional[torch.LongTensor] = None,\n    ) -> Union[Tuple, BaseModelOutputWithPast]:\n        output_attentions = (\n            output_attentions\n            if output_attentions is not None\n            else self.config.output_attentions\n        )\n        output_hidden_states = (\n            output_hidden_states\n            if output_hidden_states is not None\n            else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        if (input_ids is None) ^ (inputs_embeds is not None):\n            raise ValueError(\n                \"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one\"\n            )\n\n        if self.gradient_checkpointing and self.training and use_cache:\n            logger.warning_once(\n                \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.\"\n            )\n            use_cache = False\n\n        if inputs_embeds is None:\n            inputs_embeds = self.embed_tokens(input_ids)\n\n        past_seen_tokens = 0\n        if use_cache:  # kept for BC (cache positions)\n            if not isinstance(past_key_values, StaticCache):\n                past_key_values = DynamicCache.from_legacy_cache(past_key_values)\n            past_seen_tokens = past_key_values.get_seq_length()\n\n        if cache_position is None:\n            cache_position = torch.arange(\n                past_seen_tokens,\n                past_seen_tokens + inputs_embeds.shape[1],\n                device=inputs_embeds.device,\n            )\n\n        if position_ids is None:\n            position_ids = cache_position.unsqueeze(0)\n\n        causal_mask = self._update_causal_mask(\n            attention_mask,\n            inputs_embeds,\n            cache_position,\n            past_seen_tokens + inputs_embeds.shape[1],\n        )\n\n        # embed positions\n        hidden_states = inputs_embeds\n\n        # normalized\n        # Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5\n        # See https://github.com/huggingface/transformers/pull/29402\n        normalizer = torch.tensor(\n            self.config.hidden_size**0.5, dtype=hidden_states.dtype\n        )\n        hidden_states = hidden_states * normalizer\n\n        # decoder layers\n        all_hidden_states = () if output_hidden_states else None\n        all_self_attns = () if output_attentions else None\n        next_decoder_cache = None\n\n        for decoder_layer in self.layers:\n            if output_hidden_states:\n                all_hidden_states += (hidden_states,)\n\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(\n                    decoder_layer.__call__,\n                    hidden_states,\n                    causal_mask,\n                    position_ids,\n                    past_key_values,\n                    output_attentions,\n                    use_cache,\n                    cache_position,\n                )\n            else:\n                layer_outputs = decoder_layer(\n                    hidden_states,\n                    attention_mask=causal_mask,\n                    position_ids=position_ids,\n                    past_key_value=past_key_values,\n                    output_attentions=output_attentions,\n                    use_cache=use_cache,\n                    cache_position=cache_position,\n                )\n\n            hidden_states = layer_outputs[0]\n\n            if use_cache:\n                next_decoder_cache = layer_outputs[2 if output_attentions else 1]\n\n            if output_attentions:\n                all_self_attns += (layer_outputs[1],)\n\n        hidden_states = self.norm(hidden_states)\n\n        # add hidden states from the last decoder layer\n        if output_hidden_states:\n            all_hidden_states += (hidden_states,)\n\n        next_cache = None\n        if use_cache:\n            next_cache = (\n                next_decoder_cache.to_legacy_cache()\n                if isinstance(next_decoder_cache, Cache)\n                else next_decoder_cache\n            )\n        if not return_dict:\n            return tuple(\n                v\n                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]\n                if v is not None\n            )\n        return 
BaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=next_cache,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attns,\n        )\n\n    # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static\n    # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.\n    # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using\n    # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114\n    def _update_causal_mask(\n        self, attention_mask, input_tensor, cache_position, current_length\n    ):\n        if self.config._attn_implementation == \"flash_attention_2\":\n            if attention_mask is not None and 0.0 in attention_mask:\n                return attention_mask\n            return None\n\n        dtype, device = input_tensor.dtype, input_tensor.device\n        min_dtype = torch.finfo(dtype).min\n        sequence_length = input_tensor.shape[1]\n        if hasattr(\n            getattr(self.layers[0], \"self_attn\", {}), \"past_key_value\"\n        ):  # static cache\n            target_length = self.config.max_position_embeddings\n        else:  # dynamic cache\n            target_length = (\n                attention_mask.shape[-1]\n                if isinstance(attention_mask, torch.Tensor)\n                else current_length + 1\n            )\n\n        causal_mask = torch.full(\n            (sequence_length, target_length),\n            fill_value=min_dtype,\n            dtype=dtype,\n            device=device,\n        )\n        if sequence_length != 1:\n            causal_mask = torch.triu(causal_mask, diagonal=1)\n        causal_mask *= torch.arange(\n            target_length, device=device\n        ) > cache_position.reshape(-1, 1)\n        causal_mask = causal_mask[None, None, :, :].expand(\n            input_tensor.shape[0], 1, -1, -1\n        )\n        if attention_mask is not None:\n            causal_mask = (\n                causal_mask.clone()\n            )  # copy to contiguous memory for in-place edit\n            if attention_mask.dim() == 2:\n                mask_length = attention_mask.shape[-1]\n                padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[\n                    :, None, None, :\n                ].eq(0.0)\n                causal_mask[..., :mask_length] = causal_mask[\n                    ..., :mask_length\n                ].masked_fill(padding_mask, min_dtype)\n            elif attention_mask.dim() == 4:\n                # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with\n                # cache. 
In that case, the 4D attention mask attends to the newest tokens only.\n                if attention_mask.shape[-2] < cache_position[0] + sequence_length:\n                    offset = cache_position[0]\n                else:\n                    offset = 0\n                mask_shape = attention_mask.shape\n                mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype\n                causal_mask[\n                    : mask_shape[0],\n                    : mask_shape[1],\n                    offset : mask_shape[2] + offset,\n                    : mask_shape[3],\n                ] = mask_slice\n\n        if (\n            self.config._attn_implementation == \"sdpa\"\n            and attention_mask is not None\n            and attention_mask.device.type == \"cuda\"\n        ):\n            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when\n            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.\n            # Details: https://github.com/pytorch/pytorch/issues/110213\n            causal_mask = AttentionMaskConverter._unmask_unattended(\n                causal_mask, min_dtype\n            )\n\n        return causal_mask\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->GEMMA,Llama->Gemma,llama->gemma\nclass GemmaForCausalLM(GemmaPreTrainedModel):\n    _tied_weights_keys = [\"lm_head.weight\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.model = GemmaModel(config)\n        self.vocab_size = config.vocab_size\n        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.model.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.model.embed_tokens = value\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def set_decoder(self, decoder):\n        self.model = decoder\n\n    def get_decoder(self):\n        return self.model\n\n    # Ignore copy\n    @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)\n    @replace_return_docstrings(\n        output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC\n    )\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        cache_position: Optional[torch.LongTensor] = None,\n    ) -> Union[Tuple, CausalLMOutputWithPast]:\n        r\"\"\"\n        Args:\n            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n                config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n        Returns:\n\n        Example:\n\n        ```python\n        >>> from transformers import AutoTokenizer, GemmaForCausalLM\n\n        >>> model = GemmaForCausalLM.from_pretrained(\"google/gemma-7b\")\n        >>> tokenizer = AutoTokenizer.from_pretrained(\"google/gemma-7b\")\n\n        >>> prompt = \"What is your favorite condiment?\"\n        >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n        >>> # Generate\n        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n        \"What is your favorite condiment?\"\n        ```\"\"\"\n        output_attentions = (\n            output_attentions\n            if output_attentions is not None\n            else self.config.output_attentions\n        )\n        output_hidden_states = (\n            output_hidden_states\n            if output_hidden_states is not None\n            else self.config.output_hidden_states\n        )\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n        outputs = self.model(\n            input_ids=input_ids,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n            cache_position=cache_position,\n        )\n\n        hidden_states = outputs[0]\n        logits = self.lm_head(hidden_states)\n        logits = logits.float()\n        loss = None\n        if labels is not None:\n            # Shift so that tokens < n predict n\n            shift_logits = logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss()\n            shift_logits = shift_logits.view(-1, self.config.vocab_size)\n            shift_labels = shift_labels.view(-1)\n            # Enable model parallelism\n            shift_labels = shift_labels.to(shift_logits.device)\n            loss = loss_fct(shift_logits, shift_labels)\n\n        if not return_dict:\n            output = (logits,) + outputs[1:]\n            return (loss,) + output if loss is not None else output\n\n        return CausalLMOutputWithPast(\n            loss=loss,\n            logits=logits,\n            past_key_values=outputs.past_key_values,\n            hidden_states=outputs.hidden_states,\n            attentions=outputs.attentions,\n        )\n\n    def prepare_inputs_for_generation(\n        self,\n        input_ids,\n        past_key_values=None,\n        attention_mask=None,\n        inputs_embeds=None,\n        cache_position=None,\n        **kwargs,\n    ):\n        # With static cache, the `past_key_values` is None\n        # TODO joao: standardize interface for the different Cache classes and remove of this if\n        has_static_cache = False\n        if past_key_values is None:\n            past_key_values = getattr(\n                getattr(self.model.layers[0], \"self_attn\", {}), 
\"past_key_value\", None\n            )\n            has_static_cache = past_key_values is not None\n\n        past_length = 0\n        if past_key_values is not None:\n            if isinstance(past_key_values, Cache):\n                past_length = (\n                    cache_position[0]\n                    if cache_position is not None\n                    else past_key_values.get_seq_length()\n                )\n                max_cache_length = (\n                    torch.tensor(\n                        past_key_values.get_max_length(), device=input_ids.device\n                    )\n                    if past_key_values.get_max_length() is not None\n                    else None\n                )\n                cache_length = (\n                    past_length\n                    if max_cache_length is None\n                    else torch.min(max_cache_length, past_length)\n                )\n            # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects\n            else:\n                cache_length = past_length = past_key_values[0][0].shape[2]\n                max_cache_length = None\n\n            # Keep only the unprocessed tokens:\n            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where\n            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as\n            # input)\n            if (\n                attention_mask is not None\n                and attention_mask.shape[1] > input_ids.shape[1]\n            ):\n                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]\n            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard\n            # input_ids based on the past_length.\n            elif past_length < input_ids.shape[1]:\n                input_ids = input_ids[:, past_length:]\n            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.\n\n            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.\n            if (\n                max_cache_length is not None\n                and attention_mask is not None\n                and cache_length + input_ids.shape[1] > max_cache_length\n            ):\n                attention_mask = attention_mask[:, -max_cache_length:]\n\n        position_ids = kwargs.get(\"position_ids\", None)\n        if attention_mask is not None and position_ids is None:\n            # create position_ids on the fly for batch generation\n            position_ids = attention_mask.long().cumsum(-1) - 1\n            position_ids.masked_fill_(attention_mask == 0, 1)\n            if past_key_values:\n                position_ids = position_ids[:, -input_ids.shape[1] :]\n\n        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n        if inputs_embeds is not None and past_key_values is None:\n            model_inputs = {\"inputs_embeds\": inputs_embeds}\n        else:\n            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise\n            # recompiles graphs as the stride of the inputs is a guard. 
Ref: https://github.com/huggingface/transformers/pull/29114\n            # TODO: use `next_tokens` directly instead.\n            model_inputs = {\"input_ids\": input_ids.contiguous()}\n\n        input_length = (\n            position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]\n        )\n        if cache_position is None:\n            cache_position = torch.arange(\n                past_length, past_length + input_length, device=input_ids.device\n            )\n        else:\n            cache_position = cache_position[-input_length:]\n\n        if has_static_cache:\n            past_key_values = None\n\n        model_inputs.update(\n            {\n                \"position_ids\": position_ids,\n                \"cache_position\": cache_position,\n                \"past_key_values\": past_key_values,\n                \"use_cache\": kwargs.get(\"use_cache\"),\n                \"attention_mask\": attention_mask,\n            }\n        )\n        return model_inputs\n\n    @staticmethod\n    def _reorder_cache(past_key_values, beam_idx):\n        reordered_past = ()\n        for layer_past in past_key_values:\n            reordered_past += (\n                tuple(\n                    past_state.index_select(0, beam_idx.to(past_state.device))\n                    for past_state in layer_past\n                ),\n            )\n        return reordered_past\n\n\n@add_start_docstrings(\n    \"\"\"\n    The Gemma Model transformer with a sequence classification head on top (linear layer).\n\n    [`GemmaForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-2) do.\n\n    Since it does classification on the last token, it requires to know the position of the last token. If a\n    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n    each row of the batch).\n    \"\"\",\n    GEMMA_START_DOCSTRING,\n)\n# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->GEMMA,Llama->Gemma\nclass GemmaForSequenceClassification(GemmaPreTrainedModel):\n    def __init__(self, config):\n        super().__init__(config)\n        self.num_labels = config.num_labels\n        self.model = GemmaModel(config)\n        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.model.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.model.embed_tokens = value\n\n    @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:\n        r\"\"\"\n        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n            config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n        \"\"\"\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        transformer_outputs = self.model(\n            input_ids,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n        hidden_states = transformer_outputs[0]\n        logits = self.score(hidden_states)\n\n        if input_ids is not None:\n            batch_size = input_ids.shape[0]\n        else:\n            batch_size = inputs_embeds.shape[0]\n\n        if self.config.pad_token_id is None and batch_size != 1:\n            raise ValueError(\n                \"Cannot handle batch sizes > 1 if no padding token is defined.\"\n            )\n        if self.config.pad_token_id is None:\n            sequence_lengths = -1\n        else:\n            if input_ids is not None:\n                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility\n                sequence_lengths = (\n                    torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1\n                )\n                sequence_lengths = sequence_lengths % input_ids.shape[-1]\n                sequence_lengths = sequence_lengths.to(logits.device)\n            else:\n                sequence_lengths = -1\n\n        pooled_logits = logits[\n            torch.arange(batch_size, device=logits.device), sequence_lengths\n        ]\n\n        loss = None\n        if labels is not None:\n            labels = labels.to(logits.device)\n            if self.config.problem_type is None:\n                if self.num_labels == 1:\n                    self.config.problem_type = \"regression\"\n                elif self.num_labels > 1 and (\n                    labels.dtype == torch.long or labels.dtype == torch.int\n                ):\n                    self.config.problem_type = \"single_label_classification\"\n                else:\n                    self.config.problem_type = \"multi_label_classification\"\n\n            if self.config.problem_type == \"regression\":\n                loss_fct = MSELoss()\n                if self.num_labels == 1:\n                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())\n                else:\n                    loss = loss_fct(pooled_logits, labels)\n            elif self.config.problem_type == \"single_label_classification\":\n                loss_fct = CrossEntropyLoss()\n                loss = loss_fct(\n                    pooled_logits.view(-1, self.num_labels), labels.view(-1)\n                )\n            elif self.config.problem_type == \"multi_label_classification\":\n                loss_fct = BCEWithLogitsLoss()\n                loss = loss_fct(pooled_logits, labels)\n        if not return_dict:\n            output = (pooled_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return SequenceClassifierOutputWithPast(\n            loss=loss,\n            logits=pooled_logits,\n            past_key_values=transformer_outputs.past_key_values,\n 
           hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n"
  },
  {
    "path": "original_llama.py",
    "content": "# coding=utf-8\n# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.\n#\n# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX\n# and OPT implementations in this library. It has been modified from its\n# original forms to accommodate minor architectural differences compared\n# to GPT-NeoX and OPT used by the Meta AI team that trained the model.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch LLaMA model.\"\"\"\n\nimport math\nimport warnings\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...cache_utils import Cache, DynamicCache, StaticCache\nfrom ...modeling_attn_mask_utils import AttentionMaskConverter\nfrom ...modeling_outputs import (\n    BaseModelOutputWithPast,\n    CausalLMOutputWithPast,\n    QuestionAnsweringModelOutput,\n    SequenceClassifierOutputWithPast,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import ALL_LAYERNORM_LAYERS\nfrom ...utils import (\n    add_start_docstrings,\n    add_start_docstrings_to_model_forward,\n    is_flash_attn_2_available,\n    is_flash_attn_greater_or_equal_2_10,\n    logging,\n    replace_return_docstrings,\n)\nfrom .configuration_llama import LlamaConfig\n\n\nif is_flash_attn_2_available():\n    from flash_attn import flash_attn_func, flash_attn_varlen_func\n    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"LlamaConfig\"\n\n\ndef _get_unpad_data(attention_mask):\n    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)\n    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()\n    max_seqlen_in_batch = seqlens_in_batch.max().item()\n    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))\n    return (\n        indices,\n        cu_seqlens,\n        max_seqlen_in_batch,\n    )\n\n\nclass LlamaRMSNorm(nn.Module):\n    def __init__(self, hidden_size, eps=1e-6):\n        \"\"\"\n        LlamaRMSNorm is equivalent to T5LayerNorm\n        \"\"\"\n        super().__init__()\n        self.weight = nn.Parameter(torch.ones(hidden_size))\n        self.variance_epsilon = eps\n\n    def forward(self, hidden_states):\n        input_dtype = hidden_states.dtype\n        hidden_states = hidden_states.to(torch.float32)\n        variance = hidden_states.pow(2).mean(-1, keepdim=True)\n        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)\n        return self.weight * hidden_states.to(input_dtype)\n\n\nALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)\n\n\nclass LlamaRotaryEmbedding(nn.Module):\n    def __init__(\n        self,\n        dim,\n        max_position_embeddings=2048,\n        base=10000,\n        device=None,\n        scaling_factor=1.0,\n    
):\n        super().__init__()\n        self.scaling_factor = scaling_factor\n        self.dim = dim\n        self.max_position_embeddings = max_position_embeddings\n        self.base = base\n        inv_freq = 1.0 / (\n            self.base\n            ** (\n                torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device)\n                / self.dim\n            )\n        )\n        self.register_buffer(\"inv_freq\", inv_freq, persistent=False)\n        # For BC we register cos and sin cached\n        self.max_seq_len_cached = max_position_embeddings\n        t = torch.arange(\n            self.max_seq_len_cached, device=device, dtype=torch.int64\n        ).type_as(self.inv_freq)\n        t = t / self.scaling_factor\n        freqs = torch.outer(t, self.inv_freq)\n        # Different from paper, but it uses a different permutation in order to obtain the same calculation\n        emb = torch.cat((freqs, freqs), dim=-1)\n        self.register_buffer(\n            \"_cos_cached\", emb.cos().to(torch.get_default_dtype()), persistent=False\n        )\n        self.register_buffer(\n            \"_sin_cached\", emb.sin().to(torch.get_default_dtype()), persistent=False\n        )\n\n    @property\n    def sin_cached(self):\n        logger.warning_once(\n            \"The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use \"\n            \"the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class\"\n        )\n        return self._sin_cached\n\n    @property\n    def cos_cached(self):\n        logger.warning_once(\n            \"The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use \"\n            \"the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class\"\n        )\n        return self._cos_cached\n\n    @torch.no_grad()\n    def forward(self, x, position_ids):\n        # x: [bs, num_attention_heads, seq_len, head_size]\n        inv_freq_expanded = (\n            self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)\n        )\n        position_ids_expanded = position_ids[:, None, :].float()\n        # Force float32 since bfloat16 loses precision on long contexts\n        # See https://github.com/huggingface/transformers/pull/29285\n        device_type = x.device.type\n        device_type = (\n            device_type\n            if isinstance(device_type, str) and device_type != \"mps\"\n            else \"cpu\"\n        )\n        with torch.autocast(device_type=device_type, enabled=False):\n            freqs = (\n                inv_freq_expanded.float() @ position_ids_expanded.float()\n            ).transpose(1, 2)\n            emb = torch.cat((freqs, freqs), dim=-1)\n            cos = emb.cos()\n            sin = emb.sin()\n        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)\n\n\nclass LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):\n    \"\"\"LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev\"\"\"\n\n    def forward(self, x, position_ids):\n        # difference to the original RoPE: a scaling factor is aplied to the position ids\n        position_ids = position_ids.float() / self.scaling_factor\n        cos, sin = super().forward(x, position_ids)\n        return cos, sin\n\n\nclass LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):\n    \"\"\"LlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla\"\"\"\n\n    def forward(self, x, position_ids):\n        # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length\n        seq_len = torch.max(position_ids) + 1\n        if seq_len > self.max_position_embeddings:\n            base = self.base * (\n                (self.scaling_factor * seq_len / self.max_position_embeddings)\n                - (self.scaling_factor - 1)\n            ) ** (self.dim / (self.dim - 2))\n            inv_freq = 1.0 / (\n                base\n                ** (\n                    torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device)\n                    / self.dim\n                )\n            )\n            self.register_buffer(\n                \"inv_freq\", inv_freq, persistent=False\n            )  # TODO joao: this may break with compilation\n\n        cos, sin = super().forward(x, position_ids)\n        return cos, sin\n\n\ndef rotate_half(x):\n    \"\"\"Rotates half the hidden dims of the input.\"\"\"\n    x1 = x[..., : x.shape[-1] // 2]\n    x2 = x[..., x.shape[-1] // 2 :]\n    return torch.cat((-x2, x1), dim=-1)\n\n\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n    \"\"\"Applies Rotary Position Embedding to the query and key tensors.\n\n    Args:\n        q (`torch.Tensor`): The query tensor.\n        k (`torch.Tensor`): The key tensor.\n        cos (`torch.Tensor`): The cosine part of the rotary embedding.\n        sin (`torch.Tensor`): The sine part of the rotary embedding.\n        position_ids (`torch.Tensor`, *optional*):\n            Deprecated and unused.\n        unsqueeze_dim (`int`, *optional*, defaults to 1):\n            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\n    Returns:\n        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.\n    \"\"\"\n    cos = cos.unsqueeze(unsqueeze_dim)\n    sin = sin.unsqueeze(unsqueeze_dim)\n    q_embed = (q * cos) + (rotate_half(q) * sin)\n    k_embed = (k * cos) + (rotate_half(k) * sin)\n    return q_embed, k_embed\n\n\nclass LlamaMLP(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        self.config = config\n        self.hidden_size = config.hidden_size\n        self.intermediate_size = config.intermediate_size\n        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n        self.act_fn = ACT2FN[config.hidden_act]\n\n    def forward(self, x):\n        if self.config.pretraining_tp > 1:\n            slice = self.intermediate_size // self.config.pretraining_tp\n            gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)\n            up_proj_slices = self.up_proj.weight.split(slice, dim=0)\n            down_proj_slices = self.down_proj.weight.split(slice, dim=1)\n\n            gate_proj = torch.cat(\n                [\n                    F.linear(x, gate_proj_slices[i])\n                    for i in range(self.config.pretraining_tp)\n                ],\n                dim=-1,\n            )\n            up_proj = torch.cat(\n                [\n                    F.linear(x, up_proj_slices[i])\n                    for i in range(self.config.pretraining_tp)\n                ],\n                dim=-1,\n            )\n\n            intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)\n            down_proj = [\n                F.linear(intermediate_states[i], down_proj_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            down_proj = sum(down_proj)\n        else:\n            down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n        return down_proj\n\n\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n    \"\"\"\n    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,\n    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)\n    \"\"\"\n    batch, num_key_value_heads, slen, head_dim = hidden_states.shape\n    if n_rep == 1:\n        return hidden_states\n    hidden_states = hidden_states[:, :, None, :, :].expand(\n        batch, num_key_value_heads, n_rep, slen, head_dim\n    )\n    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)\n\n\nclass LlamaAttention(nn.Module):\n    \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n    def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):\n        super().__init__()\n        self.config = config\n        self.layer_idx = layer_idx\n        if layer_idx is None:\n            logger.warning_once(\n                f\"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will \"\n                \"lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` \"\n                \"when creating this class.\"\n            )\n\n        self.attention_dropout = config.attention_dropout\n        self.hidden_size = config.hidden_size\n        self.num_heads = config.num_attention_heads\n        self.head_dim = self.hidden_size // self.num_heads\n        self.num_key_value_heads = config.num_key_value_heads\n        self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n        self.max_position_embeddings = config.max_position_embeddings\n        self.rope_theta = config.rope_theta\n        self.is_causal = True\n\n        if (self.head_dim * self.num_heads) != self.hidden_size:\n            raise ValueError(\n                f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n                f\" and `num_heads`: {self.num_heads}).\"\n            )\n\n        self.q_proj = nn.Linear(\n            self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias\n        )\n        self.k_proj = nn.Linear(\n            self.hidden_size,\n            self.num_key_value_heads * self.head_dim,\n            bias=config.attention_bias,\n        )\n        self.v_proj = nn.Linear(\n            self.hidden_size,\n            self.num_key_value_heads * self.head_dim,\n            bias=config.attention_bias,\n        )\n        self.o_proj = nn.Linear(\n            self.hidden_size, self.hidden_size, bias=config.attention_bias\n        )\n        self._init_rope()\n\n    def _init_rope(self):\n        if self.config.rope_scaling is None:\n            self.rotary_emb = LlamaRotaryEmbedding(\n                self.head_dim,\n                max_position_embeddings=self.max_position_embeddings,\n                base=self.rope_theta,\n            )\n        else:\n            scaling_type = self.config.rope_scaling[\"type\"]\n            scaling_factor = self.config.rope_scaling[\"factor\"]\n            if scaling_type == \"linear\":\n                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(\n                    self.head_dim,\n                    max_position_embeddings=self.max_position_embeddings,\n                    scaling_factor=scaling_factor,\n                    base=self.rope_theta,\n                )\n            elif scaling_type == \"dynamic\":\n                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(\n                    self.head_dim,\n                    max_position_embeddings=self.max_position_embeddings,\n                    scaling_factor=scaling_factor,\n                    base=self.rope_theta,\n                )\n            else:\n                raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        bsz, q_len, _ = hidden_states.size()\n\n        if self.config.pretraining_tp > 1:\n            key_value_slicing = (\n                self.num_key_value_heads * self.head_dim\n            ) // self.config.pretraining_tp\n            query_slices = self.q_proj.weight.split(\n                (self.num_heads * self.head_dim) // 
self.config.pretraining_tp, dim=0\n            )\n            key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)\n            value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)\n\n            query_states = [\n                F.linear(hidden_states, query_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            query_states = torch.cat(query_states, dim=-1)\n\n            key_states = [\n                F.linear(hidden_states, key_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            key_states = torch.cat(key_states, dim=-1)\n\n            value_states = [\n                F.linear(hidden_states, value_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            value_states = torch.cat(value_states, dim=-1)\n\n        else:\n            query_states = self.q_proj(hidden_states)\n            key_states = self.k_proj(hidden_states)\n            value_states = self.v_proj(hidden_states)\n\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n        cos, sin = self.rotary_emb(value_states, position_ids)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin\n        )\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n        attn_weights = torch.matmul(\n            query_states, key_states.transpose(2, 3)\n        ) / math.sqrt(self.head_dim)\n\n        if attention_mask is not None:  # no matter the length, we just slice it\n            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]\n            attn_weights = attn_weights + causal_mask\n\n        # upcast attention to fp32\n        attn_weights = nn.functional.softmax(\n            attn_weights, dim=-1, dtype=torch.float32\n        ).to(query_states.dtype)\n        attn_weights = nn.functional.dropout(\n            attn_weights, p=self.attention_dropout, training=self.training\n        )\n        attn_output = torch.matmul(attn_weights, value_states)\n\n        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n            raise ValueError(\n                f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n                f\" {attn_output.size()}\"\n            )\n\n        attn_output = attn_output.transpose(1, 2).contiguous()\n\n        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n\n        if self.config.pretraining_tp > 1:\n            attn_output = attn_output.split(\n                self.hidden_size // self.config.pretraining_tp, dim=2\n            )\n  
          o_proj_slices = self.o_proj.weight.split(\n                self.hidden_size // self.config.pretraining_tp, dim=1\n            )\n            attn_output = sum(\n                [\n                    F.linear(attn_output[i], o_proj_slices[i])\n                    for i in range(self.config.pretraining_tp)\n                ]\n            )\n        else:\n            attn_output = self.o_proj(attn_output)\n\n        if not output_attentions:\n            attn_weights = None\n\n        return attn_output, attn_weights, past_key_value\n\n\nclass LlamaFlashAttention2(LlamaAttention):\n    \"\"\"\n    Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stay\n    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of\n    flash attention and deal with padding tokens in case the input contains any of them.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.\n        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.\n        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).\n        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.LongTensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        output_attentions = False\n\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        # Flash attention requires the input to have the shape\n        # batch_size x seq_length x num_heads x head_dim\n        # therefore we just need to keep the original shape\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        cos, sin = self.rotary_emb(value_states, position_ids)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin\n        )\n\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, 
cache_kwargs\n            )\n\n        # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache\n        # to be able to avoid many of these transpose/reshape/view.\n        query_states = query_states.transpose(1, 2)\n        key_states = key_states.transpose(1, 2)\n        value_states = value_states.transpose(1, 2)\n\n        dropout_rate = self.attention_dropout if self.training else 0.0\n\n        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,\n        # therefore the input hidden states get silently cast to float32. Hence, we need to\n        # cast them back to the correct dtype just to be sure everything works as expected.\n        # This might slow down training & inference, so it is recommended to not cast the LayerNorms\n        # in fp32. (LlamaRMSNorm handles it correctly)\n\n        input_dtype = query_states.dtype\n        if input_dtype == torch.float32:\n            if torch.is_autocast_enabled():\n                target_dtype = torch.get_autocast_gpu_dtype()\n            # Handle the case where the model is quantized\n            elif hasattr(self.config, \"_pre_quantization_dtype\"):\n                target_dtype = self.config._pre_quantization_dtype\n            else:\n                target_dtype = self.q_proj.weight.dtype\n\n            logger.warning_once(\n                f\"The input hidden states seem to be silently cast to float32, this might be related to\"\n                f\" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in\"\n                f\" {target_dtype}.\"\n            )\n\n            query_states = query_states.to(target_dtype)\n            key_states = key_states.to(target_dtype)\n            value_states = value_states.to(target_dtype)\n\n        attn_output = self._flash_attention_forward(\n            query_states,\n            key_states,\n            value_states,\n            attention_mask,\n            q_len,\n            dropout=dropout_rate,\n        )\n\n        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()\n        attn_output = self.o_proj(attn_output)\n\n        if not output_attentions:\n            attn_weights = None\n\n        return attn_output, attn_weights, past_key_value\n\n    def _flash_attention_forward(\n        self,\n        query_states,\n        key_states,\n        value_states,\n        attention_mask,\n        query_length,\n        dropout=0.0,\n        softmax_scale=None,\n    ):\n        \"\"\"\n        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,\n        first unpad the input, then compute the attention scores and pad the final attention scores.\n\n        Args:\n            query_states (`torch.Tensor`):\n                Input query states to be passed to Flash Attention API\n            key_states (`torch.Tensor`):\n                Input key states to be passed to Flash Attention API\n            value_states (`torch.Tensor`):\n                Input value states to be passed to Flash Attention API\n            attention_mask (`torch.Tensor`):\n                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the\n                position of padding tokens and 1 for the position of non-padding tokens.\n            dropout (`float`):\n                Attention 
dropout\n            softmax_scale (`float`, *optional*):\n                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)\n        \"\"\"\n        if not self._flash_attn_uses_top_left_mask:\n            causal = self.is_causal\n        else:\n            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.\n            causal = self.is_causal and query_length != 1\n\n        # Contains at least one padding token in the sequence\n        if attention_mask is not None:\n            batch_size = query_states.shape[0]\n            (\n                query_states,\n                key_states,\n                value_states,\n                indices_q,\n                cu_seq_lens,\n                max_seq_lens,\n            ) = self._upad_input(\n                query_states, key_states, value_states, attention_mask, query_length\n            )\n\n            cu_seqlens_q, cu_seqlens_k = cu_seq_lens\n            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens\n\n            attn_output_unpad = flash_attn_varlen_func(\n                query_states,\n                key_states,\n                value_states,\n                cu_seqlens_q=cu_seqlens_q,\n                cu_seqlens_k=cu_seqlens_k,\n                max_seqlen_q=max_seqlen_in_batch_q,\n                max_seqlen_k=max_seqlen_in_batch_k,\n                dropout_p=dropout,\n                softmax_scale=softmax_scale,\n                causal=causal,\n            )\n\n            attn_output = pad_input(\n                attn_output_unpad, indices_q, batch_size, query_length\n            )\n        else:\n            attn_output = flash_attn_func(\n                query_states,\n                key_states,\n                value_states,\n                dropout,\n                softmax_scale=softmax_scale,\n                causal=causal,\n            )\n\n        return attn_output\n\n    def _upad_input(\n        self, query_layer, key_layer, value_layer, attention_mask, query_length\n    ):\n        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)\n        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape\n\n        key_layer = index_first_axis(\n            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),\n            indices_k,\n        )\n        value_layer = index_first_axis(\n            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim),\n            indices_k,\n        )\n        if query_length == kv_seq_len:\n            query_layer = index_first_axis(\n                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim),\n                indices_k,\n            )\n            cu_seqlens_q = cu_seqlens_k\n            max_seqlen_in_batch_q = max_seqlen_in_batch_k\n            indices_q = indices_k\n        elif query_length == 1:\n            max_seqlen_in_batch_q = 1\n            cu_seqlens_q = torch.arange(\n                batch_size + 1, dtype=torch.int32, device=query_layer.device\n            )  # There is a memcpy here, that is very bad.\n            indices_q = cu_seqlens_q[:-1]\n            query_layer = query_layer.squeeze(1)\n        else:\n            # The -q_len: slice assumes left padding.\n            attention_mask = attention_mask[:, -query_length:]\n            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(\n   
             query_layer, attention_mask\n            )\n\n        return (\n            query_layer,\n            key_layer,\n            value_layer,\n            indices_q,\n            (cu_seqlens_q, cu_seqlens_k),\n            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),\n        )\n\n\nclass LlamaSdpaAttention(LlamaAttention):\n    \"\"\"\n    Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from\n    `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to\n    SDPA API.\n    \"\"\"\n\n    # Adapted from LlamaAttention.forward\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Cache] = None,\n        output_attentions: bool = False,\n        use_cache: bool = False,\n        cache_position: Optional[torch.LongTensor] = None,\n    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n        if output_attentions:\n            # TODO: Improve this warning with e.g. `model.config.attn_implementation = \"manual\"` once this is implemented.\n            logger.warning_once(\n                \"LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, \"\n                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation=\"eager\"` when loading the model.'\n            )\n            return super().forward(\n                hidden_states=hidden_states,\n                attention_mask=attention_mask,\n                position_ids=position_ids,\n                past_key_value=past_key_value,\n                output_attentions=output_attentions,\n                use_cache=use_cache,\n                cache_position=cache_position,\n            )\n\n        bsz, q_len, _ = hidden_states.size()\n\n        query_states = self.q_proj(hidden_states)\n        key_states = self.k_proj(hidden_states)\n        value_states = self.v_proj(hidden_states)\n\n        query_states = query_states.view(\n            bsz, q_len, self.num_heads, self.head_dim\n        ).transpose(1, 2)\n        key_states = key_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n        value_states = value_states.view(\n            bsz, q_len, self.num_key_value_heads, self.head_dim\n        ).transpose(1, 2)\n\n        cos, sin = self.rotary_emb(value_states, position_ids)\n        query_states, key_states = apply_rotary_pos_emb(\n            query_states, key_states, cos, sin\n        )\n\n        # In case static cache is used, it is an instance attribute.\n        past_key_value = getattr(self, \"past_key_value\", past_key_value)\n\n        if past_key_value is not None:\n            # sin and cos are specific to RoPE models; cache_position needed for the static cache\n            cache_kwargs = {\"sin\": sin, \"cos\": cos, \"cache_position\": cache_position}\n            key_states, value_states = past_key_value.update(\n                key_states, value_states, self.layer_idx, cache_kwargs\n            )\n\n        key_states = repeat_kv(key_states, self.num_key_value_groups)\n        value_states = 
repeat_kv(value_states, self.num_key_value_groups)\n\n        causal_mask = attention_mask\n        if attention_mask is not None:\n            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]\n\n        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,\n        # Reference: https://github.com/pytorch/pytorch/issues/112577.\n        if query_states.device.type == \"cuda\" and causal_mask is not None:\n            query_states = query_states.contiguous()\n            key_states = key_states.contiguous()\n            value_states = value_states.contiguous()\n\n        # In case we are not compiling, we may set `causal_mask` to None, which is required to dispatch to SDPA's Flash Attention 2 backend, rather\n        # relying on the `is_causal` argument.\n        attn_output = torch.nn.functional.scaled_dot_product_attention(\n            query_states,\n            key_states,\n            value_states,\n            attn_mask=causal_mask,\n            dropout_p=self.attention_dropout if self.training else 0.0,\n            is_causal=causal_mask is None and q_len > 1,\n        )\n\n        attn_output = attn_output.transpose(1, 2).contiguous()\n        attn_output = attn_output.view(bsz, q_len, self.hidden_size)\n\n        attn_output = self.o_proj(attn_output)\n\n        return attn_output, None, past_key_value\n\n\nLLAMA_ATTENTION_CLASSES = {\n    \"eager\": LlamaAttention,\n    \"flash_attention_2\": LlamaFlashAttention2,\n    \"sdpa\": LlamaSdpaAttention,\n}\n\n\nclass LlamaDecoderLayer(nn.Module):\n    def __init__(self, config: LlamaConfig, layer_idx: int):\n        super().__init__()\n        self.hidden_size = config.hidden_size\n\n        self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](\n            config=config, layer_idx=layer_idx\n        )\n\n        self.mlp = LlamaMLP(config)\n        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n        self.post_attention_layernorm = LlamaRMSNorm(\n            config.hidden_size, eps=config.rms_norm_eps\n        )\n\n    def forward(\n        self,\n        hidden_states: torch.Tensor,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_value: Optional[Tuple[torch.Tensor]] = None,\n        output_attentions: Optional[bool] = False,\n        use_cache: Optional[bool] = False,\n        cache_position: Optional[torch.LongTensor] = None,\n        **kwargs,\n    ) -> Tuple[\n        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]\n    ]:\n        \"\"\"\n        Args:\n            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n            attention_mask (`torch.FloatTensor`, *optional*):\n                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,\n                query_sequence_length, key_sequence_length)` if default attention is used.\n            output_attentions (`bool`, *optional*):\n                Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n                returned tensors for more detail.\n            use_cache (`bool`, *optional*):\n                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n                (see `past_key_values`).\n            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n        \"\"\"\n        if \"padding_mask\" in kwargs:\n            warnings.warn(\n                \"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.\"\n            )\n\n        residual = hidden_states\n\n        hidden_states = self.input_layernorm(hidden_states)\n\n        # Self Attention\n        hidden_states, self_attn_weights, present_key_value = self.self_attn(\n            hidden_states=hidden_states,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_value=past_key_value,\n            output_attentions=output_attentions,\n            use_cache=use_cache,\n            cache_position=cache_position,\n            **kwargs,\n        )\n        hidden_states = residual + hidden_states\n\n        # Fully Connected\n        residual = hidden_states\n        hidden_states = self.post_attention_layernorm(hidden_states)\n        hidden_states = self.mlp(hidden_states)\n        hidden_states = residual + hidden_states\n\n        outputs = (hidden_states,)\n\n        if output_attentions:\n            outputs += (self_attn_weights,)\n\n        if use_cache:\n            outputs += (present_key_value,)\n\n        return outputs\n\n\nLLAMA_START_DOCSTRING = r\"\"\"\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`LlamaConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. 
Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare LLaMA Model outputting raw hidden-states without any specific head on top.\",\n    LLAMA_START_DOCSTRING,\n)\nclass LlamaPreTrainedModel(PreTrainedModel):\n    config_class = LlamaConfig\n    base_model_prefix = \"model\"\n    supports_gradient_checkpointing = True\n    _no_split_modules = [\"LlamaDecoderLayer\"]\n    _skip_keys_device_placement = [\"past_key_values\"]\n    _supports_flash_attn_2 = True\n    _supports_sdpa = True\n    _supports_cache_class = True\n\n    def _init_weights(self, module):\n        std = self.config.initializer_range\n        if isinstance(module, nn.Linear):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=std)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n\n    def _setup_cache(\n        self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None\n    ):\n        if (\n            self.config._attn_implementation == \"flash_attention_2\"\n            and cache_cls == StaticCache\n        ):\n            raise ValueError(\n                \"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` \"\n                \"make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers\"\n            )\n\n        for layer in self.model.layers:\n            device = layer.input_layernorm.weight.device\n            if hasattr(self.config, \"_pre_quantization_dtype\"):\n                dtype = self.config._pre_quantization_dtype\n            else:\n                dtype = layer.self_attn.o_proj.weight.dtype\n            layer.self_attn.past_key_value = cache_cls(\n                self.config, max_batch_size, max_cache_len, device=device, dtype=dtype\n            )\n\n    def _reset_cache(self):\n        for layer in self.model.layers:\n            layer.self_attn.past_key_value = None\n\n\nLLAMA_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n            it.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n            Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n            `past_key_values`).\n\n            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n            information on the default strategy.\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.n_positions - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n            Two formats are allowed:\n            - a [`~cache_utils.Cache`] instance;\n            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n            cache format.\n\n            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n            legacy cache format will be returned.\n\n            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n            of shape `(batch_size, sequence_length)`.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n            Indices depicting the position of the input sequence tokens in the sequence. 
Contrarily to `position_ids`,\n            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n            the complete sequence length.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare LLaMA Model outputting raw hidden-states without any specific head on top.\",\n    LLAMA_START_DOCSTRING,\n)\nclass LlamaModel(LlamaPreTrainedModel):\n    \"\"\"\n    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]\n\n    Args:\n        config: LlamaConfig\n    \"\"\"\n\n    def __init__(self, config: LlamaConfig):\n        super().__init__(config)\n        self.padding_idx = config.pad_token_id\n        self.vocab_size = config.vocab_size\n\n        self.embed_tokens = nn.Embedding(\n            config.vocab_size, config.hidden_size, self.padding_idx\n        )\n        self.layers = nn.ModuleList(\n            [\n                LlamaDecoderLayer(config, layer_idx)\n                for layer_idx in range(config.num_hidden_layers)\n            ]\n        )\n        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n        self.gradient_checkpointing = False\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.embed_tokens = value\n\n    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        cache_position: Optional[torch.LongTensor] = None,\n    ) -> Union[Tuple, BaseModelOutputWithPast]:\n        output_attentions = (\n            output_attentions\n            if output_attentions is not None\n            else self.config.output_attentions\n        )\n        output_hidden_states = (\n            output_hidden_states\n            if output_hidden_states is not None\n            else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        if (input_ids is None) ^ (inputs_embeds is not None):\n            raise ValueError(\n                \"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one\"\n            )\n\n        if self.gradient_checkpointing and self.training and use_cache:\n            logger.warning_once(\n                \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.\"\n            )\n            use_cache = False\n\n        if inputs_embeds is None:\n            inputs_embeds = self.embed_tokens(input_ids)\n\n        past_seen_tokens = 0\n        if use_cache:  # kept for BC (cache positions)\n            if not isinstance(past_key_values, StaticCache):\n                past_key_values = DynamicCache.from_legacy_cache(past_key_values)\n                past_seen_tokens = past_key_values.get_seq_length()\n\n        if cache_position is None:\n            if isinstance(past_key_values, StaticCache):\n                raise ValueError(\n                    \"cache_position is a required argument when using StaticCache.\"\n                )\n            cache_position = torch.arange(\n                past_seen_tokens,\n                past_seen_tokens + inputs_embeds.shape[1],\n                device=inputs_embeds.device,\n            )\n\n        if position_ids is None:\n            position_ids = cache_position.unsqueeze(0)\n\n        causal_mask = self._update_causal_mask(\n            attention_mask, inputs_embeds, cache_position, past_seen_tokens\n        )\n\n        # embed positions\n        hidden_states = inputs_embeds\n\n        # decoder layers\n        all_hidden_states = () if output_hidden_states else None\n        all_self_attns = () if output_attentions else None\n        next_decoder_cache = None\n\n        for decoder_layer in self.layers:\n            if output_hidden_states:\n                all_hidden_states += (hidden_states,)\n\n            if self.gradient_checkpointing and self.training:\n                layer_outputs = self._gradient_checkpointing_func(\n                    decoder_layer.__call__,\n                    hidden_states,\n                    causal_mask,\n                    position_ids,\n                    past_key_values,\n                    output_attentions,\n                    use_cache,\n                    cache_position,\n                )\n            else:\n                layer_outputs = decoder_layer(\n                    hidden_states,\n                    attention_mask=causal_mask,\n                    position_ids=position_ids,\n                    past_key_value=past_key_values,\n                    output_attentions=output_attentions,\n                    use_cache=use_cache,\n                    cache_position=cache_position,\n                )\n\n            hidden_states = layer_outputs[0]\n\n            if use_cache:\n                next_decoder_cache = layer_outputs[2 if output_attentions else 1]\n\n            if output_attentions:\n                all_self_attns += (layer_outputs[1],)\n\n        hidden_states = self.norm(hidden_states)\n\n        # add hidden states from the last decoder layer\n        if output_hidden_states:\n            all_hidden_states += (hidden_states,)\n\n        next_cache = None\n        if use_cache:\n            next_cache = (\n                next_decoder_cache.to_legacy_cache()\n                if isinstance(next_decoder_cache, Cache)\n                else next_decoder_cache\n            )\n        if not return_dict:\n            return tuple(\n                v\n                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]\n                if v is not None\n            )\n        return BaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=next_cache,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attns,\n        )\n\n    def 
_update_causal_mask(\n        self,\n        attention_mask: torch.Tensor,\n        input_tensor: torch.Tensor,\n        cache_position: torch.Tensor,\n        past_seen_tokens: int,\n    ):\n        # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static\n        # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.\n        # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using\n        # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114\n\n        if self.config._attn_implementation == \"flash_attention_2\":\n            if attention_mask is not None and 0.0 in attention_mask:\n                return attention_mask\n            return None\n\n        if self.config._attn_implementation == \"sdpa\":\n            # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument,\n            # in order to dispatch on Flash Attention 2.\n            if AttentionMaskConverter._ignore_causal_mask_sdpa(\n                attention_mask,\n                inputs_embeds=input_tensor,\n                past_key_values_length=past_seen_tokens,\n            ):\n                return None\n\n        dtype, device = input_tensor.dtype, input_tensor.device\n        min_dtype = torch.finfo(dtype).min\n        sequence_length = input_tensor.shape[1]\n        if hasattr(\n            getattr(self.layers[0], \"self_attn\", {}), \"past_key_value\"\n        ):  # static cache\n            target_length = self.config.max_position_embeddings\n        else:  # dynamic cache\n            target_length = (\n                attention_mask.shape[-1]\n                if isinstance(attention_mask, torch.Tensor)\n                else past_seen_tokens + sequence_length + 1\n            )\n\n        causal_mask = torch.full(\n            (sequence_length, target_length),\n            fill_value=min_dtype,\n            dtype=dtype,\n            device=device,\n        )\n        if sequence_length != 1:\n            causal_mask = torch.triu(causal_mask, diagonal=1)\n        causal_mask *= torch.arange(\n            target_length, device=device\n        ) > cache_position.reshape(-1, 1)\n        causal_mask = causal_mask[None, None, :, :].expand(\n            input_tensor.shape[0], 1, -1, -1\n        )\n        if attention_mask is not None:\n            causal_mask = (\n                causal_mask.clone()\n            )  # copy to contiguous memory for in-place edit\n            if attention_mask.dim() == 2:\n                mask_length = attention_mask.shape[-1]\n                padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[\n                    :, None, None, :\n                ].eq(0.0)\n                causal_mask[..., :mask_length] = causal_mask[\n                    ..., :mask_length\n                ].masked_fill(padding_mask, min_dtype)\n            elif attention_mask.dim() == 4:\n                # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with\n                # cache. 
In that case, the 4D attention mask attends to the newest tokens only.\n                if attention_mask.shape[-2] < cache_position[0] + sequence_length:\n                    offset = cache_position[0]\n                else:\n                    offset = 0\n                mask_shape = attention_mask.shape\n                mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype\n                causal_mask[\n                    : mask_shape[0],\n                    : mask_shape[1],\n                    offset : mask_shape[2] + offset,\n                    : mask_shape[3],\n                ] = mask_slice\n\n        if (\n            self.config._attn_implementation == \"sdpa\"\n            and attention_mask is not None\n            and attention_mask.device.type == \"cuda\"\n        ):\n            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when\n            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.\n            # Details: https://github.com/pytorch/pytorch/issues/110213\n            causal_mask = AttentionMaskConverter._unmask_unattended(\n                causal_mask, min_dtype\n            )\n\n        return causal_mask\n\n\nclass LlamaForCausalLM(LlamaPreTrainedModel):\n    _tied_weights_keys = [\"lm_head.weight\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.model = LlamaModel(config)\n        self.vocab_size = config.vocab_size\n        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.model.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.model.embed_tokens = value\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def set_decoder(self, decoder):\n        self.model = decoder\n\n    def get_decoder(self):\n        return self.model\n\n    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n    @replace_return_docstrings(\n        output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC\n    )\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        cache_position: Optional[torch.LongTensor] = None,\n    ) -> Union[Tuple, CausalLMOutputWithPast]:\n        r\"\"\"\n        Args:\n            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n                config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n        Returns:\n\n        Example:\n\n        ```python\n        >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n        >>> model = LlamaForCausalLM.from_pretrained(\"meta-llama/Llama-2-7b-hf\")\n        >>> tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-2-7b-hf\")\n\n        >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n        >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n        >>> # Generate\n        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n        \"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n        ```\"\"\"\n        output_attentions = (\n            output_attentions\n            if output_attentions is not None\n            else self.config.output_attentions\n        )\n        output_hidden_states = (\n            output_hidden_states\n            if output_hidden_states is not None\n            else self.config.output_hidden_states\n        )\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n        outputs = self.model(\n            input_ids=input_ids,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n            cache_position=cache_position,\n        )\n\n        hidden_states = outputs[0]\n        if self.config.pretraining_tp > 1:\n            lm_head_slices = self.lm_head.weight.split(\n                self.vocab_size // self.config.pretraining_tp, dim=0\n            )\n            logits = [\n                F.linear(hidden_states, lm_head_slices[i])\n                for i in range(self.config.pretraining_tp)\n            ]\n            logits = torch.cat(logits, dim=-1)\n        else:\n            logits = self.lm_head(hidden_states)\n        logits = logits.float()\n\n        loss = None\n        if labels is not None:\n            # Shift so that tokens < n predict n\n            shift_logits = logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss()\n            shift_logits = shift_logits.view(-1, self.config.vocab_size)\n            shift_labels = shift_labels.view(-1)\n            # Enable model parallelism\n            shift_labels = shift_labels.to(shift_logits.device)\n            loss = loss_fct(shift_logits, shift_labels)\n\n        if not return_dict:\n            output = (logits,) + outputs[1:]\n            return (loss,) + output if loss is not None else output\n\n        return CausalLMOutputWithPast(\n            loss=loss,\n            logits=logits,\n            past_key_values=outputs.past_key_values,\n            hidden_states=outputs.hidden_states,\n            attentions=outputs.attentions,\n        )\n\n    def prepare_inputs_for_generation(\n        self,\n        input_ids,\n 
       past_key_values=None,\n        attention_mask=None,\n        inputs_embeds=None,\n        cache_position=None,\n        **kwargs,\n    ):\n        # With static cache, the `past_key_values` is None\n        # TODO joao: standardize interface for the different Cache classes and remove of this if\n        has_static_cache = False\n        if past_key_values is None:\n            past_key_values = getattr(\n                getattr(self.model.layers[0], \"self_attn\", {}), \"past_key_value\", None\n            )\n            has_static_cache = past_key_values is not None\n\n        past_length = 0\n        if past_key_values is not None:\n            if isinstance(past_key_values, Cache):\n                past_length = (\n                    cache_position[0]\n                    if cache_position is not None\n                    else past_key_values.get_seq_length()\n                )\n                max_cache_length = (\n                    torch.tensor(\n                        past_key_values.get_max_length(), device=input_ids.device\n                    )\n                    if past_key_values.get_max_length() is not None\n                    else None\n                )\n                cache_length = (\n                    past_length\n                    if max_cache_length is None\n                    else torch.min(max_cache_length, past_length)\n                )\n            # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects\n            else:\n                cache_length = past_length = past_key_values[0][0].shape[2]\n                max_cache_length = None\n\n            # Keep only the unprocessed tokens:\n            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where\n            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as\n            # input)\n            if (\n                attention_mask is not None\n                and attention_mask.shape[1] > input_ids.shape[1]\n            ):\n                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]\n            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. 
We can discard\n            # input_ids based on the past_length.\n            elif past_length < input_ids.shape[1]:\n                input_ids = input_ids[:, past_length:]\n            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.\n\n            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.\n            if (\n                max_cache_length is not None\n                and attention_mask is not None\n                and cache_length + input_ids.shape[1] > max_cache_length\n            ):\n                attention_mask = attention_mask[:, -max_cache_length:]\n\n        position_ids = kwargs.get(\"position_ids\", None)\n        if attention_mask is not None and position_ids is None:\n            # create position_ids on the fly for batch generation\n            position_ids = attention_mask.long().cumsum(-1) - 1\n            position_ids.masked_fill_(attention_mask == 0, 1)\n            if past_key_values:\n                position_ids = position_ids[:, -input_ids.shape[1] :]\n\n        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n        if inputs_embeds is not None and past_key_values is None:\n            model_inputs = {\"inputs_embeds\": inputs_embeds}\n        else:\n            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise\n            # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114\n            # TODO: use `next_tokens` directly instead.\n            model_inputs = {\"input_ids\": input_ids.contiguous()}\n\n        input_length = (\n            position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]\n        )\n        if cache_position is None:\n            cache_position = torch.arange(\n                past_length, past_length + input_length, device=input_ids.device\n            )\n        else:\n            cache_position = cache_position[-input_length:]\n\n        if has_static_cache:\n            past_key_values = None\n\n        model_inputs.update(\n            {\n                \"position_ids\": position_ids,\n                \"cache_position\": cache_position,\n                \"past_key_values\": past_key_values,\n                \"use_cache\": kwargs.get(\"use_cache\"),\n                \"attention_mask\": attention_mask,\n            }\n        )\n        return model_inputs\n\n    @staticmethod\n    def _reorder_cache(past_key_values, beam_idx):\n        reordered_past = ()\n        for layer_past in past_key_values:\n            reordered_past += (\n                tuple(\n                    past_state.index_select(0, beam_idx.to(past_state.device))\n                    for past_state in layer_past\n                ),\n            )\n        return reordered_past\n\n\n@add_start_docstrings(\n    \"\"\"\n    The LLaMa Model transformer with a sequence classification head on top (linear layer).\n\n    [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-2) do.\n\n    Since it does classification on the last token, it requires to know the position of the last token. If a\n    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n    each row of the batch).\n    \"\"\",\n    LLAMA_START_DOCSTRING,\n)\nclass LlamaForSequenceClassification(LlamaPreTrainedModel):\n    def __init__(self, config):\n        super().__init__(config)\n        self.num_labels = config.num_labels\n        self.model = LlamaModel(config)\n        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.model.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.model.embed_tokens = value\n\n    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n    def forward(\n        self,\n        input_ids: torch.LongTensor = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:\n        r\"\"\"\n        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n        \"\"\"\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        transformer_outputs = self.model(\n            input_ids,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n        hidden_states = transformer_outputs[0]\n        logits = self.score(hidden_states)\n\n        if input_ids is not None:\n            batch_size = input_ids.shape[0]\n        else:\n            batch_size = inputs_embeds.shape[0]\n\n        if self.config.pad_token_id is None and batch_size != 1:\n            raise ValueError(\n                \"Cannot handle batch sizes > 1 if no padding token is defined.\"\n            )\n        if self.config.pad_token_id is None:\n            sequence_lengths = -1\n        else:\n            if input_ids is not None:\n                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility\n                sequence_lengths = (\n                    torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1\n                )\n                sequence_lengths = sequence_lengths % input_ids.shape[-1]\n                sequence_lengths = sequence_lengths.to(logits.device)\n            else:\n                sequence_lengths = -1\n\n        pooled_logits = logits[\n            torch.arange(batch_size, device=logits.device), 
sequence_lengths\n        ]\n\n        loss = None\n        if labels is not None:\n            labels = labels.to(logits.device)\n            if self.config.problem_type is None:\n                if self.num_labels == 1:\n                    self.config.problem_type = \"regression\"\n                elif self.num_labels > 1 and (\n                    labels.dtype == torch.long or labels.dtype == torch.int\n                ):\n                    self.config.problem_type = \"single_label_classification\"\n                else:\n                    self.config.problem_type = \"multi_label_classification\"\n\n            if self.config.problem_type == \"regression\":\n                loss_fct = MSELoss()\n                if self.num_labels == 1:\n                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())\n                else:\n                    loss = loss_fct(pooled_logits, labels)\n            elif self.config.problem_type == \"single_label_classification\":\n                loss_fct = CrossEntropyLoss()\n                loss = loss_fct(\n                    pooled_logits.view(-1, self.num_labels), labels.view(-1)\n                )\n            elif self.config.problem_type == \"multi_label_classification\":\n                loss_fct = BCEWithLogitsLoss()\n                loss = loss_fct(pooled_logits, labels)\n        if not return_dict:\n            output = (pooled_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return SequenceClassifierOutputWithPast(\n            loss=loss,\n            logits=pooled_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n\n\n@add_start_docstrings(\n    \"\"\"\nThe Llama Model transformer with a span classification head on top for extractive question-answering tasks like\nSQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    \"\"\",\n    LLAMA_START_DOCSTRING,\n)\nclass LlamaForQuestionAnswering(LlamaPreTrainedModel):\n    base_model_prefix = \"transformer\"\n\n    # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama\n    def __init__(self, config):\n        super().__init__(config)\n        self.transformer = LlamaModel(config)\n        self.qa_outputs = nn.Linear(config.hidden_size, 2)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.transformer.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.transformer.embed_tokens = value\n\n    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[List[torch.FloatTensor]] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        start_positions: Optional[torch.LongTensor] = None,\n        end_positions: Optional[torch.LongTensor] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, QuestionAnsweringModelOutput]:\n        r\"\"\"\n        
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n            Labels for position (index) of the start of the labelled span for computing the token classification loss.\n            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n            are not taken into account for computing the loss.\n        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n            Labels for position (index) of the end of the labelled span for computing the token classification loss.\n            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n            are not taken into account for computing the loss.\n        \"\"\"\n        return_dict = (\n            return_dict if return_dict is not None else self.config.use_return_dict\n        )\n\n        outputs = self.transformer(\n            input_ids,\n            attention_mask=attention_mask,\n            position_ids=position_ids,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        sequence_output = outputs[0]\n\n        logits = self.qa_outputs(sequence_output)\n        start_logits, end_logits = logits.split(1, dim=-1)\n        start_logits = start_logits.squeeze(-1).contiguous()\n        end_logits = end_logits.squeeze(-1).contiguous()\n\n        total_loss = None\n        if start_positions is not None and end_positions is not None:\n            # If we are on multi-GPU, split add a dimension\n            if len(start_positions.size()) > 1:\n                start_positions = start_positions.squeeze(-1).to(start_logits.device)\n            if len(end_positions.size()) > 1:\n                end_positions = end_positions.squeeze(-1).to(end_logits.device)\n            # sometimes the start/end positions are outside our model inputs, we ignore these terms\n            ignored_index = start_logits.size(1)\n            start_positions = start_positions.clamp(0, ignored_index)\n            end_positions = end_positions.clamp(0, ignored_index)\n\n            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n            start_loss = loss_fct(start_logits, start_positions)\n            end_loss = loss_fct(end_logits, end_positions)\n            total_loss = (start_loss + end_loss) / 2\n\n        if not return_dict:\n            output = (start_logits, end_logits) + outputs[2:]\n            return ((total_loss,) + output) if total_loss is not None else output\n\n        return QuestionAnsweringModelOutput(\n            loss=total_loss,\n            start_logits=start_logits,\n            end_logits=end_logits,\n            hidden_states=outputs.hidden_states,\n            attentions=outputs.attentions,\n        )\n"
  },
  {
    "path": "requirements.txt",
    "content": "torch==2.2.2\ndatasets\naccelerate\nwandb\n\n"
  },
  {
    "path": "test_basic.infini.py",
    "content": "import os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"  # TODO: set the GPU device\n\nimport torch\nfrom torch.nn import functional as F\nfrom transformers import AutoTokenizer, pipeline\nfrom infini_gemma import GemmaForCausalLM\nfrom infini_gemma import GemmaConfig\n\nprint(\"Torch Version:\", torch.__version__)\nprint(\"CUDA:\", torch.cuda.is_available())\n\nif torch.cuda.is_available():\n    device = \"cuda:0\"  # set GPU device using CUDA_VISIBLE_DEVICES\nelse:\n    device = \"cpu\"\n\nconfig = GemmaConfig.from_pretrained(\n    \"google/gemma-2b\",\n)\n\nprint(config)\n\n# Create the Gemma model with Infini-attention\nmodel = GemmaForCausalLM(config)\n# model = model.from_pretrained(\"google/gemma-2b\")\npretrained_model = GemmaForCausalLM.from_pretrained(\"google/gemma-2b\")\n# Step 4: Transfer weights\n# Note: This is a simplified example; you need to ensure that each parameter's dimensions match.\nfor param in model.named_parameters():\n    name = param[0]\n    if name in pretrained_model.state_dict():\n        # Check if dimensions match, and only then assign the weights\n        if param[1].size() == pretrained_model.state_dict()[name].size():\n            param[1].data = pretrained_model.state_dict()[name].data.clone()\n        else:\n            print(f\"Skipping {name} due to size mismatch.\")\nprint(model)\nmodel.to(device)\n\n# Generate some dummy input data\ntokenizer = AutoTokenizer.from_pretrained(\"google/gemma-2b\")\ntext = \"\"\"This work introduces an efficient method to scale Transformer-based\"\"\"\nlongtext = \"\"\"The new memory states M s and z s are then passed to the next segment S + 1, building in a recurrence in each attention layer. The right side term σ (K ) T V in Eq. (4) is known as an associative binding operator (Smolensky, 1990; Hebb, 2005; Schlag et al., 2020).\nInspired by the success of delta rule (Munkhdalai et al., 2019; Schlag et al., 2020; 2021), we have also incorporated it into our Infini-attention. 
The delta rule attempts a slightly improved memory update by first retrieving existing value entries and subtracting them from the new values before applying the associative bindings as new update.\"\"\"\n\nencoded = tokenizer(\n    text,\n    return_tensors=\"pt\",\n)\n# attention_mask = torch.ones_like(input_ids)\nencoded[\"labels\"] = encoded[\"input_ids\"].clone()\n\nlong_encoded = tokenizer(\n    longtext,\n    return_tensors=\"pt\",\n)\n# attention_mask = torch.ones_like(input_ids)\nlong_encoded[\"labels\"] = long_encoded[\"input_ids\"].clone()\n\nprint(encoded)\n# Test the forward pass\noutputs = model(**encoded.to(device))  # position_ids=position_ids)\nprint(\"Short Text Loss\")\nprint(outputs.loss)\noutputs.loss.backward()  # Test the backward pass\n\noutputs = model(**long_encoded.to(device))  # position_ids=position_ids)\nprint(\"Long Text Loss\")\nprint(outputs.loss)\noutputs.loss.backward()  # Test the backward pass\n\nprint(\"backprop done\")\n\n\n# Step 1: Get effective batch size and sequence length\nbatch_size = encoded[\"input_ids\"].shape[0]\nsequence_length = encoded[\"input_ids\"].shape[1]\n\n# Step 2: Prepare input data for generation\ninput_ids = encoded[\"input_ids\"]\nattention_mask = encoded.get(\"attention_mask\", None)\n\n# Step 3: Initialize past\npast = None\n\n# Step 4: Start generation loop\nfor _ in range(10):  # 10 is the number of new tokens to generate\n    with torch.no_grad():\n        # Get next token scores\n        outputs = model(\n            input_ids,\n            attention_mask=attention_mask,\n            use_cache=True,\n            past_key_values=past,\n        )\n        next_token_logits = outputs.logits[:, -1, :]\n        past = outputs.past_key_values\n\n        # Perform sampling to get the next token\n        next_token = torch.multinomial(\n            F.softmax(next_token_logits, dim=-1), num_samples=1\n        )\n\n        # Update input_ids, attention_mask, and past\n        input_ids = torch.cat([input_ids, next_token], dim=-1)\n        if attention_mask is not None:\n            attention_mask = F.pad(attention_mask, (0, 1), value=1)\n\n# Step 5: Return generated sequence\ngenerated_sequence = tokenizer.decode(input_ids[0], skip_special_tokens=False)\nprint(\"generated_sequence:\", generated_sequence)\n\n# Test .generate() method\ngenerated = model.generate(\n    **encoded,\n    max_new_tokens=32,\n    do_sample=True,\n    num_return_sequences=1,\n)\nprint(\"Generated:\")\nprint(tokenizer.decode(generated[0], skip_special_tokens=False))\n"
  },
  {
    "path": "test_basic.py",
    "content": "import os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"  # TODO: set the GPU device\n\nimport torch\nfrom torch.nn import functional as F\nfrom transformers import GemmaConfig, GemmaForCausalLM, AutoTokenizer, pipeline\n\nprint(\"Torch Version:\", torch.__version__)\nprint(\"CUDA:\", torch.cuda.is_available())\n\nif torch.cuda.is_available():\n    device = \"cuda:0\"  # set GPU device using CUDA_VISIBLE_DEVICES\nelse:\n    device = \"cpu\"\n\nconfig = GemmaConfig.from_pretrained(\n    \"google/gemma-2b\",\n    attn_implementation=\"eager\",\n)\nconfig.memory_size = 2048\nconfig.use_cache = True\nconfig.segment_size = 16\n\nprint(config)\n\n# Create the Gemma model with Infini-attention\nmodel = GemmaForCausalLM(config)\n# model = model.from_pretrained(\"google/gemma-2b\")\npretrained_model = GemmaForCausalLM.from_pretrained(\"google/gemma-2b\")\n# Step 4: Transfer weights\n# Note: This is a simplified example; you need to ensure that each parameter's dimensions match.\nfor param in model.named_parameters():\n    name = param[0]\n    if name in pretrained_model.state_dict():\n        # Check if dimensions match, and only then assign the weights\n        if param[1].size() == pretrained_model.state_dict()[name].size():\n            param[1].data = pretrained_model.state_dict()[name].data.clone()\n        else:\n            print(f\"Skipping {name} due to size mismatch.\")\nprint(model)\nmodel.to(device)\n\n# Generate some dummy input data\ntokenizer = AutoTokenizer.from_pretrained(\"google/gemma-2b\")\ntext = \"\"\"This work introduces an efficient method to scale Transformer-based\"\"\"\nlongtext = \"\"\"The new memory states M s and z s are then passed to the next segment S + 1, building in a recurrence in each attention layer. The right side term σ (K ) T V in Eq. (4) is known as an associative binding operator (Smolensky, 1990; Hebb, 2005; Schlag et al., 2020).\nInspired by the success of delta rule (Munkhdalai et al., 2019; Schlag et al., 2020; 2021), we have also incorporated it into our Infini-attention. 
The delta rule attempts a slightly improved memory update by first retrieving existing value entries and subtracting them from the new values before applying the associative bindings as new update.\"\"\"\n\nencoded = tokenizer(\n    text,\n    return_tensors=\"pt\",\n)\n# attention_mask = torch.ones_like(input_ids)\nencoded[\"labels\"] = encoded[\"input_ids\"].clone()\n\nlong_encoded = tokenizer(\n    longtext,\n    return_tensors=\"pt\",\n)\n# attention_mask = torch.ones_like(input_ids)\nlong_encoded[\"labels\"] = long_encoded[\"input_ids\"].clone()\n\nprint(encoded)\n# Test the forward pass\noutputs = model(**encoded.to(device))  # position_ids=position_ids)\nprint(\"Short Text Loss\")\nprint(outputs.loss)\noutputs.loss.backward()  # Test the backward pass\n\noutputs = model(**long_encoded.to(device))  # position_ids=position_ids)\nprint(\"Long Text Loss\")\nprint(outputs.loss)\noutputs.loss.backward()  # Test the backward pass\n\nprint(\"backprop done\")\n\n\n# Step 1: Get effective batch size and sequence length\nbatch_size = encoded[\"input_ids\"].shape[0]\nsequence_length = encoded[\"input_ids\"].shape[1]\n\n# Step 2: Prepare input data for generation\ninput_ids = encoded[\"input_ids\"]\nattention_mask = encoded.get(\"attention_mask\", None)\n\n# Step 3: Initialize past\npast = None\n\n# Step 4: Start generation loop\nfor _ in range(10):  # 10 is the number of new tokens to generate\n    with torch.no_grad():\n        # Get next token scores\n        outputs = model(\n            input_ids,\n            attention_mask=attention_mask,\n            use_cache=True,\n            past_key_values=past,\n        )\n        next_token_logits = outputs.logits[:, -1, :]\n        past = outputs.past_key_values\n\n        # Perform sampling to get the next token\n        next_token = torch.multinomial(\n            F.softmax(next_token_logits, dim=-1), num_samples=1\n        )\n\n        # Update input_ids, attention_mask, and past\n        input_ids = torch.cat([input_ids, next_token], dim=-1)\n        if attention_mask is not None:\n            attention_mask = F.pad(attention_mask, (0, 1), value=1)\n\n# Step 5: Return generated sequence\ngenerated_sequence = tokenizer.decode(input_ids[0], skip_special_tokens=False)\nprint(\"generated_sequence:\", generated_sequence)\n\n# Test .generate() method\ngenerated = model.generate(\n    **encoded,\n    max_new_tokens=32,\n    do_sample=True,\n    num_return_sequences=1,\n)\nprint(\"Generated:\")\nprint(tokenizer.decode(generated[0], skip_special_tokens=False))\n"
  },
  {
    "path": "test_basic.trained.py",
    "content": "import os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"  # TODO: set the GPU device\n\nimport torch\nfrom torch.nn import functional as F\nfrom transformers import GemmaConfig, GemmaForCausalLM, AutoTokenizer, pipeline\n\nmodel = GemmaForCausalLM.from_pretrained(\n    'models/gemma-2b-wikitext/checkpoint-297',\n    torch_dtype='auto',\n    device_map={'':0},\n    attn_implementation=\"eager\",\n)\n\nprint(model)\n\n# Generate some dummy input data\ntokenizer = AutoTokenizer.from_pretrained(\"google/gemma-2b\")\ntext = \"\"\"This work introduces an efficient method to scale Transformer-based\"\"\"\n\nencoded = tokenizer(\n    text,\n    return_tensors=\"pt\",\n).to(model.device)\n\n# Step 1: Get effective batch size and sequence length\nbatch_size = encoded[\"input_ids\"].shape[0]\nsequence_length = encoded[\"input_ids\"].shape[1]\n\n# Step 2: Prepare input data for generation\ninput_ids = encoded[\"input_ids\"]\nattention_mask = encoded.get(\"attention_mask\", None)\n\n# Step 3: Initialize past\npast = None\n\n# Step 4: Start generation loop\nfor _ in range(500):  # 10 is the number of new tokens to generate\n    with torch.no_grad():\n        # Get next token scores\n        outputs = model(\n            input_ids,\n            attention_mask=attention_mask,\n            use_cache=True,\n            past_key_values=past,\n        )\n        next_token_logits = outputs.logits[:, -1, :]\n        past = outputs.past_key_values\n\n        # Perform sampling to get the next token\n        next_token = torch.multinomial(\n            F.softmax(next_token_logits, dim=-1), num_samples=1\n        )\n\n        # Update input_ids, attention_mask, and past\n        input_ids = torch.cat([input_ids, next_token], dim=-1)\n        if attention_mask is not None:\n            attention_mask = F.pad(attention_mask, (0, 1), value=1)\n\n# Step 5: Return generated sequence\ngenerated_sequence = tokenizer.decode(input_ids[0], skip_special_tokens=False)\nprint(\"Input:\")\nprint(text)\nprint(\"generated_sequence:\")\nprint(generated_sequence.replace(text, ''))\n\n# Test .generate() method\ngenerated = model.generate(\n    **encoded,\n    max_new_tokens=1024,\n    do_sample=True,\n    num_return_sequences=1,\n)\nprint(\"Generated:\")\nprint(tokenizer.decode(generated[0], skip_special_tokens=False).replace(text, ''))\n"
  },
  {
    "path": "test_model_to_hf.py",
    "content": "import os\n\nfrom itertools import chain\n\nimport torch\nfrom datasets import load_dataset\n\nfrom transformers import (\n    AutoTokenizer,\n    GemmaConfig,\n    GemmaForCausalLM,\n    Trainer,\n    TrainingArguments,\n    set_seed,\n    default_data_collator,\n)\n\nset_seed(42)\n\nprint(\"Torch Version:\", torch.__version__)\n\nconfig = GemmaConfig.from_pretrained(\n    \"google/gemma-2b\",\n    attn_implementation=\"eager\",\n)\n# config.max_position_embeddings = 128\n# config.use_cache = False\nconfig.segment_size = config.max_position_embeddings # Add config\n\nprint(config)\n\npretrained_model = GemmaForCausalLM.from_pretrained(\n    \"google/gemma-2b\", torch_dtype=\"auto\"\n)\ntokenizer = AutoTokenizer.from_pretrained(\n    \"google/gemma-2b\", \n)\npretrained_model.save_pretrained('./models/gemma-2b')\nconfig.save_pretrained('./models/gemma-2b')\ntokenizer.save_pretrained('./models/gemma-2b')"
  },
  {
    "path": "test_train.small.gemma.infini.py",
    "content": "import os\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"  # TODO: set the GPU device\nos.environ[\"WANDB_PROJECT\"] = \"InfiniTransformer\"\n# os.environ[\"WANDB_MODE\"] = \"offline\"\n\n\nfrom itertools import chain\n\nimport torch\nfrom datasets import load_dataset\n\nfrom transformers import (\n    AutoTokenizer,\n    Trainer,\n    TrainingArguments,\n    set_seed,\n    default_data_collator,\n)\nfrom infini_gemma import GemmaForCausalLM, GemmaConfig\n\nset_seed(42)\n\nprint(\"Torch Version:\", torch.__version__)\nprint(\"CUDA:\", torch.cuda.is_available())\n\nif torch.cuda.is_available():\n    device = \"cuda:0\"  # set GPU device using CUDA_VISIBLE_DEVICES\nelse:\n    device = \"cpu\"\n\nif os.path.exists(\"./models/gemma-2b\"):\n    model = GemmaForCausalLM.from_pretrained(\n        \"./models/gemma-2b\", torch_dtype=\"auto\", device_map=\"auto\"\n    )\n    config = model.config\n    print(config)\n    print(model)\nelse:\n    config = GemmaConfig.from_pretrained(\n        \"google/gemma-2b\",\n        attn_implementation=\"eager\",\n    )\n    # config.max_position_embeddings = 128\n    config.use_cache = False\n    config.segment_size = config.max_position_embeddings\n\n    print(config)\n\n    # Create the Gemma model with Infini-attention\n    model = GemmaForCausalLM(config)\n    # model = model.from_pretrained(\"google/gemma-2b\")\n    pretrained_model = GemmaForCausalLM.from_pretrained(\n        \"google/gemma-2b\", torch_dtype=\"auto\"\n    )\n    # Step 4: Transfer weights\n    # Note: This is a simplified example; you need to ensure that each parameter's dimensions match.\n    for param in model.named_parameters():\n        name = param[0]\n        if name in pretrained_model.state_dict():\n            # Check if dimensions match, and only then assign the weights\n            if param[1].size() == pretrained_model.state_dict()[name].size():\n                param[1].data = pretrained_model.state_dict()[name].data.clone()\n            else:\n                print(f\"Skipping {name} due to size mismatch.\")\n    print(model)\n    # model = model.to(torch.bfloat16)\n    model = model.to(device)\n\n# wiki = load_dataset(\"wikipedia\", \"20220301.en\", split=\"train[:20000]\")\nwiki = load_dataset(\"wikitext\", \"wikitext-2-raw-v1\")\n\ntokenizer = AutoTokenizer.from_pretrained(\"google/gemma-2b\")\n\n\ndef tokenize_function(examples):\n    return tokenizer(examples[\"text\"])\n\n\ntry:\n    column_names = list(wiki[\"train\"].features)\nexcept KeyError:\n    column_names = list(wiki.features)\ntokenized_datasets = wiki.map(\n    tokenize_function, remove_columns=column_names, batched=True\n)\n\n\nblock_size = config.segment_size * 4  # will be 32768\nprint(\"block_size:\", block_size)\n\n\ndef group_texts(examples):\n    # Concatenate all texts.\n    concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\n    total_length = len(concatenated_examples[list(examples.keys())[0]])\n    # We drop the small remainder, and if the total_length < block_size  we exclude this batch and return an empty dict.\n    # We could add padding if the model supported it instead of this drop, you can customize this part to your needs.\n    total_length = (total_length // block_size) * block_size\n    # Split by chunks of max_len.\n    result = {\n        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\n        for k, t in concatenated_examples.items()\n    }\n    result[\"labels\"] = result[\"input_ids\"].copy()\n    return 
result\n\n\nlm_datasets = tokenized_datasets.map(\n    group_texts,\n    batched=True,\n)\n\nprint(lm_datasets)\n# print(lm_datasets[\"train\"][\"input_ids\"][0])\n\ntraining_args = TrainingArguments(\n    output_dir=\"./models/gemma-2b-wikitext\",\n    overwrite_output_dir=True,\n    num_train_epochs=1,\n    per_device_train_batch_size=1,  # to test batch dim\n    save_total_limit=1,\n    report_to=\"wandb\",  # \"none\" if you don't want to report to wandb\n    run_name=\"gemma-2b-wikitext\",\n    optim=\"adafactor\",\n    learning_rate=1e-4,\n    bf16=True,\n    logging_first_step=True,\n    logging_steps=1,\n    save_strategy=\"epoch\",\n    # warmup_ratio=0.1,\n    max_grad_norm=1.0,\n    gradient_checkpointing=True,  # Reduce vram 69G -> 43G\n)\n\ntry:\n    train_dataset = lm_datasets[\"train\"]\nexcept KeyError:\n    train_dataset = lm_datasets\n\ntrainer = Trainer(\n    model=model,\n    tokenizer=tokenizer,\n    args=training_args,\n    train_dataset=train_dataset,\n    # eval_dataset=lm_datasets[\"validation\"],\n    data_collator=default_data_collator,\n)\n\ntrainer.train()\n"
  },
  {
    "path": "test_train.small.gemma.py",
    "content": "import os\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"  # TODO: set the GPU device\nos.environ[\"WANDB_PROJECT\"] = \"InfiniTransformer\"\n# os.environ[\"WANDB_MODE\"] = \"offline\"\n\n\nfrom itertools import chain\n\nimport torch\nfrom datasets import load_dataset\n\nfrom transformers import (\n    AutoTokenizer,\n    GemmaConfig,\n    GemmaForCausalLM,\n    Trainer,\n    TrainingArguments,\n    set_seed,\n    default_data_collator,\n)\n\nset_seed(42)\n\nprint(\"Torch Version:\", torch.__version__)\nprint(\"CUDA:\", torch.cuda.is_available())\n\nif torch.cuda.is_available():\n    device = \"cuda:0\"  # set GPU device using CUDA_VISIBLE_DEVICES\nelse:\n    device = \"cpu\"\n    \nif os.path.exists('./models/gemma-2b'):\n    model = GemmaForCausalLM.from_pretrained('./models/gemma-2b', \n                                             torch_dtype='auto', \n                                             device_map='auto',\n                                             attn_implementation=\"eager\",\n                                            )\n    config = model.config\n    print(config)\n    print(model)\nelse:\n    config = GemmaConfig.from_pretrained(\n        \"google/gemma-2b\",\n        attn_implementation=\"eager\",\n    )\n    # config.max_position_embeddings = 128\n    config.use_cache = False\n    config.segment_size = config.max_position_embeddings\n\n    print(config)\n\n    # Create the Gemma model with Infini-attention\n    model = GemmaForCausalLM(config)\n    # model = model.from_pretrained(\"google/gemma-2b\")\n    pretrained_model = GemmaForCausalLM.from_pretrained(\n        \"google/gemma-2b\", torch_dtype=\"auto\"\n    )\n    # Step 4: Transfer weights\n    # Note: This is a simplified example; you need to ensure that each parameter's dimensions match.\n    for param in model.named_parameters():\n        name = param[0]\n        if name in pretrained_model.state_dict():\n            # Check if dimensions match, and only then assign the weights\n            if param[1].size() == pretrained_model.state_dict()[name].size():\n                param[1].data = pretrained_model.state_dict()[name].data.clone()\n            else:\n                print(f\"Skipping {name} due to size mismatch.\")\n    print(model)\n    # model = model.to(torch.bfloat16)\n    model = model.to(device)\n\nwiki = load_dataset(\"JeanKaddour/minipile\" )\n\ntokenizer = AutoTokenizer.from_pretrained(\"google/gemma-2b\")\n\n\ndef tokenize_function(examples):\n    return tokenizer(examples[\"text\"])\n\ntry:\n    column_names = list(wiki[\"train\"].features)\nexcept KeyError:\n    column_names = list(wiki.features)\ntokenized_datasets = wiki.map(\n    tokenize_function, remove_columns=column_names, batched=True\n)\n\n\nblock_size = config.segment_size * 8  # will be 8K\nprint(\"block_size:\", block_size)\n\n\ndef group_texts(examples):\n    # Concatenate all texts.\n    concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\n    total_length = len(concatenated_examples[list(examples.keys())[0]])\n    # We drop the small remainder, and if the total_length < block_size  we exclude this batch and return an empty dict.\n    # We could add padding if the model supported it instead of this drop, you can customize this part to your needs.\n    total_length = (total_length // block_size) * block_size\n    # Split by chunks of max_len.\n    result = {\n        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\n        for k, t in 
concatenated_examples.items()\n    }\n    result[\"labels\"] = result[\"input_ids\"].copy()\n    return result\n\n\nlm_datasets = tokenized_datasets.map(\n    group_texts,\n    batched=True,\n)\n\nprint(lm_datasets)\n# print(lm_datasets[\"train\"][\"input_ids\"][0])\n\ntraining_args = TrainingArguments(\n    output_dir=\"./models/gemma-2b-wikitext\",\n    overwrite_output_dir=True,\n    num_train_epochs=1,\n    per_device_train_batch_size=2,  # to test batch dim\n    save_total_limit=1,\n    report_to=\"wandb\",  # \"none\" if you don't want to report to wandb\n    run_name=\"gemma-2b-bookcorpus\",\n    optim=\"adafactor\",\n    learning_rate=1e-4,\n    bf16=True,\n    logging_first_step=True,\n    logging_steps=1,\n    save_strategy=\"epoch\",\n    # warmup_ratio=0.1,\n    max_grad_norm=1.0,\n    gradient_checkpointing=True, # Reduce vram 69G -> 43G\n)\n\ntry:\n    train_dataset = lm_datasets[\"train\"]\nexcept KeyError:\n    train_dataset = lm_datasets\n\ntrainer = Trainer(\n    model=model,\n    tokenizer=tokenizer,\n    args=training_args,\n    train_dataset=train_dataset,\n    # eval_dataset=lm_datasets[\"validation\"],\n    data_collator=default_data_collator,\n)\n\ntrainer.train()\n"
  },
  {
    "path": "train.gemma.infini.noclm.1Mseq.sh",
    "content": "# export CUDA_VISIBLE_DEVICES=0\n\naccelerate launch --mixed_precision='bf16' \\\n    train.gemma.infini.noclm.py \\\n    --model_name_or_path='google/gemma-2b' \\\n    --segment_length=2048 \\\n    --block_size=1048576 \\\n    --dataset_name='JeanKaddour/minipile' \\\n    --per_device_train_batch_size=2 \\\n    --per_device_eval_batch_size=2 \\\n    --output_dir='./models/gemma-2b-infini-noclm-minipile' \\\n    --checkpointing_steps=100 \\\n    --num_train_epochs=1 \\\n    --learning_rate=5e-5 \\\n    --seed=42 \\\n    --low_cpu_mem_usage \\\n    --report_to='wandb' \\\n    --preprocessing_num_workers=64 \\\n    --with_tracking \\\n"
  },
  {
    "path": "train.gemma.infini.noclm.32k.sh",
    "content": "# export CUDA_VISIBLE_DEVICES=0\n\naccelerate launch --mixed_precision='bf16' \\\n    train.gemma.infini.noclm.py \\\n    --model_name_or_path='google/gemma-2b' \\\n    --segment_length=2048 \\\n    --block_size=32768 \\\n    --dataset_name='JeanKaddour/minipile' \\\n    --per_device_train_batch_size=2 \\\n    --per_device_eval_batch_size=2 \\\n    --output_dir='./models/gemma-2b-infini-noclm-minipile' \\\n    --checkpointing_steps=100 \\\n    --num_train_epochs=1 \\\n    --learning_rate=5e-5 \\\n    --seed=42 \\\n    --low_cpu_mem_usage \\\n    --report_to='wandb' \\\n    --preprocessing_num_workers=64 \\\n    --with_tracking \\\n"
  },
  {
    "path": "train.gemma.infini.noclm.py",
    "content": "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\non a text file or a dataset without using HuggingFace Trainer.\n\nHere is the full list of checkpoints on the hub that can be fine-tuned by this script:\nhttps://huggingface.co/models?filter=text-generation\n\"\"\"\n# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.\n\nimport argparse\nimport json\nimport logging\nimport math\nimport os\nimport random\nfrom itertools import chain\nfrom pathlib import Path\n\nimport datasets\nimport torch\nfrom accelerate import Accelerator, DistributedType\nfrom accelerate.logging import get_logger\nfrom accelerate.utils import set_seed\nfrom datasets import load_dataset\nfrom huggingface_hub import HfApi\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\nimport transformers\nfrom transformers import (\n    CONFIG_MAPPING,\n    MODEL_MAPPING,\n    AutoConfig,\n    AutoModelForCausalLM,\n    AutoTokenizer,\n    SchedulerType,\n    default_data_collator,\n    get_scheduler,\n    Qwen2MoeConfig,\n)\nfrom transformers.utils import check_min_version, send_example_telemetry\nfrom transformers.utils.versions import require_version\nfrom infini_gemma import GemmaForCausalLM, GemmaConfig\nfrom datasets import DatasetDict, interleave_datasets\n\n# Will error if the minimal version of Transformers is not installed. 
Remove at your own risks.\ncheck_min_version(\"4.40.0.dev0\")\n\nlogger = get_logger(__name__)\n\nrequire_version(\n    \"datasets>=2.14.0\",\n    \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\",\n)\n\nMODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description=\"Finetune a transformers model on a causal language modeling task\"\n    )\n    parser.add_argument(\n        \"--dataset_name\",\n        type=str,\n        default=None,\n        help=\"The name of the dataset to use (via the datasets library).\",\n    )\n    parser.add_argument(\n        \"--dataset_config_name\",\n        type=str,\n        default=None,\n        help=\"The configuration name of the dataset to use (via the datasets library).\",\n    )\n    parser.add_argument(\n        \"--train_file\",\n        type=str,\n        default=None,\n        help=\"A csv, txt or a json file containing the training data.\",\n    )\n    parser.add_argument(\n        \"--validation_file\",\n        type=str,\n        default=None,\n        help=\"A csv, txt or a json file containing the validation data.\",\n    )\n    parser.add_argument(\n        \"--validation_split_percentage\",\n        default=5,\n        help=\"The percentage of the train set used as validation set in case there's no validation split\",\n    )\n    parser.add_argument(\n        \"--model_name_or_path\",\n        type=str,\n        help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n        required=False,\n    )\n    parser.add_argument(\n        \"--config_name\",\n        type=str,\n        default=None,\n        help=\"Pretrained config name or path if not the same as model_name\",\n    )\n    parser.add_argument(\n        \"--tokenizer_name\",\n        type=str,\n        default=None,\n        help=\"Pretrained tokenizer name or path if not the same as model_name\",\n    )\n    parser.add_argument(\n        \"--use_slow_tokenizer\",\n        action=\"store_true\",\n        help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\n    )\n    parser.add_argument(\n        \"--per_device_train_batch_size\",\n        type=int,\n        default=8,\n        help=\"Batch size (per device) for the training dataloader.\",\n    )\n    parser.add_argument(\n        \"--per_device_eval_batch_size\",\n        type=int,\n        default=8,\n        help=\"Batch size (per device) for the evaluation dataloader.\",\n    )\n    parser.add_argument(\n        \"--learning_rate\",\n        type=float,\n        default=5e-5,\n        help=\"Initial learning rate (after the potential warmup period) to use.\",\n    )\n    parser.add_argument(\n        \"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\"\n    )\n    parser.add_argument(\n        \"--num_train_epochs\",\n        type=int,\n        default=3,\n        help=\"Total number of training epochs to perform.\",\n    )\n    parser.add_argument(\n        \"--max_train_steps\",\n        type=int,\n        default=None,\n        help=\"Total number of training steps to perform. 
If provided, overrides num_train_epochs.\",\n    )\n    parser.add_argument(\n        \"--gradient_accumulation_steps\",\n        type=int,\n        default=1,\n        help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n    )\n    parser.add_argument(\n        \"--lr_scheduler_type\",\n        type=SchedulerType,\n        default=\"linear\",\n        help=\"The scheduler type to use.\",\n        choices=[\n            \"linear\",\n            \"cosine\",\n            \"cosine_with_restarts\",\n            \"polynomial\",\n            \"constant\",\n            \"constant_with_warmup\",\n        ],\n    )\n    parser.add_argument(\n        \"--num_warmup_steps\",\n        type=int,\n        default=0,\n        help=\"Number of steps for the warmup in the lr scheduler.\",\n    )\n    parser.add_argument(\n        \"--output_dir\", type=str, default=None, help=\"Where to store the final model.\"\n    )\n    parser.add_argument(\n        \"--seed\", type=int, default=None, help=\"A seed for reproducible training.\"\n    )\n    parser.add_argument(\n        \"--model_type\",\n        type=str,\n        default=None,\n        help=\"Model type to use if training from scratch.\",\n        choices=MODEL_TYPES,\n    )\n    parser.add_argument(\n        \"--block_size\",\n        type=int,\n        default=32768,\n        help=(\n            \"Optional input sequence length after tokenization. The training dataset will be truncated in block of\"\n            \" this size for training. Default to the model max input length for single sentence inputs (take into\"\n            \" account special tokens).\"\n        ),\n    )\n    parser.add_argument(\n        \"--preprocessing_num_workers\",\n        type=int,\n        default=None,\n        help=\"The number of processes to use for the preprocessing.\",\n    )\n    parser.add_argument(\n        \"--overwrite_cache\",\n        action=\"store_true\",\n        help=\"Overwrite the cached training and evaluation sets\",\n    )\n    parser.add_argument(\n        \"--no_keep_linebreaks\",\n        action=\"store_true\",\n        help=\"Do not keep line breaks when using TXT files.\",\n    )\n    parser.add_argument(\n        \"--push_to_hub\",\n        action=\"store_true\",\n        help=\"Whether or not to push the model to the Hub.\",\n    )\n    parser.add_argument(\n        \"--hub_model_id\",\n        type=str,\n        help=\"The name of the repository to keep in sync with the local `output_dir`.\",\n    )\n    parser.add_argument(\n        \"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\"\n    )\n    parser.add_argument(\n        \"--trust_remote_code\",\n        type=bool,\n        default=False,\n        help=(\n            \"Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option \"\n            \"should only be set to `True` for repositories you trust and in which you have read the code, as it will \"\n            \"execute code present on the Hub on your local machine.\"\n        ),\n    )\n    parser.add_argument(\n        \"--checkpointing_steps\",\n        type=str,\n        default=None,\n        help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n    )\n    parser.add_argument(\n        \"--resume_from_checkpoint\",\n        type=str,\n        default=None,\n        help=\"If the training should continue from a checkpoint folder.\",\n    )\n    parser.add_argument(\n        \"--with_tracking\",\n        action=\"store_true\",\n        help=\"Whether to enable experiment trackers for logging.\",\n    )\n    parser.add_argument(\n        \"--report_to\",\n        type=str,\n        default=\"all\",\n        help=(\n            'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`,'\n            ' `\"wandb\"`, `\"comet_ml\"` and `\"clearml\"`. Use `\"all\"` (default) to report to all integrations. '\n            \"Only applicable when `--with_tracking` is passed.\"\n        ),\n    )\n    parser.add_argument(\n        \"--low_cpu_mem_usage\",\n        action=\"store_true\",\n        help=(\n            \"It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. \"\n            \"If passed, LLM loading time and RAM consumption will be benefited.\"\n        ),\n    )\n    parser.add_argument(\n        \"--segment_length\",\n        type=int,\n        default=2048,\n        help=\"The length of the segment to split the input into.\",\n    )\n    args = parser.parse_args()\n\n    if args.push_to_hub:\n        if args.output_dir is None:\n            raise ValueError(\n                \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n            )\n\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The\n    # information sent is the one passed as arguments along with your Python/PyTorch versions.\n    send_example_telemetry(\"run_clm_no_trainer\", args)\n\n    # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n    # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers\n    # in the environment\n    accelerator_log_kwargs = {}\n\n    if args.with_tracking:\n        accelerator_log_kwargs[\"log_with\"] = args.report_to\n        accelerator_log_kwargs[\"project_dir\"] = args.output_dir\n    segment_length = args.segment_length\n    print(\"block_size:\", args.block_size)\n    print(\"segment_length:\", segment_length)\n    gradient_accumulation_steps = args.block_size // segment_length\n    print(\"gradient_accumulation_steps:\", gradient_accumulation_steps)\n    accelerator = Accelerator(\n        gradient_accumulation_steps=gradient_accumulation_steps,\n        **accelerator_log_kwargs,\n    )\n\n    # Make one log on every process with the configuration for debugging.\n    logging.basicConfig(\n        format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n        datefmt=\"%m/%d/%Y %H:%M:%S\",\n        level=logging.INFO,\n    )\n    logger.info(accelerator.state, main_process_only=False)\n    if accelerator.is_local_main_process:\n        datasets.utils.logging.set_verbosity_warning()\n        transformers.utils.logging.set_verbosity_info()\n    else:\n        datasets.utils.logging.set_verbosity_error()\n        transformers.utils.logging.set_verbosity_error()\n\n    # If passed along, set the training seed now.\n    if args.seed is not None:\n        set_seed(args.seed)\n\n    # Handle the repository creation\n    if accelerator.is_main_process:\n        if args.push_to_hub:\n            # Retrieve of infer repo_name\n            repo_name = args.hub_model_id\n            if repo_name is None:\n                repo_name = Path(args.output_dir).absolute().name\n            # Create repo and retrieve repo_id\n            api = HfApi()\n            repo_id = api.create_repo(\n                repo_name, exist_ok=True, token=args.hub_token\n            ).repo_id\n\n            with open(os.path.join(args.output_dir, \".gitignore\"), \"w+\") as gitignore:\n                if \"step_*\" not in gitignore:\n                    gitignore.write(\"step_*\\n\")\n                if \"epoch_*\" not in gitignore:\n                    gitignore.write(\"epoch_*\\n\")\n        elif args.output_dir is not None:\n            os.makedirs(args.output_dir, exist_ok=True)\n    accelerator.wait_for_everyone()\n\n    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n    # (the dataset will be downloaded automatically from the datasets Hub).\n    #\n    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n    # 'text' is found. 
You can easily tweak this behavior (see below).\n    #\n    # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n    # download the dataset.\n    if args.dataset_name is not None:\n        # Downloading and loading a dataset from the hub.\n        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)\n        if \"validation\" not in raw_datasets.keys():\n            raw_datasets[\"validation\"] = load_dataset(\n                args.dataset_name,\n                args.dataset_config_name,\n                split=f\"train[:{args.validation_split_percentage}%]\",\n            )\n            raw_datasets[\"train\"] = load_dataset(\n                args.dataset_name,\n                args.dataset_config_name,\n                split=f\"train[{args.validation_split_percentage}%:]\",\n            )\n\n    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n    # https://huggingface.co/docs/datasets/loading_datasets.\n\n    # Load pretrained model and tokenizer\n    #\n    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n    # download model & vocab.\n    if args.config_name:\n        # Not use AutoConfig, to avoid HF Gemma model loading\n        config = GemmaConfig.from_pretrained(\n            args.config_name,\n            trust_remote_code=args.trust_remote_code,\n        )\n    elif args.model_name_or_path:\n        # Not use AutoConfig, to avoid HF Gemma model loading\n        config = GemmaConfig.from_pretrained(\n            args.model_name_or_path,\n            trust_remote_code=args.trust_remote_code,\n        )\n    else:\n        config = CONFIG_MAPPING[args.model_type]()\n        logger.warning(\"You are instantiating a new config instance from scratch.\")\n\n    if args.tokenizer_name:\n        tokenizer = AutoTokenizer.from_pretrained(\n            args.tokenizer_name,\n            use_fast=not args.use_slow_tokenizer,\n            trust_remote_code=args.trust_remote_code,\n        )\n    elif args.model_name_or_path:\n        tokenizer = AutoTokenizer.from_pretrained(\n            args.model_name_or_path,\n            use_fast=not args.use_slow_tokenizer,\n            trust_remote_code=args.trust_remote_code,\n        )\n    else:\n        raise ValueError(\n            \"You are instantiating a new tokenizer from scratch. This is not supported by this script. \"\n            \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n        )\n\n    if args.model_name_or_path:\n        # Not use AutoModelForCausalLM to avoid HF Gemma model loading\n        model = GemmaForCausalLM.from_pretrained(\n            args.model_name_or_path,\n            from_tf=bool(\".ckpt\" in args.model_name_or_path),\n            config=config,\n            low_cpu_mem_usage=args.low_cpu_mem_usage,\n            trust_remote_code=args.trust_remote_code,\n            # torch_dtype=\"auto\",\n            device_map=\"auto\",\n        )\n    else:\n        logger.info(\"Training new model from scratch\")\n        model = GemmaForCausalLM(\n            config,\n            # torch_dtype=torch.bfloat16,\n            device_map=\"auto\",\n        )\n\n    # We resize the embeddings only when necessary to avoid index errors. 
If you are creating a model from scratch\n    # on a small vocab and want a smaller embedding size, remove this test.\n    embedding_size = model.get_input_embeddings().weight.shape[0]\n    if len(tokenizer) > embedding_size:\n        model.resize_token_embeddings(len(tokenizer))\n\n    # Preprocessing the datasets.\n    # First we tokenize all the texts.\n    column_names = raw_datasets[\"train\"].column_names\n    text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n\n    def tokenize_function(examples):\n        return tokenizer(examples[text_column_name])\n\n    with accelerator.main_process_first():\n        tokenized_datasets = raw_datasets.map(\n            tokenize_function,\n            batched=True,\n            num_proc=args.preprocessing_num_workers,\n            remove_columns=column_names,\n            load_from_cache_file=not args.overwrite_cache,\n            desc=\"Running tokenizer on dataset\",\n        )\n\n    if args.block_size is None:\n        block_size = tokenizer.model_max_length\n        if block_size > config.max_position_embeddings:\n            logger.warning(\n                f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n                f\"Using block_size={min(1024, config.max_position_embeddings)} instead. You can change that default value by passing --block_size xxx.\"\n            )\n            block_size = min(1024, config.max_position_embeddings)\n    else:\n        if args.block_size > tokenizer.model_max_length:\n            logger.warning(\n                f\"The block_size passed ({args.block_size}) is larger than the maximum length for the model \"\n                f\"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.\"\n            )\n        block_size = min(args.block_size, tokenizer.model_max_length)\n\n    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.\n    def group_texts(examples):\n        # Concatenate all texts.\n        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\n        total_length = len(concatenated_examples[list(examples.keys())[0]])\n        # We drop the small remainder, and if the total_length < block_size  we exclude this batch and return an empty dict.\n        # We could add padding if the model supported it instead of this drop, you can customize this part to your needs.\n        total_length = (total_length // block_size) * block_size\n        # Split by chunks of max_len.\n        result = {\n            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\n            for k, t in concatenated_examples.items()\n        }\n        result[\"labels\"] = result[\"input_ids\"].copy()\n        return result\n\n    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder\n    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower\n    # to preprocess.\n    #\n    # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information:\n    # https://huggingface.co/docs/datasets/process#map\n\n    with accelerator.main_process_first():\n        lm_datasets = tokenized_datasets.map(\n            group_texts,\n            batched=True,\n            num_proc=args.preprocessing_num_workers,\n            load_from_cache_file=not args.overwrite_cache,\n            desc=f\"Grouping texts in chunks of {block_size}\",\n        )\n\n    train_dataset = lm_datasets[\"train\"]\n    eval_dataset = lm_datasets[\"validation\"]\n\n    # # Log a few random samples from the training set:\n    # for index in random.sample(range(len(train_dataset)), 3):\n    #     logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n    # DataLoaders creation:\n    train_dataloader = DataLoader(\n        train_dataset,\n        shuffle=True,\n        collate_fn=default_data_collator,\n        batch_size=args.per_device_train_batch_size,\n    )\n    eval_dataloader = DataLoader(\n        eval_dataset,\n        collate_fn=default_data_collator,\n        batch_size=args.per_device_eval_batch_size,\n    )\n\n    # Optimizer\n    # Split weights in two groups, one with weight decay and the other not.\n    no_decay = [\"bias\", \"layer_norm.weight\"]\n    optimizer_grouped_parameters = [\n        {\n            \"params\": [\n                p\n                for n, p in model.named_parameters()\n                if not any(nd in n for nd in no_decay)\n            ],\n            \"weight_decay\": args.weight_decay,\n        },\n        {\n            \"params\": [\n                p\n                for n, p in model.named_parameters()\n                if any(nd in n for nd in no_decay)\n            ],\n            \"weight_decay\": 0.0,\n        },\n    ]\n    optimizer = torch.optim.AdamW(\n        optimizer_grouped_parameters,\n        lr=args.learning_rate,\n    )\n\n    # Scheduler and math around the number of training steps.\n    overrode_max_train_steps = False\n    num_update_steps_per_epoch = math.ceil(\n        len(train_dataloader) / args.gradient_accumulation_steps\n    )\n    if args.max_train_steps is None:\n        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n        overrode_max_train_steps = True\n\n    lr_scheduler = get_scheduler(\n        name=args.lr_scheduler_type,\n        optimizer=optimizer,\n        num_warmup_steps=args.num_warmup_steps * accelerator.num_processes,\n        num_training_steps=(\n            args.max_train_steps\n            if overrode_max_train_steps\n            else args.max_train_steps * accelerator.num_processes\n        ),\n    )\n\n    # Prepare everything with our `accelerator`.\n    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = (\n        accelerator.prepare(\n            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n        )\n    )\n\n    # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.\n    if accelerator.distributed_type == DistributedType.TPU:\n        model.tie_weights()\n\n    # We need to recalculate our total training steps as the size of the training dataloader may have changed.\n    num_update_steps_per_epoch = math.ceil(\n        len(train_dataloader) / args.gradient_accumulation_steps\n    )\n    if overrode_max_train_steps:\n        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n    # Afterwards we recalculate our number of training epochs\n    
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n    # Figure out how many steps we should save the Accelerator states\n    checkpointing_steps = args.checkpointing_steps\n    if checkpointing_steps is not None and checkpointing_steps.isdigit():\n        checkpointing_steps = int(checkpointing_steps)\n\n    # We need to initialize the trackers we use, and also store our configuration.\n    # The trackers initializes automatically on the main process.\n    if args.with_tracking:\n        experiment_config = vars(args)\n        # TensorBoard cannot log Enums, need the raw value\n        experiment_config[\"lr_scheduler_type\"] = experiment_config[\n            \"lr_scheduler_type\"\n        ].value\n        accelerator.init_trackers(\"InfiniTransformer\", experiment_config)\n\n    # Train!\n    total_batch_size = (\n        args.per_device_train_batch_size\n        * accelerator.num_processes\n        * args.gradient_accumulation_steps\n    )\n\n    logger.info(\"***** Running training *****\")\n    num_examples = len(train_dataset)\n    logger.info(f\"  Num examples = {num_examples}\")\n    logger.info(f\"  Num Epochs = {args.num_train_epochs}\")\n    logger.info(\n        f\"  Instantaneous batch size per device = {args.per_device_train_batch_size}\"\n    )\n    logger.info(\n        f\"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\"\n    )\n    logger.info(f\"  Gradient Accumulation steps = {gradient_accumulation_steps}\")\n    logger.info(f\"  Total optimization steps = {args.max_train_steps}\")\n    logger.info(f\"  Number of model parameters = {model.num_parameters()}\")\n    # Only show the progress bar once on each machine.\n    progress_bar = tqdm(\n        range(args.max_train_steps), disable=not accelerator.is_local_main_process\n    )\n    completed_steps = 0\n    starting_epoch = 0\n\n    # Potentially load in the weights and states from a previous save\n    if args.resume_from_checkpoint:\n        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n            checkpoint_path = args.resume_from_checkpoint\n            path = os.path.basename(args.resume_from_checkpoint)\n        else:\n            # Get the most recent checkpoint\n            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n            dirs.sort(key=os.path.getctime)\n            path = dirs[\n                -1\n            ]  # Sorts folders by date modified, most recent checkpoint is the last\n            checkpoint_path = path\n            path = os.path.basename(checkpoint_path)\n\n        accelerator.print(f\"Resumed from checkpoint: {checkpoint_path}\")\n        accelerator.load_state(checkpoint_path)\n        # Extract `epoch_{i}` or `step_{i}`\n        training_difference = os.path.splitext(path)[0]\n\n        if \"epoch\" in training_difference:\n            starting_epoch = int(training_difference.replace(\"epoch_\", \"\")) + 1\n            resume_step = None\n            completed_steps = starting_epoch * num_update_steps_per_epoch\n        else:\n            # need to multiply `gradient_accumulation_steps` to reflect real steps\n            resume_step = (\n                int(training_difference.replace(\"step_\", \"\"))\n                * args.gradient_accumulation_steps\n            )\n            starting_epoch = resume_step // len(train_dataloader)\n            completed_steps = resume_step // args.gradient_accumulation_steps\n            resume_step -= starting_epoch 
* len(train_dataloader)\n\n    # update the progress_bar if loading from a checkpoint\n    progress_bar.update(completed_steps)\n\n    for epoch in range(starting_epoch, args.num_train_epochs):\n        model.train()\n        total_loss = 0\n        if (\n            args.resume_from_checkpoint\n            and epoch == starting_epoch\n            and resume_step is not None\n        ):\n            # We skip the first `n` batches in the dataloader when resuming from a checkpoint\n            active_dataloader = accelerator.skip_first_batches(\n                train_dataloader, resume_step\n            )\n        else:\n            active_dataloader = train_dataloader\n        for step, batch in enumerate(active_dataloader):\n            total_segment_loss = 0\n            # Split each batch item into segments of segment_length tokens\n            input_ids = torch.tensor_split(\n                batch[\"input_ids\"],\n                list(\n                    range(segment_length, batch[\"input_ids\"].shape[1], segment_length)\n                ),\n                dim=1,\n            )\n            if \"attention_mask\" in batch:\n                attention_mask = torch.tensor_split(\n                    batch[\"attention_mask\"],\n                    list(\n                        range(\n                            segment_length,\n                            batch[\"attention_mask\"].shape[1],\n                            segment_length,\n                        )\n                    ),\n                    dim=1,\n                )\n            if \"labels\" in batch:\n                labels = torch.tensor_split(\n                    batch[\"labels\"],\n                    list(\n                        range(segment_length, batch[\"labels\"].shape[1], segment_length)\n                    ),\n                    dim=1,\n                )\n            memory, norm_term = None, None\n            avg_segment_loss = 0\n            for i in range(len(input_ids)):\n                outputs = model(\n                    input_ids=input_ids[i],\n                    attention_mask=attention_mask[i],\n                    labels=labels[i],\n                    memory=memory,\n                    norm_term=norm_term,\n                )\n                memory = outputs.memory\n                norm_term = outputs.norm_term\n                loss = outputs.loss\n                # print(\"Loss:\", loss.item())\n                # accelerator.backward(loss, retain_graph=True)\n                accelerator.backward(loss)\n                total_loss += loss.detach().float()\n                total_segment_loss += loss.detach().float()\n                # print(\"Total loss:\", total_loss)\n                # print(\"Total segment loss:\", total_segment_loss)\n            optimizer.step()\n            lr_scheduler.step()\n            optimizer.zero_grad()\n\n            if accelerator.sync_gradients:\n                progress_bar.update(1)\n                completed_steps += 1\n            # Log the training loss and lr every LOG_INTERVAL steps\n            LOG_INTERVAL = 1\n            if completed_steps % LOG_INTERVAL == 0:\n                avg_segment_loss = total_segment_loss / len(input_ids)\n                print(\n                    f\"Step: {completed_steps}, Loss: {avg_segment_loss.item()}, LR: {lr_scheduler.get_last_lr()[0]}\"\n                )\n                # Log to wandb by calling `accelerator.log`, `step` is optional\n                accelerator.log(\n                    {\n                        
\"train/loss\": avg_segment_loss.item(),\n                        \"train/learning_rate\": lr_scheduler.get_last_lr()[0],\n                        \"train/epoch\": completed_steps / num_examples,\n                    },\n                    step=completed_steps,\n                )\n            if isinstance(checkpointing_steps, int):\n                if completed_steps % checkpointing_steps == 0:\n                    output_dir = f\"step_{completed_steps}\"\n                    if args.output_dir is not None:\n                        output_dir = os.path.join(args.output_dir, output_dir)\n                    accelerator.save_state(output_dir)\n            if completed_steps >= args.max_train_steps:\n                break\n\n        print(\"Finished epoch:\", epoch)\n\n        model.eval()\n        losses = []\n        for step, batch in enumerate(eval_dataloader):\n            input_ids = torch.tensor_split(\n                batch[\"input_ids\"],\n                list(\n                    range(segment_length, batch[\"input_ids\"].shape[1], segment_length)\n                ),\n                dim=1,\n            )\n            if \"attention_mask\" in batch:\n                attention_mask = torch.tensor_split(\n                    batch[\"attention_mask\"],\n                    list(\n                        range(\n                            segment_length,\n                            batch[\"attention_mask\"].shape[1],\n                            segment_length,\n                        )\n                    ),\n                    dim=1,\n                )\n            if \"labels\" in batch:\n                labels = torch.tensor_split(\n                    batch[\"labels\"],\n                    list(\n                        range(segment_length, batch[\"labels\"].shape[1], segment_length)\n                    ),\n                    dim=1,\n                )\n\n            memory, norm_term = None, None\n            for i in range(len(input_ids)):\n                with torch.no_grad():\n                    outputs = model(\n                        input_ids=input_ids[i],\n                        attention_mask=attention_mask[i],\n                        labels=labels[i],\n                        memory=memory,\n                        norm_term=norm_term,\n                    )\n                memory = outputs.memory\n                norm_term = outputs.norm_term\n\n            loss = outputs.loss\n            losses.append(\n                accelerator.gather_for_metrics(\n                    loss.repeat(args.per_device_eval_batch_size)\n                )\n            )\n\n        losses = torch.cat(losses)\n        try:\n            eval_loss = torch.mean(losses)\n            perplexity = math.exp(eval_loss)\n        except OverflowError:\n            perplexity = float(\"inf\")\n\n        logger.info(f\"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}\")\n\n        if args.with_tracking:\n            accelerator.log(\n                {\n                    \"perplexity\": perplexity,\n                    \"eval_loss\": eval_loss,\n                    \"train_loss\": total_loss.item() / len(train_dataloader),\n                    \"epoch\": epoch,\n                    \"step\": completed_steps,\n                },\n                step=completed_steps,\n            )\n\n        if args.push_to_hub and epoch < args.num_train_epochs - 1:\n            accelerator.wait_for_everyone()\n            unwrapped_model = accelerator.unwrap_model(model)\n            
unwrapped_model.save_pretrained(\n                args.output_dir,\n                is_main_process=accelerator.is_main_process,\n                save_function=accelerator.save,\n            )\n            if accelerator.is_main_process:\n                tokenizer.save_pretrained(args.output_dir)\n                api.upload_folder(\n                    commit_message=f\"Training in progress epoch {epoch}\",\n                    folder_path=args.output_dir,\n                    repo_id=repo_id,\n                    repo_type=\"model\",\n                    token=args.hub_token,\n                )\n\n        if args.checkpointing_steps == \"epoch\":\n            output_dir = f\"epoch_{epoch}\"\n            if args.output_dir is not None:\n                output_dir = os.path.join(args.output_dir, output_dir)\n            accelerator.save_state(output_dir)\n\n    if args.with_tracking:\n        accelerator.end_training()\n\n    if args.output_dir is not None:\n        accelerator.wait_for_everyone()\n        unwrapped_model = accelerator.unwrap_model(model)\n        unwrapped_model.save_pretrained(\n            args.output_dir,\n            is_main_process=accelerator.is_main_process,\n            save_function=accelerator.save,\n        )\n        if accelerator.is_main_process:\n            tokenizer.save_pretrained(args.output_dir)\n            if args.push_to_hub:\n                api.upload_folder(\n                    commit_message=\"End of training\",\n                    folder_path=args.output_dir,\n                    repo_id=repo_id,\n                    repo_type=\"model\",\n                    token=args.hub_token,\n                )\n            with open(os.path.join(args.output_dir, \"all_results.json\"), \"w\") as f:\n                json.dump({\"perplexity\": perplexity}, f)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
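  {
    "path": "examples/segment_split_sketch.py",
    "content": "# examples/segment_split_sketch.py (illustrative file name, not used by the training scripts)\n# Minimal, self-contained sketch of how train.gemma.infini.noclm.py and\n# train.llama.infini.noclm.py split each packed block into fixed-size segments\n# with torch.tensor_split before the memory-carrying forward passes.\n# The toy sizes below simply mirror the defaults in the shell scripts.\n\nimport torch\n\nblock_size = 32768  # full packed sequence length (--block_size)\nsegment_length = 2048  # per-forward chunk (--segment_length)\nbatch_size = 2\n\n# A toy batch shaped like the output of default_data_collator.\nbatch = {\"input_ids\": torch.randint(0, 32000, (batch_size, block_size))}\n\n# Split along the sequence dimension at every multiple of segment_length,\n# exactly as the training loops do.\nsegments = torch.tensor_split(\n    batch[\"input_ids\"],\n    list(range(segment_length, batch[\"input_ids\"].shape[1], segment_length)),\n    dim=1,\n)\n\nassert len(segments) == block_size // segment_length  # 16 segments here\nassert all(s.shape == (batch_size, segment_length) for s in segments)\nprint(f\"{len(segments)} segments of shape {tuple(segments[0].shape)}\")\n"
  },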
  {
    "path": "train.gemma.infini.noclm.sh",
    "content": "# export CUDA_VISIBLE_DEVICES=0\n\naccelerate launch --mixed_precision='bf16' \\\n    train.gemma.infini.noclm.py \\\n    --model_name_or_path='google/gemma-2b' \\\n    --segment_length=2048 \\\n    --block_size=32768 \\\n    --dataset_name='wikitext' \\\n    --dataset_config_name='wikitext-2-raw-v1' \\\n    --per_device_train_batch_size=2 \\\n    --per_device_eval_batch_size=2 \\\n    --weight_decay=1.0 \\\n    --output_dir='./models/gemma-2b-infini-noclm-wikitext' \\\n    --checkpointing_steps=10 \\\n    --num_train_epochs=1 \\\n    --learning_rate=5e-5 \\\n    --seed=42 \\\n    --low_cpu_mem_usage \\\n    --report_to='wandb' \\\n    --preprocessing_num_workers=64 \\\n    --with_tracking \\\n"
  },
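  {
    "path": "examples/effective_batch_math.py",
    "content": "# examples/effective_batch_math.py (illustrative file name, not part of the training code)\n# Back-of-the-envelope sketch of the batch arithmetic behind the shell scripts:\n# the Python scripts derive the Accelerator's gradient_accumulation_steps as\n# block_size // segment_length, and optimizer.step() runs once per dataloader\n# batch, so each step backpropagates per_device_train_batch_size * block_size\n# tokens per device (single-process view; scale by the number of processes\n# when launching on more GPUs).\n\n\ndef describe(block_size: int, segment_length: int, per_device_batch: int) -> None:\n    segments_per_block = block_size // segment_length\n    tokens_per_step = per_device_batch * block_size\n    print(\n        f\"block_size={block_size}: {segments_per_block} segments of \"\n        f\"{segment_length} tokens, {tokens_per_step} tokens per optimizer step/device\"\n    )\n\n\nif __name__ == \"__main__\":\n    # Values from train.gemma.infini.noclm.sh\n    describe(block_size=32768, segment_length=2048, per_device_batch=2)\n    # Values from train.llama.infini.noclm.1Mseq.sh\n    describe(block_size=1048576, segment_length=2048, per_device_batch=2)\n"
  },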
  {
    "path": "train.llama.infini.noclm.1Mseq.sh",
    "content": "# export CUDA_VISIBLE_DEVICES=0\n\n# DEBUG=true \naccelerate launch --num_processes=1 --mixed_precision='bf16' \\\n    train.llama.infini.noclm.py \\\n    --model_name_or_path='meta-llama/Meta-Llama-3-8B' \\\n    --segment_length=2048 \\\n    --block_size=1048576 \\\n    --dataset_name='JeanKaddour/minipile' \\\n    --per_device_train_batch_size=2 \\\n    --per_device_eval_batch_size=2 \\\n    --output_dir='./models/llama-3-8b-infini-noclm-minipile' \\\n    --checkpointing_steps=1000 \\\n    --num_train_epochs=1 \\\n    --learning_rate=1e-4 \\\n    --seed=42 \\\n    --low_cpu_mem_usage \\\n    --report_to='wandb' \\\n    --preprocessing_num_workers=64 \\\n    --with_tracking \\\n"
  },
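  {
    "path": "examples/perplexity_from_eval_loss.py",
    "content": "# examples/perplexity_from_eval_loss.py (illustrative file name, not part of the training code)\n# Tiny sketch of the end-of-epoch evaluation metric used by both training\n# scripts: the per-batch losses gathered with accelerator.gather_for_metrics\n# are averaged and exponentiated into a perplexity, with an overflow guard.\n# The toy losses below are placeholders.\n\nimport math\n\nimport torch\n\n\ndef perplexity_from_losses(losses: torch.Tensor) -> tuple[float, float]:\n    eval_loss = torch.mean(losses)\n    try:\n        perplexity = math.exp(eval_loss)\n    except OverflowError:\n        perplexity = float(\"inf\")\n    return eval_loss.item(), perplexity\n\n\nif __name__ == \"__main__\":\n    toy_losses = torch.tensor([2.31, 2.28, 2.40, 2.35])\n    eval_loss, ppl = perplexity_from_losses(toy_losses)\n    print(f\"eval_loss={eval_loss:.3f} perplexity={ppl:.2f}\")\n"
  },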
  {
    "path": "train.llama.infini.noclm.py",
    "content": "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\non a text file or a dataset without using HuggingFace Trainer.\n\nHere is the full list of checkpoints on the hub that can be fine-tuned by this script:\nhttps://huggingface.co/models?filter=text-generation\n\"\"\"\n# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.\n\nimport argparse\nimport json\nimport logging\nimport math\nimport os\nimport random\nfrom itertools import chain\nfrom pathlib import Path\n\nimport datasets\nimport torch\nfrom accelerate import Accelerator, DistributedType\nfrom accelerate.logging import get_logger\nfrom accelerate.utils import set_seed\nfrom datasets import load_dataset\nfrom huggingface_hub import HfApi\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\nimport transformers\nfrom transformers import (\n    CONFIG_MAPPING,\n    MODEL_MAPPING,\n    AutoTokenizer,\n    SchedulerType,\n    default_data_collator,\n    get_scheduler,\n    Adafactor,\n)\nfrom transformers.utils import check_min_version, send_example_telemetry\nfrom transformers.utils.versions import require_version\nfrom transformers import LlamaConfig\nfrom infini_llama import LlamaForCausalLM\nfrom datasets import DatasetDict, interleave_datasets\n\ntorch.autograd.set_detect_anomaly(True)\n\n# Will error if the minimal version of Transformers is not installed. 
Remove at your own risks.\ncheck_min_version(\"4.40.0.dev0\")\n\nlogger = get_logger(__name__)\n\nrequire_version(\n    \"datasets>=2.14.0\",\n    \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\",\n)\n\nMODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description=\"Finetune a transformers model on a causal language modeling task\"\n    )\n    parser.add_argument(\n        \"--dataset_name\",\n        type=str,\n        default=None,\n        help=\"The name of the dataset to use (via the datasets library).\",\n    )\n    parser.add_argument(\n        \"--dataset_config_name\",\n        type=str,\n        default=None,\n        help=\"The configuration name of the dataset to use (via the datasets library).\",\n    )\n    parser.add_argument(\n        \"--train_file\",\n        type=str,\n        default=None,\n        help=\"A csv, txt or a json file containing the training data.\",\n    )\n    parser.add_argument(\n        \"--validation_file\",\n        type=str,\n        default=None,\n        help=\"A csv, txt or a json file containing the validation data.\",\n    )\n    parser.add_argument(\n        \"--validation_split_percentage\",\n        default=5,\n        help=\"The percentage of the train set used as validation set in case there's no validation split\",\n    )\n    parser.add_argument(\n        \"--model_name_or_path\",\n        type=str,\n        help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n        required=False,\n    )\n    parser.add_argument(\n        \"--config_name\",\n        type=str,\n        default=None,\n        help=\"Pretrained config name or path if not the same as model_name\",\n    )\n    parser.add_argument(\n        \"--tokenizer_name\",\n        type=str,\n        default=None,\n        help=\"Pretrained tokenizer name or path if not the same as model_name\",\n    )\n    parser.add_argument(\n        \"--use_slow_tokenizer\",\n        action=\"store_true\",\n        help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\n    )\n    parser.add_argument(\n        \"--per_device_train_batch_size\",\n        type=int,\n        default=8,\n        help=\"Batch size (per device) for the training dataloader.\",\n    )\n    parser.add_argument(\n        \"--per_device_eval_batch_size\",\n        type=int,\n        default=8,\n        help=\"Batch size (per device) for the evaluation dataloader.\",\n    )\n    parser.add_argument(\n        \"--learning_rate\",\n        type=float,\n        default=5e-5,\n        help=\"Initial learning rate (after the potential warmup period) to use.\",\n    )\n    parser.add_argument(\n        \"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\"\n    )\n    parser.add_argument(\n        \"--num_train_epochs\",\n        type=int,\n        default=3,\n        help=\"Total number of training epochs to perform.\",\n    )\n    parser.add_argument(\n        \"--max_train_steps\",\n        type=int,\n        default=None,\n        help=\"Total number of training steps to perform. 
If provided, overrides num_train_epochs.\",\n    )\n    parser.add_argument(\n        \"--gradient_accumulation_steps\",\n        type=int,\n        default=1,\n        help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n    )\n    parser.add_argument(\n        \"--lr_scheduler_type\",\n        type=SchedulerType,\n        default=\"linear\",\n        help=\"The scheduler type to use.\",\n        choices=[\n            \"linear\",\n            \"cosine\",\n            \"cosine_with_restarts\",\n            \"polynomial\",\n            \"constant\",\n            \"constant_with_warmup\",\n        ],\n    )\n    parser.add_argument(\n        \"--num_warmup_steps\",\n        type=int,\n        default=0,\n        help=\"Number of steps for the warmup in the lr scheduler.\",\n    )\n    parser.add_argument(\n        \"--output_dir\", type=str, default=None, help=\"Where to store the final model.\"\n    )\n    parser.add_argument(\n        \"--seed\", type=int, default=None, help=\"A seed for reproducible training.\"\n    )\n    parser.add_argument(\n        \"--model_type\",\n        type=str,\n        default=None,\n        help=\"Model type to use if training from scratch.\",\n        choices=MODEL_TYPES,\n    )\n    parser.add_argument(\n        \"--block_size\",\n        type=int,\n        default=32768,\n        help=(\n            \"Optional input sequence length after tokenization. The training dataset will be truncated in block of\"\n            \" this size for training. Default to the model max input length for single sentence inputs (take into\"\n            \" account special tokens).\"\n        ),\n    )\n    parser.add_argument(\n        \"--preprocessing_num_workers\",\n        type=int,\n        default=None,\n        help=\"The number of processes to use for the preprocessing.\",\n    )\n    parser.add_argument(\n        \"--overwrite_cache\",\n        action=\"store_true\",\n        help=\"Overwrite the cached training and evaluation sets\",\n    )\n    parser.add_argument(\n        \"--no_keep_linebreaks\",\n        action=\"store_true\",\n        help=\"Do not keep line breaks when using TXT files.\",\n    )\n    parser.add_argument(\n        \"--push_to_hub\",\n        action=\"store_true\",\n        help=\"Whether or not to push the model to the Hub.\",\n    )\n    parser.add_argument(\n        \"--hub_model_id\",\n        type=str,\n        help=\"The name of the repository to keep in sync with the local `output_dir`.\",\n    )\n    parser.add_argument(\n        \"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\"\n    )\n    parser.add_argument(\n        \"--trust_remote_code\",\n        type=bool,\n        default=False,\n        help=(\n            \"Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option \"\n            \"should only be set to `True` for repositories you trust and in which you have read the code, as it will \"\n            \"execute code present on the Hub on your local machine.\"\n        ),\n    )\n    parser.add_argument(\n        \"--checkpointing_steps\",\n        type=str,\n        default=None,\n        help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n    )\n    parser.add_argument(\n        \"--resume_from_checkpoint\",\n        type=str,\n        default=None,\n        help=\"If the training should continue from a checkpoint folder.\",\n    )\n    parser.add_argument(\n        \"--with_tracking\",\n        action=\"store_true\",\n        help=\"Whether to enable experiment trackers for logging.\",\n    )\n    parser.add_argument(\n        \"--report_to\",\n        type=str,\n        default=\"all\",\n        help=(\n            'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`,'\n            ' `\"wandb\"`, `\"comet_ml\"` and `\"clearml\"`. Use `\"all\"` (default) to report to all integrations. '\n            \"Only applicable when `--with_tracking` is passed.\"\n        ),\n    )\n    parser.add_argument(\n        \"--low_cpu_mem_usage\",\n        action=\"store_true\",\n        help=(\n            \"It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. \"\n            \"If passed, LLM loading time and RAM consumption will be benefited.\"\n        ),\n    )\n    parser.add_argument(\n        \"--segment_length\",\n        type=int,\n        default=2048,\n        help=\"The length of the segment to split the input into.\",\n    )\n    args = parser.parse_args()\n\n    if args.push_to_hub:\n        if args.output_dir is None:\n            raise ValueError(\n                \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n            )\n\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The\n    # information sent is the one passed as arguments along with your Python/PyTorch versions.\n    send_example_telemetry(\"run_clm_no_trainer\", args)\n\n    # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n    # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers\n    # in the environment\n    accelerator_log_kwargs = {}\n\n    if args.with_tracking:\n        accelerator_log_kwargs[\"log_with\"] = args.report_to\n        accelerator_log_kwargs[\"project_dir\"] = args.output_dir\n    segment_length = args.segment_length\n    print(\"block_size:\", args.block_size)\n    print(\"segment_length:\", segment_length)\n    gradient_accumulation_steps = args.block_size // segment_length\n    print(\"gradient_accumulation_steps:\", gradient_accumulation_steps)\n    accelerator = Accelerator(\n        gradient_accumulation_steps=gradient_accumulation_steps,\n        **accelerator_log_kwargs,\n    )\n\n    # Make one log on every process with the configuration for debugging.\n    logging.basicConfig(\n        format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n        datefmt=\"%m/%d/%Y %H:%M:%S\",\n        level=logging.INFO,\n    )\n    logger.info(accelerator.state, main_process_only=False)\n    if accelerator.is_local_main_process:\n        datasets.utils.logging.set_verbosity_warning()\n        transformers.utils.logging.set_verbosity_info()\n    else:\n        datasets.utils.logging.set_verbosity_error()\n        transformers.utils.logging.set_verbosity_error()\n\n    # If passed along, set the training seed now.\n    if args.seed is not None:\n        set_seed(args.seed)\n\n    # Handle the repository creation\n    if accelerator.is_main_process:\n        if args.push_to_hub:\n            # Retrieve of infer repo_name\n            repo_name = args.hub_model_id\n            if repo_name is None:\n                repo_name = Path(args.output_dir).absolute().name\n            # Create repo and retrieve repo_id\n            api = HfApi()\n            repo_id = api.create_repo(\n                repo_name, exist_ok=True, token=args.hub_token\n            ).repo_id\n\n            with open(os.path.join(args.output_dir, \".gitignore\"), \"w+\") as gitignore:\n                if \"step_*\" not in gitignore:\n                    gitignore.write(\"step_*\\n\")\n                if \"epoch_*\" not in gitignore:\n                    gitignore.write(\"epoch_*\\n\")\n        elif args.output_dir is not None:\n            os.makedirs(args.output_dir, exist_ok=True)\n    accelerator.wait_for_everyone()\n\n    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n    # (the dataset will be downloaded automatically from the datasets Hub).\n    #\n    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n    # 'text' is found. 
You can easily tweak this behavior (see below).\n    #\n    # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n    # download the dataset.\n    if args.dataset_name is not None:\n        # Downloading and loading a dataset from the hub.\n        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)\n        if \"validation\" not in raw_datasets.keys():\n            raw_datasets[\"validation\"] = load_dataset(\n                args.dataset_name,\n                args.dataset_config_name,\n                split=f\"train[:{args.validation_split_percentage}%]\",\n            )\n            raw_datasets[\"train\"] = load_dataset(\n                args.dataset_name,\n                args.dataset_config_name,\n                split=f\"train[{args.validation_split_percentage}%:]\",\n            )\n\n    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n    # https://huggingface.co/docs/datasets/loading_datasets.\n\n    # Load pretrained model and tokenizer\n    #\n    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n    # download model & vocab.\n    if args.config_name:\n        # Not use AutoConfig, to avoid HF Llama model loading\n        config = LlamaConfig.from_pretrained(\n            args.config_name,\n            trust_remote_code=args.trust_remote_code,\n        )\n    elif args.model_name_or_path:\n        # Not use AutoConfig, to avoid HF Llama model loading\n        config = LlamaConfig.from_pretrained(\n            args.model_name_or_path,\n            trust_remote_code=args.trust_remote_code,\n        )\n    else:\n        config = CONFIG_MAPPING[args.model_type]()\n        logger.warning(\"You are instantiating a new config instance from scratch.\")\n\n    if args.tokenizer_name:\n        tokenizer = AutoTokenizer.from_pretrained(\n            args.tokenizer_name,\n            use_fast=not args.use_slow_tokenizer,\n            trust_remote_code=args.trust_remote_code,\n        )\n    elif args.model_name_or_path:\n        tokenizer = AutoTokenizer.from_pretrained(\n            args.model_name_or_path,\n            use_fast=not args.use_slow_tokenizer,\n            trust_remote_code=args.trust_remote_code,\n        )\n    else:\n        raise ValueError(\n            \"You are instantiating a new tokenizer from scratch. This is not supported by this script. \"\n            \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n        )\n\n    if args.model_name_or_path:\n        # Not use AutoModelForCausalLM to avoid HF Llama model loading\n        model = LlamaForCausalLM.from_pretrained(\n            args.model_name_or_path,\n            from_tf=bool(\".ckpt\" in args.model_name_or_path),\n            config=config,\n            low_cpu_mem_usage=args.low_cpu_mem_usage,\n            trust_remote_code=args.trust_remote_code,\n            # torch_dtype=\"auto\",\n            device_map=\"auto\",\n        )\n    else:\n        logger.info(\"Training new model from scratch\")\n        model = LlamaForCausalLM(\n            config,\n            # torch_dtype=torch.bfloat16,\n            device_map=\"auto\",\n        )\n\n    # We resize the embeddings only when necessary to avoid index errors. 
If you are creating a model from scratch\n    # on a small vocab and want a smaller embedding size, remove this test.\n    embedding_size = model.get_input_embeddings().weight.shape[0]\n    if len(tokenizer) > embedding_size:\n        model.resize_token_embeddings(len(tokenizer))\n\n    # Preprocessing the datasets.\n    # First we tokenize all the texts.\n    column_names = raw_datasets[\"train\"].column_names\n    text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n\n    def tokenize_function(examples):\n        return tokenizer(examples[text_column_name])\n\n    with accelerator.main_process_first():\n        tokenized_datasets = raw_datasets.map(\n            tokenize_function,\n            batched=True,\n            num_proc=args.preprocessing_num_workers,\n            remove_columns=column_names,\n            load_from_cache_file=not args.overwrite_cache,\n            desc=\"Running tokenizer on dataset\",\n        )\n\n    if args.block_size is None:\n        block_size = tokenizer.model_max_length\n        if block_size > config.max_position_embeddings:\n            logger.warning(\n                f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n                f\"Using block_size={min(1024, config.max_position_embeddings)} instead. You can change that default value by passing --block_size xxx.\"\n            )\n            block_size = min(1024, config.max_position_embeddings)\n    else:\n        if args.block_size > tokenizer.model_max_length:\n            logger.warning(\n                f\"The block_size passed ({args.block_size}) is larger than the maximum length for the model \"\n                f\"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.\"\n            )\n        block_size = min(args.block_size, tokenizer.model_max_length)\n\n    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.\n    def group_texts(examples):\n        # Concatenate all texts.\n        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}\n        total_length = len(concatenated_examples[list(examples.keys())[0]])\n        # We drop the small remainder, and if the total_length < block_size  we exclude this batch and return an empty dict.\n        # We could add padding if the model supported it instead of this drop, you can customize this part to your needs.\n        total_length = (total_length // block_size) * block_size\n        # Split by chunks of max_len.\n        result = {\n            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]\n            for k, t in concatenated_examples.items()\n        }\n        result[\"labels\"] = result[\"input_ids\"].copy()\n        return result\n\n    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder\n    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower\n    # to preprocess.\n    #\n    # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information:\n    # https://huggingface.co/docs/datasets/process#map\n\n    with accelerator.main_process_first():\n        lm_datasets = tokenized_datasets.map(\n            group_texts,\n            batched=True,\n            num_proc=args.preprocessing_num_workers,\n            load_from_cache_file=not args.overwrite_cache,\n            desc=f\"Grouping texts in chunks of {block_size}\",\n        )\n\n    train_dataset = lm_datasets[\"train\"]\n    eval_dataset = lm_datasets[\"validation\"]\n\n    # # Log a few random samples from the training set:\n    # for index in random.sample(range(len(train_dataset)), 3):\n    #     logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n    # DataLoaders creation:\n    train_dataloader = DataLoader(\n        train_dataset,\n        shuffle=False,  # Remove shuffle\n        collate_fn=default_data_collator,\n        batch_size=args.per_device_train_batch_size,\n    )\n    eval_dataloader = DataLoader(\n        eval_dataset,\n        collate_fn=default_data_collator,\n        batch_size=args.per_device_eval_batch_size,\n    )\n\n    # Optimizer\n    # Split weights in two groups, one with weight decay and the other not.\n    no_decay = [\"bias\", \"layer_norm.weight\"]\n    optimizer_grouped_parameters = [\n        {\n            \"params\": [\n                p\n                for n, p in model.named_parameters()\n                if not any(nd in n for nd in no_decay)\n            ],\n            \"weight_decay\": args.weight_decay,\n        },\n        {\n            \"params\": [\n                p\n                for n, p in model.named_parameters()\n                if any(nd in n for nd in no_decay)\n            ],\n            \"weight_decay\": 0.0,\n        },\n    ]\n    optimizer = Adafactor(\n        optimizer_grouped_parameters,\n        lr=args.learning_rate,\n        relative_step=False,\n    )\n\n    # Scheduler and math around the number of training steps.\n    overrode_max_train_steps = False\n    num_update_steps_per_epoch = math.ceil(\n        len(train_dataloader) / args.gradient_accumulation_steps\n    )\n    if args.max_train_steps is None:\n        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n        overrode_max_train_steps = True\n\n    lr_scheduler = get_scheduler(\n        name=args.lr_scheduler_type,\n        optimizer=optimizer,\n        num_warmup_steps=args.num_warmup_steps * accelerator.num_processes,\n        num_training_steps=(\n            args.max_train_steps\n            if overrode_max_train_steps\n            else args.max_train_steps * accelerator.num_processes\n        ),\n    )\n\n    # Prepare everything with our `accelerator`.\n    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = (\n        accelerator.prepare(\n            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n        )\n    )\n\n    # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.\n    if accelerator.distributed_type == DistributedType.TPU:\n        model.tie_weights()\n\n    # We need to recalculate our total training steps as the size of the training dataloader may have changed.\n    num_update_steps_per_epoch = math.ceil(\n        len(train_dataloader) / args.gradient_accumulation_steps\n    )\n    if overrode_max_train_steps:\n        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n    # Afterwards we recalculate our number 
of training epochs\n    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n    # Figure out how many steps we should save the Accelerator states\n    checkpointing_steps = args.checkpointing_steps\n    if checkpointing_steps is not None and checkpointing_steps.isdigit():\n        checkpointing_steps = int(checkpointing_steps)\n\n    # We need to initialize the trackers we use, and also store our configuration.\n    # The trackers initializes automatically on the main process.\n    if args.with_tracking:\n        experiment_config = vars(args)\n        # TensorBoard cannot log Enums, need the raw value\n        experiment_config[\"lr_scheduler_type\"] = experiment_config[\n            \"lr_scheduler_type\"\n        ].value\n        accelerator.init_trackers(\"InfiniTransformer\", experiment_config)\n\n    # Train!\n    total_batch_size = (\n        args.per_device_train_batch_size\n        * accelerator.num_processes\n        * args.gradient_accumulation_steps\n    )\n\n    logger.info(\"***** Running training *****\")\n    num_examples = len(train_dataset)\n    logger.info(f\"  Num examples = {num_examples}\")\n    logger.info(f\"  Num Epochs = {args.num_train_epochs}\")\n    logger.info(\n        f\"  Instantaneous batch size per device = {args.per_device_train_batch_size}\"\n    )\n    logger.info(\n        f\"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\"\n    )\n    logger.info(f\"  Gradient Accumulation steps = {gradient_accumulation_steps}\")\n    logger.info(f\"  Total optimization steps = {args.max_train_steps}\")\n    logger.info(f\"  Number of model parameters = {model.num_parameters()}\")\n    # Only show the progress bar once on each machine.\n    progress_bar = tqdm(\n        range(args.max_train_steps), disable=not accelerator.is_local_main_process\n    )\n    completed_steps = 0\n    starting_epoch = 0\n\n    # Potentially load in the weights and states from a previous save\n    if args.resume_from_checkpoint:\n        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n            checkpoint_path = args.resume_from_checkpoint\n            path = os.path.basename(args.resume_from_checkpoint)\n        else:\n            # Get the most recent checkpoint\n            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n            dirs.sort(key=os.path.getctime)\n            path = dirs[\n                -1\n            ]  # Sorts folders by date modified, most recent checkpoint is the last\n            checkpoint_path = path\n            path = os.path.basename(checkpoint_path)\n\n        accelerator.print(f\"Resumed from checkpoint: {checkpoint_path}\")\n        accelerator.load_state(checkpoint_path)\n        # Extract `epoch_{i}` or `step_{i}`\n        training_difference = os.path.splitext(path)[0]\n\n        if \"epoch\" in training_difference:\n            starting_epoch = int(training_difference.replace(\"epoch_\", \"\")) + 1\n            resume_step = None\n            completed_steps = starting_epoch * num_update_steps_per_epoch\n        else:\n            # need to multiply `gradient_accumulation_steps` to reflect real steps\n            resume_step = (\n                int(training_difference.replace(\"step_\", \"\"))\n                * args.gradient_accumulation_steps\n            )\n            starting_epoch = resume_step // len(train_dataloader)\n            completed_steps = resume_step // args.gradient_accumulation_steps\n            
resume_step -= starting_epoch * len(train_dataloader)\n\n    # update the progress_bar if load from checkpoint\n    progress_bar.update(completed_steps)\n\n    for epoch in range(starting_epoch, args.num_train_epochs):\n        model.train()\n        total_loss = 0\n        if (\n            args.resume_from_checkpoint\n            and epoch == starting_epoch\n            and resume_step is not None\n        ):\n            # We skip the first `n` batches in the dataloader when resuming from a checkpoint\n            active_dataloader = accelerator.skip_first_batches(\n                train_dataloader, resume_step\n            )\n        else:\n            active_dataloader = train_dataloader\n        for step, batch in enumerate(active_dataloader):\n            total_segment_loss = 0\n            # Segment the batch items into smaller chunks of 2048 tokens\n            input_ids = torch.tensor_split(\n                batch[\"input_ids\"],\n                list(\n                    range(segment_length, batch[\"input_ids\"].shape[1], segment_length)\n                ),\n                dim=1,\n            )\n            if \"attention_mask\" in batch:\n                attention_mask = torch.tensor_split(\n                    batch[\"attention_mask\"],\n                    list(\n                        range(\n                            segment_length,\n                            batch[\"attention_mask\"].shape[1],\n                            segment_length,\n                        )\n                    ),\n                    dim=1,\n                )\n            if \"labels\" in batch:\n                labels = torch.tensor_split(\n                    batch[\"labels\"],\n                    list(\n                        range(segment_length, batch[\"labels\"].shape[1], segment_length)\n                    ),\n                    dim=1,\n                )\n            memory, norm_term = {}, {}\n            avg_segment_loss = 0\n            for i in range(len(input_ids)):\n                outputs = model(\n                    input_ids=input_ids[i],\n                    attention_mask=attention_mask[i],\n                    labels=labels[i],\n                    memory=memory,\n                    norm_term=norm_term,\n                )\n                memory = outputs.memory\n                norm_term = outputs.norm_term\n                loss = outputs.loss\n                print(f\"Loss @ segment {i}:\", loss.item())\n                # print(input_ids[i])\n                # accelerator.backward(loss, retain_graph=True)\n                accelerator.backward(loss)\n                total_loss += loss.detach().float()\n                total_segment_loss += loss.detach().float()\n                # print(\"Total loss:\", total_loss)\n                # print(\"Total segment loss:\", total_segment_loss)\n\n            # Gradient clipping\n            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n            optimizer.step()\n            lr_scheduler.step()\n            optimizer.zero_grad()\n\n            if accelerator.sync_gradients:\n                progress_bar.update(1)\n                completed_steps += 1\n            # Log the training loss and lr every 100 steps\n            LOG_INTERVAL = 1\n            if completed_steps % LOG_INTERVAL == 0:\n                avg_segment_loss = total_segment_loss / len(input_ids)\n                print(\n                    f\"Step: {completed_steps}, Loss: {avg_segment_loss.item()}, LR: {lr_scheduler.get_last_lr()[0]}\"\n 
               )\n                print(\"-\" * 10)\n                # Log to wandb by calling `accelerator.log`, `step` is optional\n                accelerator.log(\n                    {\n                        \"train/loss\": avg_segment_loss.item(),\n                        \"train/learning_rate\": lr_scheduler.get_last_lr()[0],\n                        \"train/epoch\": completed_steps / num_examples,\n                    },\n                    step=completed_steps,\n                )\n            if isinstance(checkpointing_steps, int):\n                if completed_steps % checkpointing_steps == 0:\n                    output_dir = f\"step_{completed_steps}\"\n                    if args.output_dir is not None:\n                        output_dir = os.path.join(args.output_dir, output_dir)\n                    accelerator.save_state(output_dir)\n            if completed_steps >= args.max_train_steps:\n                break\n\n        print(\"Finished epoch:\", epoch)\n\n        model.eval()\n        losses = []\n        for step, batch in enumerate(eval_dataloader):\n            input_ids = torch.tensor_split(\n                batch[\"input_ids\"],\n                list(\n                    range(segment_length, batch[\"input_ids\"].shape[1], segment_length)\n                ),\n                dim=1,\n            )\n            if \"attention_mask\" in batch:\n                attention_mask = torch.tensor_split(\n                    batch[\"attention_mask\"],\n                    list(\n                        range(\n                            segment_length,\n                            batch[\"attention_mask\"].shape[1],\n                            segment_length,\n                        )\n                    ),\n                    dim=1,\n                )\n            if \"labels\" in batch:\n                labels = torch.tensor_split(\n                    batch[\"labels\"],\n                    list(\n                        range(segment_length, batch[\"labels\"].shape[1], segment_length)\n                    ),\n                    dim=1,\n                )\n\n            memory, norm_term = None, None\n            for i in range(len(input_ids)):\n                with torch.no_grad():\n                    outputs = model(\n                        input_ids=input_ids[i],\n                        attention_mask=attention_mask[i],\n                        labels=labels[i],\n                        memory=memory,\n                        norm_term=norm_term,\n                    )\n                memory = outputs.memory\n                norm_term = outputs.norm_term\n\n            loss = outputs.loss\n            losses.append(\n                accelerator.gather_for_metrics(\n                    loss.repeat(args.per_device_eval_batch_size)\n                )\n            )\n\n        losses = torch.cat(losses)\n        try:\n            eval_loss = torch.mean(losses)\n            perplexity = math.exp(eval_loss)\n        except OverflowError:\n            perplexity = float(\"inf\")\n\n        logger.info(f\"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}\")\n\n        if args.with_tracking:\n            accelerator.log(\n                {\n                    \"perplexity\": perplexity,\n                    \"eval_loss\": eval_loss,\n                    \"train_loss\": total_loss.item() / len(train_dataloader),\n                    \"epoch\": epoch,\n                    \"step\": completed_steps,\n                },\n                
step=completed_steps,\n            )\n\n        if args.push_to_hub and epoch < args.num_train_epochs - 1:\n            accelerator.wait_for_everyone()\n            unwrapped_model = accelerator.unwrap_model(model)\n            unwrapped_model.save_pretrained(\n                args.output_dir,\n                is_main_process=accelerator.is_main_process,\n                save_function=accelerator.save,\n            )\n            if accelerator.is_main_process:\n                tokenizer.save_pretrained(args.output_dir)\n                api.upload_folder(\n                    commit_message=f\"Training in progress epoch {epoch}\",\n                    folder_path=args.output_dir,\n                    repo_id=repo_id,\n                    repo_type=\"model\",\n                    token=args.hub_token,\n                )\n\n        if args.checkpointing_steps == \"epoch\":\n            output_dir = f\"epoch_{epoch}\"\n            if args.output_dir is not None:\n                output_dir = os.path.join(args.output_dir, output_dir)\n            accelerator.save_state(output_dir)\n\n    if args.with_tracking:\n        accelerator.end_training()\n\n    if args.output_dir is not None:\n        accelerator.wait_for_everyone()\n        unwrapped_model = accelerator.unwrap_model(model)\n        unwrapped_model.save_pretrained(\n            args.output_dir,\n            is_main_process=accelerator.is_main_process,\n            save_function=accelerator.save,\n        )\n        if accelerator.is_main_process:\n            tokenizer.save_pretrained(args.output_dir)\n            if args.push_to_hub:\n                api.upload_folder(\n                    commit_message=\"End of training\",\n                    folder_path=args.output_dir,\n                    repo_id=repo_id,\n                    repo_type=\"model\",\n                    token=args.hub_token,\n                )\n            with open(os.path.join(args.output_dir, \"all_results.json\"), \"w\") as f:\n                json.dump({\"perplexity\": perplexity}, f)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  }
]