Repository: andrewkchan/deepseek.cpp Branch: main Commit: 8db9e56af867 Files: 35 Total size: 1.6 MB Directory structure: gitextract_edm78p7f/ ├── .gitignore ├── LICENSE.md ├── Makefile ├── README.md ├── convert.py ├── pyproject.toml ├── quantizer.cpp ├── quantizer.py ├── setup.py ├── src/ │ ├── codec.cpp │ ├── codec.h │ ├── debug.cpp │ ├── debug.h │ ├── infer.cpp │ ├── main.cpp │ ├── model.cpp │ ├── model.h │ ├── profile.cpp │ ├── profile.h │ ├── quant.cpp │ ├── quant.h │ ├── sampler.cpp │ ├── sampler.h │ ├── test.cpp │ ├── time_utils.cpp │ ├── time_utils.h │ ├── tokenizer.cpp │ ├── tokenizer.h │ ├── wikitest.cat.1chunk.v2-encoded.txt │ └── wikitest.cat.1chunk.v3-encoded.txt └── vendor/ ├── fmt/ │ ├── base.h │ ├── format-inl.h │ └── format.h ├── format.cc └── json.hpp ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ env/ # build intermediates .vscode/ build/ __pycache__/ *.egg-info/ *.cpython-312-x86_64-linux-gnu.so # profiling tools *.sqlite *.nsys-rep *.ncu-rep perf.data* *.gputrace/ **/.DS_Store ================================================ FILE: LICENSE.md ================================================ MIT License Copyright (c) 2025 Andrew Chan Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================= K-quants adapted from llama.cpp MIT License Copyright (c) 2023-2024 The ggml authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================= nlohmann/json MIT License Copyright (c) 2013-2025 Niels Lohmann Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================= fmt Copyright (c) 2012 - present, Victor Zverovich and {fmt} contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --- Optional exception to the license --- As an exception, if, as a result of your compiling your source code, portions of this Software are embedded into a machine-executable object form of such source code, you may redistribute such embedded portions in such object form without including the above copyright and permission notices. ================================================ FILE: Makefile ================================================ MAKEFLAGS+=-r -j UNAME=$(shell uname) BUILD=build ASM_DIR=$(BUILD)/asm # compile .c, .cpp, .cu files SOURCES=$(filter-out src/test.cpp,$(wildcard src/*.c)) SOURCES+=$(filter-out src/test.cpp,$(wildcard src/*.cc)) SOURCES+=$(filter-out src/test.cpp,$(wildcard src/*.cpp)) SOURCES+=$(filter-out src/test.cpp,$(wildcard src/*.cu)) SOURCES+=$(wildcard vendor/*.c) SOURCES+=$(wildcard vendor/*.cc) SOURCES+=$(wildcard vendor/*.cpp) SOURCES+=$(wildcard vendor/*.cu) # Define test sources separately TEST_SOURCES=src/test.cpp TEST_SOURCES+=$(filter-out src/main.cpp,$(SOURCES)) OBJECTS=$(SOURCES:%=$(BUILD)/%.o) TEST_OBJECTS=$(TEST_SOURCES:%=$(BUILD)/%.o) ASM_FILES=$(patsubst %.cpp,$(ASM_DIR)/%.s,$(filter %.cpp,$(SOURCES))) TEST_ASM_FILES=$(patsubst %.cpp,$(ASM_DIR)/%.s,$(filter %.cpp,$(TEST_SOURCES))) BINARY=$(BUILD)/main TEST_BINARY=$(BUILD)/test PROFILE_BINARY=$(BUILD)/main_profile BASE_CFLAGS=-g -Wall -Wpointer-arith -Werror -O3 -ffast-math -Ivendor -std=c++20 BASE_LDFLAGS=-lm BASE_CFLAGS+=-fopenmp -mf16c -mavx2 -mfma BASE_LDFLAGS+=-fopenmp PROFILE_CFLAGS=$(BASE_CFLAGS) -pg -fno-omit-frame-pointer PROFILE_LDFLAGS=$(BASE_LDFLAGS) -pg CFLAGS=$(BASE_CFLAGS) LDFLAGS=$(BASE_LDFLAGS) all: $(BINARY) asm profile: CFLAGS=$(PROFILE_CFLAGS) profile: LDFLAGS=$(PROFILE_LDFLAGS) profile: 
$(PROFILE_BINARY) test: $(TEST_BINARY) test-asm # Target to build just assembly files asm: $(ASM_FILES) test-asm: $(TEST_ASM_FILES) format: clang-format -i src/* $(BINARY): $(OBJECTS) $(CXX) $^ $(LDFLAGS) -o $@ $(TEST_BINARY): $(TEST_OBJECTS) $(CXX) $^ $(LDFLAGS) -o $@ $(PROFILE_BINARY): $(OBJECTS) $(CXX) $^ $(PROFILE_LDFLAGS) -o $@ # Rule to generate assembly for cpp files $(ASM_DIR)/%.s: %.cpp @mkdir -p $(dir $@) $(CXX) $< $(CFLAGS) -S -masm=intel -o $@ $(BUILD)/%.c.o: %.c @mkdir -p $(dir $@) $(CXX) $< $(CFLAGS) -c -MMD -MP -o $@ $(BUILD)/%.cpp.o: %.cpp @mkdir -p $(dir $@) $(CXX) $< $(CFLAGS) -c -MMD -MP -o $@ $(BUILD)/%.cc.o: %.cc @mkdir -p $(dir $@) $(CXX) $< $(CFLAGS) -c -MMD -MP -o $@ -include $(OBJECTS:.o=.d) -include $(TEST_OBJECTS:.o=.d) clean: rm -rf $(BUILD) .PHONY: all clean format test asm test-asm profile ================================================ FILE: README.md ================================================ This is a CPU-only inference implementation for the DeepSeek family of large language models written in C++, based on [Yet Another Language Model](https://github.com/andrewkchan/yalm). ## Why? For fun and learning! I was initially adding DeepSeek support to `yalm` but realized that the changes were large and complex enough that it might ruin the simplicity of that project. Maybe at some point I'll upstream the changes, but for now I've decided to fork them into a separate, smaller, leaner codebase. Since this program only supports DeepSeek, it's tiny compared to other inference engines (<2k LOC not including `fmt` and `json`, vs. >250k for llama.cpp and vllm) and is extra hackable. I'm currently using it as a testbed to study single-batch DeepSeek decoding performance on CPU. ## Model and hardware support Quantizations other than FP32 require AVX2 and F16C support. 
| Model | Q2_K | Q3_K | Q4_K | F8E5M2 | F8E4M3 | FP16 | BF16 | FP32 | | ----- | ---- | ---- | ------ | ------ | ---- | ---- | ---- | ---- | | DeepSeek-V2-Lite | ✅ | ✅ | WIP | ✅ | WIP | ✅ | WIP | ✅ | | DeepSeek-V2 | ✅ | ✅ | WIP | ✅ | WIP | ✅ | WIP | ✅ | | DeepSeek-V2.5 | ✅ | ✅ | WIP | ✅ | WIP | ✅ | WIP | ✅ | | DeepSeek-V3 | ✅ | ✅ | WIP | ✅ | WIP | - | - | - | | DeepSeek-V3.1 (Terminus) | ✅ | ✅ | WIP | ✅ | WIP | - | - | - | | DeepSeek-R1 | ✅ | ✅ | WIP | ✅ | WIP | - | - | - | deepseek.cpp is missing important optimizations for production use (see notes below), but gets pretty close to llama.cpp in single-batch decode speed. Benchmarking DeepSeek-V3-Base with Q2_K quantization on an AWS r6a.12xlarge instance (AMD EPYC 7R13, 2x24 cores, 384GB DDR4 RAM): - llama.cpp ([DeepSeek-V3-Q2_K_XS](https://huggingface.co/unsloth/DeepSeek-V3-GGUF/tree/main/DeepSeek-V3-Q2_K_XS) 207GB, tg128, best of 16/24/32/48 threads): 4.57 tok/s - deepseek.cpp (Q2_K 207GB, MHA, `-n 128 -L` completion with 16 threads): 4.02 tok/s A big part of this is that deepseek.cpp uses the llama.cpp vec_dot kernels for Q2_K, so I can't claim to have matched its performance purely through my own ingenuity. But it is surprising given the inference code is much simpler, opting for OpenMP over a [global threadpool with spinlock kernel barriers](https://justine.lol/matmul/#threads). I'm hoping that in addition to serving as a testbed for myself, this gives a good base for others to hack on. # Instructions deepseek.cpp requires a computer with a C++20-compatible compiler. You'll also need a directory containing LLM safetensor weights and configuration files in huggingface format, which you'll need to convert by providing a directory into which `.dseek` files containing the converted weights will go. 
Follow the below to download DeepSeek-V2-Lite, build `deepseek.cpp`, and run it: ``` # install git LFS and build tools curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash sudo apt-get -y install git-lfs python3-dev build-essential # download DeepSeek-V2-Lite git clone https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite # clone this repository git clone https://github.com/andrewkchan/deepseek.cpp.git cd deepseek.cpp pip install . python convert.py --quant fp16 v2-lite-f16 ../DeepSeek-V2-Lite/ ./build/main v2-lite-f16 -i "What is a large language model?" -m c -t 1.0 ``` ## Usage See the CLI help documentation below for `./build/main`: ``` Usage: main [options] Example: main model_weights_dir/ -i "Q: What is the meaning of life?" Options: -h Display this help message -L Locks model weights to RAM, disabling swap. Requires sudo. -m [completion,passkey,perplexity,interactive] which mode to run in (default - completion) -T sliding window context length (0 - max) Perplexity mode options: Choose one: -i input prompt -f input file with prompt -w use wikitext as input Completion mode options: -n number of steps to run for in completion mode, default 256. 0 = max_seq_len, -1 = infinite Choose one: -i input prompt -t temperature (default - 1.0) -p p for top-p sampling (default - 0.95) -f input file with prompt Passkey mode options: -n number of junk lines to insert (default - 250) -l passkey position (-1 - random) ``` You will likely need to tune the number of OpenMP threads to achieve good performance. For example: ``` OMP_NUM_THREADS=32 ./build/main <...args> ``` The default OpenMP thread count can result in severely degraded throughput, likely due to thread contention. I have found a good heuristic to be half the number of cores. ## Notes - `--quant=f8e5m2` specifies model weight quantization using 128x128 blocks. MoE gates and layer norms are left in full precision. 
This should provide better accuracy than per-tensor quantization or the naive truncating quantization done by `yalm` (which results in nonsensical output for the DeepSeek family of models). - `--quant=q2_k` and `--quant=q3_k` specify model weight quantization using the 2-bit and 3-bit llama.cpp [K-quantization schemes](https://github.com/ggml-org/llama.cpp/pull/1684), which use a two-level hierarchy of blocks and super-blocks to store scales/biases for ranges of weights. - The models have a tendency to repeat themselves and get into infinite loops at lower temperatures. In my testing, a temperature of ~1.0 avoids this failure mode but also keeps the models reasonably grounded. - Some new, optional architectural features (e.g. the `noaux_tc` method of expert selection) of DeepSeek V3 have not yet been implemented, so the model accuracy may be lower than the reference model. - You will need ~650GB of memory to run DeepSeek V3 in F8E5M2, or 206GB for 2-bit Q2_K. For best performance, you should ensure there is enough physical RAM available and run as `sudo` with `-L` to force weights to stay in RAM, but otherwise, most operating systems will also automatically supplement this with swap space (storing some memory on disk and some in RAM) at the cost of severely degraded token throughput. More aggressive quantization methods such as [1.58-bit](https://unsloth.ai/blog/deepseekr1-dynamic) are planned. - Model quality is not stable because I've been using this repository as an experiment testbed. See (https://github.com/andrewkchan/deepseek.cpp/pull/14) for the latest perplexity measurements on DeepSeek-V2-Lite as well as instructions on how to run standard measurements yourself. Known issues impacting generation quality include the tokenizer (which is not a true BPE tokenizer) and the use of attention sinks rather than yarn (https://github.com/andrewkchan/deepseek.cpp/pull/15). - Only decoding (e.g. 
incremental, iterative generation or reading of one token at a time) has been implemented. Prefills (reading a batch of prompt tokens in a single pass) have not been implemented, nor prefill-based optimizations for the decoding phase such as speculative decoding or multi-token prediction. Finally, the current multi-latent attention implementation is still slower than multi-latent attention in surprising scenarios (https://github.com/andrewkchan/deepseek.cpp/pull/8) and appears to be under-utilizing memory bandwidth. I have limited time to implement these optimizations as this is a side project for me, but PRs are welcome! ================================================ FILE: convert.py ================================================ # Converts a model consisting of a huggingface config.json, tokenizer.json, and .safetensors weights into a .yalm file, # which: # - Normalizes the config to a common format in the header # - Combines any safetensors shards # - Reads the token vocabulary into a simpler format # - Performs quantization if specified import argparse import os import json import safetensors from safetensors.torch import save_file import torch from quantizer import k_quantize from typing import Tuple, List, Literal, Union import dataclasses SUPPORTED_ARCHITECTURES = [ "DeepseekV2ForCausalLM", "DeepseekV3ForCausalLM", ] @dataclasses.dataclass class BlockQuant: name: Literal["fp32", "fp16", "f8e5m2"] block_size: Union[Tuple[int, int], None] dtype: torch.dtype @dataclasses.dataclass class KQuant: name: Literal["q2_k", "q3_k"] dtype: torch.dtype Quant = Union[BlockQuant, KQuant] SUPPORTED_QUANTS = { "fp32": BlockQuant(name="fp32", block_size=None, dtype=torch.float32), "fp16": BlockQuant(name="fp16", block_size=None, dtype=torch.float16), "f8e5m2": BlockQuant(name="f8e5m2", block_size=(128, 128), dtype=torch.float8_e5m2), "q2_k": KQuant(name="q2_k", dtype=torch.uint8), "q3_k": KQuant(name="q3_k", dtype=torch.uint8), } class Metadata: def __init__(self, config, 
tokenizer_config, quant, n_layers, use_mla, bsize): arch = config["architectures"][0] if arch not in SUPPORTED_ARCHITECTURES: raise Exception(f"Architecture {arch} is not supported, must be one of {SUPPORTED_ARCHITECTURES}") self.arch = arch self.use_mla = bool(use_mla) if quant not in SUPPORTED_QUANTS: raise Exception(f"Quantization {quant} is not supported, must be one of {SUPPORTED_QUANTS}") self.quant: Quant = SUPPORTED_QUANTS[quant] if isinstance(self.quant, BlockQuant): is_bsize_configurable = self.quant.block_size is not None if is_bsize_configurable and bsize is not None: self.quant.block_size = (bsize, bsize) if arch in ["DeepseekV2ForCausalLM", "DeepseekV3ForCausalLM"]: self.dim = config["hidden_size"] self.hidden_dim = config["intermediate_size"] self.n_layers = config["num_hidden_layers"] if n_layers is not None and self.n_layers > n_layers: self.n_layers = n_layers self.n_heads = config["num_attention_heads"] self.vocab_size = config["vocab_size"] self.max_seq_len = tokenizer_config["model_max_length"] self.bos_token_id = config["bos_token_id"] self.eos_token_id = config["eos_token_id"] self.rope_theta = config.get("rope_theta", 10000.0) self.norm_eps = config["rms_norm_eps"] self.norm_type = "rmsnorm" # quantization self.original_quantization_config = config.get("quantization_config", None) if self.original_quantization_config is not None: dequant_block_sizes = self.original_quantization_config["weight_block_size"] assert type(dequant_block_sizes) == list and len(dequant_block_sizes) == 2 assert self.original_quantization_config["quant_method"] == "fp8" assert config.get("attention_bias", False) == False assert config.get("mlp_bias", False) == False assert config["hidden_act"] in ["gelu", "silu"] self.act_type = config["hidden_act"] self.first_k_dense_replace = config["first_k_dense_replace"] # multi-latent attention self.kv_lora_rank = config["kv_lora_rank"] self.q_lora_rank = config["q_lora_rank"] or 0 if self.use_mla: # TODO: support MLA with 
q_lora_rank == 0 (DeepSeek V2 Lite) assert self.q_lora_rank > 0 and self.kv_lora_rank > 0 self.qk_nope_head_dim = config["qk_nope_head_dim"] self.qk_rope_head_dim = config["qk_rope_head_dim"] self.v_head_dim = config["v_head_dim"] # mixture of experts self.n_shared_experts = config["n_shared_experts"] self.n_routed_experts = config["n_routed_experts"] self.n_active_routed = config["num_experts_per_tok"] self.moe_intermediate_size = config["moe_intermediate_size"] self.routed_scaling_factor = config["routed_scaling_factor"] self.n_group = config["n_group"] self.norm_topk_prob = config["norm_topk_prob"] self.scoring_func = config["scoring_func"] self.topk_group = config["topk_group"] self.topk_method = config["topk_method"] if self.topk_method == "noaux_tc": self.topk_method = "group_limited_greedy" # TODO: support for Deepseek v3 # rope rope_scaling = config["rope_scaling"] assert rope_scaling["type"] == "yarn" self.rope_scaling_beta_fast = rope_scaling["beta_fast"] self.rope_scaling_beta_slow = rope_scaling["beta_slow"] self.rope_scaling_factor = rope_scaling["factor"] self.rope_scaling_mscale = rope_scaling["mscale"] self.rope_scaling_mscale_all_dim = rope_scaling["mscale_all_dim"] self.rope_scaling_original_max_position_embeddings = rope_scaling["original_max_position_embeddings"] def to_dict(self): result = {} result["arch"] = self.arch result["use_mla"] = str(int(self.use_mla)) result["quant"] = self.quant.name if self.arch in ["DeepseekV2ForCausalLM", "DeepseekV3ForCausalLM"]: result["dim"] = str(self.dim) result["hidden_dim"] = str(self.hidden_dim) result["n_layers"] = str(self.n_layers) result["n_heads"] = str(self.n_heads) result["vocab_size"] = str(self.vocab_size) result["max_seq_len"] = str(self.max_seq_len) result["bos_token_id"] = str(self.bos_token_id) result["eos_token_id"] = str(self.eos_token_id) result["rope_theta"] = str(self.rope_theta) result["norm_eps"] = str(self.norm_eps) result["norm_type"] = str(self.norm_type) result["act_type"] = 
str(self.act_type) result["first_k_dense_replace"] = str(self.first_k_dense_replace) # quantization if isinstance(self.quant, BlockQuant) and self.quant.block_size is not None: result["quantization_block_size_0"] = str(self.quant.block_size[0]) result["quantization_block_size_1"] = str(self.quant.block_size[1]) # multi-latent attention result["kv_lora_rank"] = str(self.kv_lora_rank) result["q_lora_rank"] = str(self.q_lora_rank) result["qk_nope_head_dim"] = str(self.qk_nope_head_dim) result["qk_rope_head_dim"] = str(self.qk_rope_head_dim) result["v_head_dim"] = str(self.v_head_dim) # mixture of experts result["n_shared_experts"] = str(self.n_shared_experts) result["n_routed_experts"] = str(self.n_routed_experts) result["n_active_routed"] = str(self.n_active_routed) result["moe_intermediate_size"] = str(self.moe_intermediate_size) result["routed_scaling_factor"] = str(self.routed_scaling_factor) result["n_group"] = str(self.n_group) result["norm_topk_prob"] = str(self.norm_topk_prob) result["scoring_func"] = str(self.scoring_func) result["topk_group"] = str(self.topk_group) result["topk_method"] = str(self.topk_method) # rope scaling result["rope_scaling_beta_fast"] = str(self.rope_scaling_beta_fast) result["rope_scaling_beta_slow"] = str(self.rope_scaling_beta_slow) result["rope_scaling_factor"] = str(self.rope_scaling_factor) result["rope_scaling_mscale"] = str(self.rope_scaling_mscale) result["rope_scaling_mscale_all_dim"] = str(self.rope_scaling_mscale_all_dim) result["rope_scaling_original_max_position_embeddings"] = str(self.rope_scaling_original_max_position_embeddings) return result # this is a horrible gpt-2 unicode byte encoder hack from https://github.com/openai/gpt-2/blob/master/src/encoder.py#L9 # this has poisoned all HF tokenizer configs that use ByteLevel decoder/preprocessor # as a result we get crazy UTF-8-as-bytes-as-UTF8 in the tokenizer data that we need to convert back def gpt2_bytes_to_unicode(): bs = list(range(ord("!"), 
ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def load_tokens(tokenizer_path, vocab_size): tokens = [""] * vocab_size with open(tokenizer_path, "r") as f: tokenizer = json.load(f) use_gpt2_byte_preprocessing = not tokenizer["model"].get("byte_fallback", False) vocab = tokenizer["model"]["vocab"] assert len(vocab) <= vocab_size for t, i in vocab.items(): tokens[i] = t for added in tokenizer["added_tokens"]: tokens[added["id"]] = added["content"] gpt2_decode = {v: k for k, v in gpt2_bytes_to_unicode().items()} # Preprocess tokens into UTF-8 encoding for i, t in enumerate(tokens): if use_gpt2_byte_preprocessing: b = bytes([gpt2_decode.get(c, 0) for c in t]) else: t = t.replace('\u2581', ' ') # sentencepiece uses this character as whitespace b = t.encode('utf-8') b = b.replace(b"\0", b"\7") # replace null bytes with bell characters assert b.count(0) == 0 # no null bytes allowed tokens[i] = b return tokens def per_tensor_quantize(tensor: torch.Tensor, dtype: torch.dtype) -> Tuple[torch.Tensor, torch.Tensor]: """Quantize a tensor using per-tensor static scaling factor. Args: tensor: The input tensor. dtype: The data type to quantize to. """ finfo = torch.finfo(dtype) # Calculate the scale as dtype max divided by absmax. # Since .abs() creates a new tensor, we use aminmax to get # the min and max first and then calculate the absmax. 
if tensor.numel() == 0: # Deal with empty tensors (triggered by empty MoE experts) min_val, max_val = ( torch.tensor(-16.0, dtype=tensor.dtype), torch.tensor(16.0, dtype=tensor.dtype), ) else: min_val, max_val = tensor.aminmax() amax = torch.maximum(min_val.abs(), max_val.abs()) scale = finfo.max / amax.clamp(min=1e-12) # scale and clamp the tensor to bring it to # the representative range of float8 data type # (as default cast is unsaturated) qweight = (tensor * scale).clamp(min=finfo.min, max=finfo.max) # Return both float8 data and the inverse scale (as float), # as both required as inputs to torch._scaled_mm qweight = qweight.to(dtype) scale = scale.float().reciprocal() return qweight, scale def per_tensor_dequantize(qweight: torch.Tensor, scale: torch.Tensor) -> torch.Tensor: assert scale.numel() == 1 return qweight.to(torch.float32) * scale def blockwise_dequantize(qweight: torch.Tensor, scale: torch.Tensor, block_size: torch.Tensor) -> torch.Tensor: assert qweight.ndim == scale.ndim and scale.ndim == block_size.numel() and scale.ndim == 2 assert torch.all((torch.tensor(list(qweight.shape)) / block_size).ceil() == torch.tensor(list(scale.shape))) out = torch.empty_like(qweight, dtype=torch.float32) for i in range(scale.shape[0]): for j in range(scale.shape[1]): block_size_i = block_size[0] block_size_j = block_size[1] qw_block = qweight[i*block_size_i:(i+1)*block_size_i, j*block_size_j:(j+1)*block_size_j] out[i*block_size_i:(i+1)*block_size_i, j*block_size_j:(j+1)*block_size_j] = per_tensor_dequantize(qw_block, scale[i, j]) return out def blockwise_quantize(weight: torch.Tensor, block_size: torch.Tensor, dtype: torch.dtype) -> Tuple[torch.Tensor, torch.Tensor]: assert weight.ndim == block_size.numel() and weight.ndim == 2 scale_shape = torch.Size((torch.tensor(list(weight.shape)) / block_size).ceil().long()) scale = torch.empty(scale_shape, dtype=torch.float32) out = torch.empty_like(weight, dtype=dtype) for i in range(scale.shape[0]): for j in 
range(scale.shape[1]): block_size_i = block_size[0] block_size_j = block_size[1] w_block = weight[i*block_size_i:(i+1)*block_size_i, j*block_size_j:(j+1)*block_size_j] qw_block, scale_block = per_tensor_quantize(w_block, dtype) out[i*block_size_i:(i+1)*block_size_i, j*block_size_j:(j+1)*block_size_j] = qw_block scale[i, j] = scale_block return out, scale def per_expert_blockwise_quantize(expert_weights: torch.Tensor, block_size: torch.Tensor, dtype: torch.dtype) -> Tuple[torch.Tensor, torch.Tensor]: assert expert_weights.ndim == 3 num_experts = expert_weights.shape[0] output_weights = [] scales = [] for e in range(num_experts): weight, scale = blockwise_quantize(expert_weights[e], block_size, dtype) output_weights.append(weight) scales.append(scale) return torch.stack(output_weights), torch.stack(scales) def per_expert_k_quantize(expert_weights: torch.Tensor, method: Literal["q2_k", "q3_k"]) -> torch.Tensor: assert expert_weights.ndim == 3 num_experts = expert_weights.shape[0] output_weights = [] for e in range(num_experts): output_weights.append(k_quantize(expert_weights[e], method)) return torch.stack(output_weights) def load_weights(model_files: List[str], metadata: Metadata, tie_word_embeddings: bool, n_layers: int): """ Generator that yields shards of weights loaded from the model files in huggingface format. Each shard contains a dictionary of tensors, with weights normalized and cast to the specified dtype (except layer norm weights which are converted to float32). 
""" weights = {} for model_path in model_files: ext = os.path.splitext(model_path)[1] if ext == ".safetensors": with safetensors.safe_open(model_path, framework="pt") as f: for k in f.keys(): assert(k not in weights) weights[k] = f.get_tensor(k) dtype = metadata.quant.dtype # convert weights progress = 0 dequant_block_size = None if metadata.original_quantization_config is not None: dequant_block_size = torch.tensor(metadata.original_quantization_config["weight_block_size"]) tensors = {} def load_and_dequantize(weight_name: str, scale_name: str) -> torch.Tensor: t = weights[weight_name] if scale_name in weights: scale = weights[scale_name] t = blockwise_dequantize(t, scale, dequant_block_size) return t def quantize(t: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: if dtype not in [torch.float32, torch.float16]: if isinstance(metadata.quant, KQuant): t = k_quantize(t.to(torch.float32), metadata.quant.name) elif metadata.quant.block_size is None: return per_tensor_quantize(t, dtype) else: quant_block_size = torch.tensor(metadata.quant.block_size) return blockwise_quantize(t, quant_block_size, dtype) return t.to(dtype), None def conv(weight_name: str, scale_name: str) -> Tuple[torch.Tensor, torch.Tensor]: nonlocal progress progress += 1 t = load_and_dequantize(weight_name, scale_name) print(f"\rConverting tensor {progress}: {t.shape}", end="", flush=True) return quantize(t) def conv_experts(weight_and_scale_names: List[Tuple[str, str]]) -> Tuple[torch.Tensor, torch.Tensor]: nonlocal progress progress += 1 expert_weights = [weights[weight_name] for weight_name, _ in weight_and_scale_names] if weight_and_scale_names[0][1] in weights: for i in range(len(weight_and_scale_names)): scale = weights[weight_and_scale_names[i][1]] expert_weights[i] = blockwise_dequantize(expert_weights[i], scale, dequant_block_size) t = torch.stack(expert_weights) print(f"\rConverting tensor {progress}: {t.shape}", end="", flush=True) if dtype not in [torch.float32, torch.float16]: if 
isinstance(metadata.quant, KQuant): t = per_expert_k_quantize(t.to(torch.float32), metadata.quant.name) elif metadata.quant.block_size is None: return per_tensor_quantize(t, dtype) else: quant_block_size = torch.tensor(metadata.quant.block_size) return per_expert_blockwise_quantize(t, quant_block_size, dtype) return t.to(dtype), None def save_weight_and_scale(weight_name: str, scale_name: str, weight_and_scale: Tuple[torch.Tensor, torch.Tensor]): tensors[weight_name] = weight_and_scale[0] if weight_and_scale[1] is not None: tensors[scale_name] = weight_and_scale[1] save_weight_and_scale( "model.embed.weight", "model.embed.scale", conv("model.embed_tokens.weight", "model.embed_tokens.weight_scale_inv") ) for l in range(config["num_hidden_layers"]): if l % 8 == 0 and l > 0: yield tensors tensors = {} if n_layers is not None and l >= n_layers: break tensors[f"model.layers.{l}.attn.norm.weight"] = weights[f"model.layers.{l}.input_layernorm.weight"].float() tensors[f"model.layers.{l}.attn.kv_a_norm.weight"] = weights[f"model.layers.{l}.self_attn.kv_a_layernorm.weight"].float() if metadata.use_mla: assert metadata.q_lora_rank > 0 head_dim = metadata.qk_nope_head_dim + metadata.qk_rope_head_dim save_weight_and_scale( f"model.layers.{l}.attn.wkv_a.weight", f"model.layers.{l}.attn.wkv_a.scale", conv(f"model.layers.{l}.self_attn.kv_a_proj_with_mqa.weight", f"model.layers.{l}.self_attn.kv_a_proj_with_mqa.weight_scale_inv") ) save_weight_and_scale( f"model.layers.{l}.attn.wq_a.weight", f"model.layers.{l}.attn.wq_a.scale", conv(f"model.layers.{l}.self_attn.q_a_proj.weight", f"model.layers.{l}.self_attn.q_a_proj.weight_scale_inv") ) tensors[f"model.layers.{l}.attn.q_a_norm.weight"] = weights[f"model.layers.{l}.self_attn.q_a_layernorm.weight"].float() # (n_heads, head_dim-qk_rope_head_dim+v_head_dim, kv_lora_rank) kv_b_proj = load_and_dequantize( f"model.layers.{l}.self_attn.kv_b_proj.weight", f"model.layers.{l}.self_attn.kv_b_proj.weight_scale_inv" ).reshape( metadata.n_heads, 
-1, metadata.kv_lora_rank ) # (n_heads, head_dim, q_lora_rank) q_b_proj = load_and_dequantize( f"model.layers.{l}.self_attn.q_b_proj.weight", f"model.layers.{l}.self_attn.q_b_proj.weight_scale_inv" ).reshape( metadata.n_heads, -1, metadata.q_lora_rank ) # (n_heads, head_dim-qk_rope_head_dim, kv_lora_rank) k_nope_b_proj = kv_b_proj[:, :head_dim-metadata.qk_rope_head_dim] # (n_heads * v_head_dim, kv_lora_rank) v_b_proj = kv_b_proj[:, head_dim-metadata.qk_rope_head_dim:].reshape( metadata.n_heads * metadata.v_head_dim, metadata.kv_lora_rank ) # (n_heads, head_dim-qk_rope_head_dim, q_lora_rank) q_nope_b_proj = q_b_proj[:, :head_dim-metadata.qk_rope_head_dim] # (n_heads, qk_rope_head_dim, q_lora_rank) q_rope_b_proj = q_b_proj[:, head_dim-metadata.qk_rope_head_dim:] # (n_heads, kv_lora_rank, q_lora_rank) c_proj = torch.bmm(k_nope_b_proj.transpose(1, 2), q_nope_b_proj) # NOTE: k_rope gets split from kv_a, so there is no k_rope_b_proj save_weight_and_scale( f"model.layers.{l}.attn.wq_rope_b.weight", f"model.layers.{l}.attn.wq_rope_b.scale", quantize(q_rope_b_proj.reshape(-1, q_rope_b_proj.shape[-1])) ) save_weight_and_scale( f"model.layers.{l}.attn.wc.weight", f"model.layers.{l}.attn.wc.scale", quantize(c_proj.reshape(-1, c_proj.shape[-1])) ) save_weight_and_scale( f"model.layers.{l}.attn.wv_b.weight", f"model.layers.{l}.attn.wv_b.scale", quantize(v_b_proj) ) save_weight_and_scale( f"model.layers.{l}.attn.wo.weight", f"model.layers.{l}.attn.wo.scale", conv(f"model.layers.{l}.self_attn.o_proj.weight", f"model.layers.{l}.self_attn.o_proj.weight_scale_inv") ) else: save_weight_and_scale( f"model.layers.{l}.attn.wkv_a.weight", f"model.layers.{l}.attn.wkv_a.scale", conv(f"model.layers.{l}.self_attn.kv_a_proj_with_mqa.weight", f"model.layers.{l}.self_attn.kv_a_proj_with_mqa.weight_scale_inv") ) save_weight_and_scale( f"model.layers.{l}.attn.wkv_b.weight", f"model.layers.{l}.attn.wkv_b.scale", conv(f"model.layers.{l}.self_attn.kv_b_proj.weight", 
f"model.layers.{l}.self_attn.kv_b_proj.weight_scale_inv") ) save_weight_and_scale( f"model.layers.{l}.attn.wo.weight", f"model.layers.{l}.attn.wo.scale", conv(f"model.layers.{l}.self_attn.o_proj.weight", f"model.layers.{l}.self_attn.o_proj.weight_scale_inv") ) if metadata.q_lora_rank > 0: save_weight_and_scale( f"model.layers.{l}.attn.wq_a.weight", f"model.layers.{l}.attn.wq_a.scale", conv(f"model.layers.{l}.self_attn.q_a_proj.weight", f"model.layers.{l}.self_attn.q_a_proj.weight_scale_inv") ) save_weight_and_scale( f"model.layers.{l}.attn.wq_b.weight", f"model.layers.{l}.attn.wq_b.scale", conv(f"model.layers.{l}.self_attn.q_b_proj.weight", f"model.layers.{l}.self_attn.q_b_proj.weight_scale_inv") ) tensors[f"model.layers.{l}.attn.q_a_norm.weight"] = weights[f"model.layers.{l}.self_attn.q_a_layernorm.weight"].float() else: save_weight_and_scale( f"model.layers.{l}.attn.wq.weight", f"model.layers.{l}.attn.wq.scale", conv(f"model.layers.{l}.self_attn.q_proj.weight", f"model.layers.{l}.self_attn.q_proj.weight_scale_inv") ) tensors[f"model.layers.{l}.mlp.norm.weight"] = weights[f"model.layers.{l}.post_attention_layernorm.weight"].float() if l < metadata.first_k_dense_replace: save_weight_and_scale( f"model.layers.{l}.mlp.w1.weight", f"model.layers.{l}.mlp.w1.scale", conv(f"model.layers.{l}.mlp.gate_proj.weight", f"model.layers.{l}.mlp.gate_proj.weight_scale_inv") ) save_weight_and_scale( f"model.layers.{l}.mlp.w2.weight", f"model.layers.{l}.mlp.w2.scale", conv(f"model.layers.{l}.mlp.down_proj.weight", f"model.layers.{l}.mlp.down_proj.weight_scale_inv") ) save_weight_and_scale( f"model.layers.{l}.mlp.w3.weight", f"model.layers.{l}.mlp.w3.scale", conv(f"model.layers.{l}.mlp.up_proj.weight", f"model.layers.{l}.mlp.up_proj.weight_scale_inv") ) else: tensors[f"model.layers.{l}.moegate.weight"] = weights[f"model.layers.{l}.mlp.gate.weight"].float() if metadata.arch == "DeepseekV3ForCausalLM": tensors[f"model.layers.{l}.moegate.bias"] = 
weights[f"model.layers.{l}.mlp.gate.e_score_correction_bias"].float() save_weight_and_scale( f"model.layers.{l}.mlp.w1.weight", f"model.layers.{l}.mlp.w1.scale", conv_experts([ (f"model.layers.{l}.mlp.experts.{e}.gate_proj.weight", f"model.layers.{l}.mlp.experts.{e}.gate_proj.weight_scale_inv") for e in range(metadata.n_routed_experts) ]) ) save_weight_and_scale( f"model.layers.{l}.mlp.w2.weight", f"model.layers.{l}.mlp.w2.scale", conv_experts([ (f"model.layers.{l}.mlp.experts.{e}.down_proj.weight", f"model.layers.{l}.mlp.experts.{e}.down_proj.weight_scale_inv") for e in range(metadata.n_routed_experts) ]) ) save_weight_and_scale( f"model.layers.{l}.mlp.w3.weight", f"model.layers.{l}.mlp.w3.scale", conv_experts([ (f"model.layers.{l}.mlp.experts.{e}.up_proj.weight", f"model.layers.{l}.mlp.experts.{e}.up_proj.weight_scale_inv") for e in range(metadata.n_routed_experts) ]) ) save_weight_and_scale( f"model.layers.{l}.shared_mlp.w1.weight", f"model.layers.{l}.shared_mlp.w1.scale", conv(f"model.layers.{l}.mlp.shared_experts.gate_proj.weight", f"model.layers.{l}.mlp.shared_experts.gate_proj.weight_scale_inv") ) save_weight_and_scale( f"model.layers.{l}.shared_mlp.w2.weight", f"model.layers.{l}.shared_mlp.w2.scale", conv(f"model.layers.{l}.mlp.shared_experts.down_proj.weight", f"model.layers.{l}.mlp.shared_experts.down_proj.weight_scale_inv") ) save_weight_and_scale( f"model.layers.{l}.shared_mlp.w3.weight", f"model.layers.{l}.shared_mlp.w3.scale", conv(f"model.layers.{l}.mlp.shared_experts.up_proj.weight", f"model.layers.{l}.mlp.shared_experts.up_proj.weight_scale_inv") ) tensors["model.norm.weight"] = weights["model.norm.weight"].float() if tie_word_embeddings == False: save_weight_and_scale( "model.output.weight", "model.output.scale", conv("lm_head.weight", "lm_head.weight_scale_inv") ) else: # Model output classifier just uses the word embeddings matrix pass print() # newline yield tensors if __name__ == "__main__": argp = argparse.ArgumentParser() 
argp.add_argument("output_dir", type=str) argp.add_argument("input", type=str, nargs="?") argp.add_argument("--mla", action="store_true") argp.add_argument("--quant", type=str, default="fp16", choices=SUPPORTED_QUANTS) argp.add_argument("--bsize", type=int, default=None, help="block size for blockwise quantization") argp.add_argument("--n-layers", type=int, default=None, help="number of layers to convert (if None, convert all)") args = argp.parse_args() if os.path.exists(args.output_dir) and not os.path.isdir(args.output_dir): argp.error(f"output directory {args.output_dir} already exists and is not a directory") os.makedirs(args.output_dir, exist_ok=True) if args.input is not None: # Input is a directory with HuggingFace layout, e.g. files: # config.json # tokenizer.json # *.safetensors args.config = os.path.join(args.input, "config.json") if not os.path.exists(args.config): argp.error(f"config.json not found in {args.input}") args.tokenizer = os.path.join(args.input, "tokenizer.json") if not os.path.exists(args.tokenizer): argp.error(f"tokenizer.json not found in {args.input}") args.tokenizer_config = os.path.join(args.input, "tokenizer_config.json") if not os.path.exists(args.tokenizer_config): argp.error(f"tokenizer_config.json not found in {args.input}") files = os.listdir(args.input) args.models = [os.path.join(args.input, fname) for fname in files if os.path.splitext(fname)[1] == ".safetensors"] if len(args.models) == 0: argp.error(f"no .safetensors files found in {args.input}") else: argp.error("argument input is required") with open(args.tokenizer_config, "r") as f: tokenizer_config = json.load(f) with open(args.config, "r") as f: config = json.load(f) metadata = Metadata(config, tokenizer_config,args.quant, args.n_layers, args.mla, args.bsize) tokens = load_tokens(args.tokenizer, metadata.vocab_size) # Process and save weight shards for shard_idx, shard in enumerate(load_weights(args.models, metadata, config.get("tie_word_embeddings", None), 
args.n_layers)): if shard_idx == 0: shard["tokenizer.tokens"] = torch.cat([torch.tensor([x for x in b] + [0], dtype=torch.uint8) for b in tokens]) save_file(shard, os.path.join(args.output_dir, f"shard_{shard_idx:03d}.dseek"), metadata.to_dict()) else: save_file(shard, os.path.join(args.output_dir, f"shard_{shard_idx:03d}.dseek"), {}) print(f"\nSaved shard {shard_idx}", flush=True) ================================================ FILE: pyproject.toml ================================================ [build-system] requires = ["setuptools>=42", "wheel", "torch>=2.0.0", "ninja", "numpy"] build-backend = "setuptools.build_meta" [project] name = "deepseek-cpp" version = "0.1.0" requires-python = ">=3.8" dependencies = [ "safetensors", "torch>=2.0.0", "ninja", "numpy" ] ================================================ FILE: quantizer.cpp ================================================ #include #include "quant.h" torch::Tensor quantize_q2_k(torch::Tensor& input) { // Row-major quantization (equivalent to block size [1, 256]) // of input tensor using Q2_K scheme. 
TORCH_CHECK(input.ndimension() == 2, "input must be 2D"); TORCH_CHECK(input.size(1) % QK_K == 0, "ncols must be divisible by QK_K"); TORCH_CHECK(input.dtype() == torch::kFloat32, "input must be float32"); if (!input.is_contiguous()) { input = input.contiguous(); } const int64_t nrows = input.size(0); const int64_t ncols = input.size(1); const int64_t blocks_per_row = ncols / QK_K; const int64_t block_size = sizeof(block_q2_K); auto options = torch::TensorOptions().dtype(torch::kUInt8).device(torch::kCPU); auto output = torch::empty({nrows, blocks_per_row * block_size}, options); const float* input_ptr = input.data_ptr(); uint8_t* output_ptr = output.data_ptr(); // Parallelize over rows #pragma omp parallel for for (int64_t row = 0; row < nrows; row++) { const float* row_input = input_ptr + row * ncols; block_q2_K* row_output = reinterpret_cast(output_ptr + row * blocks_per_row * block_size); quantize_row_q2_K_ref(row_input, row_output, ncols); } return output; } torch::Tensor quantize_q3_k(torch::Tensor& input) { // Row-major quantization (equivalent to block size [1, 256]) // of input tensor using Q3_K scheme. 
TORCH_CHECK(input.ndimension() == 2, "input must be 2D"); TORCH_CHECK(input.size(1) % QK_K == 0, "ncols must be divisible by QK_K"); TORCH_CHECK(input.dtype() == torch::kFloat32, "input must be float32"); if (!input.is_contiguous()) { input = input.contiguous(); } const int64_t nrows = input.size(0); const int64_t ncols = input.size(1); const int64_t blocks_per_row = ncols / QK_K; const int64_t block_size = sizeof(block_q3_K); auto options = torch::TensorOptions().dtype(torch::kUInt8).device(torch::kCPU); auto output = torch::empty({nrows, blocks_per_row * block_size}, options); const float* input_ptr = input.data_ptr(); uint8_t* output_ptr = output.data_ptr(); // Parallelize over rows #pragma omp parallel for for (int64_t row = 0; row < nrows; row++) { const float* row_input = input_ptr + row * ncols; block_q3_K* row_output = reinterpret_cast(output_ptr + row * blocks_per_row * block_size); quantize_row_q3_K_ref(row_input, row_output, ncols); } return output; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("quantize_q2_k", &quantize_q2_k, "Quantize a tensor to Q2_K format"); m.def("quantize_q3_k", &quantize_q3_k, "Quantize a tensor to Q3_K format"); } ================================================ FILE: quantizer.py ================================================ import torch import quantizer_cpp from typing import Literal def k_quantize(tensor: torch.Tensor, method: Literal["q2_k", "q3_k"]) -> torch.Tensor: """ Quantize a 2D float32 tensor to Q2_K or Q3_K format. 
Args: tensor: Input tensor of shape (M, N) where N must be a multiple of 256 Returns: Quantized tensor of type uint8 and shape (M, sizeof(block_q2_K) * N/256) containing the block_q2_K data """ if method == "q2_k": return quantizer_cpp.quantize_q2_k(tensor) elif method == "q3_k": return quantizer_cpp.quantize_q3_k(tensor) else: raise ValueError(f"Invalid method: {method}") ================================================ FILE: setup.py ================================================ from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CppExtension from setuptools.dist import Distribution import os class BinaryDistribution(Distribution): def has_ext_modules(self): return True setup( name="quantizer_cpp", ext_modules=[ CppExtension( name="quantizer_cpp", sources=["quantizer.cpp", "src/quant.cpp"], include_dirs=[ os.path.join(os.path.dirname(__file__), "src"), os.path.join(os.path.dirname(__file__), "vendor") ], extra_compile_args=["-O3", "-march=native", "-std=c++20", "-fopenmp"], extra_link_args=["-fopenmp"], ), ], cmdclass={ 'build_ext': BuildExtension }, python_requires='>=3.8', install_requires=[ 'torch>=2.0.0', ], setup_requires=[ 'torch>=2.0.0', 'ninja', 'numpy', ], distclass=BinaryDistribution, ) ================================================ FILE: src/codec.cpp ================================================ #include "codec.h" #include "quant.h" #include "fmt/format.h" #include #include #include #include #include std::string quant_to_string(Quant quant) { switch (quant) { case Quant::F32: return "F32"; case Quant::F16: return "F16"; case Quant::F8E5M2: return "F8_E5M2"; case Quant::Q2_K: return "Q2_K"; case Quant::Q3_K: return "Q3_K"; } __builtin_unreachable(); } std::optional string_to_quant(const std::string& quant_str) { if (quant_str == "F32") { return Quant::F32; } else if (quant_str == "F16") { return Quant::F16; } else if (quant_str == "F8_E5M2") { return Quant::F8E5M2; } else if (quant_str == "Q2_K") { return 
Quant::Q2_K; } else if (quant_str == "Q3_K") { return Quant::Q3_K; } else { return std::nullopt; } } double bits_per_weight(Quant quant, size_t blockwise_quant_size) { if (blockwise_quant_size > 0 && quant != Quant::F8E5M2) { std::cerr << "blockwise quantization should only be used with F8E5M2" << std::endl; assert(false); } switch (quant) { case Quant::F32: return 32; case Quant::F16: return 16; case Quant::F8E5M2: return (8 + blockwise_quant_size) / blockwise_quant_size; case Quant::Q2_K: return 2.5625; case Quant::Q3_K: return 3.4375; } __builtin_unreachable(); } CodecDType quant_to_codec_dtype(Quant quant) { switch (quant) { case Quant::F32: return CodecDType::F32; case Quant::F16: return CodecDType::F16; case Quant::F8E5M2: return CodecDType::F8E5M2; case Quant::Q2_K: return CodecDType::U8; case Quant::Q3_K: return CodecDType::U8; } __builtin_unreachable(); } bool is_k_quant(Quant quant) { return quant == Quant::Q2_K || quant == Quant::Q3_K; } std::string codec_dtype_to_string(CodecDType dtype) { switch (dtype) { case CodecDType::F32: return "F32"; case CodecDType::F16: return "F16"; case CodecDType::BF16: return "BF16"; case CodecDType::F8E5M2: return "F8_E5M2"; case CodecDType::F8E4M3: return "F8_E4M3"; case CodecDType::I32: return "I32"; case CodecDType::I16: return "I16"; case CodecDType::I8: return "I8"; case CodecDType::U8: return "U8"; } return "UNKNOWN"; } std::optional string_to_codec_dtype(const std::string& dtype_str) { if (dtype_str == "F32") { return CodecDType::F32; } else if (dtype_str == "F16") { return CodecDType::F16; } else if (dtype_str == "BF16") { return CodecDType::BF16; } else if (dtype_str == "F8_E5M2") { return CodecDType::F8E5M2; } else if (dtype_str == "F8_E4M3") { return CodecDType::F8E4M3; } else if (dtype_str == "I32") { return CodecDType::I32; } else if (dtype_str == "I16") { return CodecDType::I16; } else if (dtype_str == "I8") { return CodecDType::I8; } else if (dtype_str == "U8") { return CodecDType::U8; } else { return 
std::nullopt; } } size_t codec_dtype_size(CodecDType dtype) { switch (dtype) { case CodecDType::F32: return 4; case CodecDType::F16: return 2; case CodecDType::BF16: return 2; case CodecDType::F8E5M2: return 1; case CodecDType::F8E4M3: return 1; case CodecDType::I32: return 4; case CodecDType::I16: return 2; case CodecDType::I8: return 1; case CodecDType::U8: return 1; } return 0; } int Tensor::from_json(const std::string& name, const json& val, void* bytes_ptr, size_t bytes_size) { this->name = name; std::string dtype_str = val.value("dtype", ""); if (auto dtype = string_to_codec_dtype(dtype_str)) { this->dtype = *dtype; } else { std::cerr << "bad dtype" << std::endl; return -1; } size_t dsize = codec_dtype_size(this->dtype); size_t numel = 1; if (val.at("shape").size() > 4) { std::cerr << "shape exceeds 4 dimensions" << std::endl; } for (size_t i = 0; i < val.at("shape").size() && i < 4; i++) { if (val.at("shape")[i].get() != val.at("shape")[i]) { std::cerr << "bad shape" << std::endl; return -1; } shape[i] = val.at("shape")[i].get(); numel *= shape[i]; } if (val.at("data_offsets").size() != 2) { return -1; } size_t offset_start = static_cast(val.at("data_offsets")[0]); size_t offset_end = static_cast(val.at("data_offsets")[1]); if (offset_start < 0 || offset_end <= offset_start || offset_end > bytes_size) { std::cerr << "bad offsets" << std::endl; return -1; } this->data = (char*)bytes_ptr + offset_start; this->size = offset_end - offset_start; // validate the shape matches the size if (numel * dsize != this->size) { std::cerr << "bad size" << std::endl; return -1; } return 0; } QTensor QTensor::from_codec_tensor(const Tensor& tensor, Quant weight_quant, std::array shape, const int debug_line) { QTensor qtensor; CodecDType expected_dtype = quant_to_codec_dtype(weight_quant); std::array expected_shape = shape; if (is_k_quant(weight_quant)) { size_t numel = 1; for (int i = 0; i < 4; i++) { if (shape[i] > 0) { numel *= shape[i]; } } size_t block_size = 
sizeof(block_q2_K); switch (weight_quant) { case Quant::Q2_K: { block_size = sizeof(block_q2_K); break; } case Quant::Q3_K: { block_size = sizeof(block_q3_K); break; } default: {} } size_t total_blocks = numel / QK_K; size_t total_bytes = total_blocks * block_size; if (tensor.dtype != expected_dtype || tensor.size != total_bytes) { std::cerr << "FATAL: tensor mismatch for " << tensor.name << std::endl; std::cerr << fmt::format( "expected: dtype={}, size={}", codec_dtype_to_string(expected_dtype), total_bytes ) << std::endl; std::cerr << fmt::format( "got: dtype={}, size={}", codec_dtype_to_string(tensor.dtype), tensor.size ) << std::endl; assert(false); } } else if (tensor.dtype != expected_dtype || tensor.shape != expected_shape) { std::cerr << "FATAL: tensor mismatch for " << tensor.name << std::endl; std::cerr << fmt::format( "expected: dtype={}, shape=[{},{},{},{}]", codec_dtype_to_string(expected_dtype), expected_shape[0], expected_shape[1], expected_shape[2], expected_shape[3] ) << std::endl; std::cerr << fmt::format( "got: dtype={}, shape=[{},{},{},{}]", codec_dtype_to_string(tensor.dtype), tensor.shape[0], tensor.shape[1], tensor.shape[2], tensor.shape[3] ) << std::endl; assert(false); } qtensor.quant = weight_quant; qtensor.shape = shape; qtensor.size = tensor.size; qtensor.data = tensor.data; return qtensor; } size_t QTensor::ndim() const { for (size_t i = 0; i < shape.size(); i++) { if (shape[i] == 0) { return i; } } return shape.size(); } size_t QTensor::n_elements() const { size_t numel = 1; for (size_t i = 0; i < shape.size(); i++) { if (shape[i] > 0) { numel *= shape[i]; } } return numel; } YALMData::YALMData(const std::string& dirname, bool lock_model_weights) { if (from_directory(dirname, lock_model_weights) != 0) { std::cerr << "failed to load YALMData from directory" << std::endl; assert(false); } } int YALMData::update_from_file(const std::string& filename, bool read_metadata, bool lock_model_weights) { std::cout << "loading data from file: " << 
filename << std::endl; int fd = open(filename.c_str(), O_RDONLY); if (fd == -1) { return -1; } struct stat st; if (fstat(fd, &st) != 0) { close(fd); return -1; } size_t size = st.st_size; int mmap_flags = MAP_PRIVATE; if (lock_model_weights) { // Eagerly load memory-mapped file into memory. // This ensures the mlock call later is locking memory already in RAM. mmap_flags |= MAP_POPULATE; } void* data = mmap(NULL, size, PROT_READ | PROT_WRITE, mmap_flags, fd, 0); if (data == MAP_FAILED) { close(fd); return -1; } if (lock_model_weights && mlock(data, size) != 0) { std::cerr << "Warning: mlock failed for model data. Performance may be suboptimal. Are you running as sudo?" << std::endl; } #ifdef __linux__ // increases readahead buffer size, resulting in faster cold loads posix_fadvise(fd, 0, size, POSIX_FADV_SEQUENTIAL); #endif close(fd); // Parse the metadata JSON and the tensors if (size < sizeof(uint64_t)) { munmap(data, size); return -1; } uint64_t json_size = *(uint64_t*)data; if (json_size == 0 || json_size > size - sizeof(uint64_t)) { munmap(data, size); return -1; } char* json_ptr = (char*)data + sizeof(uint64_t); void* bytes_ptr = (char*)data + sizeof(uint64_t) + json_size; size_t bytes_size = size - sizeof(uint64_t) - json_size; std::string json_str(json_ptr, json_size); json header = json::parse(json_str); for (auto& [key, val] : header.items()) { if (key == "__metadata__" && read_metadata) { metadata = val; } else if (key != "__metadata__") { Tensor& tensor = tensors[key]; if (tensor.from_json(key, val, bytes_ptr, bytes_size) != 0) { std::cerr << "failed to parse tensor " << key << std::endl; munmap(data, size); return -1; } } } return 0; } int YALMData::from_directory(const std::string& dirname, bool lock_model_weights) { std::vector files; DIR* dir = opendir(dirname.c_str()); if (dir == nullptr) { std::cout << "failed to open directory" << std::endl; return -1; } // Collect all files struct dirent* entry; while ((entry = readdir(dir)) != nullptr) { 
std::string filename = entry->d_name; // Skip . and .. directory entries if (filename != "." && filename != "..") { files.push_back(dirname + "/" + filename); } } closedir(dir); if (files.empty()) { std::cout << "no files found" << std::endl; return -1; } // Sort files to ensure consistent ordering std::sort(files.begin(), files.end()); // Read first file with metadata if (update_from_file(files[0], true, lock_model_weights) != 0) { std::cout << "failed to read metadata" << std::endl; return -1; } std::cout << "read metadata " << metadata << std::endl; // Read remaining files without metadata for (size_t i = 1; i < files.size(); i++) { if (update_from_file(files[i], false, lock_model_weights) != 0) { std::cout << "failed to read file " << files[i] << std::endl; return -1; } } return 0; } ================================================ FILE: src/codec.h ================================================ #pragma once #include "json.hpp" #include #include #include #include #include #include #include #include #include "immintrin.h" #include "f16cintrin.h" using json = nlohmann::json; typedef uint16_t f16_t; typedef uint8_t f8e5m2_t; #if defined(__AVX2__) && defined(__F16C__) inline float half_to_float(f16_t x) { return _cvtsh_ss(x); } inline f16_t float_to_half(float x) { return _cvtss_sh(x, 0); } #else inline float half_to_float(f16_t x) { assert(false && "float16 not supported on this platform"); return 0.0f; } inline f16_t float_to_half(float x) { assert(false && "float16 not supported on this platform"); return 0; } #endif inline float float8e5m2_to_float(f8e5m2_t x) { f16_t val = 0; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ memcpy(&val, &x, sizeof(f8e5m2_t)); #else memcpy((char*)&val + sizeof(f8e5m2_t), &x, sizeof(f8e5m2_t)); #endif return half_to_float(val); } [[maybe_unused]] inline f8e5m2_t float_to_float8e5m2(float x) { f16_t val = float_to_half(x); f8e5m2_t out; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ memcpy(&out, (char*)&val, sizeof(f8e5m2_t)); // TODO: 
round instead of truncate? #else memcpy(&out, (char*)&val + sizeof(f8e5m2_t), sizeof(f8e5m2_t)); // TODO: round instead of truncate? #endif return out; } // Quant of tensors saved in the file. // This corresponds to PyTorch tensor dtypes. enum class CodecDType { F32, F16, BF16, F8E5M2, F8E4M3, I32, I16, I8, U8, }; std::string codec_dtype_to_string(CodecDType dtype); std::optional string_to_codec_dtype(const std::string& dtype_str); size_t codec_dtype_size(CodecDType dtype); // Internal Quant. // This corresponds to the in-memory representation of tensors in the model. enum class Quant { F32, F16, F8E5M2, Q2_K, // 2-bit llama.cpp K-quants Q3_K, // 3-bit llama.cpp K-quants }; std::string quant_to_string(Quant quant); std::optional string_to_quant(const std::string& quant_str); double bits_per_weight(Quant quant, size_t blockwise_quant_size); CodecDType quant_to_codec_dtype(Quant quant); bool is_k_quant(Quant quant); // Tensor data as read from the file, which serializes tensors // in PyTorch format. struct Tensor { std::string name; CodecDType dtype; std::array shape = {0, 0, 0, 0}; void* data = nullptr; // not managed by Tensor size_t size; // size in bytes (number of elements * element size) // Returns 0 if successful, other if failed int from_json(const std::string& name, const json& j, void* bytes_ptr, size_t bytes_size); }; // Tensor with quantization metadata. 
struct QTensor { Quant quant = Quant::F32; std::array shape = {0, 0, 0, 0}; void* data = nullptr; // not managed by QTensor size_t size = 0; // size in bytes QTensor() = default; QTensor(Quant quant, std::array shape, void* data, size_t size) : quant(quant), shape(shape), data(data), size(size) {} QTensor(const QTensor& other) = default; static QTensor from_codec_tensor(const Tensor& tensor, Quant weight_quant, std::array shape, const int debug_line); size_t ndim() const; size_t n_elements() const; }; struct YALMData { json metadata; std::unordered_map tensors; YALMData(const std::string& dirname, bool lock_model_weights); private: // Update YALMData with tensors from a file // If read_metadata is true, also update metadata from this file // Returns 0 if successful, other if failed int update_from_file(const std::string& filename, bool read_metadata, bool lock_model_weights); // Initialize YALMData from all files in a directory // Metadata is read from the first file (in sorted order) // Returns 0 if successful, other if failed int from_directory(const std::string& dirname, bool lock_model_weights); }; ================================================ FILE: src/debug.cpp ================================================ #include "debug.h" #include "model.h" template bool BinaryDumper::save(const std::string& filename, const T* data, size_t count) { std::ofstream file(filename, std::ios::binary); if (!file) return false; // Write count first file.write(reinterpret_cast(&count), sizeof(count)); // Write T data file.write(reinterpret_cast(data), count * sizeof(T)); return file.good(); } template bool BinaryDumper::save(const std::string&, const float*, size_t); template bool BinaryDumper::save(const std::string&, const f16_t*, size_t); template std::vector BinaryDumper::load(const std::string& filename) { std::ifstream file(filename, std::ios::binary); if (!file) return {}; // Read count size_t count; file.read(reinterpret_cast(&count), sizeof(count)); // Read T data 
std::vector data(count); file.read(reinterpret_cast(data.data()), count * sizeof(T)); if (!file.good()) return {}; return data; } template std::vector BinaryDumper::load(const std::string&); template std::vector BinaryDumper::load(const std::string&); ================================================ FILE: src/debug.h ================================================ #include #include #include #include struct BinaryDumper { // Save T array to binary file template static bool save(const std::string& filename, const T* data, size_t count); // Load T array from binary file template static std::vector load(const std::string& filename); }; ================================================ FILE: src/infer.cpp ================================================ #include "model.h" #include #include #include #include "quant.h" #include "profile.h" #if DEBUG_MODEL #include "json.hpp" #include #include "fmt/format.h" static std::map _debug_map; std::map& debug_map_cpu() { return _debug_map; } template static std::vector copy_debug_tensor(T* x, size_t size) { std::vector out(size); for (size_t i = 0; i < size; i++) { out[i] = x[i]; } return out; } template static void save_debug_tensor(const std::string& name, T* x, size_t size) { _debug_map[name] = DebugTensor(copy_debug_tensor(x, size)); } void dump_debug_map(const std::string& filename) { std::ofstream out(filename); if (!out.is_open()) { fprintf(stderr, "Failed to open %s for writing\n", filename.c_str()); return; } // Write Python imports out << "import torch\n\n"; out << "debug_tensors = {\n"; // Iterate through debug map and write each tensor bool first = true; for (const auto& pair : _debug_map) { if (!first) { out << ",\n"; } first = false; const std::string& name = pair.first; const DebugTensor& tensor = pair.second; out << " '" << name << "': torch.tensor(["; // Write tensor values bool first_val = true; assert(tensor.data_type == DebugTensor::DataType::F32); for (const auto& val : tensor.data_f32) { if (!first_val) { out 
<< ", "; } first_val = false; // Use scientific notation with high precision out << std::scientific << std::setprecision(8) << val; } out << "])"; } out << "\n}\n"; out.close(); } void dump_debug_map_as_safetensors(const std::string& filename) { std::ofstream out(filename, std::ios::binary); if (!out.is_open()) { fprintf(stderr, "Failed to open %s for writing\n", filename.c_str()); return; } json header; size_t offset = 0; for (auto& [key, val] : _debug_map) { size_t offset_end = offset; CodecDType dtype = val.data_type == DebugTensor::DataType::F32 ? CodecDType::F32 : CodecDType::F16; if (dtype == CodecDType::F32) { offset_end += val.data_f32.size() * sizeof(float); header[key] = { {"dtype", codec_dtype_to_string(dtype)}, {"shape", {val.data_f32.size()}}, {"data_offsets", {offset, offset_end}} }; } else { offset_end += val.data_f16.size() * sizeof(f16_t); header[key] = { {"dtype", codec_dtype_to_string(dtype)}, {"shape", {val.data_f16.size()}}, {"data_offsets", {offset, offset_end}} }; } offset = offset_end; } header["__metadata__"] = {{"debug", ""}}; std::string header_str = header.dump(); // 1. write uint64 (size of json header) uint64_t header_len = static_cast(header_str.size()); out.write(reinterpret_cast(&header_len), sizeof(header_len)); // 2. write json header out.write(header_str.c_str(), header_len); // 3. 
write tensor data for (auto& [key, val] : _debug_map) { if (val.data_type == DebugTensor::DataType::F32) { out.write(reinterpret_cast(val.data_f32.data()), val.data_f32.size() * sizeof(float)); } else { out.write(reinterpret_cast(val.data_f16.data()), val.data_f16.size() * sizeof(f16_t)); } } out.close(); } #endif static void _matmul( float* xout, float* x, float* w, int n, int d, const int* block_size, float* scale, void* unused_aqb ) { // W (d,n) @ x (n,) -> xout (d,) (void)unused_aqb; static float one = 1.0f; int dummy_block_size[2] = {d, n}; if (scale == nullptr) { scale = &one; block_size = dummy_block_size; } int scale_num_cols = (n + block_size[1] - 1) / block_size[1]; for (int scale_i = 0; scale_i < cdiv(d, block_size[0]); scale_i++) { int ii; #pragma omp parallel for private(ii) for (ii = 0; ii < block_size[0]; ii++) { int i = scale_i * block_size[0] + ii; if (i >= d) { continue; } float val = 0.0f; for (int scale_j = 0; scale_j < cdiv(n, block_size[1]); scale_j++) { float scale_val = scale[scale_i * scale_num_cols + scale_j]; for (int jj = 0; jj < block_size[1]; jj++) { int j = scale_j * block_size[1] + jj; if (j >= n) { break; } val += (w[i * n + j] * x[j]) * scale_val; } } xout[i] = val; } } } // matmul supporting float16 weights via the F16C extension, which allows // conversion into float32 values before calculations. 
// W (d,n) row-major @ x (n,) -> xout (d,). Same tiling/scale contract as the
// scalar F32 variant above; the inner loop consumes 16 fp16 weights at a time.
static void _matmul(
  float* xout, float* x, f16_t* w, int n, int d,
  const int* block_size, float* scale, void* unused_aqb
) {
  (void)unused_aqb;
#if defined(__AVX2__) && defined(__F16C__)
  // W (d,n) @ x (n,) -> xout (d,)
  assert(n % 16 == 0);
  assert(scale == nullptr || block_size[1] % 16 == 0);
  static float one = 1.0f;
  int dummy_block_size[2] = {d, n};
  if (scale == nullptr) {
    // No per-tile scales: treat the whole matrix as one tile with scale 1.0.
    scale = &one;
    block_size = dummy_block_size;
  }
  int scale_num_cols = (n + block_size[1] - 1) / block_size[1];
  for (int scale_i = 0; scale_i < cdiv(d, block_size[0]); scale_i++) {
    int ii;
#pragma omp parallel for private(ii)
    for (ii = 0; ii < block_size[0]; ii++) {
      int i = scale_i * block_size[0] + ii;
      if (i >= d) {
        continue;
      }
      // Vectorized dot product of w[i][:] and x[:] where w is a packed float16 array.
      __m256 sumlo = _mm256_setzero_ps();
      __m256 sumhi = _mm256_setzero_ps();
      for (int scale_j = 0; scale_j < cdiv(n, block_size[1]); scale_j++) {
        // Broadcast scale_val to all elements of a vector
        float scale_val = scale[scale_i * scale_num_cols + scale_j];
        __m256 scale_vec = _mm256_set1_ps(scale_val);
        for (int jj = 0; jj < block_size[1]; jj+=16) {
          int j = scale_j * block_size[1] + jj;
          if (j >= n) {
            break;
          }
          // Extract the next set of 16 float16 weights from `w` and store them
          // to two separate float32 vectors of width 8 (`wveclo_ps`, `wvechi_ps`)
          __m256i wvec = _mm256_loadu_si256((__m256i*)&w[i * n + j]);
          __m128i wveclo = _mm256_extractf128_si256(wvec, 0);
          __m128i wvechi = _mm256_extractf128_si256(wvec, 1);
          __m256 wveclo_ps = _mm256_cvtph_ps(wveclo);
          __m256 wvechi_ps = _mm256_cvtph_ps(wvechi);
          // Scale the weight vectors
          wveclo_ps = _mm256_mul_ps(wveclo_ps, scale_vec);
          wvechi_ps = _mm256_mul_ps(wvechi_ps, scale_vec);
          // Extract the next two float32 vectors of width 8 `xveclo`, `xvechi` from `x`
          __m256 xveclo = _mm256_loadu_ps(&x[j]);
          __m256 xvechi = _mm256_loadu_ps(&x[j + 8]);
          // Compute vectorized FMAs: sumlo += wveclo * xveclo, sumhi += wvechi * xvechi
          sumlo = _mm256_fmadd_ps(wveclo_ps, xveclo, sumlo);
          sumhi = _mm256_fmadd_ps(wvechi_ps, xvechi, sumhi);
        }
      }
      // Horizontally reduce width-8 float32 vectors sumlo, sumhi to a scalar.
      __m256 sum8 = _mm256_add_ps(sumlo, sumhi); // sum8[0:8] = sumlo[0:8] + sumhi[0:8]
      __m128 sum4 = _mm_add_ps( // sum4[0:4] = sum8[0:4] + sum8[4:8]
        _mm256_extractf128_ps(sum8, 0),
        _mm256_extractf128_ps(sum8, 1)
      );
      __m128 sum1 = _mm_dp_ps(sum4, _mm_set1_ps(1.0f), 0xf1); // sum1[0] = dot(sum4, [1,1,1,1])
      xout[i] = _mm_cvtss_f32(sum1);
    }
  }
#else
  assert(false && "float16 not supported on this platform");
#endif
}

// matmul supporting float8e5m2 weights via AVX2 and F16C extensions, which (1)
// allows vectorized conversion from f8e5m2 to float16 and (2) conversion from
// float16 to float32 values before calculations.
static void _matmul(
  float* xout, float* x, f8e5m2_t* w, int n, int d,
  const int* block_size, float* scale, void* unused_aqb
) {
  (void)unused_aqb;
#if defined(__AVX2__) && defined(__F16C__)
  // W (d,n) @ x (n,) -> xout (d,)
  assert(n % 16 == 0);
  assert(scale == nullptr || block_size[1] % 16 == 0);
  static float one = 1.0f;
  int dummy_block_size[2] = {d, n};
  if (scale == nullptr) {
    scale = &one;
    block_size = dummy_block_size;
  }
  int scale_num_cols = (n + block_size[1] - 1) / block_size[1];
  for (int scale_i = 0; scale_i < cdiv(d, block_size[0]); scale_i++) {
    int ii;
#pragma omp parallel for private(ii)
    for (ii = 0; ii < block_size[0]; ii++) {
      int i = scale_i * block_size[0] + ii;
      if (i >= d) {
        continue;
      }
      // Vectorized dot product of w[i][:] and x[:] where w is a packed float8e5m2 array.
      __m256 sumlo = _mm256_setzero_ps();
      __m256 sumhi = _mm256_setzero_ps();
      for (int scale_j = 0; scale_j < cdiv(n, block_size[1]); scale_j++) {
        // Broadcast scale_val to all elements of a vector
        float scale_val = scale[scale_i * scale_num_cols + scale_j];
        __m256 scale_vec = _mm256_set1_ps(scale_val);
        for (int jj = 0; jj < block_size[1]; jj+=16) {
          int j = scale_j * block_size[1] + jj;
          if (j >= n) {
            break;
          }
          // Extract the next set of 16 float8e5m2 weights from `w` and store them
          // to two separate float32 vectors of width 8 (`wveclo_ps`, `wvechi_ps`)
          __m128i wvec = _mm_loadu_si128((__m128i*)&w[i * n + j]);
          // Take each half of `wvec` which consists of 8 float8e5m2 weights and
          // pad each 8-bit float8e5m2 value with 8 zeros in the mantissa (least significant bits),
          // converting to 8 float16 values.
          // (Works because e5m2 has the same exponent width as fp16: unpacking the
          // byte into the high half of each 16-bit lane yields a valid fp16.)
          __m128i wveclo = _mm_unpacklo_epi8(_mm_setzero_si128(), wvec);
          __m128i wvechi = _mm_unpackhi_epi8(_mm_setzero_si128(), wvec);
          // Widen each 8xf16 vector to 8xf32.
          __m256 wveclo_ps = _mm256_cvtph_ps(wveclo);
          __m256 wvechi_ps = _mm256_cvtph_ps(wvechi);
          // Scale the weight vectors
          wveclo_ps = _mm256_mul_ps(wveclo_ps, scale_vec);
          wvechi_ps = _mm256_mul_ps(wvechi_ps, scale_vec);
          // Extract the next two float32 vectors of width 8 `xveclo`, `xvechi` from `x`
          __m256 xveclo = _mm256_loadu_ps(&x[j]);
          __m256 xvechi = _mm256_loadu_ps(&x[j + 8]);
          // Compute vectorized FMAs: sumlo += wveclo * xveclo, sumhi += wvechi * xvechi
          sumlo = _mm256_fmadd_ps(wveclo_ps, xveclo, sumlo);
          sumhi = _mm256_fmadd_ps(wvechi_ps, xvechi, sumhi);
        }
      }
      // Horizontally reduce width-8 float32 vectors sumlo, sumhi to a scalar.
      __m256 sum8 = _mm256_add_ps(sumlo, sumhi); // sum8[0:8] = sumlo[0:8] + sumhi[0:8]
      __m128 sum4 = _mm_add_ps( // sum4[0:4] = sum8[0:4] + sum8[4:8]
        _mm256_extractf128_ps(sum8, 0),
        _mm256_extractf128_ps(sum8, 1)
      );
      __m128 sum1 = _mm_dp_ps(sum4, _mm_set1_ps(1.0f), 0xf1); // sum1[0] = dot(sum4, [1,1,1,1])
      xout[i] = _mm_cvtss_f32(sum1);
    }
  }
#else
  assert(false && "float8e5m2 not supported on this platform");
#endif
}

// Q2_K weights x Q8_K activations: quantize x on the fly, then use the ggml
// K-quant dot-product kernel per output row.
static void _matmul(
  float* xout, float* x, block_q2_K* w, int n, int d,
  const int* unused_block_size, float* unused_scale, void* aqb
) {
  // W (d,n) @ x (n,) -> xout (d,)
  (void)unused_block_size;
  (void)unused_scale;
  size_t blocks_per_row = n / QK_K;
  block_q8_K* aqb_q8 = (block_q8_K*)aqb;
  // Quantize activations in chunks of 2 superblocks so the work parallelizes.
  int chunk_size = QK_K * 2;
  int num_chunks = cdiv(n, chunk_size);
  {
    PROFILE_BLOCK("quantize_acts");
#pragma omp parallel for
    for (int i = 0; i < num_chunks; i++) {
      int start = i * chunk_size;
      int k = (i == num_chunks - 1) ? (n - start) : chunk_size;
      if (k > 0) {
        quantize_row_q8_K_ref(x + start, aqb_q8 + (start/QK_K), k);
      }
    }
  }
  {
    PROFILE_BLOCK("matmul_w2a8");
    int i;
#pragma omp parallel for private(i)
    for (i = 0; i < d; i++) {
      ggml_vec_dot_q2_K_q8_K(n, xout + i, w + i * blocks_per_row, aqb_q8);
    }
  }
}

// Q3_K weights x Q8_K activations; same structure as the Q2_K variant above.
static void _matmul(
  float* xout, float* x, block_q3_K* w, int n, int d,
  const int* unused_block_size, float* unused_scale, void* aqb
) {
  // W (d,n) @ x (n,) -> xout (d,)
  (void)unused_block_size;
  (void)unused_scale;
  size_t blocks_per_row = n / QK_K;
  block_q8_K* aqb_q8 = (block_q8_K*)aqb;
  int chunk_size = QK_K * 2;
  int num_chunks = cdiv(n, chunk_size);
  {
    PROFILE_BLOCK("quantize_acts");
#pragma omp parallel for
    for (int i = 0; i < num_chunks; i++) {
      int start = i * chunk_size;
      int k = (i == num_chunks - 1) ? (n - start) : chunk_size;
      if (k > 0) {
        quantize_row_q8_K_ref(x + start, aqb_q8 + (start/QK_K), k);
      }
    }
  }
  {
    PROFILE_BLOCK("matmul_w3a8");
    int i;
#pragma omp parallel for private(i)
    for (i = 0; i < d; i++) {
      ggml_vec_dot_q3_K_q8_K(n, xout + i, w + i * blocks_per_row, aqb_q8);
    }
  }
}

// Dispatch to the quant-specific `_matmul` overload based on w.quant.
// `scale` (optional, must itself be F32) carries per-tile dequant factors.
static void matmul(
  float* xout, float* x, const QTensor& w, const int* block_size, std::optional scale, void* aqb
) {
  // W (d,n) @ x (n,) -> xout (d,)
  int n = w.shape[1];
  int d = w.shape[0];
  float* scale_data = nullptr;
  if (scale) {
    assert(scale->quant == Quant::F32);
    scale_data = static_cast(scale->data);
  }
  switch (w.quant) {
    case Quant::F32: {
      _matmul(xout, x, static_cast(w.data), n, d, block_size, scale_data, aqb);
      break;
    }
    case Quant::F16: {
      _matmul(xout, x, static_cast(w.data), n, d, block_size, scale_data, aqb);
      break;
    }
    case Quant::F8E5M2: {
      _matmul(xout, x, static_cast(w.data), n, d, block_size, scale_data, aqb);
      break;
    }
    case Quant::Q2_K: {
      _matmul(xout, x, static_cast(w.data), n, d, block_size, scale_data, aqb);
      break;
    }
    case Quant::Q3_K: {
      _matmul(xout, x, static_cast(w.data), n, d, block_size, scale_data, aqb);
      break;
    }
    default: assert(false);
  }
}

// Convenience wrapper: matmul with no dequant scales and no activation buffer.
void matmul_unscaled(float* xout, float* x, const QTensor& w) {
  matmul(xout, x, w, nullptr, std::nullopt, nullptr);
}

// Matmul against one expert's slice of a stacked (n_experts, d, n) weight
// tensor; offsets both the weights and (optionally) the per-expert scales.
static void matmul_expert(
  float* xout, float* x, const QTensor& w_experts, int expert_index, const int* block_size, std::optional scale_experts, void* aqb
) {
  // W_experts (n_experts,d,n)
  // W (d,n) @ x (n,) -> xout (d,)
  int n = w_experts.shape[2];
  int d = w_experts.shape[1];
  size_t expert_size = n * d;
  float* scale_data = nullptr;
  if (scale_experts) {
    assert(scale_experts->quant == Quant::F32);
    int expert_scale_size = cdiv(d, block_size[0]) * cdiv(n, block_size[1]);
    size_t scale_offset = expert_index * expert_scale_size;
    scale_data = static_cast(scale_experts->data) + scale_offset;
  }
  size_t weight_offset = expert_index * expert_size;
  if (is_k_quant(w_experts.quant)) {
    // In K-quants, each element of the weight tensor is a block of QK_K elements
    weight_offset = weight_offset / QK_K;
  }
  switch (w_experts.quant) {
    case Quant::F32: {
      _matmul(xout, x, static_cast(w_experts.data) + weight_offset, n, d, block_size, scale_data, aqb);
      break;
    }
    case Quant::F16: {
      _matmul(xout, x, static_cast(w_experts.data) + weight_offset, n, d, block_size, scale_data, aqb);
      break;
    }
    case Quant::F8E5M2: {
      _matmul(xout, x, static_cast(w_experts.data) + weight_offset, n, d, block_size, scale_data, aqb);
      break;
    }
    case Quant::Q2_K: {
      _matmul(xout, x, static_cast(w_experts.data) + weight_offset, n, d, block_size, scale_data, aqb);
      break;
    }
    case Quant::Q3_K: {
      _matmul(xout, x, static_cast(w_experts.data) + weight_offset, n, d, block_size, scale_data, aqb);
      break;
    }
    default: assert(false);
  }
}

// Compute the softmax of an input vector `x` of length `size` and store it in `o`.
// Max-subtracted for numerical stability; `o` may alias `x` (callers rely on this).
static void softmax(float* o, float* x, int size) {
  float score_max = -FLT_MAX;
  for (int i = 0; i < size; ++i) {
    if (x[i] > score_max) {
      score_max = x[i];
    }
  }
  float score_sum = 0.0f;
  for (int i = 0; i < size; ++i) {
    o[i] = expf(x[i] - score_max);
    score_sum += o[i];
  }
  for (int i = 0; i < size; ++i) {
    o[i] /= score_sum;
  }
}

inline float sigmoid(float x) {
  return 1.0f / (1.0f + expf(-x));
}

// MoE router: scores the gate logits in `x` (in place), adds the optional gate
// bias, selects the top-K experts by the configured method, and writes the
// (optionally normalized, always scaled) routing weights.
static void moe_gate(
  float* moe_weights,
  std::optional moegate_bias,
  int* active_experts,
  float* x,
  int n_routed_experts,
  int n_active_routed,
  bool norm_topk_prob,
  float routed_scaling_factor,
  ScoringFunc scoring_func,
  TopKMethod topk_method,
  int n_group,
  int topk_group
) {
  // Set moe_weights[:n_active_routed] to the weights of the top K experts.
  // Set active_experts[:n_active_routed] to the indices of the top K experts.
  if (scoring_func == ScoringFunc::SOFTMAX) {
    softmax(x, x, n_routed_experts);
  } else if (scoring_func == ScoringFunc::SIGMOID) {
    for (int i = 0; i < n_routed_experts; i++) {
      x[i] = sigmoid(x[i]);
    }
  }
  if (moegate_bias) {
    float* bias_data = static_cast(moegate_bias->data);
    for (int i = 0; i < n_routed_experts; ++i) {
      x[i] += bias_data[i];
    }
  }
  // top k
  float wsum = 0.0f;
  if (topk_method == TopKMethod::GREEDY) {
    // Repeated linear scans with a 256-wide selection bitmask (1 bit per expert).
    assert(n_routed_experts <= 256);
    std::array mask{};
    for (int k = 0; k < n_active_routed; ++k) {
      int best = -1;
      for (int j = 0; j < n_routed_experts; ++j) {
        int mask_i = j / 8;
        int mask_r = j % 8;
        if ((mask[mask_i] & (1ull << mask_r)) == 0 && (best == -1 || x[j] > x[best])) {
          best = j;
        }
      }
      active_experts[k] = best;
      wsum += x[active_experts[k]];
      int best_mask_i = best / 8;
      int best_mask_r = best % 8;
      mask[best_mask_i] |= 1ull << best_mask_r;
    }
  } else if (topk_method == TopKMethod::GROUP_LIMITED_GREEDY) {
    int group_size = n_routed_experts / n_group;
    // First pass: select topk_group within each group
    std::array mask{};
    for (int g = 0; g < n_group; g++) {
      // Select topk_group items from this group
      for (int k = 0; k < topk_group; k++) {
        int best = -1;
        for (int j = g*group_size; j < (g+1)*group_size; j++) {
          int mask_i = j / 8;
          int mask_r = j % 8;
          // NOTE(review): unlike the GREEDY branch above, `best` is not guarded
          // with `best == -1 ||` here, so the first unmasked comparison in each
          // group reads x[-1] (out of bounds). Looks like a bug — confirm and
          // mirror the GREEDY guard.
          if ((mask[mask_i] & (1u << mask_r)) == 0 && x[j] > x[best]) {
            best = j;
          }
        }
        int best_mask_i = best / 8;
        int best_mask_r = best % 8;
        mask[best_mask_i] |= 1u << best_mask_r;
      }
    }
    // Flip mask so that now we only look at the topk_group items in each group
    for (int i = 0; i < 32; i++) {
      mask[i] = ~mask[i];
    }
    // Second pass: select top n_active_routed overall
    for (int k = 0; k < n_active_routed; ++k) {
      int best = -1;
      for (int j = 0; j < n_routed_experts; ++j) {
        int mask_i = j / 8;
        int mask_r = j % 8;
        if ((mask[mask_i] & (1ull << mask_r)) == 0 && (best == -1 || x[j] > x[best])) {
          best = j;
        }
      }
      active_experts[k] = best;
      wsum += x[active_experts[k]];
      int best_mask_i = best / 8;
      int best_mask_r = best % 8;
      mask[best_mask_i] |= 1ull << best_mask_r;
    }
  } else if (topk_method == TopKMethod::NOAUX_TC) {
    assert(false && "TODO: implement noaux_tc");
  }
  // When norm_topk_prob is false, skip normalization by using wsum = 1.
  if (!norm_topk_prob) {
    wsum = 1.0;
  }
  for (int k = 0; k < n_active_routed; ++k) {
    moe_weights[k] = x[active_experts[k]] / wsum * routed_scaling_factor;
  }
}

// RMS normalization: o = x / rms(x) * weight, with eps added inside the sqrt.
// `o` may alias `x` (callers pass the same buffer for both).
static void rmsnorm(float* o, float* x, float* weight, int size, float eps) {
  float rms = 0.0f;
  for (int i = 0; i < size; ++i) {
    rms += x[i] * x[i];
  }
  rms = sqrtf(rms / size + eps);
  float scale = 1.0f / rms;
  for (int i = 0; i < size; ++i) {
    o[i] = x[i] * scale * weight[i];
  }
}

// Classic layer norm with optional bias; currently unused in this file.
[[maybe_unused]] static void layernorm(float* o, float* x, float* weight, float* bias, int size, float eps) {
  float mean = 0.0f;
  for (int i = 0; i < size; ++i) {
    mean += x[i];
  }
  mean /= size;
  float var = 0.0f;
  for (int i = 0; i < size; ++i) {
    var += (x[i] - mean) * (x[i] - mean);
  }
  var /= size;
  float scale = 1.0f / sqrtf(var + eps);
  if (bias) {
    for (int i = 0; i < size; ++i) {
      o[i] = (x[i] - mean) * scale * weight[i] + bias[i];
    }
  } else {
    for (int i = 0; i < size; ++i) {
      o[i] = (x[i] - mean) * scale * weight[i];
    }
  }
}

// tanh-approximation GELU (0.797885 ~= sqrt(2/pi)).
inline float gelu(float x) {
  return 0.5f * x * (1.0f + tanhf(0.797885f * (x + 0.044715f * x * x * x)));
}

inline float silu(float x) {
  return x / (1.0f + expf(-x));
}

inline float clip(float x, float v) {
  return x < -v ? -v : (x > v ? v : x);
}

static void rope(float* buf, float* vec, int d, int head_dim, int pos, float theta) {
  // For some reason, DeepSeek-V2 was trained using rope output
  // layout transposed compared to the input. This means we need a buffer
  // to hold intermediate results.
  // Input pairs (vec[i], vec[i+1]) are rotated, then the rotated halves are
  // written split across buf[0:d/2] and buf[d/2:d] before copying back.
  assert(d % 2 == 0);
  for (int i = 0; i < d; i += 2) {
    int j_head = i % head_dim;
    float freq = 1.0f / powf(theta, (float)j_head / (float)head_dim);
    float val = pos * freq;
    float fcr = cosf(val);
    float fci = sinf(val);
    float v0 = vec[i];
    float v1 = vec[i + 1];
    buf[i/2] = v0 * fcr - v1 * fci;
    buf[i/2 + d/2] = v0 * fci + v1 * fcr;
  }
  for (int i = 0; i < d; i++) {
    vec[i] = buf[i];
  }
}

// V3 rope: rotates adjacent pairs in place (no transposed output layout).
static void rope_v3(float* vec, int d, int head_dim, int pos, float theta) {
  int rotary_dim = head_dim;
  for (int i = 0; i < d; i += 2) {
    int j_head = i % head_dim;
    float freq = j_head >= rotary_dim ? 0.f : 1.0f / powf(theta, (float)j_head / (float)rotary_dim);
    float val = pos * freq;
    float fcr = cosf(val);
    float fci = sinf(val);
    float v0 = vec[i];
    float v1 = vec[i + 1];
    vec[i] = v0 * fcr - v1 * fci;
    vec[i + 1] = v0 * fci + v1 * fcr;
  }
}

// f16 variant of `rope` above: converts through float32 for the rotation.
static void rope(float* buf, f16_t* vec, int d, int head_dim, int pos, float theta) {
  // For some reason, DeepSeek-V2 was trained using rope output
  // layout transposed compared to the input. This means we need a buffer
  // to hold intermediate results.
  assert(d % 2 == 0);
  for (int i = 0; i < d; i += 2) {
    int j_head = i % head_dim;
    float freq = 1.0f / powf(theta, (float)j_head / (float)head_dim);
    float val = pos * freq;
    float fcr = cosf(val);
    float fci = sinf(val);
    float v0 = half_to_float(vec[i]);
    float v1 = half_to_float(vec[i + 1]);
    buf[i/2] = v0 * fcr - v1 * fci;
    buf[i/2 + d/2] = v0 * fci + v1 * fcr;
  }
  for (int i = 0; i < d; i++) {
    vec[i] = float_to_half(buf[i]);
  }
}

// f16 variant of `rope_v3` above: in-place pairwise rotation via float32.
static void rope_v3(f16_t* vec, int d, int head_dim, int pos, float theta) {
  int rotary_dim = head_dim;
  for (int i = 0; i < d; i += 2) {
    int j_head = i % head_dim;
    float freq = j_head >= rotary_dim ? 0.f : 1.0f / powf(theta, (float)j_head / (float)rotary_dim);
    float val = pos * freq;
    float fcr = cosf(val);
    float fci = sinf(val);
    float v0 = half_to_float(vec[i]);
    float v1 = half_to_float(vec[i + 1]);
    vec[i] = float_to_half(v0 * fcr - v1 * fci);
    vec[i + 1] = float_to_half(v0 * fci + v1 * fcr);
  }
}

// Compute next value in a sequence for a single causal self-attention head.
void attn(
  float* xout,    // (n_heads * v_head_dim,) - output vector
  float* atth,    // (kv_len,) - scratch space to hold attention scores of the sequence
  const float* qh, // (head_dim,) - query vector for this head
  const f16_t* kh, // (kv_len, n_heads, head_dim) - buffer containing key vectors of the sequence for all KV heads
  const f16_t* vh, // (kv_len, n_heads, v_head_dim) - buffer containing value vectors of the sequence for all KV heads
  int head_dim,   // size of the "key-space"
  int v_head_dim, // size of the "value-space"
  int n_heads,    // number of attention heads
  int kv_len      // number of tokens of the sequence we will attend over
) {
  int k_stride = n_heads * head_dim; // stride per token in this k head
  // calculate attention scores as dot products of q and k
  for (int t = 0; t < kv_len; ++t) {
    float score = 0.0f;
    for (int i = 0; i < head_dim; ++i) {
      score += qh[i] * half_to_float(kh[t * k_stride + i]);
    }
    score /= sqrtf(head_dim);
    atth[t] = score;
  }
  // softmax the scores to get attention weights over [0..kv_len)
  softmax(atth, atth, kv_len);
  int v_stride = n_heads * v_head_dim; // stride per token in this v head
  // mix values with attention weights
  for (int i = 0; i < v_head_dim; ++i) {
    float vi = 0.0f;
    for (int t = 0; t < kv_len; ++t) {
      vi += atth[t] * half_to_float(vh[t * v_stride + i]);
    }
    xout[i] = vi;
  }
}

// Compute next value in a sequence for a single causal self-attention head.
// MLA variant: uses combined latent-KV cache and PE-KV cache.
void attn_mla( float* xout, // (n_heads * kv_lora_rank,) - output vector float* atth, // (kv_len,) - scratch space to hold attention scores of the sequence const float* qh_c, // (kv_lora_rank,) - transformed latent query vector for this head const float* qh_rope, // (qk_rope_head_dim,) - PE-query vector for this head const f16_t* compressed_kv, // (kv_len, kv_lora_rank) - buffer containing latent vectors of the sequence const f16_t* k_rope, // (kv_len, qk_rope_head_dim) - buffer containing PE key-vectors of the sequence int head_dim, // used for softmax scale factor int kv_lora_rank, // size of the "latent-space" int qk_rope_head_dim, // size of the "PE-space" int kv_len // number of tokens of the sequence we will attend over ) { int kv_stride = kv_lora_rank; // stride per token in the latent buffer int k_rope_stride = qk_rope_head_dim; // stride per token in the PE buffer // calculate attention scores as dot products of q and k for (int t = 0; t < kv_len; ++t) { float score = 0.0f; for (int i = 0; i < kv_lora_rank; ++i) { score += qh_c[i] * half_to_float(compressed_kv[t * kv_stride + i]); } for (int i = 0; i < qk_rope_head_dim; ++i) { score += qh_rope[i] * half_to_float(k_rope[t * k_rope_stride + i]); } score /= sqrtf(head_dim); atth[t] = score; } // softmax the scores to get attention weights over [0..kv_len) softmax(atth, atth, kv_len); // mix latents with attention weights for (int i = 0; i < kv_lora_rank; ++i) { float vi = 0.0f; for (int t = 0; t < kv_len; ++t) { vi += atth[t] * half_to_float(compressed_kv[t * kv_stride + i]); } xout[i] = vi; } } // Compute forward pass for a single block and update the inference state accordingly. // PRECONDITIONS: // - `s.x()` contains the input to the block. Output will also go here. // - Block KV cache is hydrated. 
template void Block::_block_cpu( InferenceState& s, // inference state int pos, // index of the current token in the sequence int kv_sink, // number of sink tokens currently in the KV cache int kv_pos, // index of the current token in the kv cache, must be in [0..kv_len) since kv cache is a ring buffer int kv_len // number of tokens in the kv cache that we will attend over ) const { const Config& c = *_config; // Attention pre-norm switch (c.norm_type) { case LayerNormType::RMSNorm: { rmsnorm(s.xb(), s.x(), rms_att_weight(), c.dim, c.norm_eps); break; } } // Attention output into `hb` attention_impl(s, pos, kv_sink, kv_pos, kv_len); // Residual back into `x` for (int i = 0; i < c.dim; ++i) { s.x()[i] += s.hb()[i]; } // FFN pre-norm switch (c.norm_type) { case LayerNormType::RMSNorm: { rmsnorm(s.xb(), s.x(), rms_ffn_weight(), c.dim, c.norm_eps); break; } } if (c.n_routed_experts > 0 && moegate() != std::nullopt) { PROFILE_BLOCK(ffn_moe); // Block is a sparse MoE FFN layer PROFILE(matmul_unscaled(s.moe_weights(), s.xb(), *moegate())); moe_gate( s.active_experts_weights(), moegate_bias(), s.active_experts(), s.moe_weights(), c.n_routed_experts, c.n_active_routed, c.norm_topk_prob, c.routed_scaling_factor, c.scoring_func, c.topk_method, c.n_group, c.topk_group ); for (int k = 0; k < c.n_active_routed; ++k) { int expert_index = s.active_experts()[k]; // mix self.w2(F.silu(self.w1(x)) * self.w3(x)) // Note this is a feedforward with a GLU, not a simple MLP. 
PROFILE(matmul_expert(s.hb(), s.xb(), *w1(), expert_index, c.block_size.data(), _s1, s.aqb())); PROFILE(matmul_expert(s.hb2(), s.xb(), *w3(), expert_index, c.block_size.data(), _s3, s.aqb())); switch (c.act) { case ActivationType::GELU: { for (int i = 0; i < c.moe_intermediate_size; ++i) { s.hb()[i] = gelu(s.hb()[i]) * s.hb2()[i]; } break; } case ActivationType::SILU: { for (int i = 0; i < c.moe_intermediate_size; ++i) { s.hb()[i] = silu(s.hb()[i]) * s.hb2()[i]; } break; } } PROFILE(matmul_expert(s.xb2(), s.hb(), *w2(), expert_index, c.block_size.data(), _s2, s.aqb())); float expert_weight = s.active_experts_weights()[k]; for (int i = 0; i < c.dim; ++i) { s.x()[i] += s.xb2()[i] * expert_weight; } } if (c.n_shared_experts > 0) { // mix self.w2(F.silu(self.w1(x)) * self.w3(x)) // Note this is a feedforward with a GLU, not a simple MLP. PROFILE(matmul(s.hb(), s.xb(), *shared_w1(), c.block_size.data(), _shared_s1, s.aqb())); PROFILE(matmul(s.hb2(), s.xb(), *shared_w3(), c.block_size.data(), _shared_s3, s.aqb())); switch (c.act) { case ActivationType::GELU: { for (int i = 0; i < c.n_shared_experts * c.moe_intermediate_size; ++i) { s.hb()[i] = gelu(s.hb()[i]) * s.hb2()[i]; } break; } case ActivationType::SILU: { for (int i = 0; i < c.n_shared_experts * c.moe_intermediate_size; ++i) { s.hb()[i] = silu(s.hb()[i]) * s.hb2()[i]; } break; } } PROFILE(matmul(s.xb2(), s.hb(), *shared_w2(), c.block_size.data(), _shared_s2, s.aqb())); // residual connection back into x for (int i = 0; i < c.dim; ++i) { s.x()[i] += s.xb2()[i]; } } } else { PROFILE_BLOCK(ffn_dense); // Block is a dense FFN layer // mix self.w2(F.silu(self.w1(x)) * self.w3(x)) // Note this is a feedforward with a GLU, not a simple MLP. 
PROFILE(matmul(s.hb(), s.xb(), *w1(), c.block_size.data(), _s1, s.aqb())); PROFILE(matmul(s.hb2(), s.xb(), *w3(), c.block_size.data(), _s3, s.aqb())); switch (c.act) { case ActivationType::GELU: { for (int i = 0; i < c.hidden_dim; ++i) { s.hb()[i] = gelu(s.hb()[i]) * s.hb2()[i]; } break; } case ActivationType::SILU: { for (int i = 0; i < c.hidden_dim; ++i) { s.hb()[i] = silu(s.hb()[i]) * s.hb2()[i]; } break; } } PROFILE(matmul(s.xb2(), s.hb(), *w2(), c.block_size.data(), _s2, s.aqb())); // residual connection back into x for (int i = 0; i < c.dim; ++i) { s.x()[i] += s.xb2()[i]; } } } template void BlockMHA::_attention_impl( InferenceState& s, int pos, int kv_sink, int kv_pos, int kv_len ) const { PROFILE_BLOCK(attn_mha); const Config& c = *_config; // qkv matmuls for this position if (c.q_lora_rank > 0) { PROFILE(matmul(s.q_a(), s.xb(), *wq_a(), c.block_size.data(), _sq_a, s.aqb())); switch (c.norm_type) { case LayerNormType::RMSNorm: { rmsnorm(s.q_a(), s.q_a(), this->rms_q_a_weight(), c.q_lora_rank, c.norm_eps); break; } } PROFILE(matmul(s.q(), s.q_a(), *wq_b(), c.block_size.data(), _sq_b, s.aqb())); } else { PROFILE(matmul(s.q(), s.xb(), *wq(), c.block_size.data(), _sq, s.aqb())); } PROFILE(matmul(s.kv_a(), s.xb(), *wkv_a(), c.block_size.data(), _skv_a, s.aqb())); // Apply RoPE positional encoding int q_pe_offset = c.head_dim - c.qk_rope_head_dim; bool is_v3 = c.has_moegate_bias; for (int h = 0; h < c.n_heads; h++) { if (is_v3) { rope_v3(s.q(h) + q_pe_offset, c.qk_rope_head_dim, c.qk_rope_head_dim, pos, c.rope_theta); } else { rope(s.ropebuf(), s.q(h) + q_pe_offset, c.qk_rope_head_dim, c.qk_rope_head_dim, pos, c.rope_theta); } } int kv_pe_offset = c.kv_lora_rank; float* k_rope = s.kv_a() + kv_pe_offset; if (is_v3) { rope_v3(k_rope, c.qk_rope_head_dim, c.qk_rope_head_dim, pos, c.rope_theta); } else { rope(s.ropebuf(), k_rope, c.qk_rope_head_dim, c.qk_rope_head_dim, pos, c.rope_theta); } // rms norm to non-pe chunk of kv_a rmsnorm(s.kv_a(), s.kv_a(), 
this->rms_kv_a_weight(), c.kv_lora_rank, c.norm_eps); // un-compress the latent kv via multiplication with wkv_b int qk_nope_head_dim = c.head_dim - c.qk_rope_head_dim; PROFILE(matmul(s.kv_b(), s.kv_a(), *wkv_b(), c.block_size.data(), _skv_b, s.aqb())); // concatenate kv_b and k_rope in each head to build key heads for (int h = 0; h < c.n_heads; h++) { for (int i = 0; i < qk_nope_head_dim; i++) { s.k(h)[i] = s.kv_b(h)[i]; } for (int i = 0; i < c.qk_rope_head_dim; i++) { s.k(h)[qk_nope_head_dim + i] = k_rope[i]; } } // transfer value heads from kv_b for (int h = 0; h < c.n_heads; h++) { for (int i = 0; i < c.v_head_dim; i++) { s.v(h)[i] = s.kv_b(h)[qk_nope_head_dim + i]; } } // update kv cache int key_dim = c.n_heads * c.head_dim; for (int i = 0; i < key_dim; ++i) { this->key_cache(kv_pos)[i] = float_to_half(s.k()[i]); } int value_dim = c.n_heads * c.v_head_dim; for (int i = 0; i < value_dim; ++i) { this->value_cache(kv_pos)[i] = float_to_half(s.v()[i]); } // Sink tokens remain untouched while the rest of the KV cache is incrementally // replaced in ring order, but sink i must always be positioned (max_seq_len - i) // away from current timestep. Hence, each forward pass, rotate sink tokens // forward by 1. See https://arxiv.org/abs/2309.17453 for more. 
for (int r = 0; r < kv_sink; r++) { f16_t* key = key_cache(r); // in-place update PE-chunk of each key head int q_pe_offset = c.head_dim - c.qk_rope_head_dim; for (int h = 0; h < c.n_heads; h++) { f16_t* kh = key + h * c.head_dim; if (is_v3) { rope_v3(kh + q_pe_offset, c.qk_rope_head_dim, c.qk_rope_head_dim, 1, c.rope_theta); } else { rope(s.ropebuf(), kh + q_pe_offset, c.qk_rope_head_dim, c.qk_rope_head_dim, 1, c.rope_theta); } } } { PROFILE_BLOCK(self_attn_mha_inner); f16_t* kb = this->key_cache(); f16_t* vb = this->value_cache(); int h; #pragma omp parallel for private(h) for (h = 0; h < c.n_heads; h++) { int k_head_offset = h * c.head_dim; int v_head_offset = h * c.v_head_dim; f16_t* kh = kb + k_head_offset; // Use pointer arithmetic for base address f16_t* vh = vb + v_head_offset; // Use pointer arithmetic for base address attn( s.xb2(h, c.v_head_dim), // Output per Q head s.att(h), // Attention scores per Q head s.q(h), // Query vector for this head kh, // Pointer to start of relevant K cache base vh, // Pointer to start of relevant V cache base c.head_dim, // Dimension of K space c.v_head_dim, // Dimension of V space c.n_heads, // Total number of KV heads (passed to inner attn func for stride calculation) kv_len // Sequence length to attend over ); } } // final matmul to get output of the attention, place result in s.hb() for residual connection PROFILE(matmul(s.hb(), s.xb2(), *wo(), c.block_size.data(), _so, s.aqb())); } template void BlockMLA::_attention_impl( InferenceState& s, int pos, int kv_sink, int kv_pos, int kv_len ) const { PROFILE_BLOCK(attn_mla); const Config& c = *_config; assert(c.q_lora_rank > 0); // MLA requires q_lora_rank > 0 // qkv down projections PROFILE(matmul(s.q_a(), s.xb(), *wq_a(), c.block_size.data(), _sq_a, s.aqb())); switch (c.norm_type) { case LayerNormType::RMSNorm: { rmsnorm(s.q_a(), s.q_a(), this->rms_q_a_weight(), c.q_lora_rank, c.norm_eps); break; } } PROFILE(matmul(s.kv_a(), s.xb(), *wkv_a(), c.block_size.data(), _skv_a, 
s.aqb())); // query transformations PROFILE(matmul(s.q_rope(), s.q_a(), *wq_rope_b(), c.block_size.data(), _sq_rope_b, s.aqb())); PROFILE(matmul(s.q_c(), s.q_a(), *wc(), c.block_size.data(), _sc, s.aqb())); // Apply RoPE positional encoding bool is_v3 = c.has_moegate_bias; for (int h = 0; h < c.n_heads; h++) { if (is_v3) { rope_v3(s.q_rope(h), c.qk_rope_head_dim, c.qk_rope_head_dim, pos, c.rope_theta); } else { rope(s.ropebuf(), s.q_rope(h), c.qk_rope_head_dim, c.qk_rope_head_dim, pos, c.rope_theta); } } int kv_pe_offset = c.kv_lora_rank; float* k_rope = s.kv_a() + kv_pe_offset; if (is_v3) { rope_v3(k_rope, c.qk_rope_head_dim, c.qk_rope_head_dim, pos, c.rope_theta); } else { rope(s.ropebuf(), k_rope, c.qk_rope_head_dim, c.qk_rope_head_dim, pos, c.rope_theta); } // rms norm to non-pe chunk of kv_a (compressed latent kv) rmsnorm(s.kv_a(), s.kv_a(), this->rms_kv_a_weight(), c.kv_lora_rank, c.norm_eps); // update kv cache for (int i = 0; i < c.kv_lora_rank; ++i) { this->kv_nope_cache(kv_pos)[i] = float_to_half(s.kv_a()[i]); } for (int i = 0; i < c.qk_rope_head_dim; ++i) { this->kv_rope_cache(kv_pos)[i] = float_to_half(k_rope[i]); } // Sink tokens remain untouched while the rest of the KV cache is incrementally // replaced in ring order, but sink i must always be positioned (max_seq_len - i) // away from current timestep. Hence, each forward pass, rotate sink tokens // forward by 1. See https://arxiv.org/abs/2309.17453 for more. 
for (int r = 0; r < kv_sink; r++) { f16_t* kv = this->kv_rope_cache(r); if (is_v3) { rope_v3(kv, c.qk_rope_head_dim, c.qk_rope_head_dim, 1, c.rope_theta); } else { rope(s.ropebuf(), kv, c.qk_rope_head_dim, c.qk_rope_head_dim, 1, c.rope_theta); } } { PROFILE_BLOCK(self_attn_mla_inner); int h; #pragma omp parallel for private(h) for (h = 0; h < c.n_heads; h++) { attn_mla( s.xb2(h, c.kv_lora_rank), // Output is per-head latent vector s.att(h), s.q_c(h), s.q_rope(h), this->kv_nope_cache(), this->kv_rope_cache(), c.head_dim, c.kv_lora_rank, c.qk_rope_head_dim, kv_len ); } } // Uncompress latent kvs output by each attention head, storing result in `kv_b`. // We reuse kv_b buffer here for the uncompressed value outputs. for (int h = 0; h < c.n_heads; h++) { float* v_b_head = s.kv_b() + h * c.v_head_dim; PROFILE(matmul_expert(v_b_head, s.xb2(h, c.kv_lora_rank), *wv_b(), h, c.block_size.data(), _sv_b, s.aqb())); } // final matmul to get output of the attention, place result in s.hb() for residual connection PROFILE(matmul(s.hb(), s.kv_b(), *wo(), c.block_size.data(), _so, s.aqb())); } void mha_cpu( float* xout, // (n_heads, head_dim) float* att, // (n_heads, max_seq_len) f16_t* kb, // (max_seq_len, n_heads, head_dim) f16_t* vb, // (max_seq_len, n_heads, head_dim) float* q, // (n_heads, head_dim) int head_dim, int v_head_dim, int kv_len, int max_seq_len, int n_heads ) { // Multihead attention. Iterate over all heads. 
int h; #pragma omp parallel for private(h) for (h = 0; h < n_heads; h++) { int k_head_offset = h * head_dim; int v_head_offset = h * v_head_dim; f16_t* kh = kb + k_head_offset; f16_t* vh = vb + v_head_offset; attn( xout + head_dim * h, att + max_seq_len * h, q + head_dim * h, kh, vh, head_dim, v_head_dim, n_heads, kv_len ); } } void ffn_cpu( float* xout, float* x, float* w1, float* w2, float* w3, int hidden_dim, int dim, ActivationType act ) { float* hb = new float[hidden_dim]; float* hb2 = new float[hidden_dim]; // mix self.w2(F.silu(self.w1(x)) * self.w3(x)) // Note this is a feedforward with a GLU, not a simple MLP. matmul_unscaled(hb, x, QTensor(Quant::F32, {dim, hidden_dim}, w1, dim*hidden_dim*sizeof(float))); matmul_unscaled(hb2, x, QTensor(Quant::F32, {dim, hidden_dim}, w3, dim*hidden_dim*sizeof(float))); switch (act) { case ActivationType::GELU: { for (int i = 0; i < hidden_dim; ++i) { hb[i] = gelu(hb[i]) * hb2[i]; } break; } case ActivationType::SILU: { for (int i = 0; i < hidden_dim; ++i) { hb[i] = silu(hb[i]) * hb2[i]; } break; } } matmul_unscaled(xout, hb, QTensor(Quant::F32, {hidden_dim, dim}, w2, hidden_dim*dim*sizeof(float))); delete[] hb; delete[] hb2; } template void Block::_block_cpu(InferenceState&, int, int, int, int) const; template void Block::_block_cpu(InferenceState&, int, int, int, int) const; template void Block::_block_cpu(InferenceState&, int, int, int, int) const; template void Block::_block_cpu(InferenceState&, int, int, int, int) const; template void Block::_block_cpu(InferenceState&, int, int, int, int) const; template void BlockMHA::_attention_impl(InferenceState&, int, int, int, int) const; template void BlockMHA::_attention_impl(InferenceState&, int, int, int, int) const; template void BlockMHA::_attention_impl(InferenceState&, int, int, int, int) const; template void BlockMHA::_attention_impl(InferenceState&, int, int, int, int) const; template void BlockMHA::_attention_impl(InferenceState&, int, int, int, int) const; template 
void BlockMLA::_attention_impl(InferenceState&, int, int, int, int) const;
template void BlockMLA::_attention_impl(InferenceState&, int, int, int, int) const;
template void BlockMLA::_attention_impl(InferenceState&, int, int, int, int) const;
template void BlockMLA::_attention_impl(InferenceState&, int, int, int, int) const;
template void BlockMLA::_attention_impl(InferenceState&, int, int, int, int) const;

// Copies (dequantizing if needed) the embedding row for `token` from the
// token embedding table into the activation buffer s.x().
// NOTE(review): static_cast target types and template-argument lists appear
// to have been stripped from this snapshot of the file — confirm against the
// repository before relying on exact types here.
void Model::_copy_embedding(InferenceState& s, int token) {
  const Config& c = *config;
  switch (c.weight_quant) {
    case Quant::F32: {
      // fp32 table: straight copy of row `token` (c.dim floats).
      float* emb = static_cast(token_embedding_table->data);
      for (int i = 0; i < c.dim; ++i) {
        s.x()[i] = emb[token * c.dim + i];
      }
      break;
    }
    case Quant::F16: {
      // fp16 table: widen each element to float.
      f16_t* emb = static_cast(token_embedding_table->data);
      for (int i = 0; i < c.dim; i+=1) {
        s.x()[i] = half_to_float(emb[token * c.dim + i]);
      }
      break;
    }
    case Quant::F8E5M2: {
      // fp8 table with per-block fp32 scales; the scale grid has
      // ceil(dim / block_size[1]) columns per row of blocks.
      f8e5m2_t* emb = static_cast(token_embedding_table->data);
      float* emb_scale = static_cast(token_embedding_scale->data);
      int* block_size = config->block_size.data();
      int scale_num_cols = (c.dim + block_size[1] - 1) / block_size[1];
      for (int i = 0; i < c.dim; i+=1) {
        // Locate the (row-block, col-block) this element falls into, then
        // apply that block's scale after converting from fp8.
        int scale_i = token / block_size[0];
        int scale_j = i / block_size[1];
        float scale = emb_scale[scale_i * scale_num_cols + scale_j];
        s.x()[i] = float8e5m2_to_float(emb[token * c.dim + i]) * scale;
      }
      break;
    }
    case Quant::Q2_K: {
      // K-quant table: dequantize the whole row (c.dim elements) at once.
      block_q2_K* emb = static_cast(token_embedding_table->data);
      int blocks_per_row = c.dim / QK_K;
      dequantize_row_q2_K(emb + token * blocks_per_row, s.x(), c.dim);
      break;
    }
    case Quant::Q3_K: {
      block_q3_K* emb = static_cast(token_embedding_table->data);
      int blocks_per_row = c.dim / QK_K;
      dequantize_row_q3_K(emb + token * blocks_per_row, s.x(), c.dim);
      break;
    }
    default: {
      // Unreachable for configs accepted by Config::from_yalm.
      assert(false && "unsupported weight quantization");
    }
  }
}

// Full CPU forward pass for one token at position `pos`; `mode` controls
// whether output logits are computed or only the KV cache is hydrated.
void Model::_forward_cpu(InferenceState& s, int token, int pos, InferenceMode mode) {
  const Config& c = *config;

  // copy the token embedding into `x`
  PROFILE(_copy_embedding(s, token));

  // When decoding past the
context length, keep the first few tokens in the KV cache // untouched as "attention sinks" while replacing the rest in ring order. // See StreamingLLM (https://arxiv.org/pdf/2309.17453) for more. int original_max_position = c.rs_original_max_position_embeddings; int kv_sink = pos >= original_max_position ? KV_SINKS : 0; int kv_pos = kv_sink + (pos - kv_sink) % (original_max_position - kv_sink); int kv_len = pos >= original_max_position ? original_max_position : pos + 1; // forward all layers in order for (auto b : blocks) { b->block(s, pos, kv_sink, kv_pos, kv_len); } if (mode == InferenceMode::HYDRATE_KV_CACHE) { // only hydrate the KV cache and don't compute output logits return; } // final layer norm switch (c.norm_type) { case LayerNormType::RMSNorm: { rmsnorm(s.x(), s.x(), static_cast(rms_final_weight->data), c.dim, c.norm_eps); break; } } // classifier into logits { PROFILE_BLOCK(lm_head); switch (c.weight_quant) { case Quant::F32: case Quant::F16: { matmul_unscaled(s.logits(), s.x(), *wcls); break; } case Quant::F8E5M2: case Quant::Q2_K: case Quant::Q3_K: { matmul(s.logits(), s.x(), *wcls, c.block_size.data(), scls, s.aqb()); break; } default: { assert(false && "unsupported weight quantization"); } } } } ================================================ FILE: src/main.cpp ================================================ #include #include #include #include #include #include #include #include "fmt/format.h" #include "codec.h" #include "model.h" #include "profile.h" #include "sampler.h" #include "time_utils.h" #include "tokenizer.h" void error_usage() { fprintf(stderr, "Usage: main [options]\n"); fprintf(stderr, "Example: main model_weights_dir/ -i \"Q: What is the meaning of life?\"\n"); fprintf(stderr, "Options:\n"); fprintf(stderr, " -h Display this help message\n"); fprintf(stderr, " -L Locks model weights to RAM, disabling swap. 
Requires sudo.\n"); fprintf(stderr, " -m [completion,passkey,perplexity,interactive] which mode to run in (default - completion)\n"); fprintf(stderr, " -T sliding window context length (0 - max)\n"); fprintf(stderr, "\n"); fprintf(stderr, "Perplexity mode options:\n"); fprintf(stderr, " Choose one:\n"); fprintf(stderr, " -i input prompt\n"); fprintf(stderr, " -f input file with prompt\n"); fprintf(stderr, " -w use wikitext as input\n"); fprintf(stderr, "Completion mode options:\n"); fprintf(stderr, " -n number of steps to run for in completion mode, default 256. 0 = max_seq_len, -1 = infinite\n"); fprintf(stderr, " Choose one:\n"); fprintf(stderr, " -i input prompt\n"); fprintf(stderr, " -t temperature (default - 1.0)\n"); fprintf(stderr, " -p p for top-p sampling (default - 0.95)\n"); fprintf(stderr, " -f input file with prompt\n"); fprintf(stderr, "Passkey mode options:\n"); fprintf(stderr, " -n number of junk lines to insert (default - 250)\n"); fprintf(stderr, " -l passkey position (-1 - random)\n"); exit(1); } void help_usage_interactive() { fprintf(stderr, "Usage: [options]\n"); fprintf(stderr, "Example: c -i \"Q: What is the meaning of life?\"\n"); fprintf(stderr, "Modes:\n"); fprintf(stderr, " h Display this help message\n"); fprintf(stderr, " c Completion - complete a single prompt \n"); fprintf(stderr, " p Perplexity - compute perplexity of a single prompt \n"); fprintf(stderr, " k Passkey - test passkey extraction \n"); fprintf(stderr, "\n"); fprintf(stderr, "Perplexity mode options:\n"); fprintf(stderr, " Choose one:\n"); fprintf(stderr, " -i input prompt\n"); fprintf(stderr, " -f input file with prompt\n"); fprintf(stderr, " -w use wikitext as input\n"); fprintf(stderr, "Completion mode options:\n"); fprintf(stderr, " -n number of steps to run for in completion mode, default 256. 
0 = max_seq_len, -1 = infinite\n"); fprintf(stderr, " Choose one:\n"); fprintf(stderr, " -i input prompt\n"); fprintf(stderr, " -t temperature (default - 1.0)\n"); fprintf(stderr, " -p p for top-p sampling (default - 0.95)\n"); fprintf(stderr, " -f input file with prompt\n"); fprintf(stderr, "Passkey mode options:\n"); fprintf(stderr, " -n number of junk lines to insert (default - 250)\n"); fprintf(stderr, " -l passkey position (-1 - random)\n"); } struct Session { Session(const std::string& checkpoint_dir, bool lock_model_weights, int context, uint64_t sampler_seed): model_data(checkpoint_dir, lock_model_weights), model(model_data, context), state(model.config), sampler(model.config, sampler_seed), tokenizer(model_data) {} YALMData model_data; Model model; InferenceState state; Sampler sampler; Tokenizer tokenizer; }; struct CompletionArgs { std::string prompt; int num_steps; float temperature = 1.0; float top_p = 0.95; // Returns true if args are valid, false otherwise bool parse_args(const std::vector& args) { std::string prompt_path = ""; for (size_t i = 0; i < args.size();) { // do some basic validation if (args[i][0] != '-') { return false; } // must start with dash if (strlen(args[i]) != 2) { return false; } // must be -x (one dash, one letter) // read in the args if (args[i][1] == 'h') { return false; } else if (args[i][1] == 'i') { if (i + 1 >= args.size()) { return false; } prompt = args[i + 1]; i += 2; } else if (args[i][1] == 't') { if (i + 1 >= args.size()) { return false; } temperature = std::stof(args[i + 1]); i += 2; } else if (args[i][1] == 'p') { if (i + 1 >= args.size()) { return false; } top_p = std::stof(args[i + 1]); i += 2; } else if (args[i][1] == 'f') { if (i + 1 >= args.size()) { return false; } prompt_path = args[i + 1]; i += 2; } else if (args[i][1] == 'n') { if (i + 1 >= args.size()) { return false; } num_steps = std::stoi(args[i + 1]); i += 2; } else { return false; } } int has_prompt = prompt.size() > 0 ? 
1 : 0; int has_prompt_path = prompt_path.size() > 0 ? 1 : 0; if ((has_prompt + has_prompt_path) != 1) { return false; } else if (has_prompt_path) { std::ifstream file(prompt_path); if (!file.is_open()) { std::cerr << "Error: could not open file " << prompt_path << std::endl; return false; } std::stringstream buffer; buffer << file.rdbuf(); prompt = buffer.str(); } return true; } }; struct PasskeyArgs { int n_junk; int passkey_pos; // Returns true if args are valid, false otherwise bool parse_args(const std::vector& args) { for (size_t i = 2; i < args.size();) { // do some basic validation if (args[i][0] != '-') { return false; } // must start with dash if (strlen(args[i]) != 2) { return false; } // must be -x (one dash, one letter) // read in the args if (args[i][1] == 'h') { return false; } else if (args[i][1] == 'l') { if (i + 1 >= args.size()) { return false; } passkey_pos = std::stoi(args[i + 1]); i += 2; } else if (args[i][1] == 'n') { if (i + 1 >= args.size()) { return false; } n_junk = std::stoi(args[i + 1]); i += 2; } else { return false; } } if (passkey_pos != -1 && (passkey_pos >= n_junk || passkey_pos < 0)) { std::cerr << "Error: passkey position must be between 0 and " << n_junk - 1 << std::endl; return false; } return true; } }; struct PerplexityArgs { std::string prompt; bool use_wikitext = false; // Returns true if args are valid, false otherwise bool parse_args(const std::vector& args) { std::string prompt_path = ""; for (size_t i = 0; i < args.size();) { // do some basic validation if (args[i][0] != '-') { return false; } // must start with dash if (strlen(args[i]) != 2) { return false; } // must be -x (one dash, one letter) // read in the args if (args[i][1] == 'h') { return false; } else if (args[i][1] == 'i') { if (i + 1 >= args.size()) { return false; } prompt = args[i + 1]; i += 2; } else if (args[i][1] == 'f') { if (i + 1 >= args.size()) { return false; } prompt_path = args[i + 1]; i += 2; } else if (args[i][1] == 'w') { use_wikitext = true; 
i += 1; } else { return false; } } int has_prompt = prompt.size() > 0 ? 1 : 0; int has_prompt_path = prompt_path.size() > 0 ? 1 : 0; int has_wikitext = use_wikitext ? 1 : 0; if ((has_prompt + has_prompt_path + has_wikitext) != 1) { std::cerr << "Error: must provide exactly one nonempty -i or -f or -w" << std::endl; return false; } else if (has_prompt_path) { std::ifstream file(prompt_path); if (!file.is_open()) { std::cerr << "Error: could not open file " << prompt_path << std::endl; return false; } std::stringstream buffer; buffer << file.rdbuf(); prompt = buffer.str(); } return true; } }; std::vector encode_prompt(const std::string& prompt, Tokenizer& tokenizer) { std::vector encoding; { uint64_t encode_start_ms = get_timestamp_ms(); encoding = tokenizer.encode(prompt, true); uint64_t encode_end_ms = get_timestamp_ms(); std::cout << tokenizer.encoding_to_debug_string(encoding) << std::endl; uint64_t encoding_ms = encode_end_ms - encode_start_ms; std::cout << fmt::format( "Encoding stats: ({} tokens, throughput: {:.5}tok/s, latency: {:.5}s/tok, total: {:.5}s)\n", encoding.size(), encoding.size() / (encoding_ms / 1000.0), (encoding_ms / 1000.0) / encoding.size(), encoding_ms / 1000.0 ) << std::endl; } return encoding; } void run_completion( Session& session, const std::string& prompt, int num_steps, float temperature, float top_p ) { Model& model = session.model; InferenceState& state = session.state; Sampler& sampler = session.sampler; Tokenizer& tokenizer = session.tokenizer; std::cout << "Model active bytes with full context window: " << model.active_bytes(model.config->max_seq_len) << std::endl; std::cout << "Model active bytes with no context: " << model.active_bytes(0) << std::endl; if (num_steps == 0) { // `-n 0` means use the full context length num_steps = model.config->max_seq_len; } { ProfileDisabledScope profile_disabled; std::cout << "Running warmup..." << std::endl; // Do one inference as warmup. 
// On CPU, this ensures all tensors are loaded into memory via mmap. model.forward(state, 0, 0); std::cout << "Warmup complete" << std::endl; } std::vector encoding = encode_prompt(prompt, tokenizer); uint64_t start_ms = get_timestamp_ms(); size_t read_bytes = 0; // Hydrate KV cache by forwarding model on all prompt tokens and discarding output. // This also generates output logits for the last token. for (size_t pos = 0; pos < encoding.size(); pos++) { ProfileScope scope(fmt::format("fwd_pos_{:03d}_hydrate", pos)); int token_id = encoding[pos]; InferenceMode inferMode = pos + 1 == encoding.size() ? InferenceMode::OUTPUT_LOGITS : InferenceMode::HYDRATE_KV_CACHE; model.forward(state, token_id, pos, inferMode); read_bytes += model.active_bytes(pos); } uint64_t end_hydrate_ms = get_timestamp_ms(); // For N steps: // - Sample + decode output logits // - Forward the model for (int i = 0; i < num_steps || num_steps == -1; i++) { int token_id = sampler.sample(state, temperature, top_p); std::string token_str = tokenizer.decode_one(encoding.back(), token_id); std::cout << token_str << std::flush; encoding.push_back(token_id); if (token_id == tokenizer.eos_id || token_id == tokenizer.eot_id) { break; } ProfileScope scope(fmt::format("fwd_pos_{:03d}_decode", encoding.size() - 1)); model.forward(state, token_id, encoding.size() - 1); read_bytes += model.active_bytes(encoding.size() - 1); } std::cout << "\n" << std::endl; uint64_t end_ms = get_timestamp_ms(); double elapsed_s = (end_ms - start_ms) / 1000.0; std::cout << fmt::format( "Generation stats:\n" " {} tokens\n" " throughput: {:.5}tok/s\n" " latency: {:.5}s/tok\n" " hydrate: {:.5}s\n" " bandwidth: {:.5}GB/s\n" " total: {:.5}s\n", encoding.size(), encoding.size() / elapsed_s, elapsed_s / encoding.size(), (end_hydrate_ms - start_ms) / 1000.0, ((double)read_bytes / 1e9) / elapsed_s, elapsed_s ) << std::endl; #if PROFILE_ENABLED std::cout << "Profile total times (sec): " << std::endl; for (const auto& [key, value] : 
profile_times()) { std::cout << key << ": " << value << std::endl; } #endif } std::vector V2_ENCODED_WIKITEXT = { #include "wikitest.cat.1chunk.v2-encoded.txt" }; std::vector V3_ENCODED_WIKITEXT = { #include "wikitest.cat.1chunk.v3-encoded.txt" }; void run_perplexity( Session& session, const std::vector& encoding ) { Model& model = session.model; InferenceState& state = session.state; Sampler& sampler = session.sampler; std::cout << "Model active bytes with full context window: " << model.active_bytes(model.config->max_seq_len) << std::endl; { ProfileDisabledScope profile_disabled; std::cout << "Running warmup..." << std::endl; // Do one inference as warmup. // On CPU, this ensures all tensors are loaded into memory via mmap. model.forward(state, 0, 0); std::cout << "Warmup complete" << std::endl; } double sum_logprob = 0.0; double ss_logprob = 0.0; // Generates output logits for all tokens in the prompt and sum log probs to // compute perplexity. uint64_t start_ms = get_timestamp_ms(); size_t read_bytes = 0; size_t N = encoding.size() - 1; for (size_t pos = 0; pos + 1 < encoding.size(); pos++) { std::cout << "\r Computing perplexity..." 
<< pos + 1 << "/" << N << std::flush; int token_id = encoding[pos]; model.forward(state, token_id, pos); read_bytes += model.active_bytes(pos); double logprob = std::log(sampler.sample_prob(encoding[pos + 1], state)); sum_logprob += logprob; ss_logprob += logprob * logprob; } std::cout << std::endl; uint64_t end_ms = get_timestamp_ms(); double elapsed_s = (end_ms - start_ms)/1000.0; double perplexity = std::exp(-sum_logprob / N); double perplexity_error = perplexity * std::sqrt( (ss_logprob - sum_logprob * sum_logprob / N) / N / N ); std::cout << fmt::format( "Stats:\n" " {} tokens\n" " perplexity: {:.5} ± {:.5}\n" " throughput: {:.5}tok/s\n" " latency: {:.5}s/tok\n" " bandwidth: {:.5}GB/s\n" " total: {:.5}s\n", N, perplexity, perplexity_error, N / elapsed_s, elapsed_s / N, ((double)read_bytes / 1e9) / elapsed_s, elapsed_s ) << std::endl; } void run_passkey( Session& session, const int n_junk, const int passkey_pos ) { Model& model = session.model; InferenceState& state = session.state; Sampler& sampler = session.sampler; Tokenizer& tokenizer = session.tokenizer; std::cout << "Model active bytes with full context window: " << model.active_bytes(model.config->max_seq_len) << std::endl; const std::string PROMPT_PREFIX = "There is an important info hidden inside a lot of irrelevant text. " "Find it and memorize them. I will quiz you about the important information there."; const std::string PROMPT_SUFFIX = " What is the pass key? The pass key is"; const int passkey = std::rand() % 50000 + 1; const int pos = passkey_pos == -1 ? std::rand() % n_junk : passkey_pos; std::string prompt = PROMPT_PREFIX; for (int i = 0; i < n_junk; i++) { if (i % n_junk == pos) { prompt += " The pass key is " + std::to_string(passkey) + ". Remember it. " + std::to_string(passkey) + " is the pass key."; } prompt += " The grass is green. The sky is blue. The sun is yellow. Here we go. 
There and back again."; } prompt += PROMPT_SUFFIX; std::vector encoding; { uint64_t encode_start_ms = get_timestamp_ms(); encoding = tokenizer.encode(prompt, true); uint64_t encode_end_ms = get_timestamp_ms(); uint64_t encoding_ms = encode_end_ms - encode_start_ms; std::cout << fmt::format( "Encoding stats: ({} tokens, throughput: {:.5}tok/s, latency: {:.5}s/tok, total: {:.5}s)\n", encoding.size(), encoding.size() / (encoding_ms / 1000.0), (encoding_ms / 1000.0) / encoding.size(), encoding_ms / 1000.0 ) << std::endl; } // Allow max 16 steps to generate passkey const size_t MAX_GENERATION_STEPS = 16; std::cout << fmt::format( "Passkey test:\n" " prompt: {} tokens\n" " passkey: {}\n" " passkey token index: ~{}\n", encoding.size(), passkey, (int)(((float)pos) / n_junk * encoding.size()) ) << std::endl; size_t N = encoding.size(); for (size_t pos = 0; pos < N; pos++) { std::cout << "\r Running passkey test..." << pos + 1 << "/" << N << std::flush; int token_id = encoding[pos]; InferenceMode inferMode = pos + 1 == N ? 
InferenceMode::OUTPUT_LOGITS : InferenceMode::HYDRATE_KV_CACHE; model.forward(state, token_id, pos, inferMode); } std::cout << std::endl; std::cout << PROMPT_SUFFIX << std::flush; for (size_t pos = N; pos < N + MAX_GENERATION_STEPS; pos++) { int token_id = sampler.sample(state); std::string token_str = tokenizer.decode_one(encoding.back(), token_id); std::cout << token_str << std::flush; encoding.push_back(token_id); if (token_id == tokenizer.eos_id || token_id == tokenizer.eot_id) { break; } model.forward(state, token_id, pos); } std::cout << std::endl; } void run_interactive(Session& session) { std::string input = ""; while (true) { std::cout << "> " << std::flush; std::getline(std::cin, input); if (input == "exit") { break; } // Split string by space std::vector arg_strs; std::stringstream ss(input); std::string arg; while (ss >> arg) { if (arg_strs.size() > 0 && arg_strs[arg_strs.size() - 1].starts_with("\"") && !arg_strs[arg_strs.size() - 1].ends_with("\"")) { // Double quotes enclose strings that can contain spaces arg_strs[arg_strs.size() - 1] += " " + arg; if (arg.ends_with("\"")) { // Remove the double quotes arg_strs[arg_strs.size() - 1] = arg_strs[arg_strs.size() - 1].substr(1, arg_strs[arg_strs.size() - 1].size() - 2); } } else { arg_strs.push_back(arg); } } if (arg_strs.size() == 0) { help_usage_interactive(); continue; } std::string mode = arg_strs[0]; if (std::string("completion").starts_with(mode)) { mode = "completion"; } else if (std::string("passkey").starts_with(mode) && mode != "p") { mode = "passkey"; } else if (std::string("perplexity").starts_with(mode) && mode != "p") { mode = "perplexity"; } else if (std::string("interactive").starts_with(mode)) { mode = "interactive"; } else { help_usage_interactive(); continue; } std::vector args; for (size_t i = 1; i < arg_strs.size(); i++) { args.push_back(arg_strs[i].c_str()); } if (mode == "completion") { CompletionArgs completion_args; if (!completion_args.parse_args(args)) { 
help_usage_interactive(); continue; } run_completion(session, completion_args.prompt, completion_args.num_steps, completion_args.temperature, completion_args.top_p); } else if (mode == "passkey") { PasskeyArgs passkey_args; if (!passkey_args.parse_args(args)) { help_usage_interactive(); continue; } run_passkey(session, passkey_args.n_junk, passkey_args.passkey_pos); } else if (mode == "perplexity") { PerplexityArgs perplexity_args; if (!perplexity_args.parse_args(args)) { help_usage_interactive(); continue; } std::vector encoding; if (perplexity_args.use_wikitext) { if (session.model_data.metadata.at("arch").get() == "DeepseekV3ForCausalLM") { encoding = V3_ENCODED_WIKITEXT; } else { encoding = V2_ENCODED_WIKITEXT; } } else { encoding = encode_prompt(perplexity_args.prompt, session.tokenizer); } run_perplexity(session, encoding); } } } int main(int argc, char* argv[]) { std::vector args(argv, argv + argc); std::vector next_args; std::string checkpoint_dir = ""; // e.g. out/model.bin // Options std::string mode = "completion"; // completion, passkey, perplexity, or interactive int context = 0; bool lock_model_weights = false; if (args.size() >= 2) { checkpoint_dir = args[1]; } else { error_usage(); } // read in session args first, put everything else in next_args for (size_t i = 2; i < args.size();) { if (args[i][0] == '-' && strlen(args[i]) == 2) { if (args[i][1] == 'h') { error_usage(); } else if (args[i][1] == 'L') { lock_model_weights = true; i += 1; } else if (args[i][1] == 'm') { if (i + 1 >= args.size()) { error_usage(); } mode = args[i + 1]; if (std::string("completion").starts_with(mode)) { mode = "completion"; } else if (std::string("passkey").starts_with(mode) && mode != "p") { mode = "passkey"; } else if (std::string("perplexity").starts_with(mode) && mode != "p") { mode = "perplexity"; } else if (std::string("interactive").starts_with(mode)) { mode = "interactive"; } else { error_usage(); } i += 2; } else if (args[i][1] == 'T') { if (i + 1 >= 
args.size()) { error_usage(); } context = std::stoi(args[i + 1]); i += 2; } else { next_args.push_back(args[i]); i += 1; } } else { next_args.push_back(args[i]); i += 1; } } if (mode == "completion") { CompletionArgs completion_args; if (!completion_args.parse_args(next_args)) { error_usage(); } Session session(checkpoint_dir, lock_model_weights, context, get_timestamp_ms()); run_completion(session, completion_args.prompt, completion_args.num_steps, completion_args.temperature, completion_args.top_p); } else if (mode == "passkey") { PasskeyArgs passkey_args; if (!passkey_args.parse_args(next_args)) { error_usage(); } Session session(checkpoint_dir, lock_model_weights, context, get_timestamp_ms()); run_passkey(session, passkey_args.n_junk, passkey_args.passkey_pos); } else if (mode == "perplexity") { PerplexityArgs perplexity_args; if (!perplexity_args.parse_args(next_args)) { error_usage(); } Session session(checkpoint_dir, lock_model_weights, context, get_timestamp_ms()); std::vector encoding; if (perplexity_args.use_wikitext) { if (session.model_data.metadata.at("arch").get() == "DeepseekV3ForCausalLM") { encoding = V3_ENCODED_WIKITEXT; } else { encoding = V2_ENCODED_WIKITEXT; } } else { encoding = encode_prompt(perplexity_args.prompt, session.tokenizer); } run_perplexity(session, encoding); } else if (mode == "interactive") { if (next_args.size() != 0) { error_usage(); } Session session(checkpoint_dir, lock_model_weights, context, get_timestamp_ms()); run_interactive(session); } return 0; } ================================================ FILE: src/model.cpp ================================================ #include "model.h" #include "json.hpp" #include #include #include #include "fmt/format.h" #include #include #include #include "immintrin.h" #include "quant.h" using json = nlohmann::json; int cdiv(int a, int b) { return (a + b - 1) / b; } void Config::from_yalm(YALMData& yalm, int context) { dim = std::stoi(yalm.metadata.at("dim").get()); hidden_dim = 
std::stoi(yalm.metadata.at("hidden_dim").get()); n_layers = std::stoi(yalm.metadata.at("n_layers").get()); n_heads = std::stoi(yalm.metadata.at("n_heads").get()); vocab_size = std::stoi(yalm.metadata.at("vocab_size").get()); // mixture of experts n_shared_experts = yalm.metadata.contains("n_shared_experts") ? std::stoi(yalm.metadata.at("n_shared_experts").get()) : 0; n_routed_experts = yalm.metadata.contains("n_routed_experts") ? std::stoi(yalm.metadata.at("n_routed_experts").get()) : 0; n_active_routed = yalm.metadata.contains("n_active_routed") ? std::stoi(yalm.metadata.at("n_active_routed").get()) : 0; moe_intermediate_size = yalm.metadata.contains("moe_intermediate_size") ? std::stoi(yalm.metadata.at("moe_intermediate_size").get()) : 0; routed_scaling_factor = yalm.metadata.contains("routed_scaling_factor") ? std::stof(yalm.metadata.at("routed_scaling_factor").get()) : 1.0; n_group = yalm.metadata.contains("n_group") ? std::stoi(yalm.metadata.at("n_group").get()) : 1; norm_topk_prob = yalm.metadata.contains("norm_topk_prob") ? yalm.metadata.at("norm_topk_prob").get() == "True" : false; std::string scoring_func_str = yalm.metadata.value("scoring_func", "softmax"); if (scoring_func_str == "softmax") { scoring_func = ScoringFunc::SOFTMAX; } else if (scoring_func_str == "sigmoid") { scoring_func = ScoringFunc::SIGMOID; } else { std::cerr << "unsupported scoring_func '" << scoring_func_str << "', defaulting to softmax" << std::endl; scoring_func = ScoringFunc::SOFTMAX; } topk_group = yalm.metadata.contains("topk_group") ? 
std::stoi(yalm.metadata.at("topk_group").get()) : 0; std::string topk_method_str = yalm.metadata.value("topk_method", ""); if (topk_method_str == "greedy") { topk_method = TopKMethod::GREEDY; } else if (topk_method_str == "group_limited_greedy") { topk_method = TopKMethod::GROUP_LIMITED_GREEDY; } else if (topk_method_str == "noaux_tc") { topk_method = TopKMethod::NOAUX_TC; assert(false && "TODO: support for Deepseek v3"); } else { std::cerr << "unsupported topk_method '" << topk_method_str << "', defaulting to greedy" << std::endl; topk_method = TopKMethod::GREEDY; } has_moegate_bias = yalm.metadata.at("arch").get() == "DeepseekV3ForCausalLM"; // multi-latent attention use_mla = yalm.metadata.contains("use_mla") ? static_cast(std::stoi(yalm.metadata.at("use_mla").get())) : false; kv_lora_rank = yalm.metadata.contains("kv_lora_rank") ? std::stoi(yalm.metadata.at("kv_lora_rank").get()) : 0; q_lora_rank = yalm.metadata.contains("q_lora_rank") ? std::stoi(yalm.metadata.at("q_lora_rank").get()) : 0; qk_nope_head_dim = yalm.metadata.contains("qk_nope_head_dim") ? std::stoi(yalm.metadata.at("qk_nope_head_dim").get()) : 0; qk_rope_head_dim = yalm.metadata.contains("qk_rope_head_dim") ? std::stoi(yalm.metadata.at("qk_rope_head_dim").get()) : 0; v_head_dim = yalm.metadata.contains("v_head_dim") ? 
std::stoi(yalm.metadata.at("v_head_dim").get()) : 0; head_dim = qk_nope_head_dim + qk_rope_head_dim; max_seq_len = std::stoi(yalm.metadata.at("max_seq_len").get()); if (context) { max_seq_len = std::min(max_seq_len, context); } rope_theta = std::stof(yalm.metadata.at("rope_theta").get()); norm_eps = std::stof(yalm.metadata.value("norm_eps", "1e-5")); std::string act_str = yalm.metadata.value("act_type", "gelu"); if (act_str == "gelu") { act = ActivationType::GELU; } else if (act_str == "silu") { act = ActivationType::SILU; } else { std::cerr << "unsupported act_type, defaulting to gelu" << std::endl; act = ActivationType::GELU; } std::string norm_type_str = yalm.metadata.value("norm_type", "rmsnorm"); if (norm_type_str == "rmsnorm") { norm_type = LayerNormType::RMSNorm; } else { std::cerr << "unsupported norm_type, defaulting to rmsnorm" << std::endl; norm_type = LayerNormType::RMSNorm; } first_k_dense_replace = yalm.metadata.contains("first_k_dense_replace") ? std::stoi(yalm.metadata.at("first_k_dense_replace").get()) : 0; std::string quant = yalm.metadata.at("quant").get(); if (quant == "fp32") { weight_quant = Quant::F32; } else if (quant == "fp16") { weight_quant = Quant::F16; } else if (quant == "f8e5m2") { weight_quant = Quant::F8E5M2; } else if (quant == "q2_k") { weight_quant = Quant::Q2_K; } else if (quant == "q3_k") { weight_quant = Quant::Q3_K; } else { std::cerr << "FATAL: unsupported quant: " << quant << std::endl; assert(false); } // quantization if (yalm.metadata.contains("quantization_block_size_0")) { block_size[0] = std::stoi(yalm.metadata.at("quantization_block_size_0").get()); block_size[1] = std::stoi(yalm.metadata.at("quantization_block_size_1").get()); } // RoPE scaling rs_beta_fast = std::stoi(yalm.metadata.at("rope_scaling_beta_fast").get()); rs_beta_slow = std::stoi(yalm.metadata.at("rope_scaling_beta_slow").get()); rs_factor = std::stof(yalm.metadata.at("rope_scaling_factor").get()); rs_mscale = 
std::stof(yalm.metadata.at("rope_scaling_mscale").get()); rs_mscale_all_dim = std::stof(yalm.metadata.at("rope_scaling_mscale_all_dim").get()); rs_original_max_position_embeddings = std::stoi(yalm.metadata.at("rope_scaling_original_max_position_embeddings").get()); } std::optional check_tensor(const Tensor* tensor, Quant weight_quant, std::array shape, const int debug_line) { if (tensor == nullptr) { std::cerr << "FATAL: missing tensor at line " << debug_line << std::endl; assert(false); return std::nullopt; } return QTensor::from_codec_tensor(*tensor, weight_quant, shape, debug_line); }; const Tensor* get_tensor(const YALMData& yalm, const std::string& key) { auto it = yalm.tensors.find(key); if (it == yalm.tensors.end()) { std::cerr << "FATAL: missing tensor: " << key << std::endl; assert(false); return nullptr; } const Tensor& tensor = it->second; return &tensor; }; Block::Block( int layer_i, const std::shared_ptr config, const Tensor* rms_att_weight, const Tensor* rms_ffn_weight, const Tensor* w1, const Tensor* s1, const Tensor* w2, const Tensor* s2, const Tensor* w3, const Tensor* s3, const Tensor* shared_w1, const Tensor* shared_s1, const Tensor* shared_w2, const Tensor* shared_s2, const Tensor* shared_w3, const Tensor* shared_s3, const Tensor* moegate, const Tensor* moegate_bias ) : _layer_i(layer_i), _config(config) { switch (config->weight_quant) { case Quant::F32: case Quant::F16: case Quant::F8E5M2: case Quant::Q2_K: case Quant::Q3_K: { break; } default: { std::cerr << "FATAL: unsupported weight quantization: " << quant_to_string(config->weight_quant) << std::endl; assert(false); break; } } _rms_att_weight = check_tensor( rms_att_weight, Quant::F32, {config->dim, 0, 0, 0}, __LINE__ ); _rms_ffn_weight = check_tensor( rms_ffn_weight, Quant::F32, {config->dim, 0, 0, 0}, __LINE__ ); bool need_block_scales = _config->weight_quant == Quant::F8E5M2; int b0 = config->block_size[0]; int b1 = config->block_size[1]; if (config->n_routed_experts > 0 && layer_i >= 
config->first_k_dense_replace) { _moegate = check_tensor( moegate, Quant::F32, {config->n_routed_experts, config->dim, 0, 0}, __LINE__ ); if (moegate_bias != nullptr) { _moegate_bias = check_tensor( moegate_bias, Quant::F32, {config->n_routed_experts, 0, 0, 0}, __LINE__ ); } _w1 = check_tensor( w1, config->weight_quant, {config->n_routed_experts, config->moe_intermediate_size, config->dim, 0}, __LINE__ ); _w2 = check_tensor( w2, config->weight_quant, {config->n_routed_experts, config->dim, config->moe_intermediate_size, 0}, __LINE__ ); _w3 = check_tensor( w3, config->weight_quant, {config->n_routed_experts, config->moe_intermediate_size, config->dim, 0}, __LINE__ ); if (need_block_scales) { _s1 = check_tensor( s1, Quant::F32, {config->n_routed_experts, cdiv(config->moe_intermediate_size, b0), cdiv(config->dim, b1), 0}, __LINE__ ); _s2 = check_tensor( s2, Quant::F32, {config->n_routed_experts, cdiv(config->dim, b0), cdiv(config->moe_intermediate_size, b1), 0}, __LINE__ ); _s3 = check_tensor( s3, Quant::F32, {config->n_routed_experts, cdiv(config->moe_intermediate_size, b0), cdiv(config->dim, b1), 0}, __LINE__ ); } if (config->n_shared_experts > 0) { _shared_w1 = check_tensor( shared_w1, config->weight_quant, {config->n_shared_experts * config->moe_intermediate_size, config->dim, 0}, __LINE__ ); _shared_w2 = check_tensor( shared_w2, config->weight_quant, {config->dim, config->n_shared_experts * config->moe_intermediate_size, 0}, __LINE__ ); _shared_w3 = check_tensor( shared_w3, config->weight_quant, {config->n_shared_experts * config->moe_intermediate_size, config->dim, 0}, __LINE__ ); if (need_block_scales) { _shared_s1 = check_tensor( shared_s1, Quant::F32, {cdiv(config->n_shared_experts * config->moe_intermediate_size, b0), cdiv(config->dim, b1), 0}, __LINE__ ); _shared_s2 = check_tensor( shared_s2, Quant::F32, {cdiv(config->dim, b0), cdiv(config->n_shared_experts * config->moe_intermediate_size, b1), 0}, __LINE__ ); _shared_s3 = check_tensor( shared_s3, 
Quant::F32, {cdiv(config->n_shared_experts * config->moe_intermediate_size, b0), cdiv(config->dim, b1), 0}, __LINE__ ); } } } else { _w1 = check_tensor( w1, config->weight_quant, {config->hidden_dim, config->dim, 0, 0}, __LINE__ ); _w2 = check_tensor( w2, config->weight_quant, {config->dim, config->hidden_dim, 0, 0}, __LINE__ ); _w3 = check_tensor( w3, config->weight_quant, {config->hidden_dim, config->dim, 0, 0}, __LINE__ ); if (need_block_scales) { _s1 = check_tensor( s1, Quant::F32, {cdiv(config->hidden_dim, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); _s2 = check_tensor( s2, Quant::F32, {cdiv(config->dim, b0), cdiv(config->hidden_dim, b1), 0, 0}, __LINE__ ); _s3 = check_tensor( s3, Quant::F32, {cdiv(config->hidden_dim, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); } } } Block::~Block() {} void Block::block( InferenceState& s, int pos, int kv_sink, int kv_pos, int kv_len ) const { if (_device == Device::CPU) { switch (_config->weight_quant) { case Quant::F32: _block_cpu(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::F16: #if defined(__AVX2__) && defined(__F16C__) _block_cpu(s, pos, kv_sink, kv_pos, kv_len); #else assert(false && "float16 not supported on this platform"); #endif break; case Quant::F8E5M2: _block_cpu(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::Q2_K: _block_cpu(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::Q3_K: _block_cpu(s, pos, kv_sink, kv_pos, kv_len); break; default: assert(false && "unsupported weight quantization for cpu"); } } } double Block::active_bytes(size_t pos) const { double bytes_per_weight = bits_per_weight(_config->weight_quant, _config->block_size[0] * _config->block_size[1]) / 8.0; double bytes = 0; bytes += _rms_att_weight->size; bytes += _rms_ffn_weight->size; if (_config->n_routed_experts > 0 && _w1->ndim() == 3) { bytes += _moegate->size; if (_moegate_bias) bytes += _moegate_bias->size; // bytes_per_weight accounts for scales and other quantization schemes bytes += _config->n_active_routed * 3 * 
_config->dim * _config->moe_intermediate_size * bytes_per_weight; // w1, w2, w3 } else { bytes += _w1->size + _w2->size + _w3->size; // w1, w2, w3 if (_s1) { bytes += _s1->size; bytes += _s2->size; bytes += _s3->size; } } if (_config->n_shared_experts > 0) { if (_shared_w1) bytes += _shared_w1->size; if (_shared_s1) bytes += _shared_s1->size; if (_shared_w2) bytes += _shared_w2->size; if (_shared_s2) bytes += _shared_s2->size; if (_shared_w3) bytes += _shared_w3->size; if (_shared_s3) bytes += _shared_s3->size; } return bytes; } BlockMHA::BlockMHA( int layer_i, const std::shared_ptr config, const Tensor* rms_att_weight, const Tensor* rms_q_a_weight, const Tensor* rms_kv_a_weight, const Tensor* rms_ffn_weight, const Tensor* wq, const Tensor* sq, const Tensor* wq_a, const Tensor* sq_a, const Tensor* wkv_a, const Tensor* skv_a, const Tensor* wq_b, const Tensor* sq_b, const Tensor* wkv_b, const Tensor* skv_b, const Tensor* wo, const Tensor* so, const Tensor* w1, const Tensor* s1, const Tensor* w2, const Tensor* s2, const Tensor* w3, const Tensor* s3, const Tensor* shared_w1, const Tensor* shared_s1, const Tensor* shared_w2, const Tensor* shared_s2, const Tensor* shared_w3, const Tensor* shared_s3, const Tensor* moegate, const Tensor* moegate_bias ) : Block(layer_i, config, rms_att_weight, rms_ffn_weight, w1, s1, w2, s2, w3, s3, shared_w1, shared_s1, shared_w2, shared_s2, shared_w3, shared_s3, moegate, moegate_bias) { bool need_block_scales = _config->weight_quant == Quant::F8E5M2; int b0 = config->block_size[0]; int b1 = config->block_size[1]; if (config->q_lora_rank > 0) { _rms_q_a_weight = check_tensor( rms_q_a_weight, Quant::F32, {config->q_lora_rank, 0, 0, 0}, __LINE__ ); _wq_a = check_tensor( wq_a, config->weight_quant, {config->q_lora_rank, config->dim, 0, 0}, __LINE__ ); _wq_b = check_tensor( wq_b, config->weight_quant, {config->n_heads * config->head_dim, config->q_lora_rank, 0, 0}, __LINE__ ); if (need_block_scales) { _sq_a = check_tensor( sq_a, Quant::F32, 
{cdiv(config->q_lora_rank, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); _sq_b = check_tensor( sq_b, Quant::F32, {cdiv(config->n_heads * config->head_dim, b0), cdiv(config->q_lora_rank, b1), 0, 0}, __LINE__ ); } } else { _wq = check_tensor( wq, config->weight_quant, {config->n_heads * config->head_dim, config->dim, 0, 0}, __LINE__ ); if (need_block_scales) { _sq = check_tensor( sq, Quant::F32, {cdiv(config->n_heads * config->head_dim, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); } } _rms_kv_a_weight = check_tensor( rms_kv_a_weight, Quant::F32, {config->kv_lora_rank, 0, 0, 0}, __LINE__ // Assuming kv_lora_rank is correct size here for MHA norm too ); _wkv_a = check_tensor( wkv_a, config->weight_quant, {config->kv_lora_rank + config->qk_rope_head_dim, config->dim, 0, 0}, __LINE__ ); _wkv_b = check_tensor( wkv_b, config->weight_quant, {config->n_heads * (config->head_dim-config->qk_rope_head_dim+config->v_head_dim), config->kv_lora_rank, 0, 0}, __LINE__ ); _wo = check_tensor( wo, config->weight_quant, {config->dim, config->n_heads * config->v_head_dim, 0, 0}, __LINE__ ); if (need_block_scales) { _skv_a = check_tensor( skv_a, Quant::F32, {cdiv(config->kv_lora_rank + config->qk_rope_head_dim, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); _skv_b = check_tensor( skv_b, Quant::F32, {cdiv(config->n_heads * (config->head_dim-config->qk_rope_head_dim+config->v_head_dim), b0), cdiv(config->kv_lora_rank, b1), 0, 0}, __LINE__ ); _so = check_tensor( so, Quant::F32, {cdiv(config->dim, b0), cdiv(config->n_heads * config->v_head_dim, b1), 0, 0}, __LINE__ ); } _key_cache = new f16_t[config->max_seq_len * config->n_heads * config->head_dim](); _value_cache = new f16_t[config->max_seq_len * config->n_heads * config->v_head_dim](); } BlockMHA::~BlockMHA() { if (_device == Device::CPU) { delete[] _key_cache; delete[] _value_cache; } } double BlockMHA::active_bytes(size_t pos) const { double bytes = Block::active_bytes(pos); // Add active bytes for attention and KV cache if (_wq) 
bytes += _wq->size; if (_sq) bytes += _sq->size; if (_wq_a) bytes += _wq_a->size; if (_sq_a) bytes += _sq_a->size; if (_wkv_a) bytes += _wkv_a->size; if (_skv_a) bytes += _skv_a->size; if (_wo) bytes += _wo->size; if (_so) bytes += _so->size; if (_wq_b) bytes += _wq_b->size; if (_sq_b) bytes += _sq_b->size; if (_wkv_b) bytes += _wkv_b->size; if (_skv_b) bytes += _skv_b->size; size_t kv_len = std::min(static_cast(_config->max_seq_len), pos + 1); size_t kv_entry_size = sizeof(f16_t); bytes += 2 * kv_len * _config->n_heads * _config->head_dim * kv_entry_size; // key_cache, value_cache return bytes; } void BlockMHA::attention_impl( InferenceState& s, int pos, int kv_sink, int kv_pos, int kv_len ) const { switch (_config->weight_quant) { case Quant::F32: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::F16: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::F8E5M2: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::Q2_K: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::Q3_K: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; default: assert(false && "unsupported weight quantization for mha"); } } BlockMLA::BlockMLA( int layer_i, const std::shared_ptr config, const Tensor* rms_att_weight, const Tensor* rms_q_a_weight, const Tensor* rms_kv_a_weight, const Tensor* rms_ffn_weight, const Tensor* wq_a, const Tensor* sq_a, const Tensor* wkv_a, const Tensor* skv_a, const Tensor* wo, const Tensor* so, const Tensor* wc, const Tensor* sc, const Tensor* wq_rope_b, const Tensor* sq_rope_b, const Tensor* wv_b, const Tensor* sv_b, const Tensor* w1, const Tensor* s1, const Tensor* w2, const Tensor* s2, const Tensor* w3, const Tensor* s3, const Tensor* shared_w1, const Tensor* shared_s1, const Tensor* shared_w2, const Tensor* shared_s2, const Tensor* shared_w3, const Tensor* shared_s3, const Tensor* moegate, const Tensor* moegate_bias ) : Block(layer_i, config, rms_att_weight, rms_ffn_weight, w1, s1, w2, 
s2, w3, s3, shared_w1, shared_s1, shared_w2, shared_s2, shared_w3, shared_s3, moegate, moegate_bias) { bool need_block_scales = _config->weight_quant == Quant::F8E5M2; int b0 = config->block_size[0]; int b1 = config->block_size[1]; _rms_q_a_weight = check_tensor( rms_q_a_weight, Quant::F32, {config->q_lora_rank, 0, 0, 0}, __LINE__ ); _rms_kv_a_weight = check_tensor( rms_kv_a_weight, Quant::F32, {config->kv_lora_rank, 0, 0, 0}, __LINE__ // Only norm latent part ); _wq_a = check_tensor( wq_a, config->weight_quant, {config->q_lora_rank, config->dim, 0, 0}, __LINE__ ); _wkv_a = check_tensor( wkv_a, config->weight_quant, {config->kv_lora_rank + config->qk_rope_head_dim, config->dim, 0, 0}, __LINE__ ); _wc = check_tensor( wc, config->weight_quant, {config->n_heads * config->kv_lora_rank, config->q_lora_rank, 0, 0}, __LINE__ ); _wq_rope_b = check_tensor( wq_rope_b, config->weight_quant, {config->n_heads * config->qk_rope_head_dim, config->q_lora_rank, 0, 0}, __LINE__ ); _wv_b = check_tensor( wv_b, config->weight_quant, {config->n_heads * config->v_head_dim, config->kv_lora_rank, 0, 0}, __LINE__ ); // Reshape _wv_b from 2D to 3D _wv_b = QTensor(_wv_b->quant, {config->n_heads, config->v_head_dim, config->kv_lora_rank}, _wv_b->data, _wv_b->size); _wo = check_tensor( wo, config->weight_quant, {config->dim, config->n_heads * config->v_head_dim, 0, 0}, __LINE__ ); if (need_block_scales) { _sq_a = check_tensor( sq_a, Quant::F32, {cdiv(config->q_lora_rank, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); _skv_a = check_tensor( skv_a, Quant::F32, {cdiv(config->kv_lora_rank + config->qk_rope_head_dim, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); _sc = check_tensor( sc, Quant::F32, {cdiv(config->n_heads * config->kv_lora_rank, b0), cdiv(config->q_lora_rank, b1), 0, 0}, __LINE__ ); _sq_rope_b = check_tensor( sq_rope_b, Quant::F32, {cdiv(config->n_heads * config->qk_rope_head_dim, b0), cdiv(config->q_lora_rank, b1), 0, 0}, __LINE__ ); _sv_b = check_tensor( sv_b, Quant::F32, 
{cdiv(config->n_heads * config->v_head_dim, b0), cdiv(config->kv_lora_rank, b1), 0, 0}, __LINE__ ); _so = check_tensor( so, Quant::F32, {cdiv(config->dim, b0), cdiv(config->n_heads * config->v_head_dim, b1), 0, 0}, __LINE__ ); } _kv_nope_cache = new f16_t[config->max_seq_len * config->kv_lora_rank](); _kv_rope_cache = new f16_t[config->max_seq_len * config->qk_rope_head_dim](); } BlockMLA::~BlockMLA() { if (_device == Device::CPU) { delete[] _kv_nope_cache; delete[] _kv_rope_cache; } } double BlockMLA::active_bytes(size_t pos) const { double bytes = Block::active_bytes(pos); bytes += _rms_q_a_weight->size; bytes += _rms_kv_a_weight->size; if (_wq_a) bytes += _wq_a->size; if (_sq_a) bytes += _sq_a->size; if (_wkv_a) bytes += _wkv_a->size; if (_skv_a) bytes += _skv_a->size; if (_wo) bytes += _wo->size; if (_so) bytes += _so->size; if (_wc) bytes += _wc->size; if (_sc) bytes += _sc->size; if (_wq_rope_b) bytes += _wq_rope_b->size; if (_sq_rope_b) bytes += _sq_rope_b->size; if (_wv_b) bytes += _wv_b->size; if (_sv_b) bytes += _sv_b->size; size_t kv_len = std::min(static_cast(_config->max_seq_len), pos + 1); size_t kv_entry_size = sizeof(f16_t); bytes += kv_len * _config->kv_lora_rank * kv_entry_size; // kv_nope_cache bytes += kv_len * _config->qk_rope_head_dim * kv_entry_size; // kv_rope_cache return bytes; } void BlockMLA::attention_impl( InferenceState& s, int pos, int kv_sink, int kv_pos, int kv_len ) const { switch (_config->weight_quant) { case Quant::F32: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::F16: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::F8E5M2: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::Q2_K: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; case Quant::Q3_K: _attention_impl(s, pos, kv_sink, kv_pos, kv_len); break; default: assert(false && "unsupported weight quantization for mla"); } } InferenceState::InferenceState(const std::shared_ptr config): _config(config) { 
assert(config); _x = new float[config->dim](); _xb = new float[config->dim](); _xb2 = new float[std::max({ config->dim, config->n_heads * config->v_head_dim, config->n_heads * config->kv_lora_rank })](); _hb = new float[std::max({ config->dim, config->hidden_dim })](); _hb2 = new float[config->hidden_dim](); if (config->q_lora_rank > 0) { _q_a = new float[config->q_lora_rank](); } _q = new float[config->n_heads * config->head_dim](); _kv_a = new float[config->kv_lora_rank + config->qk_rope_head_dim](); _kv_b = new float[config->n_heads * (config->head_dim-config->qk_rope_head_dim+config->v_head_dim)](); _ropebuf = new float[config->n_heads * config->qk_rope_head_dim](); _k = new float[config->n_heads * config->head_dim](); _v = new float[config->n_heads * config->v_head_dim](); _att = new float[config->n_heads * config->max_seq_len](); _logits = new float[config->vocab_size](); _logit_indices = new int[config->vocab_size](); for (int i = 0; i < config->vocab_size; i++){ _logit_indices[i] = i; } if (config->use_mla) { _q_c = new float[config->n_heads * config->kv_lora_rank](); _q_rope = new float[config->n_heads * config->qk_rope_head_dim](); } if (config->n_routed_experts > 0) { _moe_weights = new float[config->n_routed_experts](); _active_experts = new int[config->n_active_routed](); _active_experts_weights = new float[config->n_active_routed](); } // TODO: consider dynamically resizing based on inputs size_t aqb_nitems = std::max({ config->dim, config->moe_intermediate_size, config->n_heads * config->v_head_dim, config->n_heads * config->kv_lora_rank, config->hidden_dim }); size_t aqb_nblocks = aqb_nitems / QK_K; _aqb = new uint8_t[aqb_nblocks * sizeof(block_q8_K)](); } InferenceState::~InferenceState() { if (_device == Device::CPU) { delete[] _x; delete[] _xb; delete[] _xb2; delete[] _hb; delete[] _hb2; if (_q_a != nullptr) { delete[] _q_a; } delete[] _q; delete[] _kv_a; delete[] _kv_b; delete[] _ropebuf; delete[] _k; delete[] _v; delete[] _att; delete[] 
_logits; delete[] _logit_indices; if (_moe_weights != nullptr) { delete[] _moe_weights; delete[] _active_experts; delete[] _active_experts_weights; } delete[] _aqb; } } Model::Model(YALMData& yalm, int context) { config = std::make_shared(); config->from_yalm(yalm, context); std::cout << "loading model with quant: " << quant_to_string(config->weight_quant) << std::endl; bool need_weight_scales = config->weight_quant == Quant::F8E5M2; bool use_mla = config->use_mla; int b0 = config->block_size[0]; int b1 = config->block_size[1]; token_embedding_table = check_tensor( get_tensor(yalm, "model.embed.weight"), config->weight_quant, {config->vocab_size, config->dim, 0, 0}, __LINE__ ); if (need_weight_scales) { token_embedding_scale = check_tensor( get_tensor(yalm, "model.embed.scale"), Quant::F32, {cdiv(config->vocab_size, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); } for (int i = 0; i < config->n_layers; ++i) { const Tensor* p_rms_att_weight = get_tensor(yalm, fmt::format("model.layers.{}.attn.norm.weight", i)); const Tensor* p_rms_ffn_weight = get_tensor(yalm, fmt::format("model.layers.{}.mlp.norm.weight", i)); const Tensor* p_w1 = get_tensor(yalm, fmt::format("model.layers.{}.mlp.w1.weight", i)); const Tensor* p_s1 = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.mlp.w1.scale", i)) : nullptr; const Tensor* p_w2 = get_tensor(yalm, fmt::format("model.layers.{}.mlp.w2.weight", i)); const Tensor* p_s2 = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.mlp.w2.scale", i)) : nullptr; const Tensor* p_w3 = get_tensor(yalm, fmt::format("model.layers.{}.mlp.w3.weight", i)); const Tensor* p_s3 = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.mlp.w3.scale", i)) : nullptr; const Tensor* p_shared_w1 = (i >= config->first_k_dense_replace && config->n_shared_experts > 0) ? 
get_tensor(yalm, fmt::format("model.layers.{}.shared_mlp.w1.weight", i)) : nullptr; const Tensor* p_shared_s1 = (need_weight_scales && i >= config->first_k_dense_replace && config->n_shared_experts > 0) ? get_tensor(yalm, fmt::format("model.layers.{}.shared_mlp.w1.scale", i)) : nullptr; const Tensor* p_shared_w2 = (i >= config->first_k_dense_replace && config->n_shared_experts > 0) ? get_tensor(yalm, fmt::format("model.layers.{}.shared_mlp.w2.weight", i)) : nullptr; const Tensor* p_shared_s2 = (need_weight_scales && i >= config->first_k_dense_replace && config->n_shared_experts > 0) ? get_tensor(yalm, fmt::format("model.layers.{}.shared_mlp.w2.scale", i)) : nullptr; const Tensor* p_shared_w3 = (i >= config->first_k_dense_replace && config->n_shared_experts > 0) ? get_tensor(yalm, fmt::format("model.layers.{}.shared_mlp.w3.weight", i)) : nullptr; const Tensor* p_shared_s3 = (need_weight_scales && i >= config->first_k_dense_replace && config->n_shared_experts > 0) ? get_tensor(yalm, fmt::format("model.layers.{}.shared_mlp.w3.scale", i)) : nullptr; const Tensor* p_moegate = (i >= config->first_k_dense_replace && config->n_routed_experts > 0) ? get_tensor(yalm, fmt::format("model.layers.{}.moegate.weight", i)) : nullptr; const Tensor* p_moegate_bias = (i >= config->first_k_dense_replace && config->n_routed_experts > 0 && config->has_moegate_bias) ? get_tensor(yalm, fmt::format("model.layers.{}.moegate.bias", i)) : nullptr; const Tensor* p_rms_q_a_weight = config->q_lora_rank > 0 ? get_tensor(yalm, fmt::format("model.layers.{}.attn.q_a_norm.weight", i)) : nullptr; const Tensor* p_rms_kv_a_weight = get_tensor(yalm, fmt::format("model.layers.{}.attn.kv_a_norm.weight", i)); const Tensor* p_wq_a = config->q_lora_rank > 0 ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wq_a.weight", i)) : nullptr; const Tensor* p_sq_a = (need_weight_scales && config->q_lora_rank > 0) ? 
get_tensor(yalm, fmt::format("model.layers.{}.attn.wq_a.scale", i)) : nullptr; const Tensor* p_wkv_a = get_tensor(yalm, fmt::format("model.layers.{}.attn.wkv_a.weight", i)); const Tensor* p_skv_a = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wkv_a.scale", i)) : nullptr; const Tensor* p_wo = get_tensor(yalm, fmt::format("model.layers.{}.attn.wo.weight", i)); const Tensor* p_so = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wo.scale", i)) : nullptr; if (use_mla) { const Tensor* p_wc = get_tensor(yalm, fmt::format("model.layers.{}.attn.wc.weight", i)); const Tensor* p_sc = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wc.scale", i)) : nullptr; const Tensor* p_wq_rope_b = get_tensor(yalm, fmt::format("model.layers.{}.attn.wq_rope_b.weight", i)); const Tensor* p_sq_rope_b = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wq_rope_b.scale", i)) : nullptr; const Tensor* p_wv_b = get_tensor(yalm, fmt::format("model.layers.{}.attn.wv_b.weight", i)); const Tensor* p_sv_b = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wv_b.scale", i)) : nullptr; blocks.emplace_back(std::make_shared( i, config, p_rms_att_weight, p_rms_q_a_weight, p_rms_kv_a_weight, p_rms_ffn_weight, p_wq_a, p_sq_a, p_wkv_a, p_skv_a, p_wo, p_so, p_wc, p_sc, p_wq_rope_b, p_sq_rope_b, p_wv_b, p_sv_b, p_w1, p_s1, p_w2, p_s2, p_w3, p_s3, p_shared_w1, p_shared_s1, p_shared_w2, p_shared_s2, p_shared_w3, p_shared_s3, p_moegate, p_moegate_bias )); } else { const Tensor* p_wq = config->q_lora_rank == 0 ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wq.weight", i)) : nullptr; const Tensor* p_sq = (need_weight_scales && config->q_lora_rank == 0) ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wq.scale", i)) : nullptr; const Tensor* p_wq_b = config->q_lora_rank > 0 ? 
get_tensor(yalm, fmt::format("model.layers.{}.attn.wq_b.weight", i)) : nullptr; const Tensor* p_sq_b = (need_weight_scales && config->q_lora_rank > 0) ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wq_b.scale", i)) : nullptr; const Tensor* p_wkv_b = get_tensor(yalm, fmt::format("model.layers.{}.attn.wkv_b.weight", i)); const Tensor* p_skv_b = need_weight_scales ? get_tensor(yalm, fmt::format("model.layers.{}.attn.wkv_b.scale", i)) : nullptr; blocks.emplace_back(std::make_shared( i, config, p_rms_att_weight, p_rms_q_a_weight, p_rms_kv_a_weight, p_rms_ffn_weight, p_wq, p_sq, p_wq_a, p_sq_a, p_wkv_a, p_skv_a, p_wq_b, p_sq_b, p_wkv_b, p_skv_b, p_wo, p_so, p_w1, p_s1, p_w2, p_s2, p_w3, p_s3, p_shared_w1, p_shared_s1, p_shared_w2, p_shared_s2, p_shared_w3, p_shared_s3, p_moegate, p_moegate_bias )); } } rms_final_weight = check_tensor( get_tensor(yalm, "model.norm.weight"), Quant::F32, {config->dim, 0, 0, 0}, __LINE__ ); bool tie_word_embeddings = yalm.tensors.count("model.output.weight") == 0; if (tie_word_embeddings) { wcls = token_embedding_table; scls = token_embedding_scale; } else { wcls = check_tensor( get_tensor(yalm, "model.output.weight"), config->weight_quant, {config->vocab_size, config->dim, 0, 0}, __LINE__ ); if (need_weight_scales) { scls = check_tensor( get_tensor(yalm, "model.output.scale"), Quant::F32, {cdiv(config->vocab_size, b0), cdiv(config->dim, b1), 0, 0}, __LINE__ ); } } } void Model::forward(InferenceState& s, int token, int pos, InferenceMode mode) { if (s.device() != _device) { std::cerr << "FATAL: inference state device mismatch" << std::endl; assert(false); return; } if (_device == Device::CPU) { _forward_cpu(s, token, pos, mode); } } double Model::active_bytes(size_t pos) const { double bytes_per_weight = bits_per_weight(config->weight_quant, config->block_size[0] * config->block_size[1]) / 8.0; double bytes = 0; bytes += config->dim * bytes_per_weight; // 1 row of token_embedding_table // blocks for (auto& block : blocks) { bytes += 
block->active_bytes(pos); } bytes += rms_final_weight->size; bytes += wcls->size; if (scls) { bytes += scls->size; } return bytes; } #if DEBUG_MODEL DebugTensor::DebugTensor(const std::vector& data) { data_f32 = data; data_type = DataType::F32; } DebugTensor::DebugTensor(const std::vector& data) { data_f16 = data; data_type = DataType::F16; } float DebugTensor::max_err(const DebugTensor& other) const { if (data_type != other.data_type) { return -1; } if (data_type == DataType::F32) { float max_err = 0; for (size_t i = 0; i < data_f32.size(); i++) { max_err = std::max(max_err, std::abs(data_f32[i] - other.data_f32[i])); } return max_err; } else { #if defined(__F16C__) float max_err = 0; for (size_t i = 0; i < data_f16.size(); i++) { max_err = std::max(max_err, std::abs(_cvtsh_ss(data_f16[i]) - _cvtsh_ss(other.data_f16[i]))); } return max_err; #else assert(false && "float16 not supported on this platform"); #endif } } #endif ================================================ FILE: src/model.h ================================================ #pragma once #include "codec.h" #include #include #include #include #include "quant.h" #define DEBUG_MODEL 0 constexpr int KV_SINKS = 2; enum class ActivationType { GELU, SILU, }; enum class LayerNormType { RMSNorm, }; enum class TopKMethod { GREEDY, GROUP_LIMITED_GREEDY, NOAUX_TC, }; enum class ScoringFunc { SOFTMAX, SIGMOID, }; enum class Device { CPU, }; enum class InferenceMode { HYDRATE_KV_CACHE, // only hydrate the KV cache and don't compute output logits OUTPUT_LOGITS // set InferenceState logits to logits for the next token }; int cdiv(int a, int b); struct Config { int dim; // transformer input & output dimension int hidden_dim; // dimension of hidden layer in feedforward network (dense blocks only) int n_layers; // number of layers int n_heads; // number of attention heads int vocab_size; // vocabulary size int max_seq_len; // max sequence length float rope_theta; // RoPE theta float norm_eps; // epsilon for layer 
normalization ActivationType act; // activation function LayerNormType norm_type; // norm type int first_k_dense_replace; // how many blocks to keep the dense FFN (when sparse MoE is default) // mixture of experts int n_shared_experts; int n_routed_experts; int n_active_routed; int moe_intermediate_size; float routed_scaling_factor; int n_group; bool norm_topk_prob; ScoringFunc scoring_func; int topk_group; TopKMethod topk_method; bool has_moegate_bias; // multi-latent attention bool use_mla; // if false, use naive implementation of multi-latent attention int kv_lora_rank; int q_lora_rank; int qk_nope_head_dim; int qk_rope_head_dim; int v_head_dim; int head_dim; // dimension of each attention head, equal to qk_nope_head_dim + qk_rope_head_dim // Data type of the weights according to config, used // to safety check tensor dtype at initialization time. Quant weight_quant; // Block size for weight quantization if present // If weights are quantized but block size is (0, 0), then we are using // per-tensor quantization. std::array block_size = {0, 0}; // RoPE scaling int rs_beta_fast; int rs_beta_slow; float rs_factor; float rs_mscale; float rs_mscale_all_dim; int rs_original_max_position_embeddings; // If nonzero `context` is supplied, max sequence length is limited to `context`. void from_yalm(YALMData& yalm, int context = 0); }; // Buffer for all state used during a forward pass. // Members are reused across subsequent blocks and passes. // This lets us avoid allocations during inference. struct InferenceState { InferenceState(const std::shared_ptr config); ~InferenceState(); // current activations float* x() const { return _x; } float* xb() const { return _xb; } float* xb(int head) const { return _xb + _config->head_dim * head; } // TODO: do we need xb2? 
float* xb2() const { return _xb2; } float* xb2(int head, int head_size) const { return _xb2 + head_size * head; } float* hb() const { return _hb; } float* hb2() const { return _hb2; } float* q_a() const { return _q_a; } float* q() const { return _q; } float* q(int head) const { return _q + _config->head_dim * head; } float* kv_a() const { return _kv_a; } float* kv_b() const { return _kv_b; } float* kv_b(int head) const { return _kv_b + (_config->head_dim - _config->qk_rope_head_dim + _config->v_head_dim) * head; } float* ropebuf() const { return _ropebuf; } float* k() const { return _k; } float* k(int head) const { return _k + _config->head_dim * head; } float* v() const { return _v; } float* v(int head) const { return _v + _config->v_head_dim * head; } float* att() const { return _att; } float* att(int head) const { return _att + _config->max_seq_len * head; } // MLA only float* q_c() const { return _q_c; } float* q_c(int head) const { return _q_c + _config->kv_lora_rank * head; } float* q_rope() const { return _q_rope; } float* q_rope(int head) const { return _q_rope + _config->qk_rope_head_dim * head; } // mixture of experts float* moe_weights() const { return _moe_weights; } float* active_experts_weights() const { return _active_experts_weights; } int* active_experts() const { return _active_experts; } // LM head float* logits() const { return _logits; } int* logit_indices() const { return _logit_indices; } // activation quantization buffer void* aqb() const { return _aqb; } Device device() const { return _device; } InferenceMode mode() const { return _mode; } void set_mode(InferenceMode mode) { _mode = mode; } private: std::shared_ptr _config; Device _device = Device::CPU; InferenceMode _mode = InferenceMode::OUTPUT_LOGITS; // current activations float* _x = nullptr; // (dim,) - latest activation float* _xb = nullptr; // (dim,) - activation inside a residual branch float* _xb2 = nullptr; // (max{dim, n_heads * v_head_dim, n_heads * kv_lora_rank},) - activation 
inside a residual branch (second slot) float* _hb = nullptr; // (max{dim, hidden_dim},) - buffer for hidden dimension in feedforward network float* _hb2 = nullptr; // (hidden_dim,) - buffer for hidden dimension in feedforward network (second slot) float* _q_a = nullptr; // (q_lora_rank,) - compressed (latent) query vector for latest timestamp float* _q = nullptr; // (n_heads * head_dim,) - query vectors for latest timestamp float* _kv_a = nullptr; // (kv_lora_rank + qk_rope_head_dim,) - compressed (latent) key-value vector for latest timestamp float* _kv_b = nullptr; // (n_heads * (head_dim-qk_rope_head_dim+v_head_dim),) - uncompressed key-value vector for latest timestamp float* _ropebuf = nullptr; // (n_heads * qk_rope_head_dim,) - buffer for rope float* _k = nullptr; // (n_heads * head_dim,) - key vectors for latest timestamp float* _v = nullptr; // (n_heads * v_head_dim,) - value vectors for latest timestamp float* _att = nullptr; // (n_heads, seq_len) - buffer for attention scores // MLA only float* _q_c = nullptr; // (n_heads * kv_lora_rank,) - transformed and compressed query vector for latest timestamp float* _q_rope = nullptr; // (n_heads * qk_rope_head_dim,) - RoPE-transformed query vector for latest timestamp // mixture of experts float* _moe_weights = nullptr; // (n_routed_experts,) - buffer for expert weights, decided by router float* _active_experts_weights = nullptr; // (n_active_experts,) - buffer for weights of top K experts (active experts) int* _active_experts = nullptr; // (n_active_experts,) - buffer for indices of top K experts (active experts) // LM head float* _logits = nullptr; // (vocab_size,) - final output logits int* _logit_indices = nullptr; // (vocab_size,) - logit indices (for use by top-p sampler) // activation quantization buffer uint8_t* _aqb = nullptr; // buffer for quantized activations }; /* Transformer Block Base */ struct Block { Block( int layer_i, const std::shared_ptr config, const Tensor* rms_att_weight, const Tensor* 
rms_ffn_weight, const Tensor* w1, const Tensor* s1, const Tensor* w2, const Tensor* s2, const Tensor* w3, const Tensor* s3, const Tensor* shared_w1, const Tensor* shared_s1, const Tensor* shared_w2, const Tensor* shared_s2, const Tensor* shared_w3, const Tensor* shared_s3, const Tensor* moegate, const Tensor* moegate_bias ); virtual ~Block(); float* rms_att_weight() const { return _rms_att_weight ? static_cast(_rms_att_weight->data) : nullptr; } float* rms_ffn_weight() const { return _rms_ffn_weight ? static_cast(_rms_ffn_weight->data) : nullptr; } std::optional w1() const { return _w1; } std::optional w2() const { return _w2; } std::optional w3() const { return _w3; } std::optional moegate() const { return _moegate; } std::optional moegate_bias() const { return _moegate_bias; } std::optional shared_w1() const { return _shared_w1; } std::optional shared_w2() const { return _shared_w2; } std::optional shared_w3() const { return _shared_w3; } // Compute forward pass for this block and update the inference state accordingly. // PRECONDITIONS: // - `s.x()` contains the input to the block. Output will also go here. // - Block KV cache is hydrated. 
void block( InferenceState& s, // inference state int pos, // index of the current token in the sequence int kv_sink, // number of sink tokens currently in the KV cache int kv_pos, // index of the current token in the kv cache, must be in [0..kv_len) since kv cache is a ring buffer int kv_len // number of tokens in the kv cache that we will attend over ) const; virtual double active_bytes(size_t pos) const; protected: virtual void attention_impl( InferenceState& s, // inference state int pos, // index of the current token in the sequence int kv_sink, // number of sink tokens currently in the KV cache int kv_pos, // index of the current token in the kv cache, must be in [0..kv_len) since kv cache is a ring buffer int kv_len // number of tokens in the kv cache that we will attend over ) const = 0; template void _block_cpu( InferenceState& s, // inference state int pos, // index of the current token in the sequence int kv_sink, // number of sink tokens currently in the KV cache int kv_pos, // index of the current token in the kv cache, must be in [0..kv_len) since kv cache is a ring buffer int kv_len // number of tokens in the kv cache that we will attend over ) const; int _layer_i = 0; std::shared_ptr _config; Device _device = Device::CPU; // weights for norms std::optional _rms_att_weight = std::nullopt; // (dim) rmsnorm weights for attention input std::optional _rms_ffn_weight = std::nullopt; // (dim) rmsnorm weights for ffn input // weights for ffn std::optional _w1 = std::nullopt; // (n_routed_experts?, moe_intermediate_size, dim) or (hidden_dim, dim) std::optional _s1 = std::nullopt; std::optional _w2 = std::nullopt; // (n_routed_experts?, dim, moe_intermediate_size) or (dim, hidden_dim) std::optional _s2 = std::nullopt; std::optional _w3 = std::nullopt; // (n_routed_experts?, moe_intermediate_size, dim) or (hidden_dim, dim) std::optional _s3 = std::nullopt; std::optional _shared_w1 = std::nullopt; // (n_shared_experts?, moe_intermediate_size, dim) std::optional 
_shared_s1 = std::nullopt; std::optional _shared_w2 = std::nullopt; // (n_shared_experts?, dim, moe_intermediate_size) std::optional _shared_s2 = std::nullopt; std::optional _shared_w3 = std::nullopt; // (n_shared_experts?, moe_intermediate_size, dim) std::optional _shared_s3 = std::nullopt; // weights for mixture of experts router if present std::optional _moegate = std::nullopt; // (n_routed_experts?, dim) std::optional _moegate_bias = std::nullopt; }; /* Transformer Block - Multi-Head Attention */ struct BlockMHA : public Block { BlockMHA( int layer_i, const std::shared_ptr config, const Tensor* rms_att_weight, const Tensor* rms_q_a_weight, const Tensor* rms_kv_a_weight, const Tensor* rms_ffn_weight, const Tensor* wq, const Tensor* sq, const Tensor* wq_a, const Tensor* sq_a, const Tensor* wkv_a, const Tensor* skv_a, const Tensor* wq_b, const Tensor* sq_b, const Tensor* wkv_b, const Tensor* skv_b, const Tensor* wo, const Tensor* so, const Tensor* w1, const Tensor* s1, const Tensor* w2, const Tensor* s2, const Tensor* w3, const Tensor* s3, const Tensor* shared_w1, const Tensor* shared_s1, const Tensor* shared_w2, const Tensor* shared_s2, const Tensor* shared_w3, const Tensor* shared_s3, const Tensor* moegate, const Tensor* moegate_bias ); ~BlockMHA() override; float* rms_q_a_weight() const { return _rms_q_a_weight ? static_cast(_rms_q_a_weight->data) : nullptr; } float* rms_kv_a_weight() const { return _rms_kv_a_weight ? 
static_cast(_rms_kv_a_weight->data) : nullptr; } std::optional wq() const { return _wq; } std::optional wq_a() const { return _wq_a; } std::optional wq_b() const { return _wq_b; } std::optional wkv_a() const { return _wkv_a; } std::optional wkv_b() const { return _wkv_b; } std::optional wo() const { return _wo; } f16_t* key_cache() const { return _key_cache; } f16_t* key_cache(int pos) const { return _key_cache + pos * _config->head_dim * _config->n_heads; } f16_t* value_cache() const { return _value_cache; } f16_t* value_cache(int pos) const { return _value_cache + pos * _config->v_head_dim * _config->n_heads; } double active_bytes(size_t pos) const override; protected: void attention_impl( InferenceState& s, int pos, int kv_sink, int kv_pos, int kv_len ) const override; private: template void _attention_impl( InferenceState& s, // inference state int pos, // index of the current token in the sequence int kv_sink, // number of sink tokens currently in the KV cache int kv_pos, // index of the current token in the kv cache, must be in [0..kv_len) since kv cache is a ring buffer int kv_len // number of tokens in the kv cache that we will attend over ) const; std::optional _rms_q_a_weight = std::nullopt; // (q_lora_rank) rmsnorm weights std::optional _rms_kv_a_weight = std::nullopt; // (kv_lora_rank + qk_rope_head_dim) // weights for self-attention matmuls std::optional _wq = std::nullopt; // (n_heads * head_dim, dim) std::optional _sq = std::nullopt; std::optional _wq_a = std::nullopt; // (q_lora_rank, dim) std::optional _sq_a = std::nullopt; std::optional _wkv_a = std::nullopt; // (kv_lora_rank + qk_rope_head_dim, dim) std::optional _skv_a = std::nullopt; std::optional _wo = std::nullopt; // (dim, n_heads * v_head_dim) std::optional _so = std::nullopt; std::optional _wq_b = std::nullopt; // (n_heads * head_dim, q_lora_rank) std::optional _sq_b = std::nullopt; std::optional _wkv_b = std::nullopt; // (n_heads * (head_dim-qk_rope_head_dim+v_head_dim), kv_lora_rank) 
std::optional _skv_b = std::nullopt; // MHA kv cache f16_t* _key_cache = nullptr; // (seq_len, n_heads * head_dim) f16_t* _value_cache = nullptr; // (seq_len, n_heads * v_head_dim) }; /* Transformer Block - Multi-Latent Attention */ struct BlockMLA : public Block { BlockMLA( int layer_i, const std::shared_ptr config, const Tensor* rms_att_weight, const Tensor* rms_q_a_weight, const Tensor* rms_kv_a_weight, const Tensor* rms_ffn_weight, const Tensor* wq_a, const Tensor* sq_a, const Tensor* wkv_a, const Tensor* skv_a, const Tensor* wo, const Tensor* so, const Tensor* wc, const Tensor* sc, const Tensor* wq_rope_b, const Tensor* sq_rope_b, const Tensor* wv_b, const Tensor* sv_b, const Tensor* w1, const Tensor* s1, const Tensor* w2, const Tensor* s2, const Tensor* w3, const Tensor* s3, const Tensor* shared_w1, const Tensor* shared_s1, const Tensor* shared_w2, const Tensor* shared_s2, const Tensor* shared_w3, const Tensor* shared_s3, const Tensor* moegate, const Tensor* moegate_bias ); ~BlockMLA() override; float* rms_q_a_weight() const { return _rms_q_a_weight ? static_cast(_rms_q_a_weight->data) : nullptr; } float* rms_kv_a_weight() const { return _rms_kv_a_weight ? 
static_cast(_rms_kv_a_weight->data) : nullptr; } std::optional wq_a() const { return _wq_a; } std::optional wkv_a() const { return _wkv_a; } std::optional wo() const { return _wo; } std::optional wc() const { return _wc; } std::optional wq_rope_b() const { return _wq_rope_b; } std::optional wv_b() const { return _wv_b; } f16_t* kv_nope_cache() const { return _kv_nope_cache; } f16_t* kv_nope_cache(int pos) const { return _kv_nope_cache + pos * _config->kv_lora_rank; } f16_t* kv_rope_cache() const { return _kv_rope_cache; } f16_t* kv_rope_cache(int pos) const { return _kv_rope_cache + pos * _config->qk_rope_head_dim; } double active_bytes(size_t pos) const override; protected: void attention_impl( InferenceState& s, int pos, int kv_sink, int kv_pos, int kv_len ) const override; private: template void _attention_impl( InferenceState& s, // inference state int pos, // index of the current token in the sequence int kv_sink, // number of sink tokens currently in the KV cache int kv_pos, // index of the current token in the kv cache, must be in [0..kv_len) since kv cache is a ring buffer int kv_len // number of tokens in the kv cache that we will attend over ) const; // weights for norms std::optional _rms_q_a_weight = std::nullopt; // (q_lora_rank) rmsnorm weights std::optional _rms_kv_a_weight = std::nullopt; // (kv_lora_rank + qk_rope_head_dim) // weights for self-attention matmuls std::optional _wq_a = std::nullopt; // (q_lora_rank, dim) std::optional _sq_a = std::nullopt; std::optional _wkv_a = std::nullopt; // (kv_lora_rank + qk_rope_head_dim, dim) std::optional _skv_a = std::nullopt; std::optional _wo = std::nullopt; // (dim, n_heads * v_head_dim) std::optional _so = std::nullopt; std::optional _wc = std::nullopt; // (n_heads * kv_lora_rank, q_lora_rank) std::optional _sc = std::nullopt; std::optional _wq_rope_b = std::nullopt; // (n_heads * qk_rope_head_dim, q_lora_rank) std::optional _sq_rope_b = std::nullopt; std::optional _wv_b = std::nullopt; // (n_heads, 
v_head_dim, kv_lora_rank) std::optional _sv_b = std::nullopt; // MLA kv cache f16_t* _kv_nope_cache = nullptr; // (seq_len, kv_lora_rank) f16_t* _kv_rope_cache = nullptr; // (seq_len, qk_rope_head_dim) }; struct Model { std::shared_ptr config; std::vector> blocks; // token embedding table std::optional token_embedding_table = std::nullopt; // (vocab_size, dim) std::optional token_embedding_scale = std::nullopt; // (ceil(vocab_size / block_size[0]), ceil(dim / block_size[1])) // final norm std::optional rms_final_weight = std::nullopt; // (dim,) // classifier weights for the logits, on the last layer std::optional wcls = std::nullopt; // (vocab_size, dim) std::optional scls = std::nullopt; Model(YALMData& yalm, int context = 0); void forward(InferenceState& s, int token, int pos, InferenceMode mode = InferenceMode::OUTPUT_LOGITS); double active_bytes(size_t pos) const; private: void _forward_cpu(InferenceState& s, int token, int pos, InferenceMode mode); void _copy_embedding(InferenceState& s, int token); Device _device = Device::CPU; }; #if DEBUG_MODEL struct DebugTensor { enum struct DataType { F32, F16, }; DebugTensor() = default; DebugTensor(const std::vector& data); DebugTensor(const std::vector& data); DebugTensor& operator=(const DebugTensor& other) = default; float max_err(const DebugTensor& other) const; std::vector data_f32; std::vector data_f16; DataType data_type; }; std::map& debug_map_cpu(); void dump_debug_map(const std::string& filename); void dump_debug_map_as_safetensors(const std::string& filename); #endif //////////////////////////////////////// // Exposed for tests //////////////////////////////////////// void attn( float* xout, // (dim,) - output vector float* atth, // (kv_len,) - scratch space to hold attention scores of the sequence const float* qh, // (head_dim,) - query vector for this head const f16_t* kh, // (kv_len, n_heads, head_dim) - buffer containing key vectors of the sequence for all KV heads const f16_t* vh, // (kv_len, n_heads, 
head_dim) - buffer containing value vectors of the sequence for all KV heads int head_dim, // size of the "key-space" int v_head_dim, // size of the "value-space" int n_heads, // number of attention heads int kv_len // number of tokens of the sequence we will attend over ); void mha_cpu( float* xout, // (n_heads, head_dim) float* att, // (n_heads, max_seq_len) f16_t* kb, // (max_seq_len, n_heads, head_dim) f16_t* vb, // (max_seq_len, n_heads, head_dim) float* q, // (n_heads, head_dim) int head_dim, int v_head_dim, int kv_len, int max_seq_len, int n_heads ); void matmul_unscaled(float* xout, float* x, const QTensor& w); void ffn_cpu( float* xout, float* x, float* w1, float* w2, float* w3, int hidden_dim, int dim, ActivationType act ); //////////////////////////////////////// ================================================ FILE: src/profile.cpp ================================================ #include "profile.h" #include static bool _profile_enabled = true; static std::vector _profile_scopes; static std::map _profile_times; void set_profile_enabled(bool enabled) { _profile_enabled = enabled; } bool get_profile_enabled() { return _profile_enabled; } const std::map& profile_times() { return _profile_times; } #if PROFILE_ENABLED ProfileScope::ProfileScope(std::string name) { _profile_scopes.push_back(name); _start = omp_get_wtime(); } ProfileScope::ProfileScope(const char* name) : ProfileScope(std::string(name)) {} ProfileScope::~ProfileScope() { double end = omp_get_wtime(); double duration = end - _start; if (_profile_enabled) { std::string key = ""; for (const auto& scope : _profile_scopes) { key += scope + "."; } _profile_times[key] += duration; } _profile_scopes.pop_back(); } #else ProfileScope::ProfileScope(std::string name) {} ProfileScope::ProfileScope(const char* name) {} ProfileScope::~ProfileScope() {} #endif ProfileDisabledScope::ProfileDisabledScope() { _was_enabled = get_profile_enabled(); set_profile_enabled(false); } 
ProfileDisabledScope::~ProfileDisabledScope() { set_profile_enabled(_was_enabled); } ================================================ FILE: src/profile.h ================================================ #include #include #include #define PROFILE_ENABLED 0 // Toggle aggregation of profile scopes at runtime. // This does not disable profile instrumentation; change PROFILE_ENABLED and recompile for that. void set_profile_enabled(bool enabled); bool get_profile_enabled(); const std::map& profile_times(); #if PROFILE_ENABLED // This macro can be used to profile a block of code. // Example: // ``` // { // PROFILE_BLOCK(my_block); // // code to profile... // } // ``` // The execution time will be saved with key `my_block` in the profile_times map. // `my_block` need not be a variable name; it can be any string. #define PROFILE_BLOCK(name) \ ProfileScope profile_scope(#name) #else #define PROFILE_BLOCK(name) #endif // This macro can be used to profile a single statement. // Example: // ``` // PROFILE(my_statement); // ``` // The execution time will be saved with key `my_statement` in the profile_times map. // `my_statement` should be a valid C++ statement or expression. 
#define PROFILE(X) do { \ PROFILE_BLOCK(X); \ X; \ } while(0) struct ProfileScope { ProfileScope(std::string name); ProfileScope(const char* name); ~ProfileScope(); private: double _start; }; struct ProfileDisabledScope { ProfileDisabledScope(); ~ProfileDisabledScope(); private: bool _was_enabled; }; ================================================ FILE: src/quant.cpp ================================================ /* K-quants adapted from llama.cpp MIT License Copyright (c) 2023-2024 The ggml authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "quant.h" #include #define GROUP_MAX_EPS 1e-15f static inline int nearest_int(float fval) { assert(fabsf(fval) <= 4194303.f); float val = fval + 12582912.f; int i; memcpy(&i, &val, sizeof(int)); return (i & 0x007fffff) - 0x00400000; } // some compilers don't provide _mm256_set_m128i, e.g. 
gcc 7 #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) #if __AVX__ || __AVX2__ || __AVX512F__ // horizontally add 8 floats static inline float hsum_float_8(const __m256 x) { __m128 res = _mm256_extractf128_ps(x, 1); res = _mm_add_ps(res, _mm256_castps256_ps128(x)); res = _mm_add_ps(res, _mm_movehl_ps(res, res)); res = _mm_add_ss(res, _mm_movehdup_ps(res)); return _mm_cvtss_f32(res); } // shuffles to pick the required scales in dot products static inline __m256i get_scale_shuffle_q3k(int i) { static const uint8_t k_shuffle[128] = { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, }; return _mm256_loadu_si256((const __m256i*)k_shuffle + i); } #endif static float make_qkx2_quants(int n, int nmax, const float * __restrict__ x, const float * __restrict__ weights, uint8_t * __restrict__ L, float * __restrict__ the_min, uint8_t * __restrict__ Laux, float rmin, float rdelta, int nstep, bool use_mad) { float min = x[0]; float max = x[0]; float sum_w = weights[0]; float sum_x = sum_w * x[0]; #ifdef HAVE_BUGGY_APPLE_LINKER // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7 for (volatile int i = 1; i < n; ++i) { #else for (int i = 1; i < n; ++i) { #endif if (x[i] < min) min = x[i]; if (x[i] > max) max = x[i]; float w = weights[i]; sum_w += w; sum_x += w * x[i]; } if (min > 0) min = 0; if (max == min) { for (int i = 0; i < n; ++i) L[i] = 0; *the_min = -min; return 0.f; } float iscale = nmax/(max - min); float scale = 1/iscale; float best_mad = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale*(x[i] - min)); L[i] = std::max(0, std::min(nmax, l)); float diff = scale * 
L[i] + min - x[i]; diff = use_mad ? fabsf(diff) : diff * diff; float w = weights[i]; best_mad += w * diff; } if (nstep < 1) { *the_min = -min; return scale; } for (int is = 0; is <= nstep; ++is) { iscale = (rmin + rdelta*is + nmax)/(max - min); float sum_l = 0, sum_l2 = 0, sum_xl = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale*(x[i] - min)); l = std::max(0, std::min(nmax, l)); Laux[i] = l; float w = weights[i]; sum_l += w*l; sum_l2 += w*l*l; sum_xl += w*l*x[i]; } float D = sum_w * sum_l2 - sum_l * sum_l; if (D > 0) { float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D; float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D; if (this_min > 0) { this_min = 0; this_scale = sum_xl / sum_l2; } float mad = 0; for (int i = 0; i < n; ++i) { float diff = this_scale * Laux[i] + this_min - x[i]; diff = use_mad ? fabsf(diff) : diff * diff; float w = weights[i]; mad += w * diff; } if (mad < best_mad) { for (int i = 0; i < n; ++i) { L[i] = Laux[i]; } best_mad = mad; scale = this_scale; min = this_min; } } } *the_min = -min; return scale; } void quantize_row_q2_K_ref(const float * __restrict__ x, block_q2_K * __restrict__ y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; uint8_t L[QK_K]; uint8_t Laux[16]; float weights[16]; float mins[QK_K/16]; float scales[QK_K/16]; const float q4scale = 15.f; for (int i = 0; i < nb; i++) { float max_scale = 0; // as we are deducting the min, scales are always positive float max_min = 0; for (int j = 0; j < QK_K/16; ++j) { for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]); scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true); float scale = scales[j]; if (scale > max_scale) { max_scale = scale; } float min = mins[j]; if (min > max_min) { max_min = min; } } if (max_scale > 0) { float iscale = q4scale/max_scale; for (int j = 0; j < QK_K/16; ++j) { int l = nearest_int(iscale*scales[j]); y[i].scales[j] = l; } y[i].d = float_to_half(max_scale/q4scale); } else { 
for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0; y[i].d = float_to_half(0.f); } if (max_min > 0) { float iscale = q4scale/max_min; for (int j = 0; j < QK_K/16; ++j) { int l = nearest_int(iscale*mins[j]); y[i].scales[j] |= (l << 4); } y[i].dmin = float_to_half(max_min/q4scale); } else { y[i].dmin = float_to_half(0.f); } for (int j = 0; j < QK_K/16; ++j) { const float d = half_to_float(y[i].d) * (y[i].scales[j] & 0xF); if (!d) continue; const float dm = half_to_float(y[i].dmin) * (y[i].scales[j] >> 4); for (int ii = 0; ii < 16; ++ii) { int l = nearest_int((x[16*j + ii] + dm)/d); l = std::max(0, std::min(3, l)); L[16*j + ii] = l; } } for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); } } x += QK_K; } } void dequantize_row_q2_K(const block_q2_K * __restrict__ x, float * __restrict__ y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; for (int i = 0; i < nb; i++) { const float d = half_to_float(x[i].d); const float min = half_to_float(x[i].dmin); const uint8_t * q = x[i].qs; int is = 0; float dl, ml; for (int n = 0; n < QK_K; n += 128) { int shift = 0; for (int j = 0; j < 4; ++j) { uint8_t sc = x[i].scales[is++]; dl = d * (sc & 0xF); ml = min * (sc >> 4); for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml; sc = x[i].scales[is++]; dl = d * (sc & 0xF); ml = min * (sc >> 4); for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml; shift += 2; } q += 32; } } } static float make_q3_quants(int n, int nmax, const float * __restrict__ x, int8_t * __restrict__ L, bool do_rmse) { float max = 0; float amax = 0; for (int i = 0; i < n; ++i) { float ax = fabsf(x[i]); if (ax > amax) { amax = ax; max = x[i]; } } if (amax < GROUP_MAX_EPS) { // all zero for (int i = 0; i < n; ++i) { L[i] = 0; } return 0.f; } float iscale = -nmax / max; if (do_rmse) { float sumlx = 0; float suml2 = 0; for (int i = 0; i < n; 
++i) { int l = nearest_int(iscale * x[i]); l = std::max(-nmax, std::min(nmax-1, l)); L[i] = l; float w = x[i]*x[i]; sumlx += w*x[i]*l; suml2 += w*l*l; } for (int itry = 0; itry < 5; ++itry) { int n_changed = 0; for (int i = 0; i < n; ++i) { float w = x[i]*x[i]; float slx = sumlx - w*x[i]*L[i]; if (slx > 0) { float sl2 = suml2 - w*L[i]*L[i]; int new_l = nearest_int(x[i] * sl2 / slx); new_l = std::max(-nmax, std::min(nmax-1, new_l)); if (new_l != L[i]) { slx += w*x[i]*new_l; sl2 += w*new_l*new_l; if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) { L[i] = new_l; sumlx = slx; suml2 = sl2; ++n_changed; } } } } if (!n_changed) { break; } } for (int i = 0; i < n; ++i) { L[i] += nmax; } return sumlx / suml2; } for (int i = 0; i < n; ++i) { int l = nearest_int(iscale * x[i]); l = std::max(-nmax, std::min(nmax-1, l)); L[i] = l + nmax; } return 1/iscale; } void quantize_row_q3_K_ref(const float * __restrict__ x, block_q3_K * __restrict__ y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; int8_t L[QK_K]; float scales[QK_K / 16]; for (int i = 0; i < nb; i++) { float max_scale = 0; float amax = 0; for (int j = 0; j < QK_K/16; ++j) { scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true); float scale = fabsf(scales[j]); if (scale > amax) { amax = scale; max_scale = scales[j]; } } memset(y[i].scales, 0, 12); if (max_scale) { float iscale = -32.f/max_scale; for (int j = 0; j < QK_K/16; ++j) { int8_t l = nearest_int(iscale*scales[j]); l = std::max(-32, std::min(31, static_cast(l))) + 32; if (j < 8) { y[i].scales[j] = l & 0xF; } else { y[i].scales[j-8] |= ((l & 0xF) << 4); } l >>= 4; y[i].scales[j%4 + 8] |= (l << (2*(j/4))); } y[i].d = float_to_half(1/iscale); } else { y[i].d = float_to_half(0.f); } int8_t sc; for (int j = 0; j < QK_K/16; ++j) { sc = j < 8 ? 
y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4; sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32; float d = half_to_float(y[i].d) * sc; if (!d) { continue; } for (int ii = 0; ii < 16; ++ii) { int l = nearest_int(x[16*j + ii]/d); l = std::max(-4, std::min(3, l)); L[16*j + ii] = l + 4; } } memset(y[i].hmask, 0, QK_K/8); // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc. int m = 0; uint8_t hm = 1; for (int j = 0; j < QK_K; ++j) { if (L[j] > 3) { y[i].hmask[m] |= hm; L[j] -= 4; } if (++m == QK_K/8) { m = 0; hm <<= 1; } } for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) { y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); } } x += QK_K; } } void dequantize_row_q3_K(const block_q3_K * __restrict__ x, float * __restrict__ y, int64_t k) { assert(k % QK_K == 0); const int nb = k / QK_K; const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; uint32_t aux[4]; const int8_t * scales = (const int8_t*)aux; for (int i = 0; i < nb; i++) { const float d_all = half_to_float(x[i].d); const uint8_t * __restrict__ q = x[i].qs; const uint8_t * __restrict__ hm = x[i].hmask; uint8_t m = 1; memcpy(aux, x[i].scales, 12); uint32_t tmp = aux[2]; aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); int is = 0; float dl; for (int n = 0; n < QK_K; n += 128) { int shift = 0; for (int j = 0; j < 4; ++j) { dl = d_all * (scales[is++] - 32); for (int l = 0; l < 16; ++l) { *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4)); } dl = d_all * (scales[is++] - 32); for (int l = 0; l < 16; ++l) { *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 
0 : 4)); } shift += 2; m <<= 1; } q += 32; } } } void ggml_vec_dot_q3_K_q8_K(int n, float * __restrict__ s, const void * __restrict__ vx, const void * __restrict__ vy) { assert(n % QK_K == 0); const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; const block_q3_K * __restrict__ x = (const block_q3_K*)vx; const block_q8_K * __restrict__ y = (const block_q8_K*)vy; const int nb = n / QK_K; #if defined __AVX2__ const __m256i m3 = _mm256_set1_epi8(3); const __m256i mone = _mm256_set1_epi8(1); const __m128i m32 = _mm_set1_epi8(32); __m256 acc = _mm256_setzero_ps(); uint32_t aux[3]; for (int i = 0; i < nb; ++i) { const float d = y[i].d * half_to_float(x[i].d); const uint8_t * __restrict__ q3 = x[i].qs; const int8_t * __restrict__ q8 = y[i].qs; // Set up scales memcpy(aux, x[i].scales, 12); __m128i scales128 = _mm_set_epi32( ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); scales128 = _mm_sub_epi8(scales128, m32); const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; // high bit const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask); // integer accumulator __m256i sumi = _mm256_setzero_si256(); int bit = 0; int is = 0; for (int j = 0; j < QK_K/128; ++j) { // load low 2 bits const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32; // prepare low and high bits const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); ++bit; const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); 
const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); ++bit; const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); ++bit; const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); ++bit; // load Q8 quants const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, // and then subtract. The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, // and 2 if the high bit was set) __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); p16_0 = _mm256_sub_epi16(p16_0, q8s_0); p16_1 = _mm256_sub_epi16(p16_1, q8s_1); p16_2 = _mm256_sub_epi16(p16_2, q8s_2); p16_3 = _mm256_sub_epi16(p16_3, q8s_3); // multiply with scales p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); 
p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); // accumulate p16_0 = _mm256_add_epi32(p16_0, p16_1); p16_2 = _mm256_add_epi32(p16_2, p16_3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); } // multiply with block scale and accumulate acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } *s = hsum_float_8(acc); #else // scalar version // This function is written like this so the compiler can manage to vectorize most of it // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the // manually vectorized version above. Every other version I tried would run at least 4 times slower. // The ideal situation would be if we could just write the code once, and the compiler would // automatically produce the best possible set of machine instructions, instead of us having to manually // write vectorized versions for AVX, ARM_NEON, etc. int8_t aux8[QK_K]; int16_t aux16[8]; float sums [8]; int32_t aux32[8]; memset(sums, 0, 8*sizeof(float)); uint32_t auxs[4]; const int8_t * scales = (const int8_t*)auxs; float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * __restrict__ q3 = x[i].qs; const uint8_t * __restrict__ hm = x[i].hmask; const int8_t * __restrict__ q8 = y[i].qs; memset(aux32, 0, 8*sizeof(int32_t)); int8_t * __restrict__ a = aux8; uint8_t m = 1; for (int j = 0; j < QK_K; j += 128) { for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); a += 32; m <<= 1; for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); a += 32; m <<= 1; q3 += 32; } a = aux8; memcpy(auxs, x[i].scales, 12); uint32_t tmp = auxs[2]; auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); for (int j = 0; j < QK_K/16; ++j) { for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } const float d = half_to_float(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; *s = sumf; #endif } void quantize_row_q8_K_ref(const float * __restrict__ x, block_q8_K * __restrict__ y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { float max = 0; float amax = 0; for (int j = 0; j < QK_K; ++j) { float ax = fabsf(x[j]); if (ax > amax) { amax = ax; max = x[j]; } } if (!amax) { y[i].d = 0; memset(y[i].qs, 0, QK_K); x += QK_K; continue; } //const float iscale = -128.f/max; // We need this change for IQ2_XXS, else the AVX implementation becomes very awkward const float iscale = -127.f/max; for (int j = 0; j < QK_K; ++j) { int v = nearest_int(iscale*x[j]); y[i].qs[j] = std::min(127, v); } for (int j = 0; j < QK_K/16; ++j) { int sum = 0; for (int ii = 0; ii < 16; ++ii) { sum += y[i].qs[j*16 + ii]; } y[i].bsums[j] = sum; } y[i].d = 1/iscale; x += QK_K; } } void dequantize_row_q8_K(const block_q8_K * __restrict__ x, float * __restrict__ y, int64_t k) { assert(k % QK_K == 0); const int64_t nb = k / QK_K; for (int i = 0; i < nb; i++) { for (int j = 0; j < QK_K; ++j) { *y++ = x[i].d * x[i].qs[j]; } } } void ggml_vec_dot_q2_K_q8_K( int n, float * __restrict__ s, const void * __restrict__ vx, const 
void * __restrict__ vy ) { const block_q2_K * __restrict__ x = (const block_q2_K *)vx; const block_q8_K * __restrict__ y = (const block_q8_K *)vy; const int nb = n / QK_K; #if defined __AVX2__ const __m256i m3 = _mm256_set1_epi8(3); const __m128i m4 = _mm_set1_epi8(0xF); __m256 acc = _mm256_setzero_ps(); for (int i = 0; i < nb; ++i) { const float d = y[i].d * half_to_float(x[i].d); const float dmin = -y[i].d * half_to_float(x[i].dmin); const uint8_t * __restrict__ q2 = x[i].qs; const int8_t * __restrict__ q8 = y[i].qs; const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); const __m256i mins = _mm256_cvtepi8_epi16(mins8); const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums)); acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); const __m256i all_scales = _mm256_cvtepi8_epi16(scales8); const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; __m256i sumi = _mm256_setzero_si256(); for (int j = 0; j < QK_K/128; ++j) { const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32; const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; const __m256i q2_0 = _mm256_and_si256(q2bits, m3); const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); __m256i p0 = 
_mm256_maddubs_epi16(q2_0, q8_0); __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2); __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3); p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0); p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1); p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2); p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3); p0 = _mm256_add_epi32(p0, p1); p2 = _mm256_add_epi32(p2, p3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); } acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } *s = hsum_float_8(acc); #else float sumf = 0; for (int i = 0; i < nb; ++i) { const uint8_t * q2 = x[i].qs; const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; int summs = 0; for (int j = 0; j < 16; ++j) { summs += y[i].bsums[j] * (sc[j] >> 4); } const float dall = y[i].d * half_to_float(x[i].d); const float dmin = y[i].d * half_to_float(x[i].dmin); int isum = 0; int is = 0; int d; for (int k = 0; k < QK_K/128; ++k) { int shift = 0; for (int j = 0; j < 4; ++j) { d = sc[is++] & 0xF; int isuml = 0; for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); isum += d * isuml; d = sc[is++] & 0xF; isuml = 0; for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); isum += d * isuml; shift += 2; q8 += 32; } q2 += 32; } sumf += dall * isum - dmin * summs; } *s = sumf; #endif } ================================================ FILE: src/quant.h ================================================ /* K-quants adapted from llama.cpp MIT License Copyright (c) 2023-2024 The ggml authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #include "codec.h" // QK = number of values after dequantization // QK_K = super-block size #define QK_K 256 // 2-bit quantization // weight is represented as x = a * q + b // 16 blocks of 16 elements each // Effectively 2.625 bits per weight typedef struct { uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits uint8_t qs[QK_K/4]; // quants union { struct { uint16_t d; // super-block scale for quantized scales uint16_t dmin; // super-block scale for quantized mins }; uint32_t dm; }; } block_q2_K; static_assert(sizeof(block_q2_K) == 2*sizeof(uint16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); // Quantize an array of weights using Q2_K quantization // - x: pointer to the weights to quantize // - y: pointer to the quantized weights // - k: number of weights to quantize (must be a multiple of QK_K) void quantize_row_q2_K_ref(const float * __restrict__ x, block_q2_K * __restrict__ y, int64_t k); // Dequantize an array of Q2_K quantized weights // - x: pointer to the quantized weights // - y: pointer to the dequantized weights // - k: number of weights to dequantize (must be a multiple of QK_K) void dequantize_row_q2_K(const block_q2_K * __restrict__ x, float * __restrict__ y, 
int64_t k); // 3-bit quantization // weight is represented as x = a * q // 16 blocks of 16 elements each // Effectively 3.4375 bits per weight typedef struct { uint8_t hmask[QK_K/8]; // quants - high bit uint8_t qs[QK_K/4]; // quants - low 2 bits uint8_t scales[12]; // scales, quantized with 6 bits uint16_t d; // super-block scale } block_q3_K; static_assert(sizeof(block_q3_K) == sizeof(uint16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding"); // Quantize an array of weights using Q3_K quantization // - x: pointer to the weights to quantize // - y: pointer to the quantized weights // - k: number of weights to quantize (must be a multiple of QK_K) void quantize_row_q3_K_ref(const float * __restrict__ x, block_q3_K * __restrict__ y, int64_t k); // Dequantize an array of Q3_K quantized weights // - x: pointer to the quantized weights // - y: pointer to the dequantized weights // - k: number of weights to dequantize (must be a multiple of QK_K) void dequantize_row_q3_K(const block_q3_K * __restrict__ x, float * __restrict__ y, int64_t k); // Compute the dot product of two vectors, using Q3_K and Q8_K quantization // - n: number of elements in the vectors // - s: pointer to the result of the dot product // - vx: pointer to the first vector // - vy: pointer to the second vector void ggml_vec_dot_q3_K_q8_K( int n, float * __restrict__ s, const void * __restrict__ vx, const void * __restrict__ vy ); // 8-bit quantization // This is only used for intermediate quantization and dot products typedef struct { float d; // delta int8_t qs[QK_K]; // quants int16_t bsums[QK_K/16]; // sum of quants in groups of 16 } block_q8_K; static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding"); // Quantize an array of weights using Q8_K quantization // - x: pointer to the weights to quantize // - y: pointer to the quantized weights // - k: number of weights to quantize (must be a multiple of QK_K) void 
quantize_row_q8_K_ref(const float * __restrict__ x, block_q8_K * __restrict__ y, int64_t k); // Dequantize an array of Q8_K quantized weights // - x: pointer to the quantized weights // - y: pointer to the dequantized weights // - k: number of weights to dequantize (must be a multiple of QK_K) void dequantize_row_q8_K(const block_q8_K * __restrict__ x, float * __restrict__ y, int64_t k); // Compute the dot product of two vectors, using Q2_K and Q8_K quantization // - n: number of elements in the vectors // - s: pointer to the result of the dot product // - vx: pointer to the first vector // - vy: pointer to the second vector void ggml_vec_dot_q2_K_q8_K( int n, float * __restrict__ s, const void * __restrict__ vx, const void * __restrict__ vy ); ================================================ FILE: src/sampler.cpp ================================================ #include "sampler.h" #include #include #include Sampler::Sampler(const std::shared_ptr config, uint64_t seed) { vocab_size = config->vocab_size; std::srand(seed); // TODO: use random_device } float Sampler::sample_prob(int index, const InferenceState& s) { const float* logits = s.logits(); // Find max value to moderate the logits later on for numerical stability float max_val = -FLT_MAX; for (int i = 0; i < vocab_size; ++i) { if (logits[i] > max_val) { max_val = logits[i]; } } float sum = 0; for (int i = 0; i < vocab_size; ++i) { sum += expf(logits[i] - max_val); } return expf(logits[index] - max_val) / sum; } int Sampler::sample_argmax(const InferenceState& s) { const float* logits = s.logits(); int argmax = 0; float max_val = -FLT_MAX; for (int i = 0; i < vocab_size; ++i) { if (logits[i] > max_val) { max_val = logits[i]; argmax = i; } } return argmax; } int Sampler::sample(const InferenceState& s, float temperature, float top_p) { if (temperature == 0.0) { return sample_argmax(s); } const float* logits = s.logits(); int* logit_indices = s.logit_indices(); // Find max value to moderate the logits later on 
for numerical stability float max_val = -FLT_MAX; for (int i = 0; i < vocab_size; ++i) { if (logits[i] > max_val) { max_val = logits[i]; } } float sum = 0; for (int i = 0; i < vocab_size; ++i) { sum += expf((logits[i] - max_val) / temperature); } // Sort logits descending for nucleus/top-p sampling (https://arxiv.org/abs/1904.09751) if (top_p < 1.0) { std::sort( logit_indices, logit_indices + vocab_size, [&logits](int i, int j) { return logits[i] > logits[j]; } ); } // Randomly sample from the softmaxed logits distribution float r = std::rand() / (float)RAND_MAX * top_p; float cumsum = 0; for (int i = 0; i < vocab_size; ++i) { cumsum += expf((logits[i] - max_val) / temperature) / sum; if (cumsum >= r) { return i; } } return vocab_size - 1; } ================================================ FILE: src/sampler.h ================================================ #pragma once #include "model.h" #include struct Sampler { int vocab_size; Sampler(const std::shared_ptr config, uint64_t seed); // Return the probability score corresponding to `logits[index]`. // This is equivalent to taking the softmax of the logits and returning // the value at index `index`. float sample_prob(int index, const InferenceState& s); // Return the index of the maximum value in `logits`. int sample_argmax(const InferenceState& s); // Sample from the model's distribution with temperature. 
int sample(const InferenceState& s, float temperature = 1.0, float top_p = 0.95); }; ================================================ FILE: src/test.cpp ================================================ #include #include #include #include #include #include #include "immintrin.h" #include "model.h" #include "time_utils.h" bool floatEquals(float a, float b, float epsilon = 1e-5) { return std::abs(a - b) < epsilon; } bool arrayEquals(const std::vector& a, const std::vector& b, float epsilon = 1e-4) { if (a.size() != b.size()) { return false; } for (size_t i = 0; i < a.size(); i++) { if (!floatEquals(a[i], b[i], epsilon)) { return false; } } return true; } void assertArrayEquals(const std::vector& actual, const std::vector& expected, const std::string& message, float epsilon = 1e-4) { if (!arrayEquals(actual, expected, epsilon)) { std::cerr << "Assertion failed: " << message << std::endl; std::cerr << "actual: "; for (size_t i = 0; i < actual.size(); i++) { std::cerr << actual[i] << " "; } std::cerr << std::endl; std::cerr << "expected: "; for (size_t i = 0; i < expected.size(); i++) { std::cerr << expected[i] << " "; } std::cerr << std::endl; exit(1); } } void assertArrayEquals(float* actual, const std::vector& expected, const std::string& message) { std::vector actual_array; for (size_t i = 0; i < expected.size(); i++) { actual_array.push_back(actual[i]); } assertArrayEquals(actual_array, expected, message); } std::vector float_array_to_half(const std::vector& data) { std::vector half_data(data.size()); for (size_t i = 0; i < data.size(); i++) { half_data[i] = float_to_half(data[i]); } return half_data; } std::vector float_array_to_float8e5m2(const std::vector& data) { std::vector float8e5m2_data(data.size()); for (size_t i = 0; i < data.size(); i++) { float8e5m2_data[i] = float_to_float8e5m2(data[i]); } return float8e5m2_data; } void test_attn() { constexpr int TEST_SEQ_LEN = 4; constexpr int TEST_DIM = 6; constexpr int TEST_HEAD_DIM = 3; constexpr int TEST_N_HEADS = 
2; std::shared_ptr config = std::make_shared(); config->dim = TEST_DIM; config->hidden_dim = TEST_DIM; config->head_dim = TEST_HEAD_DIM; config->n_heads = TEST_N_HEADS; config->vocab_size = 1; config->max_seq_len = TEST_SEQ_LEN; InferenceState s(config); // (n_heads, head_dim) - query vectors std::vector q{ 0., 1e4, 0., // h=0 0., 0., 1e4 // h=1 }; for (size_t i = 0; i < q.size(); i++) { s.q()[i] = q[i]; } std::vector kb = float_array_to_half({ 1., 0., 0., // t=0 0., 1., 0., // t=1 0., 0., 1., // t=2 -1., 0., 0. // t=3 }); // (kv_len, n_heads, head_dim) - buffer containing key vectors of the sequence for all KV heads std::vector vb = float_array_to_half({ 1., 0., 0., // t=0 0., 1., 0., // t=1 0., 0., 1., // t=2 -1., 0., 0. // t=3 }); // (kv_len, n_heads, head_dim) - buffer containing value vectors of the sequence for all KV heads // Multihead attention. Iterate over all heads. int h; #pragma omp parallel for private(h) for (h = 0; h < TEST_N_HEADS; h++) { int kv_head_offset = h * TEST_HEAD_DIM; f16_t* kh = kb.data() + kv_head_offset; f16_t* vh = vb.data() + kv_head_offset; attn(s.xb(h), s.att(h), s.q(h), kh, vh, TEST_HEAD_DIM, TEST_HEAD_DIM, TEST_N_HEADS, TEST_SEQ_LEN); } // attention scores // h=0 assertArrayEquals(s.att(0), { 0., 1., 0., 0. }, "att(h=0)"); // h=1 assertArrayEquals(s.att(1), { 0., 0., 1., 0. }, "att(h=1)"); assertArrayEquals(s.xb(), { 0., 1., 0., // h=0 0., 0., 1. 
// h=1 }, "xout"); } void test_matmul() { assert(float8e5m2_to_float(float_to_float8e5m2(1.0f)) == 1.0f); assert(float8e5m2_to_float(float_to_float8e5m2(-1.5f)) == -1.5f); assert(float8e5m2_to_float(float_to_float8e5m2(0.109375)) == 0.109375); std::vector x{ 2.0624e-01, 1.6975e+00, 8.4918e-01, -1.7186e-01, -9.0164e-01, 6.1108e-01, 2.2116e-01, 1.0412e+00, -1.6616e-03, 8.2840e-01, 2.2667e-01, -1.3993e+00, 4.1013e-01, -1.2223e+00, 2.2723e-01, 6.3558e-01 }; std::vector w_f32{ // row 1 -1.1210, -0.0235, -1.3527, 0.6300, 0.2566, -0.4517, -0.3528, 0.4422, -0.4032, -1.0949, -0.7834, 1.1425, 0.6263, -0.3680, 0.3226, -0.2984, // row 2 0.1176, -1.1462, -0.8181, -2.0047, 0.0932, 1.4665, -0.8682, -0.8490, -1.3017, -1.0068, -0.2890, 0.0167, 1.1607, 0.7196, 1.7701, 0.2891 }; std::vector w_f16 = float_array_to_half(w_f32); std::vector w_f8e5m2 = float_array_to_float8e5m2(w_f32); { std::vector xout(2); matmul_unscaled(xout.data(), x.data(), {Quant::F32, {16, 2}, w_f32.data(), 16*2*sizeof(float)}); assertArrayEquals(xout, { -3.7454, -3.2738 }, "matmul_f32", 1e-4); } { std::vector xout(2); matmul_unscaled(xout.data(), x.data(), {Quant::F16, {16, 2}, w_f16.data(), 16*2*sizeof(f16_t)}); assertArrayEquals(xout, { -3.7454, -3.2738 }, "matmul_f16", 1e-3); } { std::vector xout(2); matmul_unscaled(xout.data(), x.data(), {Quant::F8E5M2, {16, 2}, w_f8e5m2.data(), 16*2*sizeof(f8e5m2_t)}); assertArrayEquals(xout, { -3.7454, -3.2738 }, "matmul_f8e5m2", 3.78e-1); std::vector xout_roundtrip(2); std::vector w8_roundtrip; for (size_t i = 0; i < w_f8e5m2.size(); i++) { w8_roundtrip.push_back(float8e5m2_to_float(w_f8e5m2[i])); } matmul_unscaled(xout_roundtrip.data(), x.data(), {Quant::F8E5M2, {16, 2}, w8_roundtrip.data(), 16*2*sizeof(f8e5m2_t)}); assertArrayEquals(xout_roundtrip, xout, "matmul_f8e5m2_roundtrip"); } std::vector x8_roundtrip; for (size_t i = 0; i < x.size(); i++) { x8_roundtrip.push_back(float8e5m2_to_float(float_to_float8e5m2(x[i]))); } assertArrayEquals(x8_roundtrip, { 2.1875e-01, 
1.7500e+00, 8.7500e-01, -1.5625e-01, -8.7500e-01, 6.2500e-01, 2.1875e-01, 1.0000e+00, -1.7090e-03, 8.7500e-01, 2.1875e-01, -1.5000e+00, 4.3750e-01, -1.2500e+00, 2.1875e-01, 6.2500e-01 }, "x_float8e5m2_roundtrip"); } void fill_random(float* data, size_t N, unsigned long seed, float scale_factor = 1.0) { std::default_random_engine gen(seed); std::normal_distribution dist(0.0, 1.0); for (size_t i = 0; i < N; i++) { data[i] = dist(gen) * scale_factor; } } void fill_random(f16_t* data, size_t N, unsigned long seed, float scale_factor = 1.0) { #if defined(__AVX2__) && defined(__F16C__) std::default_random_engine gen(seed); std::normal_distribution dist(0.0, 1.0); for (size_t i = 0; i < N; i++) { data[i] = _cvtss_sh(dist(gen) * scale_factor, 0); } #else assert(false && "Cannot fill_random due to missing F16C extensions"); #endif } // Helper function to allocate aligned memory float* allocateAlignedArray(size_t N) { // Allocate aligned memory (64-byte alignment for AVX-512) void* ptr = nullptr; if (posix_memalign(&ptr, 64, N * sizeof(float)) != 0) { throw std::bad_alloc(); } return static_cast(ptr); } void mem_bench() { constexpr size_t N_THREADS = 32; constexpr size_t MB_PER_THREAD = 1024; constexpr size_t ELS_PER_THREAD = (MB_PER_THREAD * 1024 * 1024) / sizeof(float); constexpr size_t N = N_THREADS * ELS_PER_THREAD; std::cout << "Using " << N_THREADS << " threads" << std::endl; std::cout << "Allocating " << N_THREADS * MB_PER_THREAD << " MB (" << N << " floats)" << std::endl; float* data = allocateAlignedArray(N); std::cout << "Filling data..." << std::endl; #pragma omp parallel for num_threads(N_THREADS) for (size_t i = 0; i < N_THREADS; i++) { fill_random(data + i * ELS_PER_THREAD, ELS_PER_THREAD, (unsigned long)i); } std::cout << "Running memory bandwidth test..." 
<< std::endl; float totalSum = 0.0; uint64_t start = get_timestamp_ms(); #pragma omp parallel for simd reduction(+:totalSum) schedule(guided) aligned(data: 64) num_threads(N_THREADS) for (size_t i = 0; i < N; i++) { totalSum += data[i]; } uint64_t end = get_timestamp_ms(); float elapsed_s = (end - start) / 1000.0; float mb_per_s = N_THREADS * MB_PER_THREAD / elapsed_s; std::cout << "Total sum: " << totalSum << std::endl; std::cout << "Elapsed time: " << elapsed_s << " s" << std::endl; std::cout << "Memory bandwidth: " << mb_per_s << " MB/s" << std::endl; } // 64 is the typical cache line size struct alignas(64) ThreadData { volatile uint32_t sink; char padding[60]; // Ensures 64-byte alignment/padding }; void mem_bench2_thread(uint32_t* data, size_t start_idx, size_t elements_per_thread, ThreadData* thread_sink) { for (size_t i = start_idx; i < start_idx + elements_per_thread; i++) { // 32-bit load stored in volatile to prevent optimization thread_sink->sink = data[i]; } } void mem_bench2() { constexpr size_t N_THREADS = 64; constexpr size_t MB_PER_THREAD = 2048; constexpr size_t ELS_PER_THREAD = (MB_PER_THREAD * 1024 * 1024) / sizeof(uint32_t); constexpr size_t N = N_THREADS * ELS_PER_THREAD; std::cout << "Using " << N_THREADS << " threads" << std::endl; std::cout << "Allocating " << N_THREADS * MB_PER_THREAD << " MB (" << N << " uint32_t)" << std::endl; uint32_t* data = new uint32_t[N]; std::cout << "Filling data..." << std::endl; #pragma omp parallel for num_threads(N_THREADS) for (size_t i = 0; i < N_THREADS; i++) { for (size_t j = 0; j < ELS_PER_THREAD; j++) { data[i * ELS_PER_THREAD + j] = i + j; } } std::cout << "Running memory bandwidth test..." 
<< std::endl; // Allocate cache-line aligned sinks for each thread std::vector thread_sinks(N_THREADS); uint64_t start = get_timestamp_ms(); std::vector threads; // Launch threads for (size_t i = 0; i < N_THREADS; i++) { threads.emplace_back(mem_bench2_thread, data, i * ELS_PER_THREAD, ELS_PER_THREAD, &thread_sinks[i] ); } // Wait for all threads to complete for (auto& thread : threads) { thread.join(); } uint64_t end = get_timestamp_ms(); float elapsed_s = (end - start) / 1000.0; float mb_per_s = N_THREADS * MB_PER_THREAD / elapsed_s; std::cout << "Elapsed time: " << elapsed_s << " s" << std::endl; std::cout << "Memory bandwidth: " << mb_per_s << " MB/s" << std::endl; } int main(int argc, char* argv[]) { if (argc == 2 && std::string(argv[1]) == "-b") { std::cout << "Running memory benchmark" << std::endl; mem_bench(); } else if (argc == 2 && std::string(argv[1]) == "-b2") { std::cout << "Running memory benchmark 2" << std::endl; mem_bench2(); } else { test_attn(); test_matmul(); } std::cout << "All tests passed" << std::endl; return 0; } ================================================ FILE: src/time_utils.cpp ================================================ #include "time_utils.h" #include uint64_t get_timestamp_ms() { return std::chrono::duration_cast( std::chrono::system_clock::now().time_since_epoch()).count(); } ================================================ FILE: src/time_utils.h ================================================ #pragma once #include uint64_t get_timestamp_ms(); ================================================ FILE: src/tokenizer.cpp ================================================ #include "tokenizer.h" Tokenizer::Tokenizer(const YALMData& data) { this->bos_id = std::stoi(data.metadata.at("bos_token_id").get()); this->eos_id = std::stoi(data.metadata.at("eos_token_id").get()); // TODO: figure out edge cases: // Q: should `vocab` include byte fallback tokens? // Q: should `vocab` include special tokens, e.g. '', '', ''? 
// TODO: avoid copy by using std::string_view const Tensor& tokens_tensor = data.tensors.at("tokenizer.tokens"); char* tokens_tensor_end = (char*)tokens_tensor.data + tokens_tensor.size; for (char* ptr = (char*)tokens_tensor.data; ptr < tokens_tensor_end; ptr++) { char* s = ptr; while (*ptr != '\0' && ptr < tokens_tensor_end) { ptr++; } vocab.emplace_back(s, ptr - s); } for (size_t i = 0; i < vocab.size(); i++) { if (vocab[i] == "<0x00>") { byte_fallback_start = i; } else if (vocab[i] == "<|eot_id|>" || vocab[i] == "<|end|>" || vocab[i] == "<|im_end|>") { eot_id = i; } } // init byte_pieces for (size_t i = 0; i < 256; i++) { byte_pieces[i] = (char)i; } // init vocab trie for (size_t i = 0; i < vocab.size(); i++) { const std::string& word = vocab[i]; TokenTrie* p = &vocab_trie; for (char c : word) { if (p->children.count(c) == 0) { p->children[c] = std::make_shared(); } p = p->children[c].get(); } p->token_id = i; } } std::string Tokenizer::decode_one(int prev_token, int token) const { const std::string& piece = vocab[token]; // if following BOS token, sentencepiece decoder strips any leading whitespace if (prev_token == bos_id && piece[0] == ' ') { return piece.substr(1); } // return byte piece for byte fallback tokens (<0x00>, <0x01>, ..., <0xFF>) if (byte_fallback_start >= 0 && token >= byte_fallback_start && (token - byte_fallback_start) < 256) { return byte_pieces[token - byte_fallback_start]; } return piece; } std::vector Tokenizer::encode(const std::string& text, bool encode_bos) const { std::vector out_tokens; if (encode_bos) { out_tokens.push_back(bos_id); } for (size_t i = 0; i < text.size();) { size_t l = 0; size_t valid_l = 0; const TokenTrie* p = &vocab_trie; const TokenTrie* valid_p = nullptr; while (i + l < text.size()) { char c = text[i+l]; if (p->children.count(c)) { p = p->children.at(c).get(); l += 1; if (p->token_id >= 0) { valid_p = p; valid_l = l; } } else { break; } } if (!valid_p) { // No substring starting from `i` matches any vocab words, 
use byte fallback if (byte_fallback_start >= 0) { out_tokens.push_back((unsigned char)text[i] + byte_fallback_start); } i += 1; } else { out_tokens.push_back(valid_p->token_id); i += valid_l; } } return out_tokens; } std::string Tokenizer::encoding_to_debug_string(const std::vector& encoding) const { std::string token_encoding_debug_str = ""; for (int token_id : encoding) { if (token_id == bos_id) { token_encoding_debug_str += "[:" + std::to_string(token_id) + "]"; } else if (token_id == eos_id) { token_encoding_debug_str += "[:" + std::to_string(token_id) + "]"; } else { token_encoding_debug_str += "[" + vocab[token_id] + ":" + std::to_string(token_id) + "]"; } } return token_encoding_debug_str; } ================================================ FILE: src/tokenizer.h ================================================ #pragma once #include "codec.h" #include #include #include #include struct TokenTrie; struct TokenTrie { std::unordered_map> children; // If non-negative, then this represents the ID of the token formed by the path from the root to this node. int token_id = -1; }; /* A tokenizer vocab consists of a concatenated tensor with the key "tokenizer.tokens" in the .yalm file. Shown as a list of strings: ``` "tokenizer.tokens": [ "", // 0 "", // 1 "", // 2 "<0x00>", // 3--------------+ "<0x01>", // 4 | Byte "<0x02>", // 5 | Fallback ... | Tokens "<0xFE>", // 257 | "<0xFF>", // 258------------+ "▁▁", // 259 "▁▁▁▁", // 260 "▁t", // 261 "in", // 262 "er", // 263 ... ] ``` In tensor form, it looks like a UTF-8 encoded byte array: ``` \0\0\0<0x00>\0<0x01>\0<0x02>\0...\0<0xFE>\0<0xFF>\0▁▁\0▁▁▁▁\0▁t\0in\0er\0... 
``` Important token IDs are included in the metadata of the .yalm file: ``` "bos_token_id": "1", "eos_token_id": "2", ``` */ struct Tokenizer { // vector where the index is the token id and the value is the token string std::vector vocab; // trie mapping token strings to token ids TokenTrie vocab_trie; int bos_id = -1; int eos_id = -1; int eot_id = -1; // start index of the byte fallback range (256 tokens). -1 if none. int byte_fallback_start = -1; // convenience array containing the decodings for the fixed 256 byte fallbacks '{0x00}\0', '{0x01}\0', ..., '{0xFF}\0'. // TODO: use constexpr? std::string byte_pieces[256]; Tokenizer(const YALMData& data); std::vector encode(const std::string& text, bool encode_bos) const; std::string decode_one(int prev_token, int token) const; std::string encoding_to_debug_string(const std::vector& encoding) const; }; ================================================ FILE: src/wikitest.cat.1chunk.v2-encoded.txt ================================================ 100000, 207, 185, 403, 7940, 88819, 367, 403, 207, 185, 207, 185, 7940, 88819, 367, 317, 274, 3517, 6131, 2051, 16657, 285, 22624, 14112, 1021, 1063, 661, 245, 11477, 1173, 12, 31, 52439, 5012, 331, 254, 16657, 4077, 429, 9614, 279, 207, 17, 15, 15, 15, 1021, 1002, 438, 5496, 457, 245, 52439, 5012, 279, 254, 1538, 5067, 879, 4473, 457, 18475, 75156, 2051, 588, 438, 7903, 279, 207, 17, 15, 15, 16, 430, 254, 12040, 8592, 20699, 1021, 1063, 661, 245, 11477, 5012, 279, 254, 16657, 4077, 23053, 3326, 1987, 271, 279, 207, 17, 15, 15, 17, 1021, 685, 207, 17, 15, 15, 19, 88819, 367, 23831, 245, 5012, 372, 440, 32163, 440, 279, 254, 15341, 440, 57034, 655, 82, 18443, 440, 280, 254, 16657, 4077, 429, 9055, 37432, 6204, 362, 72661, 16381, 19639, 5231, 29820, 285, 53918, 68568, 1021, 1063, 438, 4733, 279, 254, 207, 17, 15, 15, 20, 22624, 41075, 280, 254, 16855, 47812, 2734, 1538, 43200, 45799, 2051, 588, 438, 7903, 430, 254, 42925, 20699, 279, 47558, 285, 254, 10889, 1369, 38118, 30821, 279, 
5922, 1021, 1063, 438, 12977, 457, 3326, 76272, 285, 72661, 16381, 6493, 2180, 844, 2268, 2051, 64762, 1981, 22551, 2051, 14503, 16530, 2051, 49866, 24308, 379, 2051, 54652, 65665, 285, 83822, 9243, 1021, 207, 185, 685, 207, 17, 15, 15, 21, 2051, 88819, 367, 72661, 16381, 2180, 844, 2268, 279, 254, 1538, 50926, 2223, 4473, 457, 5231, 43799, 19860, 1021, 1063, 7676, 331, 245, 207, 17, 15, 15, 21, 15341, 280, 254, 16657, 4077, 2051, 58162, 2051, 5496, 457, 245, 5012, 279, 254, 207, 17, 15, 15, 22, 22624, 5909, 280, 1724, 276, 12246, 346, 12977, 457, 13905, 522, 433, 416, 400, 1021, 1724, 276, 12246, 346, 438, 7903, 430, 21350, 20699, 279, 254, 5922, 70963, 280, 6556, 18163, 28496, 285, 40955, 4791, 1021, 88819, 367, 72661, 279, 984, 15687, 279, 207, 17, 15, 15, 23, 2051, 6617, 3205, 4867, 33935, 457, 73540, 8913, 2673, 864, 72, 2051, 285, 4946, 2458, 61720, 12977, 457, 8562, 326, 95725, 1021, 685, 3638, 207, 17, 15, 15, 23, 2051, 88819, 367, 1401, 245, 11477, 8753, 331, 245, 984, 1173, 12, 31, 697, 15341, 10621, 280, 254, 16657, 4077, 423, 1379, 254, 20241, 2051, 5496, 457, 274, 8753, 331, 254, 16657, 4077, 79684, 715, 279, 7953, 207, 17, 15, 15, 23, 1021, 1063, 661, 245, 47198, 5012, 279, 2146, 26774, 280, 254, 16657, 4077, 73370, 1000, 279, 207, 17, 15, 16, 15, 2051, 372, 440, 721, 94016, 53980, 440, 1021, 88819, 367, 72661, 279, 254, 207, 17, 15, 16, 16, 6131, 19611, 255, 4940, 12977, 457, 8913, 2673, 864, 72, 1021, 207, 185, 207, 185, 403, 403, 40388, 403, 403, 207, 185, 207, 185, 207, 185, 403, 403, 403, 207, 17, 15, 15, 15, 207, 891, 207, 17, 15, 15, 20, 403, 403, 403, 207, 185, 207, 185, 685, 207, 17, 15, 15, 15, 88819, 367, 661, 245, 11477, 1173, 12, 31, 52439, 5012, 331, 254, 16657, 4077, 429, 9614, 6204, 362, 57697, 440, 10854, 3126, 674, 440, 279, 254, 15341, 2051, 440, 685, 29018, 44099, 440, 1021, 88819, 367, 72661, 372, 440, 10854, 440, 279, 254, 1538, 5067, 879, 4473, 457, 18475, 75156, 2051, 588, 438, 7903, 279, 207, 17, 15, 15, 16, 430, 254, 12040, 
8592, 20699, 1021, 338, 3783, 280, 88819, 367, 655, 82, 3791, 279, 429, 30520, 331, 7926, 5734, 857, 372, 440, 62046, 85267, 440, 279, 254, 5012, 2051, 285, 362, 4503, 7233, 9247, 279, 429, 47856, 2051, 285, 49901, 15745, 1021, 1063, 7676, 279, 254, 16657, 4077, 23053, 3326, 1987, 271, 279, 207, 17, 15, 15, 17, 372, 440, 4294, 370, 7829, 16311, 440, 279, 254, 15341, 440, 36953, 47023, 57310, 440, 2051, 285, 661, 245, 5012, 372, 245, 1448, 3199, 440, 58434, 68984, 440, 331, 429, 9614, 1021, 207, 185, 1063, 661, 245, 47198, 5012, 279, 207, 17, 15, 15, 18, 331, 984, 26774, 280, 429, 9614, 2051, 372, 3199, 440, 74878, 11972, 440, 1021, 685, 207, 17, 15, 15, 19, 88819, 367, 23831, 245, 5012, 372, 440, 32163, 440, 279, 254, 15341, 440, 57034, 655, 82, 18443, 440, 280, 254, 16657, 4077, 429, 9055, 37432, 6204, 362, 72661, 16381, 19639, 5231, 29820, 285, 53918, 68568, 1021, 88819, 367, 72661, 372, 440, 77674, 440, 2051, 279, 254, 207, 17, 15, 15, 20, 22624, 41075, 280, 254, 16855, 47812, 2734, 1538, 43200, 45799, 1021, 809, 438, 7903, 430, 254, 42925, 20699, 279, 47558, 2051, 285, 254, 10889, 1369, 38118, 30821, 279, 5922, 1021, 1063, 438, 12977, 457, 3326, 76272, 285, 72661, 16381, 6493, 2180, 844, 2268, 2051, 64762, 1981, 22551, 2051, 14503, 16530, 2051, 49866, 24308, 379, 2051, 54652, 65665, 285, 83822, 9243, 1021, 88819, 367, 4503, 245, 26810, 3783, 279, 429, 20151, 62011, 1193, 440, 429, 13646, 317, 467, 2501, 7931, 17907, 2051, 366, 29048, 20630, 473, 6493, 2180, 844, 2268, 334, 1132, 405, 26899, 67551, 473, 586, 3791, 372, 60138, 461, 8831, 655, 82, 63063, 2238, 2051, 7940, 88819, 367, 2051, 64762, 1981, 22551, 285, 49866, 24308, 379, 1021, 440, 429, 37716, 10689, 2051, 440, 6493, 2180, 844, 2268, 285, 7940, 88819, 367, 3685, 41369, 18872, 254, 81543, 1431, 1021, 440, 207, 185, 207, 185, 403, 403, 403, 207, 17, 15, 15, 21, 207, 891, 1828, 403, 403, 403, 207, 185, 207, 185, 685, 207, 17, 15, 15, 21, 88819, 367, 72661, 279, 254, 1538, 50926, 2223, 4473, 457, 5231, 
43799, 19860, 1021, 429, 1538, 438, 697, 280, 245, 4077, 588, 17120, 1448, 86580, 82, 2051, 29241, 18354, 893, 31518, 3179, 893, 50926, 2223, 1021, 685, 245, 207, 17, 15, 15, 21, 8314, 2051, 7080, 14112, 6493, 2180, 844, 2268, 10730, 88819, 367, 372, 634, 280, 586, 7078, 980, 1173, 12, 31, 7524, 1193, 440, 304, 7130, 2695, 366, 245, 10822, 2424, 7940, 88819, 367, 2051, 779, 438, 279, 254, 18038, 5634, 280, 18354, 2051, 31518, 3179, 285, 50926, 2223, 430, 254, 5562, 1021, 1063, 7222, 601, 6251, 279, 43200, 45799, 1021, 440, 1063, 57697, 440, 24168, 39448, 440, 331, 254, 207, 17, 15, 15, 21, 15341, 280, 254, 16657, 4077, 2051, 58162, 2051, 29241, 440, 22494, 304, 338, 454, 440, 1021, 88819, 367, 72661, 372, 440, 8660, 440, 279, 254, 207, 17, 15, 15, 22, 5909, 280, 1724, 276, 12246, 346, 12977, 457, 13905, 522, 433, 416, 400, 1021, 1724, 276, 12246, 346, 438, 7903, 430, 21350, 20699, 279, 254, 5922, 70963, 280, 6556, 18163, 28496, 285, 40955, 4791, 1021, 685, 245, 3783, 280, 254, 5909, 327, 429, 20151, 62011, 2051, 22624, 12157, 9952, 38960, 10689, 2051, 440, 7940, 88819, 367, 10941, 245, 23005, 43680, 276, 254, 6351, 372, 8660, 1021, 440, 207, 185, 88819, 367, 72661, 279, 984, 15687, 279, 207, 17, 15, 15, 23, 2051, 6617, 3205, 4867, 33935, 457, 73540, 8913, 2673, 864, 72, 2051, 285, 4946, 2458, 61720, 12977, 457, 8562, 326, 95725, 1021, 88819, 367, 57697, 245, 3199, 7046, 440, 36141, 440, 279, 4946, 2458, 61720, 2051, 779, 15983, 2726, 366, 3199, 440, 22577, 440, 372, 254, 440, 7257, 6251, 3564, 779, 16365, 359, 842, 366, 323, 9241, 72, 440, 1021, 88819, 367, 11477, 72661, 331, 245, 984, 1173, 12, 31, 697, 15341, 10621, 440, 423, 5745, 440, 279, 3638, 207, 17, 15, 15, 23, 280, 254, 16657, 4077, 423, 1379, 254, 20241, 372, 3199, 440, 29254, 1987, 5783, 440, 1021, 1063, 7676, 331, 254, 16657, 4077, 79684, 715, 372, 440, 37747, 440, 279, 7953, 207, 17, 15, 15, 23, 1021, 1063, 661, 245, 47198, 5012, 279, 2146, 26774, 280, 254, 16657, 4077, 73370, 1000, 279, 207, 17, 15, 
16, 15, 2051, 372, 440, 721, 94016, 53980, 440, 1021, 1063, 57697, 274, 13922, 22429, 12397, 327, 245, 6997, 40799, 1021, 1063, 26693, 331, 254, 23250, 17473, 279, 31237, 272, 245, 22429, 331, 16657, 1193, 440, 55047, 245, 7601, 317, 245, 8387, 2784, 1021, 52943, 3116, 340, 1006, 856, 340, 655, 248, 6807, 786, 754, 340, 1113, 655, 83, 317, 1079, 51245, 548, 745, 418, 66987, 331, 845, 779, 418, 12486, 430, 3950, 340, 1184, 14240, 285, 5749, 340, 254, 9568, 276, 2039, 745, 285, 1068, 837, 340, 1006, 856, 340, 655, 248, 3217, 1021, 440, 88819, 367, 72661, 279, 254, 207, 17, 15, 16, 16, 6131, 19611, 255, 4940, 12977, 457, 8913, 2673, 864, 72, 1021, 207, 185, 207, 185, 403, 403, 19034, 7021, 403, 403, 207, 185, 207, 185, 207, 185, 403, 403, 403, 19034, 403, 403, 403, 207, 185, 207, 185, 207, 185, 403, 403, 403, 44336, 403, 403, 403, 207, 185, 207, 185, 207, 185, 403, 403, 403, 20699, 403, 403, 403, 207, 185, 207, 185, 207, 185, 207, 185, 403, 9497, 39088, 403, 207, 185, 207, 185, 9497, 39088, 334, 53578, 207, 891, 71708, 1193, 25404, 39088, 6204, 10112, 1193, 207, 17141, 65577, 6204, 207, 22, 16, 17, 207, 891, 207, 22, 22, 15, 2238, 438, 245, 19606, 10112, 9816, 280, 254, 43874, 63046, 1021, 29051, 366, 13298, 89189, 334, 13298, 8081, 2238, 2051, 362, 317, 10344, 2424, 254, 9846, 280, 254, 10112, 34512, 1021, 3848, 9846, 30043, 438, 276, 5043, 586, 3085, 372, 245, 6956, 7377, 18448, 2051, 548, 362, 10340, 9892, 276, 1099, 254, 4374, 43677, 1021, 3848, 1728, 2051, 837, 254, 3000, 3085, 2051, 438, 76737, 457, 254, 1640, 413, 2181, 261, 1926, 77592, 280, 207, 22, 20, 20, 2051, 285, 586, 1562, 207, 16, 20, 1555, 778, 245, 766, 280, 3308, 4081, 60001, 1021, 207, 185, 9495, 15920, 362, 438, 1585, 1173, 12, 31, 3185, 276, 750, 14778, 2051, 586, 2722, 2373, 276, 330, 54807, 35812, 279, 1572, 10112, 285, 13727, 19580, 8814, 1021, 4947, 586, 42643, 4456, 2051, 6128, 20378, 5197, 27494, 463, 803, 20778, 855, 254, 13205, 1021, 1063, 643, 803, 2424, 254, 440, 63689, 1173, 12, 31, 
6944, 41175, 440, 285, 254, 440, 63689, 1173, 12, 31, 51688, 440, 457, 10112, 32123, 2051, 1477, 254, 3169, 280, 586, 830, 643, 5987, 857, 276, 330, 8131, 276, 12732, 11726, 372, 440, 254, 10112, 75368, 2051, 48103, 2051, 97790, 2051, 23768, 2051, 33968, 2051, 42706, 2051, 73970, 2051, 380, 587, 81, 4402, 2051, 44187, 410, 80604, 282, 9832, 440, 1021, 207, 185, 207, 185, 403, 403, 8598, 403, 403, 207, 185, 207, 185, 46238, 10112, 19580, 23510, 44939, 254, 1728, 280, 254, 3855, 754, 55242, 245, 830, 2051, 245, 5561, 588, 45261, 31699, 12235, 276, 440, 254, 3415, 7841, 344, 8046, 10112, 2215, 989, 1046, 1439, 2460, 285, 40974, 440, 1021, 5512, 1313, 280, 9497, 39088, 655, 82, 27494, 4169, 40974, 285, 4345, 2051, 437, 5561, 317, 7282, 2375, 1021, 11727, 2806, 2051, 10730, 457, 254, 10112, 42962, 8660, 27280, 2051, 317, 344, 10112, 27494, 418, 9184, 46019, 2051, 78165, 3733, 344, 1667, 330, 7683, 2051, 548, 588, 274, 13218, 15813, 1027, 330, 11636, 276, 1006, 1021, 1494, 4959, 12732, 11726, 2051, 440, 429, 2246, 21541, 395, 1006, 254, 766, 2051, 254, 1810, 285, 254, 9930, 279, 254, 4140, 2051, 254, 691, 26990, 395, 418, 276, 10743, 359, 41176, 2051, 285, 254, 1230, 543, 330, 344, 395, 2818, 73161, 254, 20002, 410, 4571, 276, 2579, 359, 16369, 440, 1021, 18232, 37580, 11615, 245, 4443, 6088, 2590, 276, 9497, 39088, 2051, 41021, 344, 254, 6265, 280, 254, 9816, 655, 82, 830, 3430, 12314, 280, 586, 3000, 1728, 2051, 3272, 853, 254, 440, 298, 77214, 440, 27861, 7823, 1222, 327, 691, 6415, 34512, 1021, 207, 185, 207, 185, 403, 403, 403, 22020, 1555, 403, 403, 403, 207, 185, 207, 185, 7188, 280, 856, 317, 3185, 280, 9497, 39088, 655, 82, 1728, 3450, 473, 586, 27494, 1021, 3848, 72164, 28401, 438, 9497, 46992, 12598, 2051, 245, 10689, 46761, 285, 9816, 2320, 254, 20893, 280, 77980, 35098, 1021, 9497, 39088, 438, 7722, 279, 207, 22, 16, 17, 6204, 254, 3508, 89782, 317, 9255, 2051, 5085, 344, 359, 438, 3345, 9193, 1145, 673, 2051, 8088, 261, 21834, 334, 95530, 14147, 317, 245, 
14860, 12088, 2238, 1021, 685, 3470, 1728, 2051, 362, 4828, 3177, 276, 6893, 276, 254, 6077, 3787, 280, 45870, 655, 261, 2051, 68563, 53983, 280, 254, 9497, 2617, 1021, 207, 185, 9497, 39088, 655, 82, 4143, 7648, 19420, 1166, 362, 438, 7722, 2051, 285, 362, 438, 19161, 8473, 457, 586, 23577, 1021, 1063, 661, 274, 18418, 6251, 2051, 779, 7648, 2823, 1021, 1063, 839, 661, 1853, 3222, 18062, 285, 634, 3222, 9233, 2051, 276, 4989, 362, 10344, 13434, 279, 586, 27494, 2051, 5802, 362, 1866, 29676, 586, 3458, 14294, 1021, 207, 185, 429, 4278, 280, 245, 10398, 17492, 1173, 12, 31, 6269, 2051, 586, 8859, 438, 7133, 331, 254, 4182, 6090, 280, 245, 3680, 7377, 18448, 1193, 3437, 285, 31177, 8210, 280, 254, 8698, 1240, 997, 49394, 280, 17293, 2051, 4345, 285, 18210, 1021, 1063, 3470, 16583, 276, 463, 7338, 4525, 5274, 27494, 457, 586, 3923, 34395, 2051, 548, 1069, 463, 803, 4739, 1021, 207, 185, 685, 254, 3923, 207, 22, 18, 15, 82, 2051, 362, 34473, 279, 254, 70727, 4634, 893, 1981, 247, 60150, 3046, 6204, 586, 23214, 43446, 20002, 2051, 16934, 245, 18210, 16252, 2051, 317, 2215, 276, 4290, 473, 254, 1225, 280, 437, 3463, 2051, 1983, 207, 22, 18, 20, 1021, 685, 344, 1012, 2051, 362, 3061, 254, 7377, 2418, 3799, 2051, 4135, 279, 45870, 655, 261, 1021, 1063, 5985, 2051, 276, 586, 10782, 285, 344, 280, 20580, 280, 3470, 32123, 1021, 27280, 37280, 344, 362, 3748, 5985, 1373, 586, 39375, 3398, 430, 254, 766, 438, 1653, 17777, 285, 32826, 2051, 1477, 908, 264, 11615, 586, 9123, 276, 52237, 10609, 279, 254, 6077, 975, 463, 803, 276, 18449, 1021, 4128, 437, 9123, 2051, 362, 2674, 1062, 276, 19097, 2051, 437, 766, 1983, 1783, 384, 596, 285, 1063, 1342, 72, 1021, 207, 185, 3848, 4209, 7648, 1983, 207, 22, 19, 15, 1021, 9497, 39088, 744, 463, 803, 5987, 276, 5223, 254, 7377, 2418, 1373, 280, 586, 4209, 655, 82, 7052, 2051, 548, 362, 317, 2215, 276, 463, 2028, 581, 254, 23550, 279, 9000, 280, 634, 280, 586, 3222, 18062, 1021, 1063, 7133, 254, 2112, 2685, 1555, 4731, 279, 254, 9193, 1145, 
673, 3046, 2051, 43555, 586, 16611, 279, 13535, 15726, 1021, 207, 185, 685, 254, 25731, 280, 207, 22, 19, 19, 2051, 362, 2011, 13298, 89189, 334, 13298, 8081, 2238, 327, 254, 1022, 766, 2051, 285, 254, 984, 34512, 9163, 245, 19422, 1021, 7128, 13291, 12997, 437, 372, 440, 254, 1094, 4485, 1020, 1251, 3699, 279, 9497, 39088, 655, 82, 23357, 4205, 440, 1373, 359, 4366, 857, 245, 4731, 2203, 280, 254, 298, 9256, 9816, 1173, 12, 31, 17492, 1728, 276, 588, 362, 438, 23786, 1166, 586, 9123, 279, 254, 7377, 2418, 3799, 1021, 429, 5306, 438, 8997, 634, 1173, 12, 31, 68344, 2051, 3111, 1021, 9497, 39088, 438, 457, 742, 1555, 254, 14000, 2051, 1477, 13298, 89189, 438, 2571, 245, 42643, 5395, 1021, 1003, 463, 15367, 27494, 276, 410, 786, 13298, 89189, 473, 254, 14000, 9816, 2051, 548, 889, 634, 279, 254, 750, 5489, 1021, 1955, 2011, 1310, 889, 2561, 2051, 279, 207, 22, 19, 20, 1021, 207, 185, 685, 207, 22, 19, 21, 2051, 362, 6937, 276, 254, 6077, 279, 274, 4788, 276, 84903, 586, 6269, 7337, 1021, 1063, 3061, 254, 7377, 2418, 3799, 245, 1864, 766, 2320, 254, 1893, 1012, 2051, 548, 521, 254, 13139, 778, 5985, 457, 254, 9966, 14704, 334, 12403, 279, 1835, 276, 5120, 254, 46085, 280, 2198, 35835, 2238, 1021, 1063, 1866, 1310, 17942, 254, 56997, 2051, 3615, 24057, 272, 254, 36756, 4723, 279, 207, 22, 20, 16, 2051, 207, 22, 20, 19, 285, 3748, 1310, 279, 207, 22, 20, 20, 1021, 1063, 9552, 1983, 207, 22, 20, 17, 2051, 285, 457, 207, 22, 20, 22, 254, 5457, 661, 661, 4047, 3022, 207, 1972, 1853, 16512, 285, 984, 23066, 207, 1972, 548, 634, 280, 254, 16512, 7648, 279, 61543, 279, 207, 22, 20, 20, 1021, 4810, 207, 22, 20, 19, 362, 4732, 276, 463, 23410, 4194, 334, 3748, 51124, 2238, 2051, 254, 1022, 280, 245, 4077, 280, 78066, 588, 83050, 857, 327, 254, 1610, 280, 586, 1728, 1021, 809, 438, 279, 344, 1012, 344, 9497, 39088, 438, 10599, 276, 2938, 586, 2617, 3266, 276, 254, 67034, 280, 245, 61571, 4832, 786, 457, 11542, 55296, 279, 254, 4928, 1021, 207, 185, 685, 207, 22, 20, 20, 2051, 
362, 4503, 274, 14988, 372, 87571, 280, 254, 12975, 13803, 401, 655, 82, 4995, 280, 254, 27231, 13445, 655, 82, 27646, 1021, 9495, 437, 438, 245, 10398, 1767, 2051, 279, 4057, 2602, 359, 744, 463, 803, 430, 2754, 254, 1330, 280, 274, 6269, 7337, 1021, 6809, 1323, 362, 661, 17070, 830, 2051, 3111, 2051, 254, 3299, 438, 24671, 2302, 457, 4378, 1021, 207, 185, 207, 185, 403, 403, 403, 6368, 403, 403, 403, 207, 185, 207, 185, 429, 1640, 413, 2181, 261, 1926, 77592, 4732, 279, 7983, 207, 22, 20, 20, 2051, 285, 438, 441, 5522, 30031, 327, 3308, 6524, 1555, 1021, 809, 7629, 20020, 46123, 276, 10112, 8213, 1193, 254, 44235, 280, 207, 22, 20, 19, 12790, 207, 20, 17, 1173, 13, 31, 207, 24, 4875, 1245, 2051, 548, 2146, 1555, 3470, 2051, 254, 44235, 25696, 953, 207, 16, 21, 1173, 13, 31, 207, 24, 4875, 2051, 254, 22821, 2497, 803, 51840, 410, 9571, 1021, 11399, 437, 766, 2051, 9497, 39088, 5426, 245, 15052, 40521, 401, 1728, 96632, 457, 27770, 2051, 5329, 1464, 1451, 285, 27526, 74340, 1021, 1002, 3463, 280, 405, 48094, 438, 254, 2883, 280, 9497, 39088, 372, 245, 9816, 1193, 6809, 31846, 908, 264, 643, 4473, 344, 2051, 440, 2461, 362, 3505, 1983, 857, 207, 1972, 254, 6110, 280, 586, 2617, 2051, 18792, 2051, 285, 28163, 207, 891, 856, 362, 4101, 2051, 285, 856, 362, 18936, 327, 410, 26619, 473, 254, 5635, 280, 3947, 23745, 207, 1972, 1069, 5322, 254, 48361, 20667, 280, 586, 18210, 440, 1021, 6809, 754, 362, 7213, 280, 254, 4349, 280, 586, 33596, 1867, 2051, 362, 4540, 276, 254, 14079, 280, 3072, 279, 586, 18210, 3615, 280, 31159, 2065, 586, 1377, 73459, 1021, 9497, 39088, 7048, 1193, 207, 185, 5835, 6364, 331, 856, 304, 463, 8663, 1184, 2051, 565, 1236, 304, 1006, 1108, 14079, 2051, 254, 3064, 668, 1534, 13946, 330, 84342, 457, 254, 21670, 1021, 207, 185, 685, 207, 22, 20, 21, 2051, 23287, 1444, 9603, 89, 596, 438, 10599, 276, 31721, 254, 6077, 285, 547, 67, 8972, 1021, 9497, 39088, 2051, 779, 661, 803, 2302, 473, 254, 3787, 2051, 3061, 586, 2617, 276, 245, 1810, 280, 6923, 
285, 17942, 276, 6417, 254, 6518, 280, 254, 761, 36756, 334, 33698, 596, 2238, 2051, 548, 362, 438, 16441, 457, 254, 33605, 285, 3443, 276, 45870, 655, 261, 1021, 685, 254, 25731, 2051, 586, 33596, 4278, 2051, 9497, 1981, 596, 52785, 334, 23194, 25232, 2238, 2051, 438, 7722, 1021, 37332, 437, 766, 9497, 39088, 317, 2215, 276, 463, 38349, 71546, 1021, 207, 185, 1063, 21327, 473, 45870, 655, 261, 254, 1893, 1012, 2051, 285, 438, 15572, 5666, 7326, 754, 362, 51285, 254, 6518, 279, 3638, 207, 22, 20, 22, 1021, 1002, 1767, 4366, 2462, 276, 254, 36756, 548, 438, 15052, 80626, 1021, 9497, 39088, 655, 82, 73864, 1465, 25380, 857, 276, 1682, 276, 1099, 938, 280, 359, 1193, 362, 7629, 8168, 327, 3177, 457, 80507, 254, 15613, 280, 586, 2138, 285, 24793, 84700, 2891, 261, 331, 245, 47134, 5361, 1021, 1063, 438, 19752, 548, 438, 25880, 271, 279, 6564, 1021, 1063, 438, 14807, 4283, 276, 3230, 586, 2617, 279, 7564, 2051, 548, 362, 3444, 51285, 254, 6518, 285, 331, 7983, 207, 23, 2051, 207, 22, 20, 22, 2051, 362, 5651, 276, 45870, 655, 261, 366, 254, 36756, 1893, 895, 991, 20977, 457, 4403, 8271, 1021, 3159, 2051, 586, 7402, 7091, 276, 330, 405, 58765, 616, 2051, 285, 279, 254, 6237, 280, 207, 22, 20, 23, 362, 438, 7134, 6148, 276, 245, 1767, 372, 36521, 280, 11375, 279, 23933, 1758, 18863, 1021, 429, 3299, 438, 441, 276, 586, 9455, 1193, 279, 634, 20002, 2051, 362, 7048, 1193, 207, 185, 304, 608, 786, 276, 20260, 89691, 279, 254, 4995, 893, 30954, 754, 657, 3792, 691, 10142, 276, 23428, 3872, 331, 601, 15053, 1021, 207, 185, 1063, 6937, 331, 279, 254, 6237, 280, 207, 22, 20, 24, 6204, 437, 643, 38470, 803, 67908, 276, 61571, 2051, 548, 27280, 17947, 344, 37357, 317, 245, 691, 4135, 2806, 1021, 1063, 2112, 7133, 1983, 4484, 5752, 279, 1551, 246, 46454, 334, 1132, 323, 3752, 71, 3970, 2051, 452, 533, 84, 21834, 2238, 2051, 1066, 362, 7048, 691, 853, 26371, 27494, 1021, 207, 185, 207, 185, 403, 403, 403, 69838, 678, 403, 403, 403, 207, 185, 207, 185, 685, 7983, 207, 22, 20, 24, 
2051, 362, 20358, 17552, 279, 75311, 3441, 334, 4959, 452, 533, 84, 2238, 1021, 1063, 31334, 331, 7983, 207, 17, 19, 327, 69838, 678, 334, 324, 514, 9603, 21834, 2238, 2051, 1066, 362, 438, 20315, 457, 2301, 64673, 285, 7080, 9816, 4792, 72, 8106, 1021, 9497, 23829, 2853, 3177, 279, 324, 514, 9603, 327, 1094, 280, 254, 2112, 4047, 1555, 1021, 3563, 254, 25731, 280, 344, 1012, 362, 438, 279, 6335, 8168, 2051, 285, 2143, 27494, 50740, 1345, 276, 3947, 58615, 1021, 1063, 438, 32256, 457, 26319, 35098, 2051, 245, 2138, 285, 5676, 37461, 779, 438, 15572, 20820, 2401, 430, 69838, 678, 1021, 22081, 586, 6335, 4194, 2051, 437, 438, 634, 280, 254, 61223, 285, 1094, 24177, 16021, 280, 586, 1728, 1021, 8410, 280, 9497, 655, 82, 27494, 473, 437, 3463, 418, 24177, 5502, 27258, 280, 586, 1728, 430, 440, 344, 2910, 34323, 440, 1021, 685, 207, 22, 21, 17, 2051, 362, 2116, 254, 3787, 276, 10778, 245, 43248, 2051, 548, 362, 5651, 279, 6237, 207, 22, 21, 19, 754, 362, 438, 15572, 274, 37515, 276, 26319, 2051, 779, 438, 6591, 279, 23745, 2481, 254, 78456, 22490, 1021, 207, 185, 207, 185, 403, 403, 403, 11354, 1555, 403, 403, 403, 207, 185, 207, 185, 9193, 1145, 673, 2051, 254, 4928, 280, 586, 89782, 2051, 438, 19586, 457, 4403, 8271, 279, 254, 8678, 280, 207, 22, 21, 17, 2051, 285, 279, 254, 8079, 280, 207, 22, 21, 20, 9497, 39088, 285, 586, 2617, 33997, 1224, 254, 33177, 83, 3337, 2051, 12403, 366, 254, 14308, 280, 2883, 704, 1143, 745, 1021, 1955, 32339, 9405, 2051, 4586, 581, 457, 586, 3730, 1173, 12, 31, 2802, 334, 457, 437, 766, 362, 438, 14079, 473, 4922, 97585, 2051, 36764, 1465, 285, 2401, 1712, 4500, 279, 4317, 276, 586, 3590, 78066, 2238, 1021, 1955, 17552, 279, 33637, 531, 18863, 334, 279, 856, 317, 1132, 14186, 305, 10859, 950, 2051, 908, 596, 80, 272, 2238, 430, 254, 15157, 276, 254, 14863, 73740, 257, 327, 3308, 984, 1555, 473, 5189, 8079, 207, 22, 21, 21, 1021, 1002, 3463, 438, 9497, 39088, 655, 82, 1562, 1228, 42643, 63430, 2051, 285, 1285, 362, 7048, 207, 19, 15, 15, 
27494, 279, 586, 17777, 2051, 5189, 3398, 1021, 685, 25731, 207, 22, 21, 21, 2051, 4307, 6655, 35701, 5322, 20820, 280, 254, 4928, 1193, 362, 6879, 9497, 39088, 40737, 285, 12340, 857, 372, 586, 75983, 26234, 1021, 207, 185, 685, 6175, 207, 22, 21, 23, 2051, 362, 4732, 586, 8398, 1310, 285, 2149, 372, 2367, 372, 38721, 261, 21834, 2051, 1066, 362, 7648, 279, 14631, 46454, 334, 1132, 45870, 20102, 2238, 279, 7953, 410, 7983, 207, 22, 22, 15, 2051, 279, 586, 207, 20, 23, 393, 1012, 1021, 1063, 438, 29109, 457, 586, 5391, 285, 984, 16512, 2051, 779, 10471, 279, 254, 3046, 327, 742, 1555, 430, 2754, 1021, 3848, 1562, 3185, 66040, 317, 245, 52674, 779, 13136, 245, 16223, 54181, 327, 254, 9816, 473, 68877, 1981, 1104, 279, 207, 23, 16, 18, 1021, 207, 185, 27280, 15162, 4095, 586, 1728, 457, 53006, 344, 2051, 440, 1063, 7676, 276, 330, 245, 68447, 4278, 2051, 274, 47112, 4209, 2051, 245, 19129, 6251, 2051, 245, 19558, 6964, 2051, 245, 17732, 2138, 2051, 245, 6766, 3468, 6269, 2051, 285, 245, 57319, 3605, 1021, 440, 207, 185, 24275, 317, 274, 2203, 280, 634, 280, 9497, 39088, 655, 82, 3470, 2722, 2051, 2158, 2682, 10168, 1594, 14862, 61792, 334, 10112, 1193, 207, 808, 217, 741, 236, 6869, 2628, 230, 4781, 2238, 1021, 9532, 1313, 750, 27494, 279, 254, 43874, 359, 17120, 254, 9665, 280, 245, 1234, 44749, 1439, 3997, 2051, 588, 438, 2752, 3266, 276, 13690, 1435, 10344, 20540, 276, 254, 35455, 1193, 207, 185, 207, 185, 403, 403, 15571, 403, 403, 207, 185, 207, 185, 22025, 40329, 280, 9497, 39088, 655, 82, 2722, 643, 11554, 331, 586, 2955, 3078, 280, 4345, 2051, 586, 12481, 16138, 2051, 285, 586, 9388, 25927, 1021, 207, 185, 207, 185, 403, 403, 403, 11380, 403, 403, 403, 207, 185, 207, 185, 5512, 254, 18716, 63046, 2051, 32123, 463, 2424, 9497, 39088, 254, 440, 9816, 42962, 440, 334, 207, 29703, 102, 6030, 467, 24320, 467, 129, 225, 2238, 1021, 429, 1094, 4723, 13092, 280, 586, 27494, 418, 1462, 42813, 331, 8931, 32513, 410, 254, 43664, 285, 28715, 280, 254, 4403, 2051, 410, 
254, 27494, 280, 7402, 588, 362, 7048, 276, 254, 36756, 1021, 2334, 1300, 326, 2051, 362, 7048, 786, 254, 1941, 280, 254, 2602, 279, 588, 362, 8663, 331, 3177, 2051, 285, 331, 254, 12224, 1245, 280, 7812, 1021, 1733, 31699, 9267, 2051, 437, 317, 1757, 440, 280, 245, 2447, 24496, 1503, 279, 254, 22881, 23289, 46549, 280, 254, 2906, 440, 1021, 207, 185, 9497, 39088, 655, 82, 7083, 6792, 418, 2853, 331, 17171, 3272, 853, 13998, 1193, 586, 63851, 463, 803, 56498, 81, 1334, 372, 2051, 440, 3960, 450, 521, 330, 2246, 29527, 2051, 1632, 450, 521, 536, 856, 395, 418, 8548, 276, 536, 440, 1021, 5512, 586, 8208, 778, 8627, 276, 25005, 366, 2051, 586, 5206, 3200, 10372, 90020, 10014, 9120, 586, 7103, 372, 254, 6327, 5617, 280, 10112, 42643, 4345, 1021, 207, 185, 207, 185, 403, 403, 403, 78469, 16138, 403, 403, 403, 207, 185, 207, 185, 338, 1864, 14860, 37590, 15456, 280, 10112, 32123, 317, 344, 280, 440, 9816, 47257, 440, 334, 207, 29703, 102, 1777, 231, 467, 24320, 467, 724, 950, 2238, 2051, 245, 36704, 276, 254, 40089, 47257, 2051, 8698, 93499, 1021, 3474, 280, 254, 23214, 43446, 2722, 2051, 429, 18716, 280, 254, 32857, 879, 334, 473, 1983, 207, 22, 20, 15, 2238, 2051, 4380, 5136, 276, 254, 50070, 280, 245, 365, 2663, 18325, 279, 254, 27526, 10053, 285, 245, 3662, 1173, 12, 31, 89254, 22800, 280, 14079, 1021, 3410, 11820, 418, 22511, 88579, 279, 27494, 331, 254, 6110, 280, 1572, 13474, 285, 52641, 7338, 457, 9497, 39088, 5923, 586, 1728, 1021, 207, 185, 9495, 9497, 39088, 655, 82, 16601, 13531, 276, 586, 1377, 17473, 481, 2102, 254, 14164, 280, 274, 521, 1173, 12, 31, 30366, 1197, 2744, 2001, 2051, 12480, 9673, 42347, 344, 586, 440, 9679, 24968, 279, 1714, 5312, 3177, 2051, 16316, 3148, 98572, 285, 3308, 372, 274, 1166, 15418, 440, 1021, 1063, 4573, 440, 66667, 54810, 440, 276, 254, 19738, 5743, 457, 19037, 359, 276, 440, 586, 1377, 7860, 86723, 17197, 465, 440, 1021, 207, 185, 9497, 39088, 655, 82, 24968, 2051, 327, 3177, 285, 327, 3072, 2051, 438, 697, 280, 586, 2401, 
64114, 280, 254, 8225, 280, 18210, 1193, 362, 17633, 1313, 2722, 276, 13631, 588, 661, 8524, 803, 4828, 86186, 327, 42643, 6036, 1021, 40814, 568, 522, 7048, 344, 327, 9497, 39088, 2051, 440, 3694, 279, 437, 1843, 317, 18210, 440, 2051, 9497, 7048, 31412, 331, 13388, 1108, 372, 13535, 1728, 2051, 1282, 95419, 2051, 27003, 2051, 8466, 2051, 285, 750, 27494, 1021, 207, 185, 207, 185, 403, 403, 403, 29694, 25927, 403, 403, 403, 207, 185, 207, 185, 9497, 39088, 655, 82, 830, 317, 20307, 2330, 521, 327, 895, 3169, 1021, 10112, 32123, 38470, 1222, 254, 1639, 207, 3771, 748, 1114, 334, 523, 656, 67, 676, 358, 587, 950, 12, 440, 3938, 4300, 28195, 440, 2238, 2051, 245, 5480, 276, 364, 901, 2691, 655, 6411, 280, 8698, 93499, 1021, 68877, 1981, 1104, 438, 254, 1022, 276, 4347, 254, 40646, 280, 9497, 39088, 655, 82, 24071, 2051, 4456, 279, 207, 23, 16, 18, 344, 586, 41529, 2051, 440, 24394, 279, 586, 830, 31081, 588, 3590, 1743, 661, 12293, 889, 91366, 440, 1021, 1063, 59407, 521, 254, 6813, 280, 10112, 18210, 1193, 908, 264, 2970, 344, 279, 1131, 1020, 362, 440, 2818, 1401, 17387, 28292, 410, 21822, 17387, 6987, 440, 1021, 14361, 2051, 586, 27494, 938, 245, 5505, 3169, 280, 28355, 2051, 473, 254, 1713, 285, 61512, 276, 254, 521, 24459, 285, 1791, 1173, 12, 31, 65651, 19580, 1021, 1002, 6265, 317, 50728, 1236, 2383, 3451, 2722, 1193, 37580, 33697, 254, 2051, 440, 8195, 92687, 285, 92111, 26281, 440, 279, 27494, 588, 7774, 254, 9816, 276, 2564, 1448, 57255, 280, 245, 5437, 2051, 1477, 908, 264, 5131, 254, 1639, 440, 89347, 700, 440, 372, 254, 3144, 25855, 5278, 279, 711, 830, 1021, 9497, 39088, 317, 10689, 327, 2497, 4473, 691, 331, 9816, 964, 285, 14445, 853, 688, 750, 9579, 280, 586, 766, 1021, 1063, 7048, 33852, 27494, 331, 14445, 5254, 2051, 691, 853, 688, 750, 43874, 9816, 1021, 9497, 39088, 655, 82, 24926, 6640, 37856, 331, 254, 78513, 7569, 27003, 280, 20156, 61682, 5895, 1536, 245, 35350, 344, 643, 43650, 276, 254, 1828, 1492, 1021, 207, 185, 429, 50112, 280, 586, 830, 
5464, 372, 362, 6907, 586, 3398, 285, 21606, 276, 586, 29143, 334, 440, 496, 471, 70634, 1173, 12, 31, 837, 440, 4299, 276, 31699, 2238, 1193, 586, 23214, 2722, 418, 279, 245, 10291, 19533, 2051, 6518, 326, 3398, 2051, 548, 362, 2373, 881, 586, 1377, 279, 254, 1555, 280, 254, 43248, 1021, 37580, 6792, 331, 254, 440, 28034, 17961, 440, 280, 254, 1551, 246, 46454, 27494, 2051, 588, 34443, 254, 14231, 15099, 6204, 254, 2722, 473, 586, 69838, 678, 3463, 418, 440, 2156, 2051, 2752, 40962, 6195, 440, 6204, 1477, 254, 27494, 473, 254, 5189, 33637, 531, 18863, 3463, 463, 245, 440, 6422, 285, 1823, 280, 10046, 440, 1021, 207, 185, 9495, 362, 7048, 279, 521, 42643, 6813, 2051, 9497, 39088, 317, 1608, 3185, 327, 586, 284, 129, 237, 51650, 2051, 245, 1449, 280, 20002, 366, 8881, 11612, 331, 1020, 285, 3093, 2051, 327, 2203, 1193, 207, 185, 13000, 984, 91030, 280, 9497, 39088, 655, 82, 207, 16, 20, 15, 15, 85797, 2722, 418, 279, 437, 1020, 2051, 285, 362, 317, 6051, 4828, 276, 330, 895, 5881, 29511, 1021, 3848, 1608, 284, 129, 237, 51650, 938, 254, 9304, 10014, 3430, 457, 254, 1020, 276, 962, 46130, 3093, 3272, 853, 372, 12596, 9388, 19247, 1021, 12480, 9673, 6792, 344, 2051, 440, 359, 317, 6971, 344, 25404, 39088, 317, 2249, 276, 938, 558, 51437, 49903, 1614, 245, 1020, 279, 558, 3892, 245, 6166, 440, 1021, 207, 185, 207, 185, 403, 403, 82333, 403, 403, 207, 185, 207, 185, 10068, 276, 254, 33308, 425, 21710, 12485, 63945, 1170, 2051, 9497, 39088, 655, 82, 32184, 418, 4828, 457, 1313, 19580, 32123, 276, 330, 3264, 254, 9846, 280, 521, 766, 2051, 285, 359, 4605, 440, 586, 17777, 2051, 29640, 4706, 3190, 938, 280, 521, 254, 365, 1265, 1251, 855, 55491, 280, 245, 8071, 285, 280, 521, 254, 1098, 249, 1625, 32825, 280, 254, 3451, 1734, 2051, 19195, 344, 642, 13858, 481, 2647, 17547, 1021, 440, 207, 185, 685, 586, 17476, 285, 6163, 1893, 586, 4349, 2051, 9497, 39088, 438, 441, 11246, 14168, 1021, 685, 697, 437, 481, 330, 26791, 276, 586, 92687, 285, 8970, 41216, 2051, 742, 280, 588, 
418, 1592, 440, 4828, 7999, 35010, 285, 51245, 457, 10112, 32123, 1021, 440, 2071, 418, 1860, 15813, 13531, 276, 857, 207, 1972, 889, 26566, 27494, 473, 4484, 14778, 207, 1972, 285, 1069, 7183, 857, 279, 3769, 280, 17802, 2051, 548, 441, 372, 245, 1112, 5995, 280, 42643, 410, 12481, 33524, 1021, 9497, 39088, 317, 839, 28830, 11096, 279, 15813, 33872, 8255, 280, 18210, 1021, 207, 185, 3159, 2051, 372, 27280, 9267, 2051, 362, 440, 317, 254, 889, 10112, 9816, 4318, 7927, 10318, 366, 766, 440, 2051, 285, 586, 2722, 4732, 276, 4686, 279, 23270, 279, 254, 43867, 8295, 1021, 22020, 4864, 6792, 2373, 473, 89189, 568, 13717, 72, 2051, 779, 40408, 254, 12481, 32125, 280, 742, 280, 9497, 39088, 655, 82, 2722, 334, 5802, 362, 1503, 1069, 279, 889, 245, 1760, 12044, 280, 254, 27494, 2238, 2051, 285, 473, 20156, 40052, 2051, 779, 7048, 245, 5836, 36720, 9497, 39088, 285, 13298, 89189, 331, 29747, 18047, 473, 13195, 1401, 2481, 768, 1021, 9848, 1069, 14778, 7363, 254, 7927, 280, 9497, 39088, 279, 704, 1377, 42643, 830, 1021, 3563, 254, 6278, 280, 254, 207, 16, 15, 393, 8295, 2051, 61792, 68687, 673, 13475, 254, 1022, 27730, 280, 586, 344, 2910, 25341, 279, 324, 514, 9603, 1021, 207, 185, 809, 438, 279, 254, 207, 16, 16, 393, 8295, 2051, 2320, 254, 17506, 18716, 2906, 344, 9497, 39088, 655, 82, 14947, 6889, 895, 11294, 1021, 685, 437, 3463, 245, 13862, 298, 1173, 12, 31, 12619, 280, 7234, 34512, 3061, 1810, 2051, 279, 588, 31563, 61792, 2051, 13298, 89189, 285, 9497, 39088, 2373, 276, 330, 15102, 372, 14357, 6357, 254, 52376, 2051, 414, 12626, 382, 285, 8698, 1240, 997, 55949, 280, 10112, 8814, 1021, 2803, 254, 1246, 766, 2051, 254, 4205, 280, 61586, 1173, 12, 31, 8698, 1240, 997, 2001, 53693, 344, 9497, 39088, 2051, 372, 895, 42643, 83123, 2051, 16134, 254, 59731, 3299, 1021, 7641, 56707, 77434, 10372, 437, 22834, 754, 362, 7048, 344, 9497, 39088, 438, 440, 840, 81302, 3564, 1373, 3564, 1184, 521, 586, 20575, 979, 13354, 2051, 362, 1866, 327, 254, 2516, 280, 245, 12134, 10498, 
586, 28999, 440, 1021, 3848, 7927, 438, 8579, 457, 586, 6184, 276, 57772, 8523, 84489, 3773, 1193, 7083, 72936, 778, 23786, 457, 586, 28901, 276, 254, 8135, 1835, 2051, 1477, 7083, 57843, 40178, 586, 5296, 327, 254, 4922, 1021, 60095, 72936, 1027, 1068, 276, 586, 9388, 56071, 2051, 1477, 19580, 57843, 778, 12565, 457, 586, 41216, 1021, 5512, 254, 22105, 280, 254, 10233, 655, 82, 9684, 280, 7812, 2051, 9497, 39088, 655, 82, 28901, 276, 254, 1977, 285, 5296, 327, 254, 4922, 463, 803, 22554, 372, 46976, 8214, 6728, 2001, 285, 90870, 2051, 285, 362, 643, 803, 40408, 327, 586, 938, 280, 2976, 2051, 440, 1245, 655, 82, 4706, 440, 1021, 207, 185, 9497, 39088, 655, 82, 23270, 10318, 276, 1108, 274, 10798, 344, 359, 317, 372, 2074, 276, 3271, 586, 7927, 372, 344, 280, 23768, 279, 6435, 1193, 359, 438, 2074, 327, 688, 10112, 9816, 441, 276, 330, 24434, 457, 857, 1021, 5807, 745, 438, 1866, 1913, 9497, 39088, 2051, 3451, 34512, 5496, 279, 254, 27199, 280, 3052, 11867, 280, 586, 830, 1193, 89189, 568, 13717, 72, 655, 82, 5296, 327, 254, 4922, 2051, 9193, 1257, 655, 82, 62359, 2051, 285, 4233, 72, 24337, 406, 1104, 655, 82, 36068, 331, 254, 15501, 43515, 418, 245, 1860, 6987, 1021, 4600, 41635, 2051, 9497, 39088, 655, 82, 830, 279, 42030, 254, 284, 129, 237, 51650, 473, 12596, 1734, 1538, 881, 440, 245, 8136, 327, 6082, 42643, 48071, 440, 845, 254, 6351, 327, 1131, 15500, 9579, 279, 254, 29416, 1021, 207, 185, 685, 254, 207, 17, 15, 393, 8295, 2051, 362, 438, 254, 14860, 9816, 280, 48847, 53185, 74717, 2051, 779, 643, 5734, 857, 372, 440, 254, 9846, 2170, 1173, 12, 31, 28656, 2051, 2170, 1173, 12, 31, 21030, 9816, 779, 643, 29109, 279, 688, 4706, 440, 2051, 285, 26693, 344, 2051, 440, 362, 643, 1401, 525, 245, 2018, 668, 2051, 372, 245, 12481, 8943, 285, 372, 245, 74048, 54270, 440, 1021, 207, 185, 207, 185, 403, 403, 403, 82333, 331, 13727, 10591, 403, 403, 403, 207, 185, 207, 185, 9497, 39088, 655, 82, 18210, 643, 1401, 245, 21065, 5454, 331, 13727, 10591, 2051, 4398, 331, 
254, 10591, 473, 254, 11108, 283, 32748, 3463, 285, 331, 30142, 285, 34512, 279, 254, 427, 2860, 3463, 2051, 2847, 364, 50606, 78, 40896, 19084, 2051, 254, 1079, 9846, 280, 521, 422, 54179, 34512, 1021, 6809, 279, 4959, 13727, 2051, 254, 1639, 11648, 280, 48939, 334, 207, 29703, 102, 1777, 231, 2051, 467, 1011, 72, 2238, 317, 9565, 53061, 366, 9497, 39088, 1021, 207, 185, 30024, 254, 207, 16, 18, 393, 8295, 2051, 254, 13727, 12744, 89189, 568, 13717, 72, 2330, 521, 34512, 285, 745, 778, 1860, 13531, 276, 9497, 39088, 2051, 5802, 586, 7927, 481, 330, 2826, 279, 742, 530, 533, 4824, 334, 440, 10112, 18210, 1401, 457, 13727, 34512, 440, 2238, 33872, 8255, 1108, 372, 380, 5078, 64, 1783, 31966, 248, 844, 31966, 279, 254, 207, 24, 393, 8295, 1021, 429, 1022, 20307, 13727, 6700, 1270, 280, 9497, 39088, 655, 82, 18210, 438, 75362, 261, 1783, 22226, 334, 207, 16, 17, 22, 23, 207, 891, 207, 16, 18, 19, 21, 2238, 2051, 245, 40961, 89, 2904, 35855, 68271, 285, 634, 280, 254, 1094, 19606, 10598, 280, 254, 10591, 280, 254, 24121, 34188, 6204, 362, 6383, 40408, 9497, 39088, 285, 1401, 245, 37856, 331, 742, 27494, 280, 9497, 39088, 473, 254, 12402, 280, 245, 35855, 17981, 279, 5924, 1021, 207, 16, 16, 280, 324, 2904, 71, 544, 2181, 31966, 1021, 3848, 6710, 908, 31966, 1591, 2353, 708, 4634, 15323, 1313, 530, 533, 4824, 588, 778, 7765, 9961, 440, 24434, 457, 9497, 39088, 440, 279, 704, 3663, 2265, 1021, 908, 31966, 1591, 655, 82, 6710, 452, 305, 19084, 1783, 31966, 1040, 246, 661, 3415, 4714, 366, 254, 8592, 285, 16925, 1923, 10288, 1783, 89512, 387, 285, 71020, 9497, 39088, 655, 82, 18210, 279, 254, 74824, 1843, 6204, 634, 1492, 461, 4036, 19084, 770, 378, 6526, 6056, 2051, 254, 81271, 37491, 56274, 280, 254, 8592, 285, 254, 7492, 10801, 280, 298, 81139, 18210, 2051, 3637, 452, 305, 19084, 2051, 440, 12816, 304, 3071, 254, 18210, 280, 9497, 39088, 285, 13298, 89189, 3025, 440, 452, 305, 19084, 30725, 276, 10846, 2051, 440, 7587, 565, 340, 536, 463, 2527, 19952, 1021, 2366, 565, 
536, 441, 1021, 440, 5512, 937, 2051, 745, 661, 803, 1313, 52033, 331, 9497, 39088, 655, 82, 18210, 1572, 279, 35855, 30354, 285, 279, 254, 78048, 8213, 2051, 285, 372, 245, 1230, 586, 18210, 438, 2752, 26637, 279, 13727, 10591, 279, 254, 11108, 283, 32748, 3463, 2051, 301, 13, 70, 13, 2051, 22648, 247, 9206, 2051, 245, 13092, 28656, 279, 254, 5189, 207, 16, 19, 393, 8295, 2051, 285, 742, 642, 71, 11407, 1108, 372, 13281, 559, 8379, 2051, 40896, 19084, 2051, 285, 1783, 5078, 261, 1021, 207, 185, 11399, 254, 31822, 655, 30850, 2906, 280, 254, 427, 2860, 3463, 334, 207, 16, 21, 17, 19, 207, 891, 207, 16, 21, 19, 18, 2238, 2051, 1783, 676, 78, 70068, 1655, 77, 334, 207, 51595, 6702, 111, 2238, 280, 254, 56354, 87940, 655, 82, 72420, 96715, 331, 9497, 39088, 655, 82, 413, 129, 237, 51650, 334, 207, 17141, 6097, 3771, 2477, 2051, 7456, 59581, 1783, 1923, 94574, 2238, 438, 26238, 881, 12693, 2051, 285, 359, 14474, 48973, 23270, 279, 8698, 1240, 997, 30142, 285, 496, 19084, 50190, 334, 4660, 9462, 1153, 2238, 762, 1021, 429, 37856, 8135, 9497, 39088, 655, 82, 24060, 372, 254, 7492, 280, 521, 34512, 6204, 327, 3940, 2051, 16978, 28534, 1783, 4111, 2904, 2051, 245, 20307, 8698, 1240, 997, 17492, 2051, 26693, 279, 5924, 1021, 207, 18, 22, 280, 452, 2063, 19084, 38249, 1040, 31966, 344, 1981, 129, 225, 76, 5148, 72, 825, 9497, 39088, 6331, 438, 254, 1079, 1608, 9816, 279, 4345, 285, 40408, 1783, 676, 78, 70068, 1655, 77, 655, 82, 37856, 327, 895, 17961, 285, 79344, 2051, 1477, 362, 58964, 1712, 5283, 4940, 2320, 254, 68877, 87940, 778, 1653, 15194, 511, 283, 510, 1021, 364, 50606, 78, 40896, 19084, 2051, 254, 9846, 422, 54179, 9816, 2051, 438, 839, 10871, 24434, 457, 9497, 39088, 6204, 279, 13290, 84, 642, 65034, 283, 42488, 2051, 586, 54575, 2051, 362, 62729, 254, 1022, 984, 4303, 280, 338, 13336, 8275, 334, 207, 5434, 4279, 2238, 1323, 245, 422, 54179, 372, 895, 14189, 285, 839, 1313, 280, 586, 750, 422, 54179, 463, 3083, 49807, 285, 20667, 1021, 809, 317, 993, 344, 754, 
362, 7648, 279, 91069, 2320, 245, 1234, 4886, 2051, 245, 4486, 280, 9497, 39088, 655, 82, 18210, 438, 1503, 366, 857, 372, 634, 280, 245, 1860, 17353, 5023, 588, 362, 438, 2249, 276, 5652, 1983, 1021, 207, 185, 207, 185, 403, 403, 31869, 403, 403, 207, 185, 207, 185, 338, 6265, 280, 14315, 463, 803, 1222, 279, 8950, 276, 15255, 9497, 39088, 655, 82, 830, 881, 3517, 1021, 1733, 45261, 31699, 21053, 279, 429, 51847, 62783, 280, 9497, 39088, 2051, 440, 2071, 418, 1313, 1448, 4703, 276, 3956, 254, 4194, 6591, 279, 55455, 9497, 39088, 2051, 588, 317, 2643, 395, 933, 372, 1313, 1448, 36593, 372, 2198, 440, 334, 265, 13, 70892, 72, 2238, 1021, 429, 87286, 463, 661, 276, 46774, 366, 11973, 636, 254, 8970, 11612, 280, 254, 3632, 1673, 39685, 8305, 10039, 276, 245, 12732, 3256, 334, 7282, 754, 55455, 31100, 23192, 2051, 410, 284, 129, 237, 51650, 2238, 2051, 285, 70005, 254, 4301, 521, 28412, 11011, 7282, 279, 254, 3470, 2722, 334, 12480, 9673, 14870, 344, 440, 586, 27494, 536, 441, 372, 245, 6290, 1865, 1184, 1079, 1136, 279, 13858, 440, 207, 1972, 265, 13, 48101, 2238, 1021, 3474, 12459, 331, 1319, 3616, 317, 11096, 457, 48847, 53185, 74717, 655, 82, 3474, 51402, 62783, 4810, 254, 10112, 1021, 3848, 418, 2094, 36593, 2051, 588, 9847, 276, 30265, 254, 9304, 10014, 1184, 456, 73, 1058, 1375, 285, 10995, 285, 36350, 280, 254, 3093, 6204, 586, 16811, 276, 254, 521, 28412, 418, 59688, 276, 29966, 1094, 280, 1069, 27494, 473, 586, 6604, 2051, 285, 64451, 276, 440, 15255, 636, 440, 254, 13531, 279, 1462, 2722, 588, 362, 1217, 2732, 1021, 207, 185, 7065, 87286, 463, 6746, 1266, 6146, 4297, 331, 3507, 276, 15518, 245, 3078, 280, 254, 42643, 6813, 1222, 457, 9497, 39088, 1021, 39543, 3492, 47624, 279, 14863, 10112, 91034, 5131, 3517, 1173, 12, 31, 3398, 55893, 19123, 2051, 12130, 35480, 15001, 81136, 279, 417, 5026, 254, 20992, 94864, 254, 10112, 55893, 8600, 6204, 1572, 938, 1225, 1173, 12, 31, 8583, 4303, 285, 17359, 742, 5757, 280, 66886, 1021, 685, 429, 51847, 62783, 280, 9497, 
39088, 2051, 45261, 31699, 4446, 254, 9304, 10014, 3148, 16660, 2051, 17160, 9308, 254, 17868, 8959, 276, 7679, 276, 254, 27494, 3272, 853, 14031, 34307, 1021, 21367, 2051, 362, 13607, 366, 254, 60801, 280, 254, 3470, 2722, 457, 22381, 20071, 13858, 366, 12885, 30636, 1021, 207, 185, 685, 207, 17, 15, 16, 20, 2051, 18232, 37580, 7019, 36593, 2051, 366, 14087, 10112, 23917, 2051, 280, 254, 3938, 18210, 280, 9497, 39088, 279, 4484, 19430, 2051, 366, 12885, 59372, 33757, 2051, 588, 44939, 20071, 1465, 1021, 207, 185, 207, 185, 207, 185, 403, 45915, 1257, 334, 3474, 61824, 5654, 2238, 403, 207, 185, 207, 185, 440, 45915, 1257, 440, 317, 245, 5654, 12790, 457, 3517, 1173, 12, 31, 14987, 5733, 4219, 3474, 61824, 327, 704, 1864, 15112, 11679, 2051, 11293, 4233, 7152, 334, 207, 17, 15, 16, 17, 2238, 1021, 809, 438, 7964, 372, 254, 3377, 655, 82, 1864, 2816, 279, 11357, 285, 254, 4443, 7705, 2816, 331, 207, 22, 7291, 207, 17, 15, 16, 18, 1021, 429, 5654, 438, 15323, 457, 31349, 47400, 60721, 282, 5303, 2051, 31349, 997, 64550, 246, 2051, 1396, 4166, 38993, 2580, 2051, 13708, 261, 721, 1686, 6482, 2051, 29354, 1888, 285, 895, 28506, 2051, 20354, 64029, 285, 10343, 72, 770, 306, 264, 65, 1021, 440, 45915, 1257, 440, 317, 274, 91531, 1823, 2445, 5654, 366, 13039, 5766, 6204, 254, 30209, 7988, 245, 66859, 655, 82, 96194, 21716, 366, 245, 4485, 750, 1021, 90128, 40408, 254, 5654, 327, 895, 5909, 2051, 9609, 359, 245, 2039, 1173, 12, 31, 636, 4184, 331, 11293, 4233, 7152, 1021, 207, 185, 429, 4184, 5322, 254, 2592, 655, 82, 25485, 1869, 1173, 12, 31, 2146, 5906, 279, 14416, 285, 254, 4794, 17903, 2051, 1477, 79618, 1869, 1173, 12, 31, 17205, 11275, 279, 1572, 50634, 46402, 334, 80293, 285, 11476, 13856, 2238, 2051, 372, 1136, 372, 279, 10011, 2051, 8900, 2051, 37742, 2051, 7239, 2051, 2006, 18457, 2051, 285, 254, 30371, 1021, 429, 2816, 69355, 430, 1604, 207, 19, 21, 331, 254, 2616, 59769, 8111, 207, 16, 15, 15, 285, 643, 803, 18865, 5299, 457, 254, 71870, 25284, 9727, 280, 6096, 
334, 53342, 5413, 2238, 327, 69508, 280, 207, 20, 15, 15, 1173, 53534, 207, 15, 15, 15, 15334, 1021, 3474, 61824, 7903, 440, 45915, 1257, 440, 331, 1572, 254, 7312, 285, 2616, 8451, 280, 429, 1444, 44488, 285, 207, 18, 3144, 15903, 26882, 1193, 11293, 4233, 7152, 11433, 334, 207, 17, 15, 16, 18, 2238, 2051, 8880, 1003, 6475, 11433, 334, 207, 17, 15, 16, 19, 2238, 285, 2426, 254, 9862, 13046, 11433, 334, 207, 17, 15, 16, 20, 2238, 1021, 207, 185, 1640, 33152, 4236, 3914, 2051, 5405, 276, 3798, 254, 2592, 655, 82, 385, 30368, 18777, 2051, 438, 12977, 457, 83004, 23601, 488, 2051, 779, 661, 8524, 5168, 366, 254, 2592, 331, 984, 750, 4236, 10141, 1021, 429, 14800, 50847, 254, 4219, 15698, 3947, 16284, 3764, 245, 5575, 4147, 2051, 588, 3433, 18327, 53042, 280, 31950, 4236, 10141, 280, 11688, 1108, 372, 254, 12791, 30384, 655, 440, 7704, 3990, 21970, 440, 2051, 69471, 99317, 655, 82, 440, 73289, 6005, 10593, 440, 285, 433, 9241, 12737, 655, 82, 440, 4233, 246, 10245, 440, 1021, 429, 4236, 3914, 4503, 207, 16, 15, 1173, 13, 31, 207, 19, 4875, 8208, 279, 245, 207, 17, 19, 1173, 12, 31, 5064, 3463, 285, 4864, 37856, 473, 56239, 2051, 779, 14168, 895, 2062, 6629, 2051, 24885, 401, 4910, 1021, 207, 185, 429, 5654, 438, 5598, 279, 254, 20501, 2624, 4983, 28000, 207, 17, 15, 16, 19, 2051, 285, 317, 839, 634, 280, 254, 2732, 11688, 2324, 331, 254, 25215, 2627, 1021, 18494, 2051, 359, 317, 254, 2328, 1969, 4184, 331, 254, 2616, 12545, 280, 4201, 2608, 655, 82, 2461, 304, 8906, 10548, 2081, 207, 19, 21, 1021, 207, 185, 207, 185, 403, 403, 39371, 285, 6255, 403, 403, 207, 185, 207, 185, 440, 45915, 1257, 440, 438, 4473, 457, 31349, 47400, 60721, 282, 5303, 2051, 31349, 997, 64550, 246, 2051, 1396, 4166, 38993, 2580, 2051, 13708, 261, 721, 1686, 6482, 2051, 29354, 1888, 2051, 285, 895, 28506, 2051, 20354, 64029, 285, 10343, 72, 770, 306, 264, 65, 1021, 64029, 2051, 721, 1686, 6482, 2051, 285, 770, 306, 264, 65, 661, 10084, 6022, 15323, 3474, 61824, 655, 82, 3590, 5906, 24452, 2051, 
440, 2461, 44051, 1257, 27043, 440, 2051, 440, 3474, 45434, 440, 2051, 285, 440, 12606, 5807, 1003, 655, 248, 13291, 440, 1021, 685, 6511, 207, 17, 15, 16, 17, 2051, 429, 30520, 6701, 344, 18475, 23007, 488, 2051, 254, 2592, 655, 82, 7709, 2051, 661, 33044, 19606, 5654, 47397, 276, 23418, 327, 2516, 331, 3474, 61824, 655, 82, 1864, 11679, 1021, 64029, 993, 2051, 440, 809, 655, 82, 2375, 276, 752, 704, 48896, 331, 254, 4236, 1021, 440, 685, 4317, 2051, 254, 4718, 6701, 344, 12184, 1651, 21121, 438, 2695, 331, 13139, 344, 5598, 8690, 11667, 285, 64550, 246, 1021, 207, 185, 440, 45915, 1257, 440, 438, 8224, 372, 254, 1864, 2616, 2816, 285, 4443, 7617, 473, 704, 1864, 15112, 11679, 2051, 11293, 4233, 7152, 1021, 75001, 64441, 2051, 245, 2592, 5612, 2051, 279, 245, 7953, 207, 17, 15, 16, 17, 8314, 366, 63057, 9044, 2051, 9643, 2643, 657, 13070, 440, 45915, 1257, 440, 372, 254, 11679, 655, 82, 1864, 2816, 279, 254, 2616, 1021, 64441, 438, 20638, 372, 5366, 1193, 440, 2991, 254, 11679, 2051, 344, 655, 82, 254, 1022, 634, 344, 395, 19240, 276, 285, 395, 778, 837, 2051, 655, 32259, 2051, 395, 2126, 437, 5654, 655, 440, 1021, 10068, 276, 245, 63057, 9044, 4718, 2051, 254, 1604, 438, 7964, 62529, 279, 254, 4794, 5110, 331, 207, 16, 22, 7953, 207, 17, 15, 16, 17, 1021, 3563, 207, 16, 23, 7291, 207, 17, 15, 16, 18, 2051, 254, 5654, 661, 441, 803, 22881, 30903, 276, 2616, 9319, 19273, 1021, 429, 4184, 2051, 3111, 2051, 438, 7964, 457, 25369, 10548, 26666, 331, 207, 23, 8520, 207, 17, 15, 16, 18, 2051, 372, 254, 3377, 655, 82, 1864, 2816, 279, 11357, 1021, 207, 185, 207, 185, 403, 403, 74509, 285, 19184, 403, 403, 207, 185, 207, 185, 440, 45915, 1257, 440, 317, 274, 581, 34881, 4552, 2051, 91531, 1823, 2445, 5654, 588, 7513, 327, 245, 13640, 280, 207, 18, 1193, 207, 15, 19, 334, 207, 18, 4063, 2051, 2685, 7661, 2238, 1021, 429, 4184, 3804, 13039, 5766, 2051, 73374, 34787, 2051, 245, 440, 10010, 10010, 10010, 440, 30087, 2051, 285, 245, 12283, 709, 1173, 12, 31, 33616, 271, 47151, 
1021, 3474, 61824, 655, 82, 22156, 3169, 279, 254, 5654, 13497, 473, 254, 4347, 280, 427, 19, 276, 339, 207, 33513, 107, 207, 21, 1021, 54293, 335, 5312, 14661, 14404, 2051, 15462, 4303, 285, 36427, 1021, 51423, 279, 254, 2131, 280, 427, 3144, 2051, 254, 8896, 317, 845, 279, 3064, 766, 285, 13412, 430, 245, 3399, 207, 24, 15, 34950, 513, 8298, 2051, 4299, 276, 254, 6583, 11696, 4236, 7019, 430, 10548, 14831, 13, 690, 457, 25369, 893, 96416, 10548, 33629, 1021, 43100, 2051, 17259, 2239, 13212, 473, 2428, 34309, 10689, 344, 254, 4184, 317, 440, 38017, 53501, 10566, 440, 1021, 429, 96287, 3093, 19006, 254, 66859, 655, 82, 96194, 21716, 366, 245, 4485, 750, 2051, 285, 42532, 60758, 11119, 10014, 327, 13511, 41376, 279, 254, 4303, 440, 1273, 340, 1113, 207, 487, 244, 41891, 1529, 359, 4363, 893, 1308, 340, 953, 41891, 1529, 525, 1719, 893, 23194, 1481, 30351, 2051, 30351, 2051, 30351, 2051, 30351, 2051, 30351, 1021, 440, 207, 185, 440, 45915, 1257, 440, 438, 1136, 4503, 457, 15813, 4236, 32123, 2051, 779, 62083, 331, 895, 3512, 280, 5909, 1021, 9848, 53392, 17365, 655, 82, 14333, 19895, 261, 2051, 779, 40408, 895, 21958, 2051, 285, 15261, 64441, 280, 59769, 2051, 779, 14168, 254, 47151, 2051, 5734, 440, 45915, 1257, 440, 372, 634, 280, 254, 11679, 655, 82, 22499, 1021, 64928, 7604, 3736, 262, 327, 429, 37716, 78766, 254, 4184 ================================================ FILE: src/wikitest.cat.1chunk.v3-encoded.txt ================================================ 0, 539, 438, 10498, 102771, 402, 438, 54921, 10498, 102771, 402, 344, 411, 3947, 5485, 1537, 15758, 305, 31085, 22491, 1204, 1166, 936, 260, 20521, 2390, 15, 34, 71132, 3615, 377, 270, 15758, 4923, 455, 10987, 295, 223, 792, 18, 1204, 1162, 515, 7128, 513, 260, 71132, 3615, 295, 270, 1946, 5334, 1054, 4866, 513, 20923, 89127, 1537, 778, 515, 7451, 295, 223, 792, 19, 509, 270, 15301, 6605, 30650, 1204, 1166, 936, 260, 20521, 3615, 295, 270, 15758, 4923, 25750, 3880, 1675, 284, 295, 223, 792, 20, 1204, 660, 
223, 792, 22, 102771, 402, 33906, 260, 3615, 412, 582, 38950, 582, 295, 270, 19569, 582, 93802, 905, 85, 18145, 582, 294, 270, 15758, 4923, 455, 9266, 49866, 3749, 468, 98410, 19930, 21180, 5101, 27290, 305, 56068, 18844, 75, 1204, 1166, 515, 8733, 295, 270, 223, 792, 23, 31085, 58853, 294, 270, 28551, 59059, 3429, 1946, 52493, 58703, 1537, 778, 515, 7451, 509, 270, 74024, 30650, 295, 78779, 305, 270, 12651, 1374, 60442, 50442, 295, 6693, 1204, 1166, 515, 14717, 513, 3880, 101547, 305, 98410, 19930, 7173, 2282, 994, 1649, 1537, 73908, 1653, 15674, 1537, 18881, 21324, 1537, 60048, 27626, 443, 1537, 59898, 102010, 305, 95787, 10316, 1204, 539, 660, 223, 792, 24, 1537, 102771, 402, 98410, 19930, 2282, 994, 1649, 295, 270, 1946, 84690, 4866, 513, 5101, 60792, 39136, 1204, 1166, 11244, 377, 260, 223, 792, 24, 19569, 294, 270, 15758, 4923, 1537, 74746, 1537, 7128, 513, 260, 3615, 295, 270, 223, 792, 25, 31085, 4606, 294, 1730, 304, 7564, 467, 14717, 513, 8656, 500, 433, 551, 2438, 1204, 1730, 304, 7564, 467, 515, 7451, 509, 24507, 30650, 295, 270, 6693, 76115, 294, 43728, 387, 41178, 305, 43008, 5440, 1204, 102771, 402, 98410, 295, 1234, 14263, 295, 223, 792, 26, 1537, 113170, 9512, 46157, 513, 89999, 11111, 2033, 122083, 1537, 305, 7740, 4989, 113861, 14717, 513, 10754, 367, 117151, 1204, 660, 3460, 223, 792, 26, 1537, 102771, 402, 1960, 260, 20521, 12343, 377, 260, 1234, 2390, 15, 34, 892, 19569, 21736, 294, 270, 15758, 4923, 448, 1690, 270, 25628, 1537, 7128, 513, 411, 12343, 377, 270, 15758, 4923, 88943, 808, 295, 5997, 223, 792, 26, 1204, 1166, 936, 260, 54693, 3615, 295, 5147, 29193, 294, 270, 15758, 4923, 117856, 1189, 295, 223, 643, 18, 1537, 412, 582, 657, 39706, 70148, 582, 1204, 102771, 402, 98410, 295, 270, 223, 643, 19, 5485, 41296, 267, 5647, 14717, 513, 11111, 2033, 122083, 1204, 54921, 438, 438, 34604, 438, 438, 54921, 539, 438, 438, 438, 223, 792, 18, 1256, 223, 792, 23, 438, 438, 438, 54921, 660, 223, 792, 18, 102771, 402, 936, 260, 20521, 2390, 15, 34, 
71132, 3615, 377, 270, 15758, 4923, 455, 10987, 3749, 468, 48626, 582, 12655, 4228, 744, 582, 295, 270, 19569, 1537, 582, 660, 37831, 60497, 582, 1204, 102771, 402, 98410, 412, 582, 12655, 582, 295, 270, 1946, 5334, 1054, 4866, 513, 20923, 89127, 1537, 778, 515, 7451, 295, 223, 792, 19, 509, 270, 15301, 6605, 30650, 1204, 334, 3862, 294, 102771, 402, 905, 85, 4197, 295, 455, 29023, 377, 8907, 5654, 1440, 412, 582, 5415, 51568, 2420, 8721, 582, 295, 270, 3615, 1537, 305, 468, 5730, 6490, 13712, 295, 455, 51699, 1537, 305, 69493, 14645, 1204, 1166, 11244, 295, 270, 15758, 4923, 25750, 3880, 1675, 284, 295, 223, 792, 20, 412, 582, 7043, 381, 1544, 4565, 615, 582, 295, 270, 19569, 582, 21774, 69318, 5995, 582, 1537, 305, 936, 260, 3615, 412, 260, 1688, 3053, 582, 107062, 98095, 582, 377, 455, 10987, 1204, 539, 1166, 936, 260, 54693, 3615, 295, 223, 792, 21, 377, 1234, 29193, 294, 455, 10987, 1537, 412, 3053, 582, 75971, 15366, 582, 1204, 660, 223, 792, 22, 102771, 402, 33906, 260, 3615, 412, 582, 38950, 582, 295, 270, 19569, 582, 93802, 905, 85, 18145, 582, 294, 270, 15758, 4923, 455, 9266, 49866, 3749, 468, 98410, 19930, 21180, 5101, 27290, 305, 56068, 18844, 75, 1204, 102771, 402, 98410, 412, 582, 101277, 582, 1537, 295, 270, 223, 792, 23, 31085, 58853, 294, 270, 28551, 59059, 3429, 1946, 52493, 58703, 1204, 983, 515, 7451, 509, 270, 74024, 30650, 295, 78779, 1537, 305, 270, 12651, 1374, 60442, 50442, 295, 6693, 1204, 1166, 515, 14717, 513, 3880, 101547, 305, 98410, 19930, 7173, 2282, 994, 1649, 1537, 73908, 1653, 15674, 1537, 18881, 21324, 1537, 60048, 27626, 443, 1537, 59898, 102010, 305, 95787, 10316, 1204, 102771, 402, 5730, 260, 29141, 3862, 295, 455, 19383, 66782, 1313, 582, 455, 15757, 344, 568, 3937, 12250, 18934, 1537, 418, 68025, 22704, 538, 7173, 2282, 994, 1649, 343, 1928, 97771, 73617, 538, 793, 4197, 412, 74767, 471, 7541, 905, 85, 66982, 1900, 1537, 10498, 102771, 402, 1537, 73908, 1653, 15674, 305, 60048, 27626, 443, 1204, 582, 455, 39077, 9764, 1537, 
582, 7173, 2282, 994, 1649, 305, 10498, 102771, 402, 3477, 78776, 26292, 270, 8042, 409, 1212, 1204, 582, 54921, 438, 438, 438, 223, 792, 24, 1256, 2236, 438, 438, 438, 54921, 660, 223, 792, 24, 102771, 402, 98410, 295, 270, 1946, 84690, 4866, 513, 5101, 60792, 39136, 1204, 455, 1946, 515, 892, 294, 260, 4923, 778, 24369, 1688, 79360, 85, 1537, 31970, 31651, 1492, 26455, 5638, 1492, 84690, 1204, 660, 260, 223, 792, 24, 8222, 1537, 16636, 22491, 7173, 2282, 994, 1649, 8045, 102771, 402, 412, 834, 294, 793, 11145, 1218, 2390, 15, 34, 12570, 1313, 582, 342, 10423, 3946, 418, 260, 15650, 3252, 10498, 102771, 402, 1537, 995, 515, 295, 270, 28071, 5390, 294, 31651, 1537, 26455, 5638, 305, 84690, 509, 270, 4545, 1204, 1166, 8178, 1026, 8809, 295, 52493, 58703, 1204, 582, 1166, 48626, 582, 29856, 44797, 582, 377, 270, 223, 792, 24, 19569, 294, 270, 15758, 4923, 1537, 74746, 1537, 31970, 582, 33633, 342, 334, 592, 582, 1204, 102771, 402, 98410, 412, 582, 7232, 582, 295, 270, 223, 792, 25, 4606, 294, 1730, 304, 7564, 467, 14717, 513, 8656, 500, 433, 551, 2438, 1204, 1730, 304, 7564, 467, 515, 7451, 509, 24507, 30650, 295, 270, 6693, 76115, 294, 43728, 387, 41178, 305, 43008, 5440, 1204, 660, 260, 3862, 294, 270, 4606, 362, 455, 19383, 66782, 1537, 31085, 12306, 12410, 47787, 9764, 1537, 582, 10498, 102771, 402, 14257, 260, 35560, 31191, 304, 270, 6632, 412, 7232, 1204, 582, 539, 102771, 402, 98410, 295, 1234, 14263, 295, 223, 792, 26, 1537, 113170, 9512, 46157, 513, 89999, 11111, 2033, 122083, 1537, 305, 7740, 4989, 113861, 14717, 513, 10754, 367, 117151, 1204, 102771, 402, 48626, 260, 3053, 9306, 582, 42333, 582, 295, 7740, 4989, 113861, 1537, 995, 27634, 3758, 418, 3053, 582, 26021, 582, 412, 270, 582, 12426, 8809, 4588, 995, 23404, 436, 1375, 418, 330, 4422, 75, 582, 1204, 102771, 402, 20521, 98410, 377, 260, 1234, 2390, 15, 34, 892, 19569, 21736, 582, 448, 4201, 582, 295, 3460, 223, 792, 26, 294, 270, 15758, 4923, 448, 1690, 270, 25628, 412, 3053, 582, 40983, 1675, 8538, 
582, 1204, 1166, 11244, 377, 270, 15758, 4923, 88943, 808, 412, 582, 43716, 582, 295, 5997, 223, 792, 26, 1204, 1166, 936, 260, 54693, 3615, 295, 5147, 29193, 294, 270, 15758, 4923, 117856, 1189, 295, 223, 643, 18, 1537, 412, 582, 657, 39706, 70148, 582, 1204, 1166, 48626, 411, 13331, 24579, 15666, 362, 260, 5931, 54460, 1204, 1166, 39008, 377, 270, 21798, 18330, 295, 121983, 260, 24579, 377, 15758, 1313, 582, 68755, 260, 10817, 344, 260, 17759, 3217, 1204, 41697, 3261, 440, 1153, 1205, 440, 905, 266, 10144, 943, 1082, 440, 2090, 905, 86, 344, 1855, 58614, 790, 1031, 477, 77877, 377, 1341, 995, 477, 24403, 509, 5177, 440, 1407, 10918, 305, 8176, 440, 270, 11198, 304, 2466, 1031, 305, 1747, 1277, 440, 1153, 1205, 440, 905, 266, 4843, 1204, 582, 102771, 402, 98410, 295, 270, 223, 643, 19, 5485, 41296, 267, 5647, 14717, 513, 11111, 2033, 122083, 1204, 54921, 438, 438, 20496, 5338, 438, 438, 54921, 539, 438, 438, 438, 20496, 438, 438, 438, 54921, 539, 438, 438, 438, 47906, 438, 438, 438, 54921, 539, 438, 438, 438, 30650, 438, 438, 438, 122770, 438, 11677, 30618, 438, 54921, 11677, 30618, 343, 62286, 1256, 95709, 1313, 22096, 30618, 3749, 7891, 1313, 223, 70287, 3749, 223, 25544, 1256, 223, 26631, 1900, 515, 260, 19496, 7891, 22361, 294, 270, 24019, 53983, 1204, 36230, 418, 7645, 78725, 343, 7645, 9984, 1900, 1537, 468, 344, 11477, 3252, 270, 11849, 294, 270, 7891, 46494, 1204, 4457, 11849, 45899, 515, 304, 8463, 793, 3924, 412, 260, 7877, 8478, 34596, 1537, 790, 468, 16477, 14740, 304, 1635, 270, 5033, 56860, 1204, 4457, 1988, 1537, 1277, 270, 4445, 3924, 1537, 515, 83323, 513, 270, 1417, 462, 3090, 276, 101283, 294, 223, 27481, 1537, 305, 793, 2336, 223, 856, 1737, 881, 260, 1014, 294, 4975, 6245, 48309, 1204, 539, 8083, 16494, 468, 515, 2961, 2390, 15, 34, 3459, 304, 915, 17818, 1537, 793, 2984, 4127, 304, 366, 81938, 29681, 295, 1952, 7891, 305, 10999, 19044, 5785, 1204, 5673, 793, 46383, 4985, 1537, 8791, 25535, 2759, 27107, 611, 1047, 30239, 1060, 270, 16922, 
1204, 1166, 769, 1047, 3252, 270, 582, 98240, 2390, 15, 34, 43507, 976, 582, 305, 270, 582, 98240, 2390, 15, 34, 52827, 582, 513, 7891, 30454, 1537, 1901, 270, 3291, 294, 793, 1116, 769, 7837, 1440, 304, 366, 10097, 304, 10734, 12592, 412, 582, 270, 7891, 103709, 1537, 101480, 1537, 541, 28817, 1537, 28429, 1537, 53623, 1537, 54374, 1537, 115472, 1537, 406, 8354, 5983, 1537, 57715, 469, 103612, 7705, 687, 582, 1204, 54921, 438, 438, 7586, 438, 438, 54921, 37345, 7891, 19044, 22682, 32092, 270, 1988, 294, 270, 3725, 1082, 45826, 260, 1116, 1537, 260, 4646, 778, 61008, 36965, 17911, 304, 582, 270, 5010, 10348, 396, 7075, 7891, 3241, 1197, 1303, 1469, 2783, 305, 40718, 582, 1204, 8074, 1623, 294, 11677, 30618, 905, 85, 27107, 7080, 40718, 305, 3980, 1537, 566, 4646, 344, 6440, 2239, 1204, 13023, 3986, 1537, 8045, 513, 270, 7891, 40615, 7232, 22127, 1537, 344, 396, 7891, 27107, 477, 8600, 47468, 1537, 6320, 5460, 5148, 396, 2786, 366, 7723, 1537, 790, 778, 411, 12695, 15874, 1494, 366, 15990, 304, 1153, 1204, 1884, 5970, 10734, 12592, 1537, 582, 455, 2477, 20671, 579, 1153, 270, 1014, 1537, 270, 2445, 305, 270, 11451, 295, 270, 6951, 1537, 270, 850, 38906, 579, 477, 304, 15778, 436, 69233, 1537, 305, 270, 1529, 759, 366, 396, 579, 4338, 78740, 270, 17261, 469, 4442, 304, 2572, 436, 33677, 582, 1204, 20986, 47643, 10905, 260, 4824, 4637, 3191, 304, 11677, 30618, 1537, 39726, 396, 270, 6890, 294, 270, 22361, 905, 85, 1116, 4072, 14058, 294, 793, 4445, 1988, 1537, 4562, 1099, 270, 582, 322, 35686, 582, 25076, 15866, 1505, 362, 850, 6543, 46494, 1204, 54921, 438, 438, 438, 15225, 1737, 438, 438, 438, 54921, 7973, 294, 1205, 344, 3459, 294, 11677, 30618, 905, 85, 1988, 5095, 538, 793, 27107, 1204, 4457, 76430, 39057, 515, 11677, 38288, 12098, 1537, 260, 9764, 45860, 305, 22361, 2184, 270, 28870, 294, 123719, 28592, 1204, 11677, 30618, 515, 9288, 295, 223, 25544, 3749, 270, 6319, 107908, 344, 13026, 1537, 6816, 396, 436, 515, 4851, 10391, 1260, 555, 1537, 10134, 276, 22643, 
343, 77716, 17504, 344, 260, 29361, 14626, 1900, 1204, 660, 3859, 1988, 1537, 468, 5083, 5619, 304, 9028, 304, 270, 6102, 4593, 294, 32209, 905, 276, 1537, 58387, 66137, 294, 270, 11677, 3096, 1204, 539, 11677, 30618, 905, 85, 5642, 9521, 27768, 1561, 468, 515, 9288, 1537, 305, 468, 515, 21847, 9927, 513, 793, 51327, 1204, 1166, 936, 411, 20062, 8809, 1537, 995, 9521, 4047, 1204, 1166, 990, 936, 2038, 4747, 22226, 305, 834, 4747, 13939, 1537, 304, 9642, 468, 11477, 12001, 295, 793, 27107, 1537, 7254, 468, 3214, 43375, 793, 3132, 26331, 1204, 539, 455, 4053, 294, 260, 10262, 41335, 2390, 15, 34, 6805, 1537, 793, 12181, 515, 9185, 377, 270, 5103, 4342, 294, 260, 3988, 8478, 34596, 1313, 2286, 305, 19607, 6797, 294, 270, 75637, 276, 71574, 294, 16296, 1537, 3980, 305, 19106, 1204, 1166, 3859, 16026, 304, 611, 7444, 5588, 6603, 27107, 513, 793, 3808, 40614, 1537, 790, 1305, 611, 1047, 6644, 1204, 539, 660, 270, 3808, 223, 25168, 85, 1537, 468, 52591, 295, 270, 41377, 11646, 1492, 111430, 56521, 3078, 3749, 793, 27419, 41606, 17261, 1537, 22897, 260, 19106, 22110, 1537, 344, 3241, 304, 5045, 538, 270, 1522, 294, 566, 3285, 1537, 2448, 223, 29264, 1204, 660, 396, 1141, 1537, 468, 4310, 270, 8478, 4008, 1708, 1537, 4888, 295, 32209, 905, 276, 1204, 1166, 10815, 1537, 304, 793, 16567, 305, 396, 294, 17841, 294, 3859, 30454, 1204, 22127, 39157, 396, 468, 6162, 10815, 1754, 793, 46030, 5392, 509, 270, 1014, 515, 2727, 27091, 305, 51112, 1537, 1901, 114705, 10905, 793, 8977, 304, 50275, 14458, 295, 270, 6102, 1142, 611, 1047, 304, 27323, 1204, 5057, 566, 8977, 1537, 468, 4661, 1559, 304, 24185, 1537, 566, 1014, 2448, 2009, 119107, 305, 1166, 69113, 1204, 539, 4457, 6369, 9521, 2448, 223, 24438, 1204, 11677, 30618, 1114, 611, 1047, 7837, 304, 6601, 270, 8478, 4008, 1754, 294, 793, 6369, 905, 85, 10681, 1537, 790, 468, 344, 3241, 304, 611, 2910, 890, 270, 32104, 295, 17244, 294, 834, 294, 793, 4747, 22226, 1204, 1166, 9185, 270, 2894, 2689, 1737, 5530, 295, 270, 10391, 1260, 
555, 3078, 1537, 42176, 793, 19118, 295, 12541, 23072, 1204, 539, 660, 270, 36406, 294, 223, 28509, 1537, 468, 1811, 7645, 78725, 343, 7645, 9984, 1900, 362, 270, 1257, 1014, 1537, 305, 270, 1234, 46494, 8216, 260, 28124, 1204, 7558, 14390, 13308, 566, 412, 582, 270, 1473, 3168, 72450, 4885, 295, 11677, 30618, 905, 85, 23052, 2934, 582, 1754, 436, 6718, 1440, 260, 5530, 2511, 294, 270, 322, 11290, 22361, 2390, 15, 34, 41335, 1988, 304, 778, 468, 515, 29728, 1561, 793, 8977, 295, 270, 8478, 4008, 1708, 1204, 455, 3998, 515, 15471, 834, 2390, 15, 34, 107474, 1537, 4499, 1204, 11677, 30618, 515, 513, 1093, 1737, 270, 15590, 1537, 1901, 7645, 78725, 515, 4006, 260, 46383, 9051, 1204, 1350, 611, 20626, 27107, 304, 469, 943, 7645, 78725, 538, 270, 15590, 22361, 1537, 790, 1353, 834, 295, 270, 915, 6315, 1204, 2359, 1811, 1820, 1353, 4387, 1537, 295, 223, 29444, 1204, 539, 660, 223, 30787, 1537, 468, 8599, 304, 270, 6102, 295, 411, 6041, 304, 102197, 793, 6805, 7609, 1204, 1166, 4310, 270, 8478, 4008, 1708, 260, 1957, 1014, 2184, 270, 2502, 1141, 1537, 790, 710, 270, 15644, 881, 10815, 513, 270, 4309, 16968, 343, 18841, 295, 2496, 304, 4819, 270, 31092, 294, 3338, 53071, 1900, 1204, 1166, 3214, 1820, 22612, 270, 47766, 1537, 6240, 19379, 288, 270, 46146, 6578, 295, 223, 29197, 1537, 223, 29556, 305, 6162, 1820, 295, 223, 27481, 1204, 1166, 13841, 2448, 223, 28723, 1537, 305, 513, 223, 28997, 270, 8417, 936, 936, 3818, 2845, 2136, 2038, 24231, 305, 1234, 33712, 2136, 790, 834, 294, 270, 24231, 9521, 295, 73306, 295, 223, 27481, 1204, 5795, 223, 29556, 468, 6006, 304, 611, 15180, 4454, 343, 6162, 31787, 1900, 1537, 270, 1257, 294, 260, 4923, 294, 90410, 778, 6397, 4295, 1440, 362, 270, 2591, 294, 793, 1988, 1204, 983, 515, 295, 396, 1141, 396, 11677, 30618, 515, 12522, 304, 5002, 793, 3096, 3504, 304, 270, 68978, 294, 260, 76449, 7068, 943, 513, 14534, 51793, 295, 270, 5181, 1204, 539, 660, 223, 27481, 1537, 468, 5730, 411, 21434, 412, 103347, 294, 270, 15759, 23766, 479, 
905, 85, 7032, 294, 270, 35226, 23098, 905, 85, 37622, 1204, 8083, 566, 515, 260, 10262, 2411, 1537, 295, 5374, 2734, 436, 1114, 611, 1047, 509, 3396, 270, 1904, 294, 411, 6805, 7609, 1204, 8239, 1936, 468, 936, 23654, 1116, 1537, 4499, 1537, 270, 3609, 515, 42756, 3543, 513, 5516, 1204, 54921, 438, 438, 438, 5602, 438, 438, 438, 54921, 455, 1417, 462, 3090, 276, 101283, 6006, 295, 5899, 223, 27481, 1537, 305, 515, 554, 8064, 46744, 362, 4975, 4944, 1737, 1204, 983, 7677, 25896, 38579, 304, 7891, 6591, 1313, 270, 34425, 294, 223, 29556, 11516, 223, 4157, 2390, 16, 34, 223, 27, 3646, 1482, 1537, 790, 5147, 1737, 3859, 1537, 270, 34425, 34011, 1438, 223, 926, 2390, 16, 34, 223, 27, 3646, 1537, 270, 17347, 3638, 1047, 45298, 469, 12035, 1204, 10272, 566, 1014, 1537, 11677, 30618, 6192, 260, 13900, 59735, 479, 1988, 72319, 1610, 513, 26989, 1537, 4515, 2013, 1656, 305, 26400, 100178, 8897, 1204, 1162, 3285, 294, 35662, 87811, 515, 270, 3490, 294, 11677, 30618, 412, 260, 22361, 1313, 8239, 59892, 114705, 769, 4866, 396, 1537, 582, 1999, 468, 5847, 2448, 1440, 2136, 270, 6156, 294, 793, 3096, 1537, 26926, 1537, 305, 48106, 1256, 1205, 468, 7431, 1537, 305, 1205, 468, 25206, 362, 469, 41982, 538, 270, 6486, 294, 3624, 27005, 2136, 1305, 5750, 270, 38381, 19219, 294, 793, 19106, 582, 1204, 8239, 1082, 468, 8838, 294, 270, 4641, 294, 793, 43345, 1888, 1537, 468, 6818, 304, 270, 16021, 294, 3628, 295, 793, 19106, 6240, 294, 52220, 4465, 793, 1956, 58883, 3399, 273, 1204, 11677, 30618, 8398, 1313, 539, 7570, 8686, 377, 1205, 342, 611, 11385, 1407, 1537, 855, 1749, 342, 1153, 1345, 16021, 1537, 270, 2953, 836, 2231, 19398, 366, 58414, 1610, 513, 270, 28878, 1204, 539, 660, 223, 29291, 1537, 36963, 2137, 7243, 92, 700, 515, 12522, 304, 53920, 270, 6102, 305, 42776, 12784, 1204, 11677, 30618, 1537, 995, 936, 1047, 3543, 538, 270, 4593, 1537, 4310, 793, 3096, 304, 260, 2445, 294, 6555, 305, 22612, 304, 10764, 270, 5476, 294, 270, 1017, 46146, 343, 44944, 700, 1900, 1537, 790, 
468, 515, 19888, 513, 270, 61321, 305, 4737, 304, 32209, 905, 276, 1204, 660, 270, 36406, 1537, 793, 43345, 4053, 1537, 11677, 1653, 700, 48434, 343, 34153, 35132, 1900, 1537, 515, 9288, 1204, 34659, 566, 1014, 11677, 30618, 344, 3241, 304, 611, 55218, 43641, 1204, 539, 1166, 40530, 538, 32209, 905, 276, 270, 2502, 1141, 1537, 305, 515, 19594, 7708, 6774, 1082, 468, 43381, 1808, 270, 5476, 295, 3460, 223, 28997, 1204, 1162, 2411, 6718, 3278, 304, 270, 46146, 790, 515, 13900, 85909, 1204, 11677, 30618, 905, 85, 114701, 1719, 52593, 1440, 304, 3047, 304, 1635, 1347, 294, 436, 1313, 468, 7677, 14805, 362, 5619, 513, 106662, 270, 16800, 294, 793, 6117, 305, 35548, 66109, 89374, 377, 260, 79311, 8039, 1204, 1166, 515, 25488, 790, 515, 116502, 31772, 295, 5210, 1204, 1166, 515, 16581, 6559, 304, 5711, 793, 3096, 295, 5773, 1537, 790, 468, 6415, 43381, 1808, 270, 5476, 305, 377, 5899, 223, 26, 1537, 223, 28997, 1537, 468, 10274, 304, 32209, 905, 276, 418, 270, 46146, 2502, 1009, 1270, 79040, 513, 3635, 8753, 1204, 3211, 1537, 793, 11389, 8361, 304, 366, 612, 1827, 7852, 638, 1537, 305, 295, 270, 8012, 294, 223, 30942, 468, 515, 2131, 12284, 304, 260, 2411, 412, 32284, 294, 6297, 295, 14947, 1521, 27397, 1204, 455, 3609, 515, 554, 304, 793, 13132, 1313, 295, 834, 17261, 1537, 468, 8398, 1313, 539, 342, 1030, 943, 304, 59455, 17196, 367, 295, 270, 7032, 1492, 43791, 1082, 851, 4379, 850, 14803, 304, 31640, 4077, 377, 1026, 21848, 1204, 539, 1166, 8599, 377, 295, 270, 8012, 294, 223, 31257, 3749, 566, 769, 34478, 1047, 92355, 304, 76449, 1537, 790, 22127, 19166, 396, 35814, 344, 260, 850, 4888, 3986, 1204, 1166, 2894, 9185, 2448, 4104, 6167, 295, 75609, 50096, 343, 1928, 330, 3697, 39238, 1537, 489, 634, 87, 22643, 1900, 1537, 1479, 468, 8398, 850, 1099, 13961, 27107, 1204, 54921, 438, 438, 438, 43975, 664, 438, 438, 438, 54921, 660, 5899, 223, 31257, 1537, 468, 24752, 21855, 295, 45192, 8320, 343, 5970, 489, 634, 87, 1900, 1204, 1166, 60405, 377, 5899, 223, 1173, 362, 
43975, 664, 343, 122077, 22643, 1900, 1537, 1479, 468, 515, 32187, 513, 3277, 4701, 4214, 305, 16636, 22361, 4897, 75, 6625, 1204, 11677, 23255, 2951, 5619, 295, 122077, 362, 1473, 294, 270, 2894, 3818, 1737, 1204, 3715, 270, 36406, 294, 396, 1141, 468, 515, 295, 5024, 14805, 1537, 305, 3667, 27107, 79978, 1694, 304, 3624, 111120, 1204, 1166, 515, 49049, 513, 38432, 28592, 1537, 260, 6117, 305, 7017, 46334, 995, 515, 19594, 26748, 3810, 509, 43975, 664, 1204, 15907, 793, 5024, 4454, 1537, 566, 515, 834, 294, 270, 2938, 17213, 305, 1473, 29777, 14194, 294, 793, 1988, 1204, 8647, 294, 11677, 905, 85, 27107, 538, 566, 3285, 477, 29777, 120759, 294, 793, 1988, 509, 582, 396, 3234, 75702, 582, 1204, 660, 223, 30160, 1537, 468, 3001, 270, 4593, 304, 16408, 260, 50873, 1537, 790, 468, 10274, 295, 8012, 223, 30251, 1082, 468, 515, 19594, 411, 45659, 304, 38432, 1537, 995, 515, 5984, 295, 27005, 2765, 270, 68606, 19738, 1204, 54921, 438, 438, 438, 12673, 1737, 438, 438, 438, 54921, 10391, 1260, 555, 1537, 270, 5181, 294, 793, 107908, 1537, 515, 27064, 513, 3635, 8753, 295, 270, 11369, 294, 223, 30160, 1537, 305, 295, 270, 9623, 294, 223, 28782, 11677, 30618, 305, 793, 3096, 66678, 1910, 270, 20334, 86, 2317, 1537, 18841, 418, 270, 17687, 294, 3490, 786, 1722, 1031, 1204, 2359, 33730, 13257, 1537, 5607, 890, 513, 793, 4160, 2390, 15, 34, 2195, 343, 513, 566, 1014, 468, 515, 16021, 538, 7066, 5117, 564, 1537, 45381, 1719, 305, 3810, 3072, 4109, 295, 3012, 304, 793, 4412, 90410, 1900, 1204, 2359, 21855, 295, 31733, 571, 27397, 343, 295, 1205, 344, 1928, 10579, 104351, 24586, 1537, 86197, 80006, 1900, 509, 270, 23254, 304, 270, 13475, 489, 76614, 362, 4975, 1234, 1737, 538, 6218, 9623, 223, 30400, 1204, 1162, 3285, 515, 11677, 30618, 905, 85, 2336, 2405, 46383, 47359, 1537, 305, 2155, 468, 8398, 223, 5126, 27107, 295, 793, 27091, 1537, 6218, 5392, 1204, 660, 36406, 223, 30400, 1537, 4983, 8595, 33495, 5750, 26748, 294, 270, 5181, 1313, 468, 10166, 11677, 30618, 44856, 305, 
13631, 1440, 412, 793, 101332, 29286, 1204, 539, 660, 5077, 223, 26754, 1537, 468, 6006, 793, 8572, 1820, 305, 3557, 412, 3706, 412, 18694, 276, 22643, 1537, 1479, 468, 9521, 295, 47551, 27397, 343, 1928, 32209, 20863, 1900, 295, 5997, 469, 5899, 223, 26631, 1537, 295, 793, 223, 3175, 463, 1141, 1204, 1166, 515, 28303, 513, 793, 9070, 305, 1234, 24231, 1537, 995, 12652, 295, 270, 3078, 362, 1093, 1737, 509, 3396, 1204, 4457, 2336, 3459, 101276, 344, 260, 73172, 995, 21361, 260, 27881, 75546, 362, 270, 22361, 538, 48525, 107499, 295, 223, 29722, 1204, 539, 22127, 15747, 4692, 793, 1988, 513, 63050, 396, 1537, 582, 1166, 11244, 304, 366, 260, 2274, 536, 4053, 1537, 411, 103420, 6369, 1537, 260, 33625, 8809, 1537, 260, 33623, 10299, 1537, 260, 20070, 6117, 1537, 260, 9746, 6760, 6805, 1537, 305, 260, 94546, 3863, 1204, 582, 539, 32765, 344, 411, 2511, 294, 834, 294, 11677, 30618, 905, 85, 3859, 2984, 1537, 2600, 4207, 3922, 2031, 49096, 38903, 343, 7891, 1313, 223, 83876, 25638, 3601, 11637, 3049, 1900, 1204, 12554, 1623, 915, 27107, 295, 270, 24019, 436, 24369, 270, 12769, 294, 260, 1606, 110580, 1469, 5446, 1537, 778, 515, 2915, 3504, 304, 12502, 1902, 11477, 19915, 304, 270, 38973, 1313, 54921, 438, 438, 6266, 438, 438, 54921, 91964, 294, 11677, 30618, 905, 85, 2984, 769, 10792, 377, 793, 3828, 4880, 294, 3980, 1537, 793, 12417, 14857, 1537, 305, 793, 10021, 32258, 1204, 54921, 438, 438, 438, 7646, 438, 438, 438, 54921, 8074, 270, 19386, 53983, 1537, 30454, 611, 3252, 11677, 30618, 270, 582, 22361, 40615, 582, 343, 223, 31236, 3043, 568, 13007, 568, 60061, 1900, 1204, 455, 1473, 6578, 9600, 294, 793, 27107, 477, 1948, 73202, 377, 8193, 35094, 469, 270, 47160, 305, 31489, 294, 270, 3635, 1537, 469, 270, 27107, 294, 11389, 778, 468, 8398, 304, 270, 46146, 1204, 101594, 367, 1537, 468, 8398, 943, 270, 1788, 294, 270, 2734, 295, 778, 468, 11385, 377, 5619, 1537, 305, 377, 270, 17229, 1482, 294, 5794, 1204, 1924, 36965, 9575, 1537, 566, 344, 1951, 582, 294, 260, 3576, 
48266, 2006, 295, 270, 27890, 36756, 45099, 294, 270, 9861, 582, 1204, 539, 11677, 30618, 905, 85, 5218, 8785, 477, 2951, 377, 17951, 4562, 1099, 15540, 1313, 793, 77971, 611, 1047, 81554, 1381, 412, 1537, 582, 4480, 550, 710, 366, 2477, 47743, 1537, 2513, 550, 710, 696, 1205, 579, 477, 13313, 304, 696, 582, 1204, 8074, 793, 9003, 881, 13606, 304, 37172, 418, 1537, 793, 120705, 9971, 22948, 10666, 22104, 793, 19544, 412, 270, 6672, 5991, 294, 7891, 46383, 3980, 1204, 54921, 438, 438, 438, 61236, 14857, 438, 438, 438, 54921, 334, 1957, 29361, 20683, 7772, 294, 7891, 30454, 344, 396, 294, 582, 22361, 59432, 582, 343, 223, 31236, 26228, 568, 13007, 568, 2666, 992, 1900, 1537, 260, 26626, 304, 270, 30912, 59432, 1537, 75637, 349, 1204, 4114, 294, 270, 27419, 41606, 2984, 1537, 455, 19386, 294, 270, 92694, 1054, 343, 538, 2448, 223, 15098, 1900, 1537, 7047, 7140, 304, 270, 115587, 294, 260, 1120, 2276, 33336, 295, 270, 26400, 15291, 305, 260, 4521, 2390, 15, 34, 13581, 284, 21568, 294, 16021, 1204, 3109, 10078, 477, 24839, 67329, 295, 27107, 377, 270, 6156, 294, 1952, 17573, 305, 46940, 7444, 513, 11677, 30618, 6443, 793, 1988, 1204, 539, 8083, 11677, 30618, 905, 85, 17215, 17157, 304, 793, 1956, 18330, 588, 3475, 270, 22059, 294, 411, 710, 2390, 15, 34, 35116, 1663, 4198, 1478, 1537, 14510, 12974, 23711, 396, 793, 582, 12734, 24874, 295, 1577, 5260, 5619, 1537, 18356, 5686, 84591, 305, 4975, 412, 411, 1561, 62832, 582, 1204, 1166, 6253, 582, 95047, 85275, 582, 304, 270, 19328, 8628, 513, 19844, 436, 304, 582, 793, 1956, 10526, 465, 595, 45189, 481, 582, 1204, 539, 11677, 30618, 905, 85, 24874, 1537, 362, 5619, 305, 362, 3628, 1537, 515, 892, 294, 793, 3810, 103516, 294, 270, 13680, 294, 19106, 1313, 468, 27179, 1623, 2984, 304, 12179, 778, 936, 9479, 1047, 5083, 101542, 362, 46383, 4135, 1204, 17902, 552, 500, 8398, 396, 362, 11677, 30618, 1537, 582, 5675, 295, 566, 2058, 344, 19106, 582, 1537, 11677, 8398, 35027, 377, 11404, 1345, 412, 12541, 1988, 1537, 1926, 69120, 
1537, 30649, 1537, 7951, 1537, 305, 915, 27107, 1204, 54921, 438, 438, 438, 23595, 32258, 438, 438, 438, 54921, 11677, 30618, 905, 85, 1116, 344, 24917, 3554, 710, 362, 1009, 3291, 1204, 7891, 30454, 34478, 1505, 270, 2112, 223, 2089, 89957, 343, 597, 28742, 3221, 29897, 992, 15, 582, 5553, 84925, 582, 1900, 1537, 260, 6915, 304, 12651, 72757, 905, 8837, 294, 75637, 349, 1204, 48525, 107499, 515, 270, 1257, 304, 7155, 270, 54283, 294, 11677, 30618, 905, 85, 21382, 1537, 4985, 295, 223, 29722, 396, 793, 59402, 1537, 582, 35400, 295, 793, 1116, 20944, 778, 4412, 2420, 936, 17192, 1353, 100466, 582, 1204, 1166, 77855, 710, 270, 6823, 294, 7891, 19106, 1313, 114705, 4230, 396, 295, 1750, 1179, 468, 582, 4338, 1960, 21661, 25038, 469, 18374, 21661, 7165, 582, 1204, 13144, 1537, 793, 27107, 1347, 260, 7050, 3291, 294, 47595, 1537, 538, 270, 2624, 305, 90654, 536, 304, 270, 710, 31306, 305, 2280, 2390, 15, 34, 68453, 19044, 1204, 1162, 6890, 344, 61603, 1749, 2549, 2811, 2984, 1313, 47643, 31331, 270, 1537, 582, 8365, 108307, 305, 64445, 25532, 582, 295, 27107, 778, 12535, 270, 22361, 304, 3293, 1688, 64466, 294, 260, 6299, 1537, 1901, 114705, 6623, 270, 2112, 582, 79720, 5484, 582, 412, 270, 3631, 24354, 4105, 295, 969, 1116, 1204, 11677, 30618, 344, 9764, 362, 3638, 4866, 850, 377, 1159, 14688, 305, 17780, 1099, 1117, 915, 12770, 294, 793, 1014, 1204, 1166, 8398, 40355, 27107, 377, 17780, 7472, 1537, 850, 1099, 1117, 915, 24019, 22361, 1204, 11677, 30618, 905, 85, 28848, 6740, 36803, 377, 270, 105125, 15000, 30649, 294, 16325, 59277, 118006, 260, 33787, 396, 769, 70119, 304, 270, 2236, 2173, 1204, 539, 455, 96268, 294, 793, 1116, 7956, 412, 468, 5873, 793, 5392, 305, 21514, 304, 793, 25632, 343, 582, 537, 691, 105884, 2390, 15, 34, 1277, 582, 4509, 304, 36965, 1900, 1313, 793, 27419, 2984, 477, 295, 260, 9113, 16174, 1537, 5476, 367, 5392, 1537, 790, 468, 4127, 1055, 793, 1956, 295, 270, 1737, 294, 270, 50873, 1204, 47643, 8785, 377, 270, 582, 46728, 34059, 582, 294, 
270, 75609, 50096, 27107, 1537, 778, 51388, 270, 21870, 13010, 3749, 270, 2984, 538, 793, 43975, 664, 3285, 477, 582, 2900, 1537, 2915, 53239, 7199, 582, 3749, 1901, 270, 27107, 538, 270, 6218, 31733, 571, 27397, 3285, 611, 260, 582, 9677, 305, 2333, 294, 10048, 582, 1204, 539, 8083, 468, 8398, 295, 710, 46383, 6823, 1537, 11677, 30618, 344, 2455, 3459, 362, 793, 314, 134, 253, 66472, 1537, 260, 2613, 294, 17261, 418, 14528, 18643, 377, 1179, 305, 3445, 1537, 362, 2511, 1313, 539, 10454, 1234, 94955, 294, 11677, 30618, 905, 85, 223, 4980, 18, 81871, 2984, 477, 295, 566, 1179, 1537, 305, 468, 344, 7175, 5083, 304, 366, 1009, 6646, 40519, 1204, 4457, 2455, 314, 134, 253, 66472, 1347, 270, 11550, 10666, 4072, 513, 270, 1179, 304, 1258, 58706, 3445, 4562, 1099, 412, 14097, 10021, 19149, 1204, 14510, 12974, 8785, 396, 1537, 582, 436, 344, 12274, 396, 22096, 30618, 344, 3495, 304, 1347, 832, 76375, 33638, 1766, 260, 1179, 295, 832, 4095, 260, 9379, 582, 1204, 54921, 438, 438, 41129, 438, 438, 54921, 9498, 304, 270, 77155, 10954, 53109, 72925, 1537, 11677, 30618, 905, 85, 34156, 477, 5083, 513, 1623, 19044, 30454, 304, 366, 3611, 270, 11849, 294, 710, 1014, 1537, 305, 436, 5756, 582, 793, 27091, 1537, 41761, 4063, 4491, 1347, 294, 710, 270, 399, 2869, 1246, 1060, 121186, 294, 260, 17132, 305, 294, 710, 270, 688, 265, 1273, 47776, 294, 270, 2811, 2004, 1537, 23045, 396, 1119, 14891, 588, 4541, 17336, 1204, 582, 539, 660, 793, 22665, 305, 8817, 2502, 793, 4641, 1537, 11677, 30618, 515, 554, 16592, 27788, 1204, 660, 892, 566, 588, 366, 23554, 304, 793, 108307, 305, 10956, 34909, 1537, 1093, 294, 778, 477, 2413, 582, 5083, 11644, 67265, 305, 58614, 513, 7891, 30454, 1204, 582, 2454, 477, 2767, 15874, 17157, 304, 1440, 2136, 1353, 30116, 27107, 538, 4104, 17818, 2136, 305, 1305, 9734, 1440, 295, 3999, 294, 33738, 1537, 790, 554, 412, 260, 1383, 11452, 294, 46383, 469, 12417, 45444, 1204, 11677, 30618, 344, 990, 29999, 11260, 295, 15874, 42206, 7101, 294, 19106, 1204, 539, 
3211, 1537, 412, 22127, 9575, 1537, 468, 582, 344, 270, 1353, 7891, 22361, 6501, 6853, 15021, 418, 1014, 582, 1537, 305, 793, 2984, 6006, 304, 4138, 295, 23881, 295, 270, 47559, 6683, 1204, 15225, 4337, 8785, 4127, 538, 78725, 552, 6843, 75, 1537, 995, 46959, 270, 12417, 60346, 294, 1093, 294, 11677, 30618, 905, 85, 2984, 343, 7254, 468, 2006, 1305, 295, 1353, 260, 2395, 9078, 294, 270, 27107, 1900, 1537, 305, 538, 16325, 26816, 1537, 995, 8398, 260, 8521, 43473, 11677, 30618, 305, 7645, 78725, 377, 30556, 20864, 538, 14416, 1960, 2765, 1066, 1204, 11309, 1305, 17818, 7111, 270, 6853, 294, 11677, 30618, 295, 786, 1956, 46383, 1116, 1204, 3715, 270, 7407, 294, 270, 223, 553, 463, 6683, 1537, 38903, 43135, 555, 17769, 270, 1257, 79841, 294, 793, 396, 3234, 56318, 295, 122077, 1204, 539, 983, 515, 295, 270, 223, 779, 463, 6683, 1537, 2184, 270, 17009, 19386, 9861, 396, 11677, 30618, 905, 85, 19887, 9359, 1009, 15011, 1204, 660, 566, 3285, 260, 10501, 322, 2390, 15, 34, 10520, 294, 7728, 46494, 4310, 2445, 1537, 295, 778, 14616, 38903, 1537, 7645, 78725, 305, 11677, 30618, 4127, 304, 366, 20057, 412, 16882, 8370, 270, 42590, 1537, 110525, 435, 305, 75637, 276, 55013, 294, 7891, 5785, 1204, 3256, 270, 1975, 1014, 1537, 270, 2934, 294, 71529, 2390, 15, 34, 75637, 68275, 58412, 396, 11677, 30618, 1537, 412, 1009, 46383, 18845, 287, 1537, 25190, 270, 49990, 3609, 1204, 3280, 41499, 83957, 9971, 566, 22805, 1082, 468, 8398, 396, 11677, 30618, 515, 582, 852, 381, 28314, 4588, 1754, 4588, 1407, 710, 793, 28384, 1151, 14234, 1537, 468, 3214, 362, 270, 3987, 294, 260, 16464, 40607, 793, 43296, 582, 1204, 4457, 6853, 515, 10082, 513, 793, 5304, 304, 76483, 10932, 101145, 1313, 5218, 85365, 881, 29728, 513, 793, 31299, 304, 270, 7622, 2496, 1537, 1901, 5218, 50159, 46807, 793, 4829, 362, 270, 7066, 1204, 52557, 85365, 1494, 1747, 304, 793, 10021, 54056, 1537, 1901, 19044, 50159, 881, 17621, 513, 793, 34909, 1204, 8074, 270, 21048, 294, 270, 9637, 905, 85, 9375, 294, 5794, 1537, 
11677, 30618, 905, 85, 31299, 304, 270, 2501, 305, 4829, 362, 270, 7066, 611, 1047, 26398, 412, 60389, 56108, 305, 68590, 1537, 305, 468, 769, 1047, 46959, 362, 793, 1347, 294, 4654, 1537, 582, 1482, 905, 85, 4063, 582, 1204, 539, 11677, 30618, 905, 85, 23881, 15021, 304, 1345, 411, 10636, 396, 436, 344, 412, 3375, 304, 4472, 793, 6853, 412, 396, 294, 28429, 295, 8304, 1313, 436, 515, 3375, 362, 1117, 7891, 22361, 554, 304, 366, 17210, 513, 1440, 1204, 6001, 1031, 515, 3214, 2658, 11677, 30618, 1537, 2811, 46494, 7128, 295, 270, 20624, 294, 3549, 8681, 294, 793, 1116, 1313, 78725, 552, 6843, 75, 905, 85, 4829, 362, 270, 7066, 1537, 10391, 2042, 905, 85, 101603, 1537, 305, 81084, 47583, 55963, 905, 85, 46750, 377, 270, 24004, 44773, 477, 260, 2767, 7165, 1204, 4895, 38224, 1537, 11677, 30618, 905, 85, 1116, 295, 38892, 270, 314, 134, 253, 66472, 538, 14097, 2004, 1946, 1055, 582, 260, 9329, 362, 7162, 46383, 90649, 582, 1341, 270, 6632, 362, 1750, 14319, 12770, 295, 270, 23306, 1204, 539, 660, 270, 223, 397, 463, 6683, 1537, 468, 515, 270, 29361, 22361, 294, 49218, 72290, 92087, 1537, 995, 769, 5654, 1440, 412, 582, 270, 11849, 2408, 2390, 15, 34, 32230, 1537, 2408, 2390, 15, 34, 22235, 22361, 995, 769, 28303, 295, 1117, 4063, 582, 1537, 305, 39008, 396, 1537, 582, 468, 769, 1960, 678, 260, 2993, 836, 1537, 412, 260, 12417, 11014, 305, 412, 260, 39477, 2331, 26369, 582, 1204, 54921, 438, 438, 438, 41129, 377, 10999, 8945, 438, 438, 438, 54921, 11677, 30618, 905, 85, 19106, 769, 1960, 260, 18978, 3683, 377, 10999, 8945, 1537, 4861, 377, 270, 8945, 538, 270, 20915, 496, 38689, 3285, 305, 377, 14436, 305, 46494, 295, 270, 114419, 3285, 1537, 2622, 46267, 26904, 65648, 16767, 1537, 270, 1855, 11849, 294, 710, 535, 16251, 46494, 1204, 8239, 295, 5970, 10999, 1537, 270, 2112, 15099, 294, 44249, 343, 223, 31236, 26228, 1537, 568, 1226, 75, 1900, 344, 11755, 67582, 418, 11677, 30618, 1204, 539, 35635, 270, 223, 907, 463, 6683, 1537, 270, 10999, 14743, 78725, 552, 6843, 75, 
3554, 710, 46494, 305, 1031, 881, 2767, 17157, 304, 11677, 30618, 1537, 7254, 793, 6853, 588, 366, 4326, 295, 1093, 50026, 6366, 343, 582, 7891, 19106, 1960, 513, 10999, 46494, 582, 1900, 42206, 7101, 1345, 412, 48823, 3332, 2009, 17083, 266, 994, 17083, 295, 270, 223, 27, 463, 6683, 1204, 455, 1257, 24917, 10999, 9499, 1741, 294, 11677, 30618, 905, 85, 19106, 515, 28496, 276, 2009, 23564, 343, 223, 8870, 26, 1256, 223, 9451, 24, 1900, 1537, 260, 433, 57416, 2238, 34253, 66242, 305, 834, 294, 270, 1473, 19496, 8951, 294, 270, 8945, 294, 270, 20782, 37891, 3749, 468, 7159, 46959, 11677, 30618, 305, 1960, 260, 36803, 377, 1093, 27107, 294, 11677, 30618, 538, 270, 10143, 294, 260, 34253, 25715, 295, 5799, 1204, 223, 779, 294, 115462, 74, 633, 3090, 17083, 1204, 4457, 5347, 899, 17083, 1508, 2374, 1697, 87, 14591, 1623, 50026, 6366, 778, 881, 8947, 10844, 582, 17210, 513, 11677, 30618, 582, 295, 786, 852, 10939, 1204, 899, 17083, 1508, 905, 85, 5347, 489, 329, 16767, 2009, 17083, 3037, 261, 936, 5010, 7530, 418, 270, 6605, 305, 19120, 635, 13913, 2009, 520, 371, 434, 305, 95307, 11677, 30618, 905, 85, 19106, 295, 270, 77259, 2058, 3749, 834, 2173, 112011, 16767, 58983, 73317, 1537, 270, 56186, 21296, 1250, 309, 294, 270, 6605, 305, 270, 8740, 10296, 294, 322, 992, 67, 19106, 1537, 4869, 489, 329, 16767, 1537, 582, 19431, 342, 3281, 270, 19106, 294, 11677, 30618, 305, 7645, 78725, 4054, 582, 489, 329, 16767, 78224, 304, 20430, 1537, 582, 11608, 855, 440, 696, 611, 4128, 23203, 1204, 3011, 855, 696, 554, 1204, 582, 8074, 1539, 1537, 1031, 936, 1047, 1623, 70234, 377, 11677, 30618, 905, 85, 19106, 1952, 295, 34253, 49184, 305, 295, 270, 111689, 6591, 1537, 305, 412, 260, 1529, 793, 19106, 515, 2915, 22961, 295, 10999, 8945, 295, 270, 20915, 496, 38689, 3285, 1537, 312, 3588, 16, 1537, 20491, 263, 10234, 1537, 260, 9600, 32230, 295, 270, 6218, 223, 929, 463, 6683, 1537, 305, 1093, 1119, 74, 10662, 1345, 412, 11697, 480, 6288, 1537, 65648, 16767, 1537, 305, 2009, 371, 4716, 
1204, 539, 10272, 270, 22002, 905, 26070, 9861, 294, 270, 114419, 3285, 343, 223, 10669, 22, 1256, 223, 10095, 21, 1900, 1537, 2009, 61360, 65276, 6339, 343, 223, 33028, 12940, 1900, 294, 270, 46740, 57709, 905, 85, 70075, 70272, 377, 11677, 30618, 905, 85, 462, 134, 253, 66472, 343, 223, 10540, 3079, 2089, 1335, 1537, 11118, 81375, 2009, 15045, 2238, 1900, 515, 37386, 1055, 6310, 1537, 305, 436, 17100, 48540, 23881, 295, 75637, 276, 14436, 305, 537, 16767, 71928, 343, 23721, 27295, 1900, 1312, 1204, 455, 36803, 7622, 11677, 30618, 905, 85, 39083, 412, 270, 8740, 294, 710, 46494, 3749, 362, 7409, 1537, 18716, 25836, 2009, 9921, 2238, 1537, 260, 24917, 75637, 276, 41335, 1537, 39008, 295, 5799, 1204, 223, 1942, 294, 489, 1165, 16767, 48823, 3037, 17083, 396, 1653, 60061, 71014, 75, 764, 11677, 30618, 6105, 515, 270, 1855, 2455, 22361, 295, 3980, 305, 46959, 2009, 61360, 65276, 6339, 905, 85, 36803, 362, 1009, 34059, 305, 101074, 1537, 1901, 468, 46529, 3072, 119637, 2184, 270, 48525, 57709, 881, 2727, 8946, 901, 316, 679, 1204, 46267, 26904, 65648, 16767, 1537, 270, 11849, 535, 16251, 22361, 1537, 515, 990, 15187, 17210, 513, 11677, 30618, 3749, 295, 541, 6648, 1119, 76595, 316, 33361, 1537, 793, 62762, 1537, 468, 75848, 270, 1257, 1234, 6243, 294, 334, 10468, 7288, 343, 223, 4040, 2660, 1900, 1936, 260, 535, 16251, 412, 1009, 13401, 305, 990, 1623, 294, 793, 915, 535, 16251, 611, 4032, 80240, 305, 19219, 1204, 983, 344, 1359, 396, 1082, 468, 9521, 295, 93660, 2184, 260, 1606, 5901, 1537, 260, 7610, 294, 11677, 30618, 905, 85, 19106, 515, 2006, 418, 1440, 412, 834, 294, 260, 2767, 27486, 7316, 778, 468, 515, 3495, 304, 7457, 2448, 1204, 54921, 438, 438, 40905, 438, 438, 54921, 334, 6890, 294, 18395, 611, 1047, 1505, 295, 8310, 304, 28295, 11677, 30618, 905, 85, 1116, 1055, 3947, 1204, 1924, 61008, 36965, 33867, 295, 455, 44017, 73042, 294, 11677, 30618, 1537, 582, 2454, 477, 1623, 1688, 4689, 304, 4090, 270, 4454, 5984, 295, 59926, 11677, 30618, 1537, 778, 344, 
3939, 579, 1309, 412, 1623, 1688, 41783, 412, 3338, 582, 343, 280, 16, 41498, 4268, 1900, 1204, 455, 118683, 611, 936, 304, 60967, 418, 15579, 798, 270, 10956, 18643, 294, 270, 4632, 2503, 56478, 7314, 21660, 304, 260, 10734, 12765, 343, 6440, 1082, 59926, 29559, 24817, 1537, 469, 314, 134, 253, 66472, 1900, 1537, 305, 92155, 270, 4190, 710, 35130, 14541, 6440, 295, 270, 3859, 2984, 343, 14510, 12974, 20366, 396, 582, 793, 27107, 696, 554, 412, 260, 7498, 2822, 1407, 1855, 1585, 295, 14891, 582, 2136, 280, 16, 82866, 1900, 1204, 4114, 15075, 377, 1660, 5452, 344, 11260, 513, 49218, 72290, 92087, 905, 85, 4114, 17967, 73042, 5795, 270, 7891, 1204, 4457, 477, 3080, 41783, 1537, 778, 6767, 304, 64948, 270, 11550, 10666, 1407, 576, 76, 2668, 1156, 305, 13163, 305, 40946, 294, 270, 3445, 3749, 793, 11632, 304, 270, 710, 35130, 477, 83248, 304, 83446, 1473, 294, 1305, 27107, 538, 793, 9581, 1537, 305, 98603, 304, 582, 28295, 798, 582, 270, 17157, 295, 1948, 2984, 778, 468, 1918, 4033, 1204, 539, 6241, 118683, 611, 8728, 1949, 5798, 5288, 377, 5958, 304, 17792, 260, 4880, 294, 270, 46383, 6823, 1505, 513, 11677, 30618, 1204, 40810, 966, 62705, 295, 13475, 7891, 9984, 1697, 6623, 3947, 2390, 15, 34, 5392, 66107, 22267, 1537, 12828, 36227, 16950, 81, 480, 295, 447, 8721, 270, 16078, 7793, 1074, 270, 7891, 66107, 13120, 3749, 1952, 1347, 1522, 2390, 15, 34, 12852, 6243, 305, 23950, 1093, 6954, 294, 120988, 1204, 660, 455, 44017, 73042, 294, 11677, 30618, 1537, 61008, 36965, 7226, 270, 11550, 10666, 5686, 29851, 1537, 21938, 11714, 270, 17739, 12791, 304, 6708, 304, 270, 27107, 4562, 1099, 20664, 45165, 1204, 21405, 1537, 468, 20833, 418, 270, 123301, 294, 270, 3859, 2984, 513, 26179, 39248, 14891, 418, 14325, 51232, 1204, 539, 660, 223, 643, 23, 1537, 20986, 47643, 5806, 41783, 1537, 418, 15199, 7891, 18490, 1537, 294, 270, 5553, 19106, 294, 11677, 30618, 295, 4104, 23680, 1537, 418, 14325, 45582, 15208, 1537, 778, 32092, 39248, 1719, 1204, 54921, 539, 438, 59152, 2042, 343, 
4114, 32797, 7223, 1900, 438, 54921, 582, 59152, 2042, 582, 344, 260, 7223, 11516, 513, 3947, 2390, 15, 34, 18532, 9916, 6762, 4114, 32797, 362, 786, 1957, 21040, 14143, 1537, 14033, 3428, 9278, 343, 223, 643, 20, 1900, 1204, 983, 515, 9400, 412, 270, 3676, 905, 85, 1957, 4031, 295, 10322, 305, 270, 4824, 6492, 4031, 377, 223, 25, 5513, 223, 643, 21, 1204, 455, 7223, 515, 14591, 513, 28887, 51417, 447, 50408, 7747, 1537, 28887, 976, 53541, 261, 1537, 1582, 13314, 62055, 3272, 1537, 17710, 276, 657, 2314, 16342, 1537, 45206, 3555, 305, 1009, 26974, 1537, 12620, 75322, 305, 433, 8336, 894, 439, 103919, 1204, 582, 59152, 2042, 582, 344, 411, 115062, 2333, 2482, 7223, 418, 12137, 4680, 3749, 270, 32584, 6271, 260, 47433, 905, 85, 2472, 10599, 341, 418, 260, 3168, 915, 1204, 86568, 46959, 270, 7223, 362, 1009, 4606, 1537, 14134, 436, 260, 2466, 2390, 15, 34, 798, 6781, 377, 14033, 3428, 9278, 1204, 539, 455, 6781, 5750, 270, 2740, 905, 85, 26571, 2656, 2390, 15, 34, 5147, 8180, 295, 16352, 305, 270, 3702, 14460, 1537, 1901, 98271, 2656, 2390, 15, 34, 12062, 11788, 295, 1952, 65024, 36305, 343, 116642, 305, 14543, 13191, 1900, 1537, 412, 1585, 412, 295, 9338, 1537, 8642, 1537, 36631, 1537, 8760, 1537, 2010, 17407, 1537, 305, 270, 17705, 1204, 455, 4031, 88992, 509, 1167, 223, 2372, 377, 270, 3076, 61458, 14746, 223, 1457, 305, 769, 1047, 31924, 8495, 513, 270, 77895, 20936, 9134, 294, 5992, 343, 54650, 6196, 1900, 362, 85678, 294, 223, 3712, 2390, 97256, 223, 1320, 21974, 1204, 4114, 32797, 7451, 582, 59152, 2042, 582, 377, 1952, 270, 7364, 305, 3076, 18333, 294, 455, 2137, 13941, 305, 223, 21, 3631, 25966, 39999, 1313, 14033, 3428, 9278, 15037, 343, 223, 643, 21, 1900, 1537, 11343, 1350, 6529, 15037, 343, 223, 643, 22, 1900, 305, 2483, 270, 12914, 16841, 15037, 343, 223, 643, 23, 1900, 1204, 539, 1417, 35616, 5146, 6217, 1537, 6558, 304, 5514, 270, 2740, 905, 85, 465, 43140, 20760, 1537, 515, 14717, 513, 104028, 32270, 574, 1537, 995, 936, 9479, 7911, 418, 270, 2740, 
377, 1234, 915, 5146, 17452, 1204, 455, 32025, 50914, 270, 6762, 22189, 3624, 22574, 5566, 260, 6726, 9421, 1537, 778, 3394, 18323, 65272, 294, 32380, 5146, 17452, 294, 15822, 1345, 412, 270, 21385, 43998, 905, 582, 7814, 725, 22733, 582, 1537, 94199, 5605, 3429, 905, 85, 582, 112748, 10281, 13774, 582, 305, 433, 4422, 11321, 905, 85, 582, 84685, 8311, 582, 1204, 455, 5146, 6217, 5730, 223, 553, 2390, 16, 34, 223, 22, 3646, 9003, 295, 260, 223, 1173, 2390, 15, 34, 6857, 3285, 305, 4337, 36803, 538, 63346, 1537, 995, 27788, 1009, 2312, 28620, 1537, 99619, 321, 479, 4936, 1204, 539, 455, 7223, 515, 5833, 295, 270, 32137, 3917, 6688, 36164, 223, 643, 22, 1537, 305, 344, 990, 834, 294, 270, 4033, 15822, 3510, 377, 270, 47289, 6013, 1204, 12148, 1537, 436, 344, 270, 4087, 1840, 6781, 377, 270, 3076, 14845, 294, 6583, 3841, 905, 85, 1999, 342, 14983, 14388, 4050, 223, 2372, 1204, 54921, 438, 438, 26944, 305, 7917, 438, 438, 54921, 582, 59152, 2042, 582, 515, 4866, 513, 28887, 51417, 447, 50408, 7747, 1537, 28887, 976, 53541, 261, 1537, 1582, 13314, 62055, 3272, 1537, 17710, 276, 657, 2314, 16342, 1537, 45206, 3555, 1537, 305, 1009, 26974, 1537, 12620, 75322, 305, 433, 8336, 894, 439, 103919, 1204, 75322, 1537, 657, 2314, 16342, 1537, 305, 894, 439, 103919, 936, 92080, 14591, 4114, 32797, 905, 85, 4412, 8180, 50346, 1537, 582, 1999, 60871, 2042, 47740, 582, 1537, 582, 4114, 66294, 582, 1537, 305, 582, 22094, 6001, 1350, 905, 266, 14390, 582, 1204, 660, 5383, 223, 643, 20, 1537, 455, 29023, 5768, 396, 20923, 31992, 574, 1537, 270, 2740, 905, 85, 11466, 1537, 936, 29488, 19496, 7223, 82324, 304, 26630, 362, 3987, 377, 4114, 32797, 905, 85, 1957, 14143, 1204, 75322, 1359, 1537, 582, 983, 905, 85, 2239, 304, 1178, 786, 50758, 377, 270, 5146, 1204, 582, 660, 3012, 1537, 270, 3783, 5768, 396, 9221, 1692, 26453, 515, 3946, 377, 15644, 396, 5833, 10988, 12572, 305, 53541, 261, 1204, 539, 582, 59152, 2042, 582, 515, 12033, 412, 270, 1957, 3076, 4031, 305, 4824, 6128, 538, 786, 
1957, 21040, 14143, 1537, 14033, 3428, 9278, 1204, 73624, 82031, 1537, 260, 2740, 5637, 1537, 295, 260, 5997, 223, 643, 20, 8222, 418, 105197, 7612, 1537, 10785, 3939, 851, 16789, 582, 59152, 2042, 582, 412, 270, 14143, 905, 85, 1957, 4031, 295, 270, 3076, 1204, 82031, 515, 31037, 412, 8174, 1313, 582, 3710, 270, 14143, 1537, 396, 905, 85, 270, 1257, 834, 396, 579, 34226, 304, 305, 579, 881, 1277, 1537, 905, 43665, 1537, 579, 3518, 566, 7223, 905, 582, 1204, 9498, 304, 260, 105197, 7612, 3783, 1537, 270, 1167, 515, 9400, 76258, 295, 270, 3702, 4106, 377, 223, 1002, 5997, 223, 643, 20, 1204, 3715, 223, 864, 5513, 223, 643, 21, 1537, 270, 7223, 936, 554, 1047, 27890, 30226, 304, 3076, 10600, 19612, 1204, 455, 6781, 1537, 4499, 1537, 515, 9400, 513, 46476, 14388, 39881, 377, 223, 26, 6396, 223, 643, 21, 1537, 412, 270, 3676, 905, 85, 1957, 4031, 295, 10322, 1204, 54921, 438, 438, 53110, 305, 30135, 438, 438, 54921, 582, 59152, 2042, 582, 344, 411, 25964, 93729, 1537, 115062, 2333, 2482, 7223, 778, 12122, 362, 260, 14473, 294, 223, 21, 1313, 223, 3663, 343, 223, 21, 4786, 1537, 2689, 9651, 1900, 1204, 455, 6781, 5381, 12137, 4680, 1537, 122960, 69943, 1537, 260, 582, 3119, 3119, 3119, 582, 27703, 1537, 305, 260, 11501, 1164, 2390, 15, 34, 26206, 284, 54736, 1204, 4114, 32797, 905, 85, 26897, 3291, 295, 270, 7223, 19369, 538, 270, 7155, 294, 446, 22, 304, 345, 30717, 110, 223, 24, 1204, 46907, 341, 5260, 23957, 21021, 1537, 25986, 6243, 305, 51811, 1204, 41070, 295, 270, 3077, 294, 446, 3631, 1537, 270, 15603, 344, 1341, 295, 2953, 1014, 305, 14712, 509, 260, 4787, 223, 2225, 45007, 716, 10317, 1537, 4509, 304, 270, 6517, 12909, 5146, 5806, 509, 14388, 33955, 2193, 513, 46476, 1492, 14193, 56, 14388, 20635, 1204, 42478, 1537, 18394, 11142, 287, 538, 2899, 74217, 9764, 396, 270, 6781, 344, 582, 62540, 36291, 12139, 582, 1204, 455, 102209, 3445, 30977, 270, 47433, 905, 85, 2472, 10599, 341, 418, 260, 3168, 915, 1537, 305, 46699, 312, 1425, 2868, 10666, 362, 10479, 76637, 
295, 270, 6243, 582, 2052, 440, 2090, 13399, 259, 73070, 2188, 436, 6470, 1492, 1890, 440, 1438, 73070, 2188, 678, 2680, 1492, 34153, 2507, 39339, 1537, 39339, 1537, 39339, 1537, 39339, 1537, 39339, 1204, 582, 539, 582, 59152, 2042, 582, 515, 1585, 5730, 513, 15874, 5146, 30454, 1537, 995, 91820, 377, 1009, 4474, 294, 4606, 1204, 11309, 69921, 22583, 905, 85, 16177, 31607, 276, 1537, 995, 46959, 1009, 15385, 1537, 305, 19449, 82031, 294, 61458, 1537, 995, 27788, 270, 54736, 1537, 5654, 582, 59152, 2042, 582, 412, 834, 294, 270, 14143, 905, 85, 20078, 1204, 95743, 10268, 2379, 278, 362, 455, 39077, 118829, 270, 6781, 905, 85, 56271, 412, 582, 3375, 304, 787, 78, 25636, 538, 782, 6773, 582, 1204, 10498, 27581, 18829, 294, 11718, 81317, 9764, 270, 7223, 905, 85, 12644, 304, 3644, 411, 6128, 8180, 1537, 73639, 74109, 436, 4053, 1875, 1204, 334, 82317, 362, 105197, 7612, 5654, 270, 6781, 905, 85, 86104, 1478, 412, 582, 64850, 2390, 15, 34, 69446, 582, 1537, 305, 7789, 86628, 5701, 294, 7509, 331, 1741, 8398, 396, 582, 59152, 2042, 582, 344, 111249, 260, 2466, 2390, 15, 34, 798, 6781, 377, 1009, 4222, 14143, 1204, 11601, 17320, 59188, 1537, 4985, 362, 23936, 97372, 1537, 20057, 270, 7223, 412, 582, 260, 291, 1871, 1239, 1537, 12137, 32734, 14365, 1189, 1537, 582, 1901, 19449, 894, 1871, 500, 1537, 260, 12306, 538, 223, 22, 41368, 1537, 26807, 436, 411, 582, 12274, 2482, 7223, 582, 1537, 847, 74109, 270, 2740, 905, 85, 38307, 36688, 305, 1009, 582, 2578, 2390, 15, 34, 291, 8934, 411, 1805, 11139, 582, 56271, 1204, 54921, 438, 438, 36770, 4197, 438, 438, 54921, 455, 4031, 1960, 1009, 18532, 93137, 25383, 26425, 509, 1167, 223, 1173, 377, 270, 3469, 18249, 223, 907, 5899, 223, 643, 20, 1204, 983, 88992, 509, 1167, 6170, 377, 270, 3469, 18249, 223, 1002, 5513, 223, 643, 21, 1537, 37255, 786, 26571, 2656, 5147, 12343, 295, 16352, 1204, 582, 59152, 2042, 582, 12736, 509, 1167, 223, 10073, 295, 270, 7364, 93137, 25383, 377, 223, 1173, 5997, 223, 643, 20, 1204, 983, 88992, 509, 
1167, 7341, 377, 270, 7364, 93137, 25383, 377, 223, 1450, 5513, 223, 643, 21, 1537, 10998, 4114, 32797, 905, 85, 26571, 2656, 5147, 8180, 295, 270, 3702, 14460, 1204, 2483, 270, 3469, 18249, 223, 864, 5997, 223, 643, 20, 1537, 582, 59152, 2042, 582, 120036, 509, 1167, 223, 2225, 377, 270, 3702, 4106, 61458, 14746, 223, 1457, 3504, 304, 6517, 8986, 8432, 538, 1009, 4222, 14143, 1204, 1924, 260, 1529, 294, 411, 582, 1522, 2390, 15, 34, 294, 2390, 15, 34, 1141, 8986, 30373, 582, 377, 270, 3469, 18249, 223, 1059, 5899, 223, 643, 20, 1537, 270, 6781, 322, 2390, 15, 34, 12736, 270, 14746, 223, 1457, 509, 1167, 223, 6131, 1204, 5057, 270, 35616, 5146, 6217, 515, 9400, 1537, 270, 7223, 322, 2390, 15, 34, 12736, 270, 14746, 223, 1457, 509, 1167, 223, 2738, 1204, 582, 59152, 2042, 582, 936, 7340, 223, 13423, 2390, 97256, 223, 1320, 6517, 70286, 295, 270, 3076, 513, 223, 864, 5513, 223, 643, 21, 1204, 455, 4031, 14835, 88992, 509, 1167, 223, 2372, 377, 270, 14746, 223, 1457, 305, 515, 31924, 8495, 513, 270, 77895, 20936, 9134, 294, 5992, 343, 54650, 6196, 1900, 377, 223, 1069, 5383, 223, 643, 21, 1537, 108197, 85678, 294, 223, 3712, 2390, 97256, 223, 1320, 21974, 1204, 539, 455, 7223, 5750 ================================================ FILE: vendor/fmt/base.h ================================================ // Formatting library for C++ - the base API for char/UTF-8 // // Copyright (c) 2012 - present, Victor Zverovich // All rights reserved. // // For the license information refer to format.h. #ifndef FMT_BASE_H_ #define FMT_BASE_H_ #if defined(FMT_IMPORT_STD) && !defined(FMT_MODULE) # define FMT_MODULE #endif #ifndef FMT_MODULE # include // CHAR_BIT # include // FILE # include // strlen // is also included transitively from . # include // std::byte # include // std::enable_if #endif // The fmt library version in the form major * 10000 + minor * 100 + patch. #define FMT_VERSION 110002 // Detect compiler versions. 
#if defined(__clang__) && !defined(__ibmxl__) # define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__) #else # define FMT_CLANG_VERSION 0 #endif #if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) # define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) #else # define FMT_GCC_VERSION 0 #endif #if defined(__ICL) # define FMT_ICC_VERSION __ICL #elif defined(__INTEL_COMPILER) # define FMT_ICC_VERSION __INTEL_COMPILER #else # define FMT_ICC_VERSION 0 #endif #if defined(_MSC_VER) # define FMT_MSC_VERSION _MSC_VER #else # define FMT_MSC_VERSION 0 #endif // Detect standard library versions. #ifdef _GLIBCXX_RELEASE # define FMT_GLIBCXX_RELEASE _GLIBCXX_RELEASE #else # define FMT_GLIBCXX_RELEASE 0 #endif #ifdef _LIBCPP_VERSION # define FMT_LIBCPP_VERSION _LIBCPP_VERSION #else # define FMT_LIBCPP_VERSION 0 #endif #ifdef _MSVC_LANG # define FMT_CPLUSPLUS _MSVC_LANG #else # define FMT_CPLUSPLUS __cplusplus #endif // Detect __has_*. #ifdef __has_feature # define FMT_HAS_FEATURE(x) __has_feature(x) #else # define FMT_HAS_FEATURE(x) 0 #endif #ifdef __has_include # define FMT_HAS_INCLUDE(x) __has_include(x) #else # define FMT_HAS_INCLUDE(x) 0 #endif #ifdef __has_cpp_attribute # define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) #else # define FMT_HAS_CPP_ATTRIBUTE(x) 0 #endif #define FMT_HAS_CPP14_ATTRIBUTE(attribute) \ (FMT_CPLUSPLUS >= 201402L && FMT_HAS_CPP_ATTRIBUTE(attribute)) #define FMT_HAS_CPP17_ATTRIBUTE(attribute) \ (FMT_CPLUSPLUS >= 201703L && FMT_HAS_CPP_ATTRIBUTE(attribute)) // Detect C++14 relaxed constexpr. #ifdef FMT_USE_CONSTEXPR // Use the provided definition. #elif FMT_GCC_VERSION >= 600 && FMT_CPLUSPLUS >= 201402L // GCC only allows throw in constexpr since version 6: // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67371. 
# define FMT_USE_CONSTEXPR 1 #elif FMT_ICC_VERSION # define FMT_USE_CONSTEXPR 0 // https://github.com/fmtlib/fmt/issues/1628 #elif FMT_HAS_FEATURE(cxx_relaxed_constexpr) || FMT_MSC_VERSION >= 1912 # define FMT_USE_CONSTEXPR 1 #else # define FMT_USE_CONSTEXPR 0 #endif #if FMT_USE_CONSTEXPR # define FMT_CONSTEXPR constexpr #else # define FMT_CONSTEXPR #endif // Detect consteval, C++20 constexpr extensions and std::is_constant_evaluated. #if !defined(__cpp_lib_is_constant_evaluated) # define FMT_USE_CONSTEVAL 0 #elif FMT_CPLUSPLUS < 201709L # define FMT_USE_CONSTEVAL 0 #elif FMT_GLIBCXX_RELEASE && FMT_GLIBCXX_RELEASE < 10 # define FMT_USE_CONSTEVAL 0 #elif FMT_LIBCPP_VERSION && FMT_LIBCPP_VERSION < 10000 # define FMT_USE_CONSTEVAL 0 #elif defined(__apple_build_version__) && __apple_build_version__ < 14000029L # define FMT_USE_CONSTEVAL 0 // consteval is broken in Apple clang < 14. #elif FMT_MSC_VERSION && FMT_MSC_VERSION < 1929 # define FMT_USE_CONSTEVAL 0 // consteval is broken in MSVC VS2019 < 16.10. #elif defined(__cpp_consteval) # define FMT_USE_CONSTEVAL 1 #elif FMT_GCC_VERSION >= 1002 || FMT_CLANG_VERSION >= 1101 # define FMT_USE_CONSTEVAL 1 #else # define FMT_USE_CONSTEVAL 0 #endif #if FMT_USE_CONSTEVAL # define FMT_CONSTEVAL consteval # define FMT_CONSTEXPR20 constexpr #else # define FMT_CONSTEVAL # define FMT_CONSTEXPR20 #endif #if defined(FMT_USE_NONTYPE_TEMPLATE_ARGS) // Use the provided definition. #elif defined(__NVCOMPILER) # define FMT_USE_NONTYPE_TEMPLATE_ARGS 0 #elif FMT_GCC_VERSION >= 903 && FMT_CPLUSPLUS >= 201709L # define FMT_USE_NONTYPE_TEMPLATE_ARGS 1 #elif defined(__cpp_nontype_template_args) && \ __cpp_nontype_template_args >= 201911L # define FMT_USE_NONTYPE_TEMPLATE_ARGS 1 #elif FMT_CLANG_VERSION >= 1200 && FMT_CPLUSPLUS >= 202002L # define FMT_USE_NONTYPE_TEMPLATE_ARGS 1 #else # define FMT_USE_NONTYPE_TEMPLATE_ARGS 0 #endif #ifdef FMT_USE_CONCEPTS // Use the provided definition. 
#elif defined(__cpp_concepts) # define FMT_USE_CONCEPTS 1 #else # define FMT_USE_CONCEPTS 0 #endif // Check if exceptions are disabled. #ifdef FMT_EXCEPTIONS // Use the provided definition. #elif defined(__GNUC__) && !defined(__EXCEPTIONS) # define FMT_EXCEPTIONS 0 #elif FMT_MSC_VERSION && !_HAS_EXCEPTIONS # define FMT_EXCEPTIONS 0 #else # define FMT_EXCEPTIONS 1 #endif #if FMT_EXCEPTIONS # define FMT_TRY try # define FMT_CATCH(x) catch (x) #else # define FMT_TRY if (true) # define FMT_CATCH(x) if (false) #endif #if FMT_HAS_CPP17_ATTRIBUTE(fallthrough) # define FMT_FALLTHROUGH [[fallthrough]] #elif defined(__clang__) # define FMT_FALLTHROUGH [[clang::fallthrough]] #elif FMT_GCC_VERSION >= 700 && \ (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 520) # define FMT_FALLTHROUGH [[gnu::fallthrough]] #else # define FMT_FALLTHROUGH #endif // Disable [[noreturn]] on MSVC/NVCC because of bogus unreachable code warnings. #if FMT_HAS_CPP_ATTRIBUTE(noreturn) && !FMT_MSC_VERSION && !defined(__NVCC__) # define FMT_NORETURN [[noreturn]] #else # define FMT_NORETURN #endif #ifndef FMT_NODISCARD # if FMT_HAS_CPP17_ATTRIBUTE(nodiscard) # define FMT_NODISCARD [[nodiscard]] # else # define FMT_NODISCARD # endif #endif #ifdef FMT_DEPRECATED // Use the provided definition. #elif FMT_HAS_CPP14_ATTRIBUTE(deprecated) # define FMT_DEPRECATED [[deprecated]] #else # define FMT_DEPRECATED /* deprecated */ #endif #ifdef FMT_INLINE // Use the provided definition. #elif FMT_GCC_VERSION || FMT_CLANG_VERSION # define FMT_ALWAYS_INLINE inline __attribute__((always_inline)) #else # define FMT_ALWAYS_INLINE inline #endif // A version of FMT_INLINE to prevent code bloat in debug mode. 
#ifdef NDEBUG # define FMT_INLINE FMT_ALWAYS_INLINE #else # define FMT_INLINE inline #endif #if FMT_GCC_VERSION || FMT_CLANG_VERSION # define FMT_VISIBILITY(value) __attribute__((visibility(value))) #else # define FMT_VISIBILITY(value) #endif #ifndef FMT_GCC_PRAGMA // Workaround a _Pragma bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59884 // and an nvhpc warning: https://github.com/fmtlib/fmt/pull/2582. # if FMT_GCC_VERSION >= 504 && !defined(__NVCOMPILER) # define FMT_GCC_PRAGMA(arg) _Pragma(arg) # else # define FMT_GCC_PRAGMA(arg) # endif #endif // GCC < 5 requires this-> in decltype. #if FMT_GCC_VERSION && FMT_GCC_VERSION < 500 # define FMT_DECLTYPE_THIS this-> #else # define FMT_DECLTYPE_THIS #endif #if FMT_MSC_VERSION # define FMT_MSC_WARNING(...) __pragma(warning(__VA_ARGS__)) # define FMT_UNCHECKED_ITERATOR(It) \ using _Unchecked_type = It // Mark iterator as checked. #else # define FMT_MSC_WARNING(...) # define FMT_UNCHECKED_ITERATOR(It) using unchecked_type = It #endif #ifndef FMT_BEGIN_NAMESPACE # define FMT_BEGIN_NAMESPACE \ namespace fmt { \ inline namespace v11 { # define FMT_END_NAMESPACE \ } \ } #endif #ifndef FMT_EXPORT # define FMT_EXPORT # define FMT_BEGIN_EXPORT # define FMT_END_EXPORT #endif #if !defined(FMT_HEADER_ONLY) && defined(_WIN32) # if defined(FMT_LIB_EXPORT) # define FMT_API __declspec(dllexport) # elif defined(FMT_SHARED) # define FMT_API __declspec(dllimport) # endif #elif defined(FMT_LIB_EXPORT) || defined(FMT_SHARED) # define FMT_API FMT_VISIBILITY("default") #endif #ifndef FMT_API # define FMT_API #endif #ifndef FMT_UNICODE # define FMT_UNICODE 1 #endif // Check if rtti is available. #ifndef FMT_USE_RTTI // __RTTI is for EDG compilers. _CPPRTTI is for MSVC. # if defined(__GXX_RTTI) || FMT_HAS_FEATURE(cxx_rtti) || defined(_CPPRTTI) || \ defined(__INTEL_RTTI__) || defined(__RTTI) # define FMT_USE_RTTI 1 # else # define FMT_USE_RTTI 0 # endif #endif #define FMT_FWD(...) 
static_cast(__VA_ARGS__) // Enable minimal optimizations for more compact code in debug mode. FMT_GCC_PRAGMA("GCC push_options") #if !defined(__OPTIMIZE__) && !defined(__CUDACC__) FMT_GCC_PRAGMA("GCC optimize(\"Og\")") #endif FMT_BEGIN_NAMESPACE // Implementations of enable_if_t and other metafunctions for older systems. template using enable_if_t = typename std::enable_if::type; template using conditional_t = typename std::conditional::type; template using bool_constant = std::integral_constant; template using remove_reference_t = typename std::remove_reference::type; template using remove_const_t = typename std::remove_const::type; template using remove_cvref_t = typename std::remove_cv>::type; template struct type_identity { using type = T; }; template using type_identity_t = typename type_identity::type; template using make_unsigned_t = typename std::make_unsigned::type; template using underlying_t = typename std::underlying_type::type; #if FMT_GCC_VERSION && FMT_GCC_VERSION < 500 // A workaround for gcc 4.8 to make void_t work in a SFINAE context. template struct void_t_impl { using type = void; }; template using void_t = typename void_t_impl::type; #else template using void_t = void; #endif struct monostate { constexpr monostate() {} }; // An enable_if helper to be used in template parameters which results in much // shorter symbols: https://godbolt.org/z/sWw4vP. Extra parentheses are needed // to workaround a bug in MSVC 2019 (see #1140 and #1186). #ifdef FMT_DOC # define FMT_ENABLE_IF(...) #else # define FMT_ENABLE_IF(...) fmt::enable_if_t<(__VA_ARGS__), int> = 0 #endif // This is defined in base.h instead of format.h to avoid injecting in std. // It is a template to avoid undesirable implicit conversions to std::byte. 
#ifdef __cpp_lib_byte template ::value)> inline auto format_as(T b) -> unsigned char { return static_cast(b); } #endif namespace detail { // Suppresses "unused variable" warnings with the method described in // https://herbsutter.com/2009/10/18/mailbag-shutting-up-compiler-warnings/. // (void)var does not work on many Intel compilers. template FMT_CONSTEXPR void ignore_unused(const T&...) {} constexpr auto is_constant_evaluated(bool default_value = false) noexcept -> bool { // Workaround for incompatibility between libstdc++ consteval-based // std::is_constant_evaluated() implementation and clang-14: // https://github.com/fmtlib/fmt/issues/3247. #if FMT_CPLUSPLUS >= 202002L && FMT_GLIBCXX_RELEASE >= 12 && \ (FMT_CLANG_VERSION >= 1400 && FMT_CLANG_VERSION < 1500) ignore_unused(default_value); return __builtin_is_constant_evaluated(); #elif defined(__cpp_lib_is_constant_evaluated) ignore_unused(default_value); return std::is_constant_evaluated(); #else return default_value; #endif } // Suppresses "conditional expression is constant" warnings. template constexpr auto const_check(T value) -> T { return value; } FMT_NORETURN FMT_API void assert_fail(const char* file, int line, const char* message); #if defined(FMT_ASSERT) // Use the provided definition. #elif defined(NDEBUG) // FMT_ASSERT is not empty to avoid -Wempty-body. # define FMT_ASSERT(condition, message) \ fmt::detail::ignore_unused((condition), (message)) #else # define FMT_ASSERT(condition, message) \ ((condition) /* void() fails with -Winvalid-constexpr on clang 4.0.1 */ \ ? (void)0 \ : fmt::detail::assert_fail(__FILE__, __LINE__, (message))) #endif #ifdef FMT_USE_INT128 // Do nothing. #elif defined(__SIZEOF_INT128__) && !defined(__NVCC__) && \ !(FMT_CLANG_VERSION && FMT_MSC_VERSION) # define FMT_USE_INT128 1 using int128_opt = __int128_t; // An optional native 128-bit integer. 
using uint128_opt = __uint128_t; template inline auto convert_for_visit(T value) -> T { return value; } #else # define FMT_USE_INT128 0 #endif #if !FMT_USE_INT128 enum class int128_opt {}; enum class uint128_opt {}; // Reduce template instantiations. template auto convert_for_visit(T) -> monostate { return {}; } #endif // Casts a nonnegative integer to unsigned. template FMT_CONSTEXPR auto to_unsigned(Int value) -> make_unsigned_t { FMT_ASSERT(std::is_unsigned::value || value >= 0, "negative value"); return static_cast>(value); } // A heuristic to detect std::string and std::[experimental::]string_view. // It is mainly used to avoid dependency on <[experimental/]string_view>. template struct is_std_string_like : std::false_type {}; template struct is_std_string_like().find_first_of( typename T::value_type(), 0))>> : std::is_convertible().data()), const typename T::value_type*> {}; // Returns true iff the literal encoding is UTF-8. constexpr auto is_utf8_enabled() -> bool { // Avoid an MSVC sign extension bug: https://github.com/fmtlib/fmt/pull/2297. 
using uchar = unsigned char; return sizeof("\u00A7") == 3 && uchar("\u00A7"[0]) == 0xC2 && uchar("\u00A7"[1]) == 0xA7; } constexpr auto use_utf8() -> bool { return !FMT_MSC_VERSION || is_utf8_enabled(); } static_assert(!FMT_UNICODE || use_utf8(), "Unicode support requires compiling with /utf-8"); template FMT_CONSTEXPR auto length(const Char* s) -> size_t { size_t len = 0; while (*s++) ++len; return len; } template FMT_CONSTEXPR auto compare(const Char* s1, const Char* s2, std::size_t n) -> int { if (!is_constant_evaluated() && sizeof(Char) == 1) return memcmp(s1, s2, n); for (; n != 0; ++s1, ++s2, --n) { if (*s1 < *s2) return -1; if (*s1 > *s2) return 1; } return 0; } namespace adl { using namespace std; template auto invoke_back_inserter() -> decltype(back_inserter(std::declval())); } // namespace adl template struct is_back_insert_iterator : std::false_type {}; template struct is_back_insert_iterator< It, bool_constant()), It>::value>> : std::true_type {}; // Extracts a reference to the container from *insert_iterator. template inline auto get_container(OutputIt it) -> typename OutputIt::container_type& { struct accessor : OutputIt { accessor(OutputIt base) : OutputIt(base) {} using OutputIt::container; }; return *accessor(it).container; } } // namespace detail // Checks whether T is a container with contiguous storage. template struct is_contiguous : std::false_type {}; /** * An implementation of `std::basic_string_view` for pre-C++17. It provides a * subset of the API. `fmt::basic_string_view` is used for format strings even * if `std::basic_string_view` is available to prevent issues when a library is * compiled with a different `-std` option than the client code (which is not * recommended). 
*/ FMT_EXPORT template class basic_string_view { private: const Char* data_; size_t size_; public: using value_type = Char; using iterator = const Char*; constexpr basic_string_view() noexcept : data_(nullptr), size_(0) {} /// Constructs a string reference object from a C string and a size. constexpr basic_string_view(const Char* s, size_t count) noexcept : data_(s), size_(count) {} constexpr basic_string_view(std::nullptr_t) = delete; /// Constructs a string reference object from a C string. FMT_CONSTEXPR20 basic_string_view(const Char* s) : data_(s), size_(detail::const_check(std::is_same::value && !detail::is_constant_evaluated(false)) ? strlen(reinterpret_cast(s)) : detail::length(s)) {} /// Constructs a string reference from a `std::basic_string` or a /// `std::basic_string_view` object. template ::value&& std::is_same< typename S::value_type, Char>::value)> FMT_CONSTEXPR basic_string_view(const S& s) noexcept : data_(s.data()), size_(s.size()) {} /// Returns a pointer to the string data. constexpr auto data() const noexcept -> const Char* { return data_; } /// Returns the string size. constexpr auto size() const noexcept -> size_t { return size_; } constexpr auto begin() const noexcept -> iterator { return data_; } constexpr auto end() const noexcept -> iterator { return data_ + size_; } constexpr auto operator[](size_t pos) const noexcept -> const Char& { return data_[pos]; } FMT_CONSTEXPR void remove_prefix(size_t n) noexcept { data_ += n; size_ -= n; } FMT_CONSTEXPR auto starts_with(basic_string_view sv) const noexcept -> bool { return size_ >= sv.size_ && detail::compare(data_, sv.data_, sv.size_) == 0; } FMT_CONSTEXPR auto starts_with(Char c) const noexcept -> bool { return size_ >= 1 && *data_ == c; } FMT_CONSTEXPR auto starts_with(const Char* s) const -> bool { return starts_with(basic_string_view(s)); } // Lexicographically compare this string reference to other. 
FMT_CONSTEXPR auto compare(basic_string_view other) const -> int { size_t str_size = size_ < other.size_ ? size_ : other.size_; int result = detail::compare(data_, other.data_, str_size); if (result == 0) result = size_ == other.size_ ? 0 : (size_ < other.size_ ? -1 : 1); return result; } FMT_CONSTEXPR friend auto operator==(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) == 0; } friend auto operator!=(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) != 0; } friend auto operator<(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) < 0; } friend auto operator<=(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) <= 0; } friend auto operator>(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) > 0; } friend auto operator>=(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) >= 0; } }; FMT_EXPORT using string_view = basic_string_view; /// Specifies if `T` is a character type. Can be specialized by users. FMT_EXPORT template struct is_char : std::false_type {}; template <> struct is_char : std::true_type {}; namespace detail { // Constructs fmt::basic_string_view from types implicitly convertible // to it, deducing Char. Explicitly convertible types such as the ones returned // from FMT_STRING are intentionally excluded. template ::value)> constexpr auto to_string_view(const Char* s) -> basic_string_view { return s; } template ::value)> constexpr auto to_string_view(const T& s) -> basic_string_view { return s; } template constexpr auto to_string_view(basic_string_view s) -> basic_string_view { return s; } template struct has_to_string_view : std::false_type {}; // detail:: is intentional since to_string_view is not an extension point. 
template struct has_to_string_view< T, void_t()))>> : std::true_type {}; template struct string_literal { static constexpr Char value[sizeof...(C)] = {C...}; constexpr operator basic_string_view() const { return {value, sizeof...(C)}; } }; #if FMT_CPLUSPLUS < 201703L template constexpr Char string_literal::value[sizeof...(C)]; #endif enum class type { none_type, // Integer types should go first, int_type, uint_type, long_long_type, ulong_long_type, int128_type, uint128_type, bool_type, char_type, last_integer_type = char_type, // followed by floating-point types. float_type, double_type, long_double_type, last_numeric_type = long_double_type, cstring_type, string_type, pointer_type, custom_type }; // Maps core type T to the corresponding type enum constant. template struct type_constant : std::integral_constant {}; #define FMT_TYPE_CONSTANT(Type, constant) \ template \ struct type_constant \ : std::integral_constant {} FMT_TYPE_CONSTANT(int, int_type); FMT_TYPE_CONSTANT(unsigned, uint_type); FMT_TYPE_CONSTANT(long long, long_long_type); FMT_TYPE_CONSTANT(unsigned long long, ulong_long_type); FMT_TYPE_CONSTANT(int128_opt, int128_type); FMT_TYPE_CONSTANT(uint128_opt, uint128_type); FMT_TYPE_CONSTANT(bool, bool_type); FMT_TYPE_CONSTANT(Char, char_type); FMT_TYPE_CONSTANT(float, float_type); FMT_TYPE_CONSTANT(double, double_type); FMT_TYPE_CONSTANT(long double, long_double_type); FMT_TYPE_CONSTANT(const Char*, cstring_type); FMT_TYPE_CONSTANT(basic_string_view, string_type); FMT_TYPE_CONSTANT(const void*, pointer_type); constexpr auto is_integral_type(type t) -> bool { return t > type::none_type && t <= type::last_integer_type; } constexpr auto is_arithmetic_type(type t) -> bool { return t > type::none_type && t <= type::last_numeric_type; } constexpr auto set(type rhs) -> int { return 1 << static_cast(rhs); } constexpr auto in(type t, int set) -> bool { return ((set >> static_cast(t)) & 1) != 0; } // Bitsets of types. 
enum { sint_set = set(type::int_type) | set(type::long_long_type) | set(type::int128_type), uint_set = set(type::uint_type) | set(type::ulong_long_type) | set(type::uint128_type), bool_set = set(type::bool_type), char_set = set(type::char_type), float_set = set(type::float_type) | set(type::double_type) | set(type::long_double_type), string_set = set(type::string_type), cstring_set = set(type::cstring_type), pointer_set = set(type::pointer_type) }; } // namespace detail /// Reports a format error at compile time or, via a `format_error` exception, /// at runtime. // This function is intentionally not constexpr to give a compile-time error. FMT_NORETURN FMT_API void report_error(const char* message); FMT_DEPRECATED FMT_NORETURN inline void throw_format_error( const char* message) { report_error(message); } /// String's character (code unit) type. template ()))> using char_t = typename V::value_type; /** * Parsing context consisting of a format string range being parsed and an * argument counter for automatic indexing. * You can use the `format_parse_context` type alias for `char` instead. */ FMT_EXPORT template class basic_format_parse_context { private: basic_string_view format_str_; int next_arg_id_; FMT_CONSTEXPR void do_check_arg_id(int id); public: using char_type = Char; using iterator = const Char*; explicit constexpr basic_format_parse_context( basic_string_view format_str, int next_arg_id = 0) : format_str_(format_str), next_arg_id_(next_arg_id) {} /// Returns an iterator to the beginning of the format string range being /// parsed. constexpr auto begin() const noexcept -> iterator { return format_str_.begin(); } /// Returns an iterator past the end of the format string range being parsed. constexpr auto end() const noexcept -> iterator { return format_str_.end(); } /// Advances the begin iterator to `it`. 
FMT_CONSTEXPR void advance_to(iterator it) { format_str_.remove_prefix(detail::to_unsigned(it - begin())); } /// Reports an error if using the manual argument indexing; otherwise returns /// the next argument index and switches to the automatic indexing. FMT_CONSTEXPR auto next_arg_id() -> int { if (next_arg_id_ < 0) { report_error("cannot switch from manual to automatic argument indexing"); return 0; } int id = next_arg_id_++; do_check_arg_id(id); return id; } /// Reports an error if using the automatic argument indexing; otherwise /// switches to the manual indexing. FMT_CONSTEXPR void check_arg_id(int id) { if (next_arg_id_ > 0) { report_error("cannot switch from automatic to manual argument indexing"); return; } next_arg_id_ = -1; do_check_arg_id(id); } FMT_CONSTEXPR void check_arg_id(basic_string_view) { next_arg_id_ = -1; } FMT_CONSTEXPR void check_dynamic_spec(int arg_id); }; FMT_EXPORT using format_parse_context = basic_format_parse_context; namespace detail { // A parse context with extra data used only in compile-time checks. 
template class compile_parse_context : public basic_format_parse_context { private: int num_args_; const type* types_; using base = basic_format_parse_context; public: explicit FMT_CONSTEXPR compile_parse_context( basic_string_view format_str, int num_args, const type* types, int next_arg_id = 0) : base(format_str, next_arg_id), num_args_(num_args), types_(types) {} constexpr auto num_args() const -> int { return num_args_; } constexpr auto arg_type(int id) const -> type { return types_[id]; } FMT_CONSTEXPR auto next_arg_id() -> int { int id = base::next_arg_id(); if (id >= num_args_) report_error("argument not found"); return id; } FMT_CONSTEXPR void check_arg_id(int id) { base::check_arg_id(id); if (id >= num_args_) report_error("argument not found"); } using base::check_arg_id; FMT_CONSTEXPR void check_dynamic_spec(int arg_id) { detail::ignore_unused(arg_id); if (arg_id < num_args_ && types_ && !is_integral_type(types_[arg_id])) report_error("width/precision is not integer"); } }; /// A contiguous memory buffer with an optional growing ability. It is an /// internal class and shouldn't be used directly, only via `memory_buffer`. template class buffer { private: T* ptr_; size_t size_; size_t capacity_; using grow_fun = void (*)(buffer& buf, size_t capacity); grow_fun grow_; protected: // Don't initialize ptr_ since it is not accessed to save a few cycles. FMT_MSC_WARNING(suppress : 26495) FMT_CONSTEXPR20 buffer(grow_fun grow, size_t sz) noexcept : size_(sz), capacity_(sz), grow_(grow) {} constexpr buffer(grow_fun grow, T* p = nullptr, size_t sz = 0, size_t cap = 0) noexcept : ptr_(p), size_(sz), capacity_(cap), grow_(grow) {} FMT_CONSTEXPR20 ~buffer() = default; buffer(buffer&&) = default; /// Sets the buffer data and capacity. 
FMT_CONSTEXPR void set(T* buf_data, size_t buf_capacity) noexcept { ptr_ = buf_data; capacity_ = buf_capacity; } public: using value_type = T; using const_reference = const T&; buffer(const buffer&) = delete; void operator=(const buffer&) = delete; auto begin() noexcept -> T* { return ptr_; } auto end() noexcept -> T* { return ptr_ + size_; } auto begin() const noexcept -> const T* { return ptr_; } auto end() const noexcept -> const T* { return ptr_ + size_; } /// Returns the size of this buffer. constexpr auto size() const noexcept -> size_t { return size_; } /// Returns the capacity of this buffer. constexpr auto capacity() const noexcept -> size_t { return capacity_; } /// Returns a pointer to the buffer data (not null-terminated). FMT_CONSTEXPR auto data() noexcept -> T* { return ptr_; } FMT_CONSTEXPR auto data() const noexcept -> const T* { return ptr_; } /// Clears this buffer. void clear() { size_ = 0; } // Tries resizing the buffer to contain `count` elements. If T is a POD type // the new elements may not be initialized. FMT_CONSTEXPR void try_resize(size_t count) { try_reserve(count); size_ = count <= capacity_ ? count : capacity_; } // Tries increasing the buffer capacity to `new_capacity`. It can increase the // capacity by a smaller amount than requested but guarantees there is space // for at least one additional element either by increasing the capacity or by // flushing the buffer if it is full. FMT_CONSTEXPR void try_reserve(size_t new_capacity) { if (new_capacity > capacity_) grow_(*this, new_capacity); } FMT_CONSTEXPR void push_back(const T& value) { try_reserve(size_ + 1); ptr_[size_++] = value; } /// Appends data to the end of the buffer. template void append(const U* begin, const U* end) { while (begin != end) { auto count = to_unsigned(end - begin); try_reserve(size_ + count); auto free_cap = capacity_ - size_; if (free_cap < count) count = free_cap; // A loop is faster than memcpy on small sizes. 
T* out = ptr_ + size_; for (size_t i = 0; i < count; ++i) out[i] = begin[i]; size_ += count; begin += count; } } template FMT_CONSTEXPR auto operator[](Idx index) -> T& { return ptr_[index]; } template FMT_CONSTEXPR auto operator[](Idx index) const -> const T& { return ptr_[index]; } }; struct buffer_traits { explicit buffer_traits(size_t) {} auto count() const -> size_t { return 0; } auto limit(size_t size) -> size_t { return size; } }; class fixed_buffer_traits { private: size_t count_ = 0; size_t limit_; public: explicit fixed_buffer_traits(size_t limit) : limit_(limit) {} auto count() const -> size_t { return count_; } auto limit(size_t size) -> size_t { size_t n = limit_ > count_ ? limit_ - count_ : 0; count_ += size; return size < n ? size : n; } }; // A buffer that writes to an output iterator when flushed. template class iterator_buffer : public Traits, public buffer { private: OutputIt out_; enum { buffer_size = 256 }; T data_[buffer_size]; static FMT_CONSTEXPR void grow(buffer& buf, size_t) { if (buf.size() == buffer_size) static_cast(buf).flush(); } void flush() { auto size = this->size(); this->clear(); const T* begin = data_; const T* end = begin + this->limit(size); while (begin != end) *out_++ = *begin++; } public: explicit iterator_buffer(OutputIt out, size_t n = buffer_size) : Traits(n), buffer(grow, data_, 0, buffer_size), out_(out) {} iterator_buffer(iterator_buffer&& other) noexcept : Traits(other), buffer(grow, data_, 0, buffer_size), out_(other.out_) {} ~iterator_buffer() { // Don't crash if flush fails during unwinding. FMT_TRY { flush(); } FMT_CATCH(...) 
{} } auto out() -> OutputIt { flush(); return out_; } auto count() const -> size_t { return Traits::count() + this->size(); } }; template class iterator_buffer : public fixed_buffer_traits, public buffer { private: T* out_; enum { buffer_size = 256 }; T data_[buffer_size]; static FMT_CONSTEXPR void grow(buffer& buf, size_t) { if (buf.size() == buf.capacity()) static_cast(buf).flush(); } void flush() { size_t n = this->limit(this->size()); if (this->data() == out_) { out_ += n; this->set(data_, buffer_size); } this->clear(); } public: explicit iterator_buffer(T* out, size_t n = buffer_size) : fixed_buffer_traits(n), buffer(grow, out, 0, n), out_(out) {} iterator_buffer(iterator_buffer&& other) noexcept : fixed_buffer_traits(other), buffer(static_cast(other)), out_(other.out_) { if (this->data() != out_) { this->set(data_, buffer_size); this->clear(); } } ~iterator_buffer() { flush(); } auto out() -> T* { flush(); return out_; } auto count() const -> size_t { return fixed_buffer_traits::count() + this->size(); } }; template class iterator_buffer : public buffer { public: explicit iterator_buffer(T* out, size_t = 0) : buffer([](buffer&, size_t) {}, out, 0, ~size_t()) {} auto out() -> T* { return &*this->end(); } }; // A buffer that writes to a container with the contiguous storage. 
template class iterator_buffer< OutputIt, enable_if_t::value && is_contiguous::value, typename OutputIt::container_type::value_type>> : public buffer { private: using container_type = typename OutputIt::container_type; using value_type = typename container_type::value_type; container_type& container_; static FMT_CONSTEXPR void grow(buffer& buf, size_t capacity) { auto& self = static_cast(buf); self.container_.resize(capacity); self.set(&self.container_[0], capacity); } public: explicit iterator_buffer(container_type& c) : buffer(grow, c.size()), container_(c) {} explicit iterator_buffer(OutputIt out, size_t = 0) : iterator_buffer(get_container(out)) {} auto out() -> OutputIt { return back_inserter(container_); } }; // A buffer that counts the number of code units written discarding the output. template class counting_buffer : public buffer { private: enum { buffer_size = 256 }; T data_[buffer_size]; size_t count_ = 0; static FMT_CONSTEXPR void grow(buffer& buf, size_t) { if (buf.size() != buffer_size) return; static_cast(buf).count_ += buf.size(); buf.clear(); } public: counting_buffer() : buffer(grow, data_, 0, buffer_size) {} auto count() -> size_t { return count_ + this->size(); } }; } // namespace detail template FMT_CONSTEXPR void basic_format_parse_context::do_check_arg_id(int id) { // Argument id is only checked at compile-time during parsing because // formatting has its own validation. 
if (detail::is_constant_evaluated() && (!FMT_GCC_VERSION || FMT_GCC_VERSION >= 1200)) { using context = detail::compile_parse_context; if (id >= static_cast(this)->num_args()) report_error("argument not found"); } } template FMT_CONSTEXPR void basic_format_parse_context::check_dynamic_spec( int arg_id) { if (detail::is_constant_evaluated() && (!FMT_GCC_VERSION || FMT_GCC_VERSION >= 1200)) { using context = detail::compile_parse_context; static_cast(this)->check_dynamic_spec(arg_id); } } FMT_EXPORT template class basic_format_arg; FMT_EXPORT template class basic_format_args; FMT_EXPORT template class dynamic_format_arg_store; // A formatter for objects of type T. FMT_EXPORT template struct formatter { // A deleted default constructor indicates a disabled formatter. formatter() = delete; }; // Specifies if T has an enabled formatter specialization. A type can be // formattable even if it doesn't have a formatter e.g. via a conversion. template using has_formatter = std::is_constructible>; // An output iterator that appends to a buffer. It is used instead of // back_insert_iterator to reduce symbol sizes and avoid dependency. 
template class basic_appender { private: detail::buffer* buffer_; friend auto get_container(basic_appender app) -> detail::buffer& { return *app.buffer_; } public: using iterator_category = int; using value_type = T; using difference_type = ptrdiff_t; using pointer = T*; using reference = T&; using container_type = detail::buffer; FMT_UNCHECKED_ITERATOR(basic_appender); FMT_CONSTEXPR basic_appender(detail::buffer& buf) : buffer_(&buf) {} auto operator=(T c) -> basic_appender& { buffer_->push_back(c); return *this; } auto operator*() -> basic_appender& { return *this; } auto operator++() -> basic_appender& { return *this; } auto operator++(int) -> basic_appender { return *this; } }; using appender = basic_appender; namespace detail { template struct is_back_insert_iterator> : std::true_type {}; template struct locking : std::true_type {}; template struct locking>::nonlocking>> : std::false_type {}; template FMT_CONSTEXPR inline auto is_locking() -> bool { return locking::value; } template FMT_CONSTEXPR inline auto is_locking() -> bool { return locking::value || is_locking(); } // An optimized version of std::copy with the output value type (T). template ::value)> auto copy(InputIt begin, InputIt end, OutputIt out) -> OutputIt { get_container(out).append(begin, end); return out; } template ::value)> FMT_CONSTEXPR auto copy(InputIt begin, InputIt end, OutputIt out) -> OutputIt { while (begin != end) *out++ = static_cast(*begin++); return out; } template FMT_CONSTEXPR auto copy(basic_string_view s, OutputIt out) -> OutputIt { return copy(s.begin(), s.end(), out); } template constexpr auto has_const_formatter_impl(T*) -> decltype(typename Context::template formatter_type().format( std::declval(), std::declval()), true) { return true; } template constexpr auto has_const_formatter_impl(...) 
-> bool { return false; } template constexpr auto has_const_formatter() -> bool { return has_const_formatter_impl(static_cast(nullptr)); } template struct is_buffer_appender : std::false_type {}; template struct is_buffer_appender< It, bool_constant< is_back_insert_iterator::value && std::is_base_of, typename It::container_type>::value>> : std::true_type {}; // Maps an output iterator to a buffer. template ::value)> auto get_buffer(OutputIt out) -> iterator_buffer { return iterator_buffer(out); } template ::value)> auto get_buffer(OutputIt out) -> buffer& { return get_container(out); } template auto get_iterator(Buf& buf, OutputIt) -> decltype(buf.out()) { return buf.out(); } template auto get_iterator(buffer&, OutputIt out) -> OutputIt { return out; } struct view {}; template struct named_arg : view { const Char* name; const T& value; named_arg(const Char* n, const T& v) : name(n), value(v) {} }; template struct named_arg_info { const Char* name; int id; }; template struct is_named_arg : std::false_type {}; template struct is_statically_named_arg : std::false_type {}; template struct is_named_arg> : std::true_type {}; template constexpr auto count() -> size_t { return B ? 1 : 0; } template constexpr auto count() -> size_t { return (B1 ? 1 : 0) + count(); } template constexpr auto count_named_args() -> size_t { return count::value...>(); } template constexpr auto count_statically_named_args() -> size_t { return count::value...>(); } struct unformattable {}; struct unformattable_char : unformattable {}; struct unformattable_pointer : unformattable {}; template struct string_value { const Char* data; size_t size; }; template struct named_arg_value { const named_arg_info* data; size_t size; }; template struct custom_value { using parse_context = typename Context::parse_context_type; void* value; void (*format)(void* arg, parse_context& parse_ctx, Context& ctx); }; // A formatting argument value. 
template class value { public: using char_type = typename Context::char_type; union { monostate no_value; int int_value; unsigned uint_value; long long long_long_value; unsigned long long ulong_long_value; int128_opt int128_value; uint128_opt uint128_value; bool bool_value; char_type char_value; float float_value; double double_value; long double long_double_value; const void* pointer; string_value string; custom_value custom; named_arg_value named_args; }; constexpr FMT_ALWAYS_INLINE value() : no_value() {} constexpr FMT_ALWAYS_INLINE value(int val) : int_value(val) {} constexpr FMT_ALWAYS_INLINE value(unsigned val) : uint_value(val) {} constexpr FMT_ALWAYS_INLINE value(long long val) : long_long_value(val) {} constexpr FMT_ALWAYS_INLINE value(unsigned long long val) : ulong_long_value(val) {} FMT_ALWAYS_INLINE value(int128_opt val) : int128_value(val) {} FMT_ALWAYS_INLINE value(uint128_opt val) : uint128_value(val) {} constexpr FMT_ALWAYS_INLINE value(float val) : float_value(val) {} constexpr FMT_ALWAYS_INLINE value(double val) : double_value(val) {} FMT_ALWAYS_INLINE value(long double val) : long_double_value(val) {} constexpr FMT_ALWAYS_INLINE value(bool val) : bool_value(val) {} constexpr FMT_ALWAYS_INLINE value(char_type val) : char_value(val) {} FMT_CONSTEXPR FMT_ALWAYS_INLINE value(const char_type* val) { string.data = val; if (is_constant_evaluated()) string.size = {}; } FMT_CONSTEXPR FMT_ALWAYS_INLINE value(basic_string_view val) { string.data = val.data(); string.size = val.size(); } FMT_ALWAYS_INLINE value(const void* val) : pointer(val) {} FMT_ALWAYS_INLINE value(const named_arg_info* args, size_t size) : named_args{args, size} {} template FMT_CONSTEXPR20 FMT_ALWAYS_INLINE value(T& val) { using value_type = remove_const_t; // T may overload operator& e.g. std::vector::reference in libc++. 
#if defined(__cpp_if_constexpr) if constexpr (std::is_same::value) custom.value = const_cast(&val); #endif if (!is_constant_evaluated()) custom.value = const_cast(&reinterpret_cast(val)); // Get the formatter type through the context to allow different contexts // have different extension points, e.g. `formatter` for `format` and // `printf_formatter` for `printf`. custom.format = format_custom_arg< value_type, typename Context::template formatter_type>; } value(unformattable); value(unformattable_char); value(unformattable_pointer); private: // Formats an argument of a custom type, such as a user-defined class. template static void format_custom_arg(void* arg, typename Context::parse_context_type& parse_ctx, Context& ctx) { auto f = Formatter(); parse_ctx.advance_to(f.parse(parse_ctx)); using qualified_type = conditional_t(), const T, T>; // format must be const for compatibility with std::format and compilation. const auto& cf = f; ctx.advance_to(cf.format(*static_cast(arg), ctx)); } }; // To minimize the number of types we need to deal with, long is translated // either to int or to long long depending on its size. enum { long_short = sizeof(long) == sizeof(int) }; using long_type = conditional_t; using ulong_type = conditional_t; template struct format_as_result { template ::value || std::is_class::value)> static auto map(U*) -> remove_cvref_t()))>; static auto map(...) -> void; using type = decltype(map(static_cast(nullptr))); }; template using format_as_t = typename format_as_result::type; template struct has_format_as : bool_constant, void>::value> {}; #define FMT_MAP_API FMT_CONSTEXPR FMT_ALWAYS_INLINE // Maps formatting arguments to core types. // arg_mapper reports errors by returning unformattable instead of using // static_assert because it's used in the is_formattable trait. 
template struct arg_mapper { using char_type = typename Context::char_type; FMT_MAP_API auto map(signed char val) -> int { return val; } FMT_MAP_API auto map(unsigned char val) -> unsigned { return val; } FMT_MAP_API auto map(short val) -> int { return val; } FMT_MAP_API auto map(unsigned short val) -> unsigned { return val; } FMT_MAP_API auto map(int val) -> int { return val; } FMT_MAP_API auto map(unsigned val) -> unsigned { return val; } FMT_MAP_API auto map(long val) -> long_type { return val; } FMT_MAP_API auto map(unsigned long val) -> ulong_type { return val; } FMT_MAP_API auto map(long long val) -> long long { return val; } FMT_MAP_API auto map(unsigned long long val) -> unsigned long long { return val; } FMT_MAP_API auto map(int128_opt val) -> int128_opt { return val; } FMT_MAP_API auto map(uint128_opt val) -> uint128_opt { return val; } FMT_MAP_API auto map(bool val) -> bool { return val; } template ::value || std::is_same::value)> FMT_MAP_API auto map(T val) -> char_type { return val; } template ::value || #ifdef __cpp_char8_t std::is_same::value || #endif std::is_same::value || std::is_same::value) && !std::is_same::value, int> = 0> FMT_MAP_API auto map(T) -> unformattable_char { return {}; } FMT_MAP_API auto map(float val) -> float { return val; } FMT_MAP_API auto map(double val) -> double { return val; } FMT_MAP_API auto map(long double val) -> long double { return val; } FMT_MAP_API auto map(char_type* val) -> const char_type* { return val; } FMT_MAP_API auto map(const char_type* val) -> const char_type* { return val; } template , FMT_ENABLE_IF(std::is_same::value && !std::is_pointer::value)> FMT_MAP_API auto map(const T& val) -> basic_string_view { return to_string_view(val); } template , FMT_ENABLE_IF(!std::is_same::value && !std::is_pointer::value)> FMT_MAP_API auto map(const T&) -> unformattable_char { return {}; } FMT_MAP_API auto map(void* val) -> const void* { return val; } FMT_MAP_API auto map(const void* val) -> const void* { return val; } 
FMT_MAP_API auto map(volatile void* val) -> const void* { return const_cast(val); } FMT_MAP_API auto map(const volatile void* val) -> const void* { return const_cast(val); } FMT_MAP_API auto map(std::nullptr_t val) -> const void* { return val; } // Use SFINAE instead of a const T* parameter to avoid a conflict with the // array overload. template < typename T, FMT_ENABLE_IF( std::is_pointer::value || std::is_member_pointer::value || std::is_function::type>::value || (std::is_array::value && !std::is_convertible::value))> FMT_CONSTEXPR auto map(const T&) -> unformattable_pointer { return {}; } template ::value)> FMT_MAP_API auto map(const T (&values)[N]) -> const T (&)[N] { return values; } // Only map owning types because mapping views can be unsafe. template , FMT_ENABLE_IF(std::is_arithmetic::value)> FMT_MAP_API auto map(const T& val) -> decltype(FMT_DECLTYPE_THIS map(U())) { return map(format_as(val)); } template > struct formattable : bool_constant() || (has_formatter::value && !std::is_const::value)> {}; template ::value)> FMT_MAP_API auto do_map(T& val) -> T& { return val; } template ::value)> FMT_MAP_API auto do_map(T&) -> unformattable { return {}; } // is_fundamental is used to allow formatters for extended FP types. template , FMT_ENABLE_IF( (std::is_class::value || std::is_enum::value || std::is_union::value || std::is_fundamental::value) && !has_to_string_view::value && !is_char::value && !is_named_arg::value && !std::is_integral::value && !std::is_arithmetic>::value)> FMT_MAP_API auto map(T& val) -> decltype(FMT_DECLTYPE_THIS do_map(val)) { return do_map(val); } template ::value)> FMT_MAP_API auto map(const T& named_arg) -> decltype(FMT_DECLTYPE_THIS map(named_arg.value)) { return map(named_arg.value); } auto map(...) -> unformattable { return {}; } }; // A type constant after applying arg_mapper. 
template using mapped_type_constant = type_constant().map(std::declval())), typename Context::char_type>; enum { packed_arg_bits = 4 }; // Maximum number of arguments with packed types. enum { max_packed_args = 62 / packed_arg_bits }; enum : unsigned long long { is_unpacked_bit = 1ULL << 63 }; enum : unsigned long long { has_named_args_bit = 1ULL << 62 }; template struct is_output_iterator : std::false_type {}; template <> struct is_output_iterator : std::true_type {}; template struct is_output_iterator< It, T, void_t()++ = std::declval())>> : std::true_type {}; // A type-erased reference to an std::locale to avoid a heavy include. class locale_ref { private: const void* locale_; // A type-erased pointer to std::locale. public: constexpr locale_ref() : locale_(nullptr) {} template explicit locale_ref(const Locale& loc); explicit operator bool() const noexcept { return locale_ != nullptr; } template auto get() const -> Locale; }; template constexpr auto encode_types() -> unsigned long long { return 0; } template constexpr auto encode_types() -> unsigned long long { return static_cast(mapped_type_constant::value) | (encode_types() << packed_arg_bits); } template constexpr unsigned long long make_descriptor() { return NUM_ARGS <= max_packed_args ? encode_types() : is_unpacked_bit | NUM_ARGS; } // This type is intentionally undefined, only used for errors. template #if FMT_CLANG_VERSION && FMT_CLANG_VERSION <= 1500 // https://github.com/fmtlib/fmt/issues/3796 struct type_is_unformattable_for { }; #else struct type_is_unformattable_for; #endif template FMT_CONSTEXPR auto make_arg(T& val) -> value { using arg_type = remove_cvref_t().map(val))>; // Use enum instead of constexpr because the latter may generate code. enum { formattable_char = !std::is_same::value }; static_assert(formattable_char, "Mixing character types is disallowed."); // Formatting of arbitrary pointers is disallowed. If you want to format a // pointer cast it to `void*` or `const void*`. 
In particular, this forbids // formatting of `[const] volatile char*` printed as bool by iostreams. enum { formattable_pointer = !std::is_same::value }; static_assert(formattable_pointer, "Formatting of non-void pointers is disallowed."); enum { formattable = !std::is_same::value }; #if defined(__cpp_if_constexpr) if constexpr (!formattable) type_is_unformattable_for _; #endif static_assert( formattable, "Cannot format an argument. To make type T formattable provide a " "formatter specialization: https://fmt.dev/latest/api.html#udt"); return {arg_mapper().map(val)}; } template FMT_CONSTEXPR auto make_arg(T& val) -> basic_format_arg { auto arg = basic_format_arg(); arg.type_ = mapped_type_constant::value; arg.value_ = make_arg(val); return arg; } template FMT_CONSTEXPR inline auto make_arg(T& val) -> basic_format_arg { return make_arg(val); } template using arg_t = conditional_t, basic_format_arg>; template ::value)> void init_named_arg(named_arg_info*, int& arg_index, int&, const T&) { ++arg_index; } template ::value)> void init_named_arg(named_arg_info* named_args, int& arg_index, int& named_arg_index, const T& arg) { named_args[named_arg_index++] = {arg.name, arg_index++}; } // An array of references to arguments. It can be implicitly converted to // `fmt::basic_format_args` for passing into type-erased formatting functions // such as `fmt::vformat`. template struct format_arg_store { // args_[0].named_args points to named_args to avoid bloating format_args. // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. static constexpr size_t ARGS_ARR_SIZE = 1 + (NUM_ARGS != 0 ? NUM_ARGS : +1); arg_t args[ARGS_ARR_SIZE]; named_arg_info named_args[NUM_NAMED_ARGS]; template FMT_MAP_API format_arg_store(T&... 
values) : args{{named_args, NUM_NAMED_ARGS}, make_arg(values)...} { using dummy = int[]; int arg_index = 0, named_arg_index = 0; (void)dummy{ 0, (init_named_arg(named_args, arg_index, named_arg_index, values), 0)...}; } format_arg_store(format_arg_store&& rhs) { args[0] = {named_args, NUM_NAMED_ARGS}; for (size_t i = 1; i < ARGS_ARR_SIZE; ++i) args[i] = rhs.args[i]; for (size_t i = 0; i < NUM_NAMED_ARGS; ++i) named_args[i] = rhs.named_args[i]; } format_arg_store(const format_arg_store& rhs) = delete; format_arg_store& operator=(const format_arg_store& rhs) = delete; format_arg_store& operator=(format_arg_store&& rhs) = delete; }; // A specialization of format_arg_store without named arguments. // It is a plain struct to reduce binary size in debug mode. template struct format_arg_store { // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. arg_t args[NUM_ARGS != 0 ? NUM_ARGS : +1]; }; } // namespace detail FMT_BEGIN_EXPORT // A formatting argument. Context is a template parameter for the compiled API // where output can be unbuffered. 
template class basic_format_arg { private: detail::value value_; detail::type type_; template friend FMT_CONSTEXPR auto detail::make_arg(T& value) -> basic_format_arg; friend class basic_format_args; friend class dynamic_format_arg_store; using char_type = typename Context::char_type; template friend struct detail::format_arg_store; basic_format_arg(const detail::named_arg_info* args, size_t size) : value_(args, size) {} public: class handle { public: explicit handle(detail::custom_value custom) : custom_(custom) {} void format(typename Context::parse_context_type& parse_ctx, Context& ctx) const { custom_.format(custom_.value, parse_ctx, ctx); } private: detail::custom_value custom_; }; constexpr basic_format_arg() : type_(detail::type::none_type) {} constexpr explicit operator bool() const noexcept { return type_ != detail::type::none_type; } auto type() const -> detail::type { return type_; } auto is_integral() const -> bool { return detail::is_integral_type(type_); } auto is_arithmetic() const -> bool { return detail::is_arithmetic_type(type_); } /** * Visits an argument dispatching to the appropriate visit method based on * the argument type. For example, if the argument type is `double` then * `vis(value)` will be called with the value of type `double`. 
*/ template FMT_CONSTEXPR FMT_INLINE auto visit(Visitor&& vis) const -> decltype(vis(0)) { switch (type_) { case detail::type::none_type: break; case detail::type::int_type: return vis(value_.int_value); case detail::type::uint_type: return vis(value_.uint_value); case detail::type::long_long_type: return vis(value_.long_long_value); case detail::type::ulong_long_type: return vis(value_.ulong_long_value); case detail::type::int128_type: return vis(detail::convert_for_visit(value_.int128_value)); case detail::type::uint128_type: return vis(detail::convert_for_visit(value_.uint128_value)); case detail::type::bool_type: return vis(value_.bool_value); case detail::type::char_type: return vis(value_.char_value); case detail::type::float_type: return vis(value_.float_value); case detail::type::double_type: return vis(value_.double_value); case detail::type::long_double_type: return vis(value_.long_double_value); case detail::type::cstring_type: return vis(value_.string.data); case detail::type::string_type: using sv = basic_string_view; return vis(sv(value_.string.data, value_.string.size)); case detail::type::pointer_type: return vis(value_.pointer); case detail::type::custom_type: return vis(typename basic_format_arg::handle(value_.custom)); } return vis(monostate()); } auto format_custom(const char_type* parse_begin, typename Context::parse_context_type& parse_ctx, Context& ctx) -> bool { if (type_ != detail::type::custom_type) return false; parse_ctx.advance_to(parse_begin); value_.custom.format(value_.custom.value, parse_ctx, ctx); return true; } }; template FMT_DEPRECATED FMT_CONSTEXPR auto visit_format_arg( Visitor&& vis, const basic_format_arg& arg) -> decltype(vis(0)) { return arg.visit(static_cast(vis)); } /** * A view of a collection of formatting arguments. 
To avoid lifetime issues it * should only be used as a parameter type in type-erased functions such as * `vformat`: * * void vlog(fmt::string_view fmt, fmt::format_args args); // OK * fmt::format_args args = fmt::make_format_args(); // Dangling reference */ template class basic_format_args { public: using size_type = int; using format_arg = basic_format_arg; private: // A descriptor that contains information about formatting arguments. // If the number of arguments is less or equal to max_packed_args then // argument types are passed in the descriptor. This reduces binary code size // per formatting function call. unsigned long long desc_; union { // If is_packed() returns true then argument values are stored in values_; // otherwise they are stored in args_. This is done to improve cache // locality and reduce compiled code size since storing larger objects // may require more code (at least on x86-64) even if the same amount of // data is actually copied to stack. It saves ~10% on the bloat test. const detail::value* values_; const format_arg* args_; }; constexpr auto is_packed() const -> bool { return (desc_ & detail::is_unpacked_bit) == 0; } constexpr auto has_named_args() const -> bool { return (desc_ & detail::has_named_args_bit) != 0; } FMT_CONSTEXPR auto type(int index) const -> detail::type { int shift = index * detail::packed_arg_bits; unsigned int mask = (1 << detail::packed_arg_bits) - 1; return static_cast((desc_ >> shift) & mask); } public: constexpr basic_format_args() : desc_(0), args_(nullptr) {} /// Constructs a `basic_format_args` object from `format_arg_store`. template constexpr FMT_ALWAYS_INLINE basic_format_args( const detail::format_arg_store& store) : desc_(DESC), values_(store.args + (NUM_NAMED_ARGS != 0 ? 1 : 0)) {} template detail::max_packed_args)> constexpr basic_format_args( const detail::format_arg_store& store) : desc_(DESC), args_(store.args + (NUM_NAMED_ARGS != 0 ? 
1 : 0)) {} /// Constructs a `basic_format_args` object from `dynamic_format_arg_store`. constexpr basic_format_args(const dynamic_format_arg_store& store) : desc_(store.get_types()), args_(store.data()) {} /// Constructs a `basic_format_args` object from a dynamic list of arguments. constexpr basic_format_args(const format_arg* args, int count) : desc_(detail::is_unpacked_bit | detail::to_unsigned(count)), args_(args) {} /// Returns the argument with the specified id. FMT_CONSTEXPR auto get(int id) const -> format_arg { format_arg arg; if (!is_packed()) { if (id < max_size()) arg = args_[id]; return arg; } if (static_cast(id) >= detail::max_packed_args) return arg; arg.type_ = type(id); if (arg.type_ == detail::type::none_type) return arg; arg.value_ = values_[id]; return arg; } template auto get(basic_string_view name) const -> format_arg { int id = get_id(name); return id >= 0 ? get(id) : format_arg(); } template FMT_CONSTEXPR auto get_id(basic_string_view name) const -> int { if (!has_named_args()) return -1; const auto& named_args = (is_packed() ? values_[-1] : args_[-1].value_).named_args; for (size_t i = 0; i < named_args.size; ++i) { if (named_args.data[i].name == name) return named_args.data[i].id; } return -1; } auto max_size() const -> int { unsigned long long max_packed = detail::max_packed_args; return static_cast(is_packed() ? max_packed : desc_ & ~detail::is_unpacked_bit); } }; // A formatting context. class context { private: appender out_; basic_format_args args_; detail::locale_ref loc_; public: /// The character type for the output. using char_type = char; using iterator = appender; using format_arg = basic_format_arg; using parse_context_type = basic_format_parse_context; template using formatter_type = formatter; /// Constructs a `basic_format_context` object. References to the arguments /// are stored in the object so make sure they have appropriate lifetimes. 
FMT_CONSTEXPR context(iterator out, basic_format_args ctx_args, detail::locale_ref loc = {}) : out_(out), args_(ctx_args), loc_(loc) {} context(context&&) = default; context(const context&) = delete; void operator=(const context&) = delete; FMT_CONSTEXPR auto arg(int id) const -> format_arg { return args_.get(id); } auto arg(string_view name) -> format_arg { return args_.get(name); } FMT_CONSTEXPR auto arg_id(string_view name) -> int { return args_.get_id(name); } auto args() const -> const basic_format_args& { return args_; } // Returns an iterator to the beginning of the output range. FMT_CONSTEXPR auto out() -> iterator { return out_; } // Advances the begin iterator to `it`. void advance_to(iterator) {} FMT_CONSTEXPR auto locale() -> detail::locale_ref { return loc_; } }; template class generic_context; // Longer aliases for C++20 compatibility. template using basic_format_context = conditional_t::value, context, generic_context>; using format_context = context; template using buffered_context = basic_format_context, Char>; template using is_formattable = bool_constant>() .map(std::declval()))>::value>; #if FMT_USE_CONCEPTS template concept formattable = is_formattable, Char>::value; #endif /** * Constructs an object that stores references to arguments and can be * implicitly converted to `format_args`. `Context` can be omitted in which case * it defaults to `format_context`. See `arg` for lifetime considerations. */ // Take arguments by lvalue references to avoid some lifetime issues, e.g. // auto args = make_format_args(std::string()); template (), unsigned long long DESC = detail::make_descriptor(), FMT_ENABLE_IF(NUM_NAMED_ARGS == 0)> constexpr FMT_ALWAYS_INLINE auto make_format_args(T&... 
args) -> detail::format_arg_store { return {{detail::make_arg( args)...}}; } #ifndef FMT_DOC template (), unsigned long long DESC = detail::make_descriptor() | static_cast(detail::has_named_args_bit), FMT_ENABLE_IF(NUM_NAMED_ARGS != 0)> constexpr auto make_format_args(T&... args) -> detail::format_arg_store { return {args...}; } #endif /** * Returns a named argument to be used in a formatting function. * It should only be used in a call to a formatting function or * `dynamic_format_arg_store::push_back`. * * **Example**: * * fmt::print("The answer is {answer}.", fmt::arg("answer", 42)); */ template inline auto arg(const Char* name, const T& arg) -> detail::named_arg { static_assert(!detail::is_named_arg(), "nested named arguments"); return {name, arg}; } FMT_END_EXPORT /// An alias for `basic_format_args`. // A separate type would result in shorter symbols but break ABI compatibility // between clang and gcc on ARM (#1919). FMT_EXPORT using format_args = basic_format_args; // We cannot use enum classes as bit fields because of a gcc bug, so we put them // in namespaces instead (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414). // Additionally, if an underlying type is specified, older gcc incorrectly warns // that the type is too small. Both bugs are fixed in gcc 9.3. #if FMT_GCC_VERSION && FMT_GCC_VERSION < 903 # define FMT_ENUM_UNDERLYING_TYPE(type) #else # define FMT_ENUM_UNDERLYING_TYPE(type) : type #endif namespace align { enum type FMT_ENUM_UNDERLYING_TYPE(unsigned char){none, left, right, center, numeric}; } using align_t = align::type; namespace sign { enum type FMT_ENUM_UNDERLYING_TYPE(unsigned char){none, minus, plus, space}; } using sign_t = sign::type; namespace detail { template using unsigned_char = typename conditional_t::value, std::make_unsigned, type_identity>::type; // Character (code unit) type is erased to prevent template bloat. 
struct fill_t { private: enum { max_size = 4 }; char data_[max_size] = {' '}; unsigned char size_ = 1; public: template FMT_CONSTEXPR void operator=(basic_string_view s) { auto size = s.size(); size_ = static_cast(size); if (size == 1) { unsigned uchar = static_cast>(s[0]); data_[0] = static_cast(uchar); data_[1] = static_cast(uchar >> 8); return; } FMT_ASSERT(size <= max_size, "invalid fill"); for (size_t i = 0; i < size; ++i) data_[i] = static_cast(s[i]); } FMT_CONSTEXPR void operator=(char c) { data_[0] = c; size_ = 1; } constexpr auto size() const -> size_t { return size_; } template constexpr auto get() const -> Char { using uchar = unsigned char; return static_cast(static_cast(data_[0]) | (static_cast(data_[1]) << 8)); } template ::value)> constexpr auto data() const -> const Char* { return data_; } template ::value)> constexpr auto data() const -> const Char* { return nullptr; } }; } // namespace detail enum class presentation_type : unsigned char { // Common specifiers: none = 0, debug = 1, // '?' string = 2, // 's' (string, bool) // Integral, bool and character specifiers: dec = 3, // 'd' hex, // 'x' or 'X' oct, // 'o' bin, // 'b' or 'B' chr, // 'c' // String and pointer specifiers: pointer = 3, // 'p' // Floating-point specifiers: exp = 1, // 'e' or 'E' (1 since there is no FP debug presentation) fixed, // 'f' or 'F' general, // 'g' or 'G' hexfloat // 'a' or 'A' }; // Format specifiers for built-in and string types. struct format_specs { int width; int precision; presentation_type type; align_t align : 4; sign_t sign : 3; bool upper : 1; // An uppercase version e.g. 'X' for 'x'. bool alt : 1; // Alternate form ('#'). bool localized : 1; detail::fill_t fill; constexpr format_specs() : width(0), precision(-1), type(presentation_type::none), align(align::none), sign(sign::none), upper(false), alt(false), localized(false) {} }; namespace detail { enum class arg_id_kind { none, index, name }; // An argument reference. 
template struct arg_ref { FMT_CONSTEXPR arg_ref() : kind(arg_id_kind::none), val() {} FMT_CONSTEXPR explicit arg_ref(int index) : kind(arg_id_kind::index), val(index) {} FMT_CONSTEXPR explicit arg_ref(basic_string_view name) : kind(arg_id_kind::name), val(name) {} FMT_CONSTEXPR auto operator=(int idx) -> arg_ref& { kind = arg_id_kind::index; val.index = idx; return *this; } arg_id_kind kind; union value { FMT_CONSTEXPR value(int idx = 0) : index(idx) {} FMT_CONSTEXPR value(basic_string_view n) : name(n) {} int index; basic_string_view name; } val; }; // Format specifiers with width and precision resolved at formatting rather // than parsing time to allow reusing the same parsed specifiers with // different sets of arguments (precompilation of format strings). template struct dynamic_format_specs : format_specs { arg_ref width_ref; arg_ref precision_ref; }; // Converts a character to ASCII. Returns '\0' on conversion failure. template ::value)> constexpr auto to_ascii(Char c) -> char { return c <= 0xff ? static_cast(c) : '\0'; } // Returns the number of code units in a code point or 1 on error. template FMT_CONSTEXPR auto code_point_length(const Char* begin) -> int { if (const_check(sizeof(Char) != 1)) return 1; auto c = static_cast(*begin); return static_cast((0x3a55000000000000ull >> (2 * (c >> 3))) & 0x3) + 1; } // Return the result via the out param to workaround gcc bug 77539. template FMT_CONSTEXPR auto find(Ptr first, Ptr last, T value, Ptr& out) -> bool { for (out = first; out != last; ++out) { if (*out == value) return true; } return false; } template <> inline auto find(const char* first, const char* last, char value, const char*& out) -> bool { out = static_cast(memchr(first, value, to_unsigned(last - first))); return out != nullptr; } // Parses the range [begin, end) as an unsigned integer. This function assumes // that the range is non-empty and the first character is a digit. 
template FMT_CONSTEXPR auto parse_nonnegative_int(const Char*& begin, const Char* end, int error_value) noexcept -> int { FMT_ASSERT(begin != end && '0' <= *begin && *begin <= '9', ""); unsigned value = 0, prev = 0; auto p = begin; do { prev = value; value = value * 10 + unsigned(*p - '0'); ++p; } while (p != end && '0' <= *p && *p <= '9'); auto num_digits = p - begin; begin = p; int digits10 = static_cast(sizeof(int) * CHAR_BIT * 3 / 10); if (num_digits <= digits10) return static_cast(value); // Check for overflow. unsigned max = INT_MAX; return num_digits == digits10 + 1 && prev * 10ull + unsigned(p[-1] - '0') <= max ? static_cast(value) : error_value; } FMT_CONSTEXPR inline auto parse_align(char c) -> align_t { switch (c) { case '<': return align::left; case '>': return align::right; case '^': return align::center; } return align::none; } template constexpr auto is_name_start(Char c) -> bool { return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '_'; } template FMT_CONSTEXPR auto do_parse_arg_id(const Char* begin, const Char* end, Handler&& handler) -> const Char* { Char c = *begin; if (c >= '0' && c <= '9') { int index = 0; if (c != '0') index = parse_nonnegative_int(begin, end, INT_MAX); else ++begin; if (begin == end || (*begin != '}' && *begin != ':')) report_error("invalid format string"); else handler.on_index(index); return begin; } if (!is_name_start(c)) { report_error("invalid format string"); return begin; } auto it = begin; do { ++it; } while (it != end && (is_name_start(*it) || ('0' <= *it && *it <= '9'))); handler.on_name({begin, to_unsigned(it - begin)}); return it; } template FMT_CONSTEXPR auto parse_arg_id(const Char* begin, const Char* end, Handler&& handler) -> const Char* { FMT_ASSERT(begin != end, ""); Char c = *begin; if (c != '}' && c != ':') return do_parse_arg_id(begin, end, handler); handler.on_auto(); return begin; } template struct dynamic_spec_id_handler { basic_format_parse_context& ctx; arg_ref& ref; FMT_CONSTEXPR void 
on_auto() { int id = ctx.next_arg_id(); ref = arg_ref(id); ctx.check_dynamic_spec(id); } FMT_CONSTEXPR void on_index(int id) { ref = arg_ref(id); ctx.check_arg_id(id); ctx.check_dynamic_spec(id); } FMT_CONSTEXPR void on_name(basic_string_view id) { ref = arg_ref(id); ctx.check_arg_id(id); } }; // Parses [integer | "{" [arg_id] "}"]. template FMT_CONSTEXPR auto parse_dynamic_spec(const Char* begin, const Char* end, int& value, arg_ref& ref, basic_format_parse_context& ctx) -> const Char* { FMT_ASSERT(begin != end, ""); if ('0' <= *begin && *begin <= '9') { int val = parse_nonnegative_int(begin, end, -1); if (val != -1) value = val; else report_error("number is too big"); } else if (*begin == '{') { ++begin; auto handler = dynamic_spec_id_handler{ctx, ref}; if (begin != end) begin = parse_arg_id(begin, end, handler); if (begin != end && *begin == '}') return ++begin; report_error("invalid format string"); } return begin; } template FMT_CONSTEXPR auto parse_precision(const Char* begin, const Char* end, int& value, arg_ref& ref, basic_format_parse_context& ctx) -> const Char* { ++begin; if (begin == end || *begin == '}') { report_error("invalid precision"); return begin; } return parse_dynamic_spec(begin, end, value, ref, ctx); } enum class state { start, align, sign, hash, zero, width, precision, locale }; // Parses standard format specifiers. template FMT_CONSTEXPR auto parse_format_specs(const Char* begin, const Char* end, dynamic_format_specs& specs, basic_format_parse_context& ctx, type arg_type) -> const Char* { auto c = '\0'; if (end - begin > 1) { auto next = to_ascii(begin[1]); c = parse_align(next) == align::none ? 
to_ascii(*begin) : '\0'; } else { if (begin == end) return begin; c = to_ascii(*begin); } struct { state current_state = state::start; FMT_CONSTEXPR void operator()(state s, bool valid = true) { if (current_state >= s || !valid) report_error("invalid format specifier"); current_state = s; } } enter_state; using pres = presentation_type; constexpr auto integral_set = sint_set | uint_set | bool_set | char_set; struct { const Char*& begin; dynamic_format_specs& specs; type arg_type; FMT_CONSTEXPR auto operator()(pres pres_type, int set) -> const Char* { if (!in(arg_type, set)) { if (arg_type == type::none_type) return begin; report_error("invalid format specifier"); } specs.type = pres_type; return begin + 1; } } parse_presentation_type{begin, specs, arg_type}; for (;;) { switch (c) { case '<': case '>': case '^': enter_state(state::align); specs.align = parse_align(c); ++begin; break; case '+': case '-': case ' ': if (arg_type == type::none_type) return begin; enter_state(state::sign, in(arg_type, sint_set | float_set)); switch (c) { case '+': specs.sign = sign::plus; break; case '-': specs.sign = sign::minus; break; case ' ': specs.sign = sign::space; break; } ++begin; break; case '#': if (arg_type == type::none_type) return begin; enter_state(state::hash, is_arithmetic_type(arg_type)); specs.alt = true; ++begin; break; case '0': enter_state(state::zero); if (!is_arithmetic_type(arg_type)) { if (arg_type == type::none_type) return begin; report_error("format specifier requires numeric argument"); } if (specs.align == align::none) { // Ignore 0 if align is specified for compatibility with std::format. 
specs.align = align::numeric; specs.fill = '0'; } ++begin; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '{': enter_state(state::width); begin = parse_dynamic_spec(begin, end, specs.width, specs.width_ref, ctx); break; case '.': if (arg_type == type::none_type) return begin; enter_state(state::precision, in(arg_type, float_set | string_set | cstring_set)); begin = parse_precision(begin, end, specs.precision, specs.precision_ref, ctx); break; case 'L': if (arg_type == type::none_type) return begin; enter_state(state::locale, is_arithmetic_type(arg_type)); specs.localized = true; ++begin; break; case 'd': return parse_presentation_type(pres::dec, integral_set); case 'X': specs.upper = true; FMT_FALLTHROUGH; case 'x': return parse_presentation_type(pres::hex, integral_set); case 'o': return parse_presentation_type(pres::oct, integral_set); case 'B': specs.upper = true; FMT_FALLTHROUGH; case 'b': return parse_presentation_type(pres::bin, integral_set); case 'E': specs.upper = true; FMT_FALLTHROUGH; case 'e': return parse_presentation_type(pres::exp, float_set); case 'F': specs.upper = true; FMT_FALLTHROUGH; case 'f': return parse_presentation_type(pres::fixed, float_set); case 'G': specs.upper = true; FMT_FALLTHROUGH; case 'g': return parse_presentation_type(pres::general, float_set); case 'A': specs.upper = true; FMT_FALLTHROUGH; case 'a': return parse_presentation_type(pres::hexfloat, float_set); case 'c': if (arg_type == type::bool_type) report_error("invalid format specifier"); return parse_presentation_type(pres::chr, integral_set); case 's': return parse_presentation_type(pres::string, bool_set | string_set | cstring_set); case 'p': return parse_presentation_type(pres::pointer, pointer_set | cstring_set); case '?': return parse_presentation_type(pres::debug, char_set | string_set | cstring_set); case '}': return begin; default: { if (*begin == '}') return begin; // Parse fill and alignment. 
auto fill_end = begin + code_point_length(begin); if (end - fill_end <= 0) { report_error("invalid format specifier"); return begin; } if (*begin == '{') { report_error("invalid fill character '{'"); return begin; } auto align = parse_align(to_ascii(*fill_end)); enter_state(state::align, align != align::none); specs.fill = basic_string_view(begin, to_unsigned(fill_end - begin)); specs.align = align; begin = fill_end + 1; } } if (begin == end) return begin; c = to_ascii(*begin); } } template FMT_CONSTEXPR auto parse_replacement_field(const Char* begin, const Char* end, Handler&& handler) -> const Char* { struct id_adapter { Handler& handler; int arg_id; FMT_CONSTEXPR void on_auto() { arg_id = handler.on_arg_id(); } FMT_CONSTEXPR void on_index(int id) { arg_id = handler.on_arg_id(id); } FMT_CONSTEXPR void on_name(basic_string_view id) { arg_id = handler.on_arg_id(id); } }; ++begin; if (begin == end) return handler.on_error("invalid format string"), end; if (*begin == '}') { handler.on_replacement_field(handler.on_arg_id(), begin); } else if (*begin == '{') { handler.on_text(begin, begin + 1); } else { auto adapter = id_adapter{handler, 0}; begin = parse_arg_id(begin, end, adapter); Char c = begin != end ? *begin : Char(); if (c == '}') { handler.on_replacement_field(adapter.arg_id, begin); } else if (c == ':') { begin = handler.on_format_specs(adapter.arg_id, begin + 1, end); if (begin == end || *begin != '}') return handler.on_error("unknown format specifier"), end; } else { return handler.on_error("missing '}' in format string"), end; } } return begin + 1; } template FMT_CONSTEXPR void parse_format_string(basic_string_view format_str, Handler&& handler) { auto begin = format_str.data(); auto end = begin + format_str.size(); if (end - begin < 32) { // Use a simple loop instead of memchr for small strings. 
const Char* p = begin; while (p != end) { auto c = *p++; if (c == '{') { handler.on_text(begin, p - 1); begin = p = parse_replacement_field(p - 1, end, handler); } else if (c == '}') { if (p == end || *p != '}') return handler.on_error("unmatched '}' in format string"); handler.on_text(begin, p); begin = ++p; } } handler.on_text(begin, end); return; } struct writer { FMT_CONSTEXPR void operator()(const Char* from, const Char* to) { if (from == to) return; for (;;) { const Char* p = nullptr; if (!find(from, to, Char('}'), p)) return handler_.on_text(from, to); ++p; if (p == to || *p != '}') return handler_.on_error("unmatched '}' in format string"); handler_.on_text(from, p); from = p + 1; } } Handler& handler_; } write = {handler}; while (begin != end) { // Doing two passes with memchr (one for '{' and another for '}') is up to // 2.5x faster than the naive one-pass implementation on big format strings. const Char* p = begin; if (*begin != '{' && !find(begin + 1, end, Char('{'), p)) return write(begin, end); write(begin, p); begin = parse_replacement_field(p, end, handler); } } template ::value> struct strip_named_arg { using type = T; }; template struct strip_named_arg { using type = remove_cvref_t; }; template FMT_VISIBILITY("hidden") // Suppress an ld warning on macOS (#3769). FMT_CONSTEXPR auto parse_format_specs(ParseContext& ctx) -> decltype(ctx.begin()) { using char_type = typename ParseContext::char_type; using context = buffered_context; using mapped_type = conditional_t< mapped_type_constant::value != type::custom_type, decltype(arg_mapper().map(std::declval())), typename strip_named_arg::type>; #if defined(__cpp_if_constexpr) if constexpr (std::is_default_constructible< formatter>::value) { return formatter().parse(ctx); } else { type_is_unformattable_for _; return ctx.begin(); } #else return formatter().parse(ctx); #endif } // Checks char specs and returns true iff the presentation type is char-like. 
FMT_CONSTEXPR inline auto check_char_specs(const format_specs& specs) -> bool { if (specs.type != presentation_type::none && specs.type != presentation_type::chr && specs.type != presentation_type::debug) { return false; } if (specs.align == align::numeric || specs.sign != sign::none || specs.alt) report_error("invalid format specifier for char"); return true; } #if FMT_USE_NONTYPE_TEMPLATE_ARGS template constexpr auto get_arg_index_by_name(basic_string_view name) -> int { if constexpr (is_statically_named_arg()) { if (name == T::name) return N; } if constexpr (sizeof...(Args) > 0) return get_arg_index_by_name(name); (void)name; // Workaround an MSVC bug about "unused" parameter. return -1; } #endif template FMT_CONSTEXPR auto get_arg_index_by_name(basic_string_view name) -> int { #if FMT_USE_NONTYPE_TEMPLATE_ARGS if constexpr (sizeof...(Args) > 0) return get_arg_index_by_name<0, Args...>(name); #endif (void)name; return -1; } template class format_string_checker { private: using parse_context_type = compile_parse_context; static constexpr int num_args = sizeof...(Args); // Format specifier parsing function. // In the future basic_format_parse_context will replace compile_parse_context // here and will use is_constant_evaluated and downcasting to access the data // needed for compile-time checks: https://godbolt.org/z/GvWzcTjh1. using parse_func = const Char* (*)(parse_context_type&); type types_[num_args > 0 ? static_cast(num_args) : 1]; parse_context_type context_; parse_func parse_funcs_[num_args > 0 ? 
static_cast(num_args) : 1]; public: explicit FMT_CONSTEXPR format_string_checker(basic_string_view fmt) : types_{mapped_type_constant>::value...}, context_(fmt, num_args, types_), parse_funcs_{&parse_format_specs...} {} FMT_CONSTEXPR void on_text(const Char*, const Char*) {} FMT_CONSTEXPR auto on_arg_id() -> int { return context_.next_arg_id(); } FMT_CONSTEXPR auto on_arg_id(int id) -> int { return context_.check_arg_id(id), id; } FMT_CONSTEXPR auto on_arg_id(basic_string_view id) -> int { #if FMT_USE_NONTYPE_TEMPLATE_ARGS auto index = get_arg_index_by_name(id); if (index < 0) on_error("named argument is not found"); return index; #else (void)id; on_error("compile-time checks for named arguments require C++20 support"); return 0; #endif } FMT_CONSTEXPR void on_replacement_field(int id, const Char* begin) { on_format_specs(id, begin, begin); // Call parse() on empty specs. } FMT_CONSTEXPR auto on_format_specs(int id, const Char* begin, const Char*) -> const Char* { context_.advance_to(begin); // id >= 0 check is a workaround for gcc 10 bug (#2065). return id >= 0 && id < num_args ? parse_funcs_[id](context_) : begin; } FMT_NORETURN FMT_CONSTEXPR void on_error(const char* message) { report_error(message); } }; // A base class for compile-time strings. struct compile_string {}; template using is_compile_string = std::is_base_of; // Reports a compile-time error if S is not a valid format string. template ::value)> FMT_ALWAYS_INLINE void check_format_string(const S&) { #ifdef FMT_ENFORCE_COMPILE_STRING static_assert(is_compile_string::value, "FMT_ENFORCE_COMPILE_STRING requires all format strings to use " "FMT_STRING."); #endif } template ::value)> void check_format_string(S format_str) { using char_t = typename S::char_type; FMT_CONSTEXPR auto s = basic_string_view(format_str); using checker = format_string_checker...>; FMT_CONSTEXPR bool error = (parse_format_string(s, checker(s)), true); ignore_unused(error); } // Report truncation to prevent silent data loss. 
inline void report_truncation(bool truncated) { if (truncated) report_error("output is truncated"); } // Use vformat_args and avoid type_identity to keep symbols short and workaround // a GCC <= 4.8 bug. template struct vformat_args { using type = basic_format_args>; }; template <> struct vformat_args { using type = format_args; }; template void vformat_to(buffer& buf, basic_string_view fmt, typename vformat_args::type args, locale_ref loc = {}); FMT_API void vprint_mojibake(FILE*, string_view, format_args, bool = false); #ifndef _WIN32 inline void vprint_mojibake(FILE*, string_view, format_args, bool) {} #endif template struct native_formatter { private: dynamic_format_specs specs_; public: using nonlocking = void; template FMT_CONSTEXPR auto parse(ParseContext& ctx) -> const Char* { if (ctx.begin() == ctx.end() || *ctx.begin() == '}') return ctx.begin(); auto end = parse_format_specs(ctx.begin(), ctx.end(), specs_, ctx, TYPE); if (const_check(TYPE == type::char_type)) check_char_specs(specs_); return end; } template FMT_CONSTEXPR void set_debug_format(bool set = true) { specs_.type = set ? presentation_type::debug : presentation_type::none; } template FMT_CONSTEXPR auto format(const T& val, FormatContext& ctx) const -> decltype(ctx.out()); }; } // namespace detail FMT_BEGIN_EXPORT // A formatter specialization for natively supported types. template struct formatter::value != detail::type::custom_type>> : detail::native_formatter::value> { }; template struct runtime_format_string { basic_string_view str; }; /// A compile-time format string. 
// Compile-time-checked format string: constructing it from a string literal
// runs the format-string checker during constant evaluation (FMT_CONSTEVAL),
// turning malformed format strings into compile errors.
template class basic_format_string {
 private:
  basic_string_view str_;

 public:
  template <
      typename S,
      FMT_ENABLE_IF(
          std::is_convertible>::value ||
          (detail::is_compile_string::value &&
           std::is_constructible, const S&>::value))>
  FMT_CONSTEVAL FMT_ALWAYS_INLINE basic_format_string(const S& s) : str_(s) {
    // Reject lvalue views among the formatted arguments — they are easy to
    // dangle when the formatting is deferred.
    static_assert(
        detail::count<
            (std::is_base_of>::value &&
             std::is_reference::value)...>() == 0,
        "passing views as lvalues is disallowed");
#if FMT_USE_CONSTEVAL
    // Only statically named arguments can be validated at compile time;
    // otherwise fall back to the check_format_string diagnostic path.
    if constexpr (detail::count_named_args() ==
                  detail::count_statically_named_args()) {
      using checker = detail::format_string_checker...>;
      detail::parse_format_string(str_, checker(s));
    }
#else
    detail::check_format_string(s);
#endif
  }
  // Strings wrapped by fmt::runtime bypass compile-time checking entirely.
  basic_format_string(runtime_format_string fmt) : str_(fmt.str) {}

  FMT_ALWAYS_INLINE operator basic_string_view() const { return str_; }
  auto get() const -> basic_string_view { return str_; }
};

#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409
// Workaround broken conversion on older gcc.
template using format_string = string_view;
inline auto runtime(string_view s) -> string_view { return s; }
#else
template using format_string = basic_format_string...>;
/**
 * Creates a runtime format string.
 *
 * **Example**:
 *
 *     // Check format string at runtime instead of compile-time.
 *     fmt::print(fmt::runtime("{:d}"), "I am not a number");
 */
inline auto runtime(string_view s) -> runtime_format_string<> { return {{s}}; }
#endif

/// Formats a string and writes the output to `out`.
template ,
                                            char>::value)>
auto vformat_to(OutputIt&& out, string_view fmt, format_args args)
    -> remove_cvref_t {
  auto&& buf = detail::get_buffer(out);
  detail::vformat_to(buf, fmt, args, {});
  return detail::get_iterator(buf, out);
}

/**
 * Formats `args` according to specifications in `fmt`, writes the result to
 * the output iterator `out` and returns the iterator past the end of the output
 * range. `format_to` does not append a terminating null character.
 *
 * **Example**:
 *
 *     auto out = std::vector();
 *     fmt::format_to(std::back_inserter(out), "{}", 42);
 */
template ,
                                            char>::value)>
FMT_INLINE auto format_to(OutputIt&& out, format_string fmt, T&&... args)
    -> remove_cvref_t {
  return vformat_to(FMT_FWD(out), fmt, fmt::make_format_args(args...));
}

template struct format_to_n_result {
  /// Iterator past the end of the output range.
  OutputIt out;
  /// Total (not truncated) output size.
  size_t size;
};

template ::value)>
auto vformat_to_n(OutputIt out, size_t n, string_view fmt, format_args args)
    -> format_to_n_result {
  using traits = detail::fixed_buffer_traits;
  auto buf = detail::iterator_buffer(out, n);
  detail::vformat_to(buf, fmt, args, {});
  return {buf.out(), buf.count()};
}

/**
 * Formats `args` according to specifications in `fmt`, writes up to `n`
 * characters of the result to the output iterator `out` and returns the total
 * (not truncated) output size and the iterator past the end of the output
 * range. `format_to_n` does not append a terminating null character.
 */
template ::value)>
FMT_INLINE auto format_to_n(OutputIt out, size_t n, format_string fmt,
                            T&&... args) -> format_to_n_result {
  return vformat_to_n(out, n, fmt, fmt::make_format_args(args...));
}

// Result of formatting into a fixed-size char array; the implicit conversions
// to the iterator call report_truncation so data loss is not silent.
template struct format_to_result {
  /// Iterator pointing to just after the last successful write in the range.
  OutputIt out;
  /// Specifies if the output was truncated.
  bool truncated;

  FMT_CONSTEXPR operator OutputIt&() & {
    detail::report_truncation(truncated);
    return out;
  }
  FMT_CONSTEXPR operator const OutputIt&() const& {
    detail::report_truncation(truncated);
    return out;
  }
  FMT_CONSTEXPR operator OutputIt&&() && {
    detail::report_truncation(truncated);
    return static_cast(out);
  }
};

template
auto vformat_to(char (&out)[N], string_view fmt, format_args args)
    -> format_to_result {
  auto result = vformat_to_n(out, N, fmt, args);
  return {result.out, result.size > N};
}

template
FMT_INLINE auto format_to(char (&out)[N], format_string fmt,
                          T&&... args) -> format_to_result {
  auto result = fmt::format_to_n(out, N, fmt, static_cast(args)...);
  return {result.out, result.size > N};
}

/// Returns the number of chars in the output of `format(fmt, args...)`.
template
FMT_NODISCARD FMT_INLINE auto formatted_size(format_string fmt,
                                             T&&... args) -> size_t {
  auto buf = detail::counting_buffer<>();
  detail::vformat_to(buf, fmt, fmt::make_format_args(args...), {});
  return buf.count();
}

FMT_API void vprint(string_view fmt, format_args args);
FMT_API void vprint(FILE* f, string_view fmt, format_args args);
FMT_API void vprint_buffered(FILE* f, string_view fmt, format_args args);
FMT_API void vprintln(FILE* f, string_view fmt, format_args args);

/**
 * Formats `args` according to specifications in `fmt` and writes the output
 * to `stdout`.
 *
 * **Example**:
 *
 *     fmt::print("The answer is {}.", 42);
 */
template
FMT_INLINE void print(format_string fmt, T&&... args) {
  const auto& vargs = fmt::make_format_args(args...);
  // Non-UTF-8 environments take the mojibake fallback path.
  if (!detail::use_utf8()) return detail::vprint_mojibake(stdout, fmt, vargs);
  return detail::is_locking() ? vprint_buffered(stdout, fmt, vargs)
                              : vprint(fmt, vargs);
}

/**
 * Formats `args` according to specifications in `fmt` and writes the
 * output to the file `f`.
 *
 * **Example**:
 *
 *     fmt::print(stderr, "Don't {}!", "panic");
 */
template
FMT_INLINE void print(FILE* f, format_string fmt, T&&... args) {
  const auto& vargs = fmt::make_format_args(args...);
  if (!detail::use_utf8()) return detail::vprint_mojibake(f, fmt, vargs);
  return detail::is_locking() ? vprint_buffered(f, fmt, vargs)
                              : vprint(f, fmt, vargs);
}

/// Formats `args` according to specifications in `fmt` and writes the output
/// to the file `f` followed by a newline.
template
FMT_INLINE void println(FILE* f, format_string fmt, T&&... args) {
  const auto& vargs = fmt::make_format_args(args...);
  return detail::use_utf8() ? vprintln(f, fmt, vargs)
                            : detail::vprint_mojibake(f, fmt, vargs, true);
}

/// Formats `args` according to specifications in `fmt` and writes the output
/// to `stdout` followed by a newline.
template
FMT_INLINE void println(format_string fmt, T&&... args) {
  return fmt::println(stdout, fmt, static_cast(args)...);
}

FMT_END_EXPORT
FMT_GCC_PRAGMA("GCC pop_options")
FMT_END_NAMESPACE

#ifdef FMT_HEADER_ONLY
# include "format.h"
#endif
#endif  // FMT_BASE_H_

================================================
FILE: vendor/fmt/format-inl.h
================================================
// Formatting library for C++ - implementation
//
// Copyright (c) 2012 - 2016, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.

#ifndef FMT_FORMAT_INL_H_
#define FMT_FORMAT_INL_H_

// NOTE(review): the header names of these includes were lost in extraction —
// TODO restore from upstream fmt.
#ifndef FMT_MODULE
# include
# include  // errno
# include
# include
# include
# if !defined(FMT_STATIC_THOUSANDS_SEPARATOR)
# include
# endif
#endif

#if defined(_WIN32) && !defined(FMT_USE_WRITE_CONSOLE)
# include  // _isatty
#endif

#include "format.h"

FMT_BEGIN_NAMESPACE
namespace detail {

// Called when an FMT_ASSERT fails: prints the location and terminates.
FMT_FUNC void assert_fail(const char* file, int line, const char* message) {
  // Use unchecked std::fprintf to avoid triggering another assertion when
  // writing to stderr fails
  std::fprintf(stderr, "%s:%d: assertion failed: %s", file, line, message);
  // Chosen instead of std::abort to satisfy Clang in CUDA mode during device
  // code pass.
  std::terminate();
}

// Renders "<message>: error <code>" into out without dynamic allocation.
FMT_FUNC void format_error_code(detail::buffer& out, int error_code,
                                string_view message) noexcept {
  // Report error code making sure that the output fits into
  // inline_buffer_size to avoid dynamic memory allocation and potential
  // bad_alloc.
  out.try_resize(0);
  static const char SEP[] = ": ";
  static const char ERROR_STR[] = "error ";
  // Subtract 2 to account for terminating null characters in SEP and ERROR_STR.
  size_t error_code_size = sizeof(SEP) + sizeof(ERROR_STR) - 2;
  auto abs_value = static_cast>(error_code);
  if (detail::is_negative(error_code)) {
    abs_value = 0 - abs_value;
    ++error_code_size;
  }
  error_code_size += detail::to_unsigned(detail::count_digits(abs_value));
  auto it = appender(out);
  // Drop the message (but keep the code) if it would overflow the inline
  // buffer.
  if (message.size() <= inline_buffer_size - error_code_size)
    fmt::format_to(it, FMT_STRING("{}{}"), message, SEP);
  fmt::format_to(it, FMT_STRING("{}{}"), ERROR_STR, error_code);
  FMT_ASSERT(out.size() <= inline_buffer_size, "");
}

// Formats the error via func and best-effort writes it to stderr.
FMT_FUNC void report_error(format_func func, int error_code,
                           const char* message) noexcept {
  memory_buffer full_message;
  func(full_message, error_code, message);
  // Don't use fwrite_fully because the latter may throw.
  if (std::fwrite(full_message.data(), full_message.size(), 1, stderr) > 0)
    std::fputc('\n', stderr);
}

// A wrapper around fwrite that throws on error.
inline void fwrite_fully(const void* ptr, size_t count, FILE* stream) {
  size_t written = std::fwrite(ptr, 1, count, stream);
  if (written < count)
    FMT_THROW(system_error(errno, FMT_STRING("cannot write to file")));
}

#ifndef FMT_STATIC_THOUSANDS_SEPARATOR
template locale_ref::locale_ref(const Locale& loc) : locale_(&loc) {
  static_assert(std::is_same::value, "");
}

template auto locale_ref::get() const -> Locale {
  static_assert(std::is_same::value, "");
  // A null locale_ means "use the global locale".
  return locale_ ? *static_cast(locale_) : std::locale();
}

// Reads grouping and the thousands separator from the locale's numpunct facet.
template
FMT_FUNC auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result {
  auto& facet = std::use_facet>(loc.get());
  auto grouping = facet.grouping();
  auto thousands_sep = grouping.empty() ? Char() : facet.thousands_sep();
  return {std::move(grouping), thousands_sep};
}
template FMT_FUNC auto decimal_point_impl(locale_ref loc) -> Char {
  return std::use_facet>(loc.get()).decimal_point();
}
#else
// Static-separator build: fixed 3-digit grouping and '.' decimal point.
template
FMT_FUNC auto thousands_sep_impl(locale_ref) -> thousands_sep_result {
  return {"\03", FMT_STATIC_THOUSANDS_SEPARATOR};
}
template FMT_FUNC Char decimal_point_impl(locale_ref) { return '.'; }
#endif

// Writes a localized numeric value; returns true on success.
FMT_FUNC auto write_loc(appender out, loc_value value,
                        const format_specs& specs, locale_ref loc) -> bool {
#ifdef FMT_STATIC_THOUSANDS_SEPARATOR
  value.visit(loc_writer<>{
      out, specs, std::string(1, FMT_STATIC_THOUSANDS_SEPARATOR), "\3", "."});
  return true;
#else
  auto locale = loc.get();
  // We cannot use the num_put facet because it may produce output in
  // a wrong encoding.
  using facet = format_facet;
  if (std::has_facet(locale))
    return std::use_facet(locale).put(out, value, specs);
  return facet(locale).put(out, value, specs);
#endif
}
}  // namespace detail

FMT_FUNC void report_error(const char* message) {
  FMT_THROW(format_error(message));
}

template typename Locale::id format_facet::id;

#ifndef FMT_STATIC_THOUSANDS_SEPARATOR
template format_facet::format_facet(Locale& loc) {
  auto& numpunct = std::use_facet>(loc);
  grouping_ = numpunct.grouping();
  if (!grouping_.empty()) separator_ = std::string(1, numpunct.thousands_sep());
}

template <> FMT_API FMT_FUNC auto format_facet::do_put(
    appender out, loc_value val, const format_specs& specs) const -> bool {
  return val.visit(
      detail::loc_writer<>{out, specs, separator_, grouping_, decimal_point_});
}
#endif

// Builds a std::system_error carrying a formatted message.
FMT_FUNC auto vsystem_error(int error_code, string_view fmt, format_args args)
    -> std::system_error {
  auto ec = std::error_code(error_code, std::generic_category());
  return std::system_error(ec, vformat(fmt, args));
}

namespace detail {

// basic_fp equality: same significand and same exponent.
template inline auto operator==(basic_fp x, basic_fp y) -> bool {
  return x.f == y.f && x.e == y.e;
}

// Compilers should be able to optimize this into the ror instruction.
// 32-bit rotate right; r is reduced mod 32 so the shifts are never UB.
FMT_CONSTEXPR inline auto rotr(uint32_t n, uint32_t r) noexcept -> uint32_t {
  r &= 31;
  return (n >> r) | (n << (32 - r));
}
// 64-bit rotate right; r is reduced mod 64.
FMT_CONSTEXPR inline auto rotr(uint64_t n, uint32_t r) noexcept -> uint64_t {
  r &= 63;
  return (n >> r) | (n << (64 - r));
}

// Implementation of Dragonbox algorithm: https://github.com/jk-jeon/dragonbox.
namespace dragonbox {
// Computes upper 64 bits of multiplication of a 32-bit unsigned integer and a
// 64-bit unsigned integer.
inline auto umul96_upper64(uint32_t x, uint64_t y) noexcept -> uint64_t {
  return umul128_upper64(static_cast(x) << 32, y);
}

// Computes lower 128 bits of multiplication of a 64-bit unsigned integer and a
// 128-bit unsigned integer.
inline auto umul192_lower128(uint64_t x, uint128_fallback y) noexcept
    -> uint128_fallback {
  uint64_t high = x * y.high();
  uint128_fallback high_low = umul128(x, y.low());
  return {high + high_low.high(), high_low.low()};
}

// Computes lower 64 bits of multiplication of a 32-bit unsigned integer and a
// 64-bit unsigned integer.
inline auto umul96_lower64(uint32_t x, uint64_t y) noexcept -> uint64_t {
  return x * y;
}

// Various fast log computations.
inline auto floor_log10_pow2_minus_log10_4_over_3(int e) noexcept -> int {
  FMT_ASSERT(e <= 2936 && e >= -2985, "too large exponent");
  return (e * 631305 - 261663) >> 21;
}

// Magic (divisor, shift) pairs for dividing by 10 and 100 below.
FMT_INLINE_VARIABLE constexpr struct {
  uint32_t divisor;
  int shift_amount;
} div_small_pow10_infos[] = {{10, 16}, {100, 16}};

// Replaces n by floor(n / pow(10, N)) returning true if and only if n is
// divisible by pow(10, N).
// Precondition: n <= pow(10, N + 1).
template
auto check_divisibility_and_divide_by_pow10(uint32_t& n) noexcept -> bool {
  // The numbers below are chosen such that:
  //   1. floor(n/d) = floor(nm / 2^k) where d=10 or d=100,
  //   2. nm mod 2^k < m if and only if n is divisible by d,
  // where m is magic_number, k is shift_amount
  // and d is divisor.
  //
  // Item 1 is a common technique of replacing division by a constant with
  // multiplication, see e.g. "Division by Invariant Integers Using
  // Multiplication" by Granlund and Montgomery (1994). magic_number (m) is set
  // to ceil(2^k/d) for large enough k.
  // The idea for item 2 originates from Schubfach.
  constexpr auto info = div_small_pow10_infos[N - 1];
  FMT_ASSERT(n <= info.divisor * 10, "n is too large");
  constexpr uint32_t magic_number =
      (1u << info.shift_amount) / info.divisor + 1;
  n *= magic_number;
  const uint32_t comparison_mask = (1u << info.shift_amount) - 1;
  bool result = (n & comparison_mask) < magic_number;
  n >>= info.shift_amount;
  return result;
}

// Computes floor(n / pow(10, N)) for small n and N.
// Precondition: n <= pow(10, N + 1).
template auto small_division_by_pow10(uint32_t n) noexcept -> uint32_t {
  constexpr auto info = div_small_pow10_infos[N - 1];
  FMT_ASSERT(n <= info.divisor * 10, "n is too large");
  constexpr uint32_t magic_number =
      (1u << info.shift_amount) / info.divisor + 1;
  return (n * magic_number) >> info.shift_amount;
}

// Computes floor(n / 10^(kappa + 1)) (float)
inline auto divide_by_10_to_kappa_plus_1(uint32_t n) noexcept -> uint32_t {
  // 1374389535 = ceil(2^37/100)
  return static_cast((static_cast(n) * 1374389535) >> 37);
}
// Computes floor(n / 10^(kappa + 1)) (double)
inline auto divide_by_10_to_kappa_plus_1(uint64_t n) noexcept -> uint64_t {
  // 2361183241434822607 = ceil(2^(64+7)/1000)
  return umul128_upper64(n, 2361183241434822607ull) >> 7;
}

// Various subroutines using pow10 cache
template struct cache_accessor;

// Cache of 64-bit pow10 significands and helpers for the single-precision
// path (specialization; the stripped argument was presumably float — TODO
// confirm against upstream fmt). The table below continues on the following
// lines of this file.
template <> struct cache_accessor {
  using carrier_uint = float_info::carrier_uint;
  using cache_entry_type = uint64_t;

  static auto get_cached_power(int k) noexcept -> uint64_t {
    FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k,
               "k is out of range");
    static constexpr const uint64_t pow10_significands[] = {
        0x81ceb32c4b43fcf5, 0xa2425ff75e14fc32, 0xcad2f7f5359a3b3f,
        0xfd87b5f28300ca0e, 0x9e74d1b791e07e49, 0xc612062576589ddb,
        0xf79687aed3eec552, 0x9abe14cd44753b53, 0xc16d9a0095928a28,
        0xf1c90080baf72cb2,
0x971da05074da7bef, 0xbce5086492111aeb, 0xec1e4a7db69561a6, 0x9392ee8e921d5d08, 0xb877aa3236a4b44a, 0xe69594bec44de15c, 0x901d7cf73ab0acda, 0xb424dc35095cd810, 0xe12e13424bb40e14, 0x8cbccc096f5088cc, 0xafebff0bcb24aaff, 0xdbe6fecebdedd5bf, 0x89705f4136b4a598, 0xabcc77118461cefd, 0xd6bf94d5e57a42bd, 0x8637bd05af6c69b6, 0xa7c5ac471b478424, 0xd1b71758e219652c, 0x83126e978d4fdf3c, 0xa3d70a3d70a3d70b, 0xcccccccccccccccd, 0x8000000000000000, 0xa000000000000000, 0xc800000000000000, 0xfa00000000000000, 0x9c40000000000000, 0xc350000000000000, 0xf424000000000000, 0x9896800000000000, 0xbebc200000000000, 0xee6b280000000000, 0x9502f90000000000, 0xba43b74000000000, 0xe8d4a51000000000, 0x9184e72a00000000, 0xb5e620f480000000, 0xe35fa931a0000000, 0x8e1bc9bf04000000, 0xb1a2bc2ec5000000, 0xde0b6b3a76400000, 0x8ac7230489e80000, 0xad78ebc5ac620000, 0xd8d726b7177a8000, 0x878678326eac9000, 0xa968163f0a57b400, 0xd3c21bcecceda100, 0x84595161401484a0, 0xa56fa5b99019a5c8, 0xcecb8f27f4200f3a, 0x813f3978f8940985, 0xa18f07d736b90be6, 0xc9f2c9cd04674edf, 0xfc6f7c4045812297, 0x9dc5ada82b70b59e, 0xc5371912364ce306, 0xf684df56c3e01bc7, 0x9a130b963a6c115d, 0xc097ce7bc90715b4, 0xf0bdc21abb48db21, 0x96769950b50d88f5, 0xbc143fa4e250eb32, 0xeb194f8e1ae525fe, 0x92efd1b8d0cf37bf, 0xb7abc627050305ae, 0xe596b7b0c643c71a, 0x8f7e32ce7bea5c70, 0xb35dbf821ae4f38c, 0xe0352f62a19e306f}; return pow10_significands[k - float_info::min_k]; } struct compute_mul_result { carrier_uint result; bool is_integer; }; struct compute_mul_parity_result { bool parity; bool is_integer; }; static auto compute_mul(carrier_uint u, const cache_entry_type& cache) noexcept -> compute_mul_result { auto r = umul96_upper64(u, cache); return {static_cast(r >> 32), static_cast(r) == 0}; } static auto compute_delta(const cache_entry_type& cache, int beta) noexcept -> uint32_t { return static_cast(cache >> (64 - 1 - beta)); } static auto compute_mul_parity(carrier_uint two_f, const cache_entry_type& cache, int beta) noexcept -> 
compute_mul_parity_result { FMT_ASSERT(beta >= 1, ""); FMT_ASSERT(beta < 64, ""); auto r = umul96_lower64(two_f, cache); return {((r >> (64 - beta)) & 1) != 0, static_cast(r >> (32 - beta)) == 0}; } static auto compute_left_endpoint_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept -> carrier_uint { return static_cast( (cache - (cache >> (num_significand_bits() + 2))) >> (64 - num_significand_bits() - 1 - beta)); } static auto compute_right_endpoint_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept -> carrier_uint { return static_cast( (cache + (cache >> (num_significand_bits() + 1))) >> (64 - num_significand_bits() - 1 - beta)); } static auto compute_round_up_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept -> carrier_uint { return (static_cast( cache >> (64 - num_significand_bits() - 2 - beta)) + 1) / 2; } }; template <> struct cache_accessor { using carrier_uint = float_info::carrier_uint; using cache_entry_type = uint128_fallback; static auto get_cached_power(int k) noexcept -> uint128_fallback { FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, "k is out of range"); static constexpr const uint128_fallback pow10_significands[] = { #if FMT_USE_FULL_CACHE_DRAGONBOX {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, {0x9faacf3df73609b1, 0x77b191618c54e9ad}, {0xc795830d75038c1d, 0xd59df5b9ef6a2418}, {0xf97ae3d0d2446f25, 0x4b0573286b44ad1e}, {0x9becce62836ac577, 0x4ee367f9430aec33}, {0xc2e801fb244576d5, 0x229c41f793cda740}, {0xf3a20279ed56d48a, 0x6b43527578c11110}, {0x9845418c345644d6, 0x830a13896b78aaaa}, {0xbe5691ef416bd60c, 0x23cc986bc656d554}, {0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa9}, {0x94b3a202eb1c3f39, 0x7bf7d71432f3d6aa}, {0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc54}, {0xe858ad248f5c22c9, 0xd1b3400f8f9cff69}, {0x91376c36d99995be, 0x23100809b9c21fa2}, {0xb58547448ffffb2d, 0xabd40a0c2832a78b}, {0xe2e69915b3fff9f9, 0x16c90c8f323f516d}, {0x8dd01fad907ffc3b, 0xae3da7d97f6792e4}, 
{0xb1442798f49ffb4a, 0x99cd11cfdf41779d}, {0xdd95317f31c7fa1d, 0x40405643d711d584}, {0x8a7d3eef7f1cfc52, 0x482835ea666b2573}, {0xad1c8eab5ee43b66, 0xda3243650005eed0}, {0xd863b256369d4a40, 0x90bed43e40076a83}, {0x873e4f75e2224e68, 0x5a7744a6e804a292}, {0xa90de3535aaae202, 0x711515d0a205cb37}, {0xd3515c2831559a83, 0x0d5a5b44ca873e04}, {0x8412d9991ed58091, 0xe858790afe9486c3}, {0xa5178fff668ae0b6, 0x626e974dbe39a873}, {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, {0x80fa687f881c7f8e, 0x7ce66634bc9d0b9a}, {0xa139029f6a239f72, 0x1c1fffc1ebc44e81}, {0xc987434744ac874e, 0xa327ffb266b56221}, {0xfbe9141915d7a922, 0x4bf1ff9f0062baa9}, {0x9d71ac8fada6c9b5, 0x6f773fc3603db4aa}, {0xc4ce17b399107c22, 0xcb550fb4384d21d4}, {0xf6019da07f549b2b, 0x7e2a53a146606a49}, {0x99c102844f94e0fb, 0x2eda7444cbfc426e}, {0xc0314325637a1939, 0xfa911155fefb5309}, {0xf03d93eebc589f88, 0x793555ab7eba27cb}, {0x96267c7535b763b5, 0x4bc1558b2f3458df}, {0xbbb01b9283253ca2, 0x9eb1aaedfb016f17}, {0xea9c227723ee8bcb, 0x465e15a979c1cadd}, {0x92a1958a7675175f, 0x0bfacd89ec191eca}, {0xb749faed14125d36, 0xcef980ec671f667c}, {0xe51c79a85916f484, 0x82b7e12780e7401b}, {0x8f31cc0937ae58d2, 0xd1b2ecb8b0908811}, {0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa16}, {0xdfbdcece67006ac9, 0x67a791e093e1d49b}, {0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e1}, {0xaecc49914078536d, 0x58fae9f773886e19}, {0xda7f5bf590966848, 0xaf39a475506a899f}, {0x888f99797a5e012d, 0x6d8406c952429604}, {0xaab37fd7d8f58178, 0xc8e5087ba6d33b84}, {0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a65}, {0x855c3be0a17fcd26, 0x5cf2eea09a550680}, {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, {0xd0601d8efc57b08b, 0xf13b94daf124da27}, {0x823c12795db6ce57, 0x76c53d08d6b70859}, {0xa2cb1717b52481ed, 0x54768c4b0c64ca6f}, {0xcb7ddcdda26da268, 0xa9942f5dcf7dfd0a}, {0xfe5d54150b090b02, 0xd3f93b35435d7c4d}, {0x9efa548d26e5a6e1, 0xc47bc5014a1a6db0}, {0xc6b8e9b0709f109a, 0x359ab6419ca1091c}, {0xf867241c8cc6d4c0, 0xc30163d203c94b63}, {0x9b407691d7fc44f8, 0x79e0de63425dcf1e}, {0xc21094364dfb5636, 
0x985915fc12f542e5}, {0xf294b943e17a2bc4, 0x3e6f5b7b17b2939e}, {0x979cf3ca6cec5b5a, 0xa705992ceecf9c43}, {0xbd8430bd08277231, 0x50c6ff782a838354}, {0xece53cec4a314ebd, 0xa4f8bf5635246429}, {0x940f4613ae5ed136, 0x871b7795e136be9a}, {0xb913179899f68584, 0x28e2557b59846e40}, {0xe757dd7ec07426e5, 0x331aeada2fe589d0}, {0x9096ea6f3848984f, 0x3ff0d2c85def7622}, {0xb4bca50b065abe63, 0x0fed077a756b53aa}, {0xe1ebce4dc7f16dfb, 0xd3e8495912c62895}, {0x8d3360f09cf6e4bd, 0x64712dd7abbbd95d}, {0xb080392cc4349dec, 0xbd8d794d96aacfb4}, {0xdca04777f541c567, 0xecf0d7a0fc5583a1}, {0x89e42caaf9491b60, 0xf41686c49db57245}, {0xac5d37d5b79b6239, 0x311c2875c522ced6}, {0xd77485cb25823ac7, 0x7d633293366b828c}, {0x86a8d39ef77164bc, 0xae5dff9c02033198}, {0xa8530886b54dbdeb, 0xd9f57f830283fdfd}, {0xd267caa862a12d66, 0xd072df63c324fd7c}, {0x8380dea93da4bc60, 0x4247cb9e59f71e6e}, {0xa46116538d0deb78, 0x52d9be85f074e609}, {0xcd795be870516656, 0x67902e276c921f8c}, {0x806bd9714632dff6, 0x00ba1cd8a3db53b7}, {0xa086cfcd97bf97f3, 0x80e8a40eccd228a5}, {0xc8a883c0fdaf7df0, 0x6122cd128006b2ce}, {0xfad2a4b13d1b5d6c, 0x796b805720085f82}, {0x9cc3a6eec6311a63, 0xcbe3303674053bb1}, {0xc3f490aa77bd60fc, 0xbedbfc4411068a9d}, {0xf4f1b4d515acb93b, 0xee92fb5515482d45}, {0x991711052d8bf3c5, 0x751bdd152d4d1c4b}, {0xbf5cd54678eef0b6, 0xd262d45a78a0635e}, {0xef340a98172aace4, 0x86fb897116c87c35}, {0x9580869f0e7aac0e, 0xd45d35e6ae3d4da1}, {0xbae0a846d2195712, 0x8974836059cca10a}, {0xe998d258869facd7, 0x2bd1a438703fc94c}, {0x91ff83775423cc06, 0x7b6306a34627ddd0}, {0xb67f6455292cbf08, 0x1a3bc84c17b1d543}, {0xe41f3d6a7377eeca, 0x20caba5f1d9e4a94}, {0x8e938662882af53e, 0x547eb47b7282ee9d}, {0xb23867fb2a35b28d, 0xe99e619a4f23aa44}, {0xdec681f9f4c31f31, 0x6405fa00e2ec94d5}, {0x8b3c113c38f9f37e, 0xde83bc408dd3dd05}, {0xae0b158b4738705e, 0x9624ab50b148d446}, {0xd98ddaee19068c76, 0x3badd624dd9b0958}, {0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d7}, {0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4d}, {0xd47487cc8470652b, 0x7647c32000696720}, 
{0x84c8d4dfd2c63f3b, 0x29ecd9f40041e074}, {0xa5fb0a17c777cf09, 0xf468107100525891}, {0xcf79cc9db955c2cc, 0x7182148d4066eeb5}, {0x81ac1fe293d599bf, 0xc6f14cd848405531}, {0xa21727db38cb002f, 0xb8ada00e5a506a7d}, {0xca9cf1d206fdc03b, 0xa6d90811f0e4851d}, {0xfd442e4688bd304a, 0x908f4a166d1da664}, {0x9e4a9cec15763e2e, 0x9a598e4e043287ff}, {0xc5dd44271ad3cdba, 0x40eff1e1853f29fe}, {0xf7549530e188c128, 0xd12bee59e68ef47d}, {0x9a94dd3e8cf578b9, 0x82bb74f8301958cf}, {0xc13a148e3032d6e7, 0xe36a52363c1faf02}, {0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac2}, {0x96f5600f15a7b7e5, 0x29ab103a5ef8c0ba}, {0xbcb2b812db11a5de, 0x7415d448f6b6f0e8}, {0xebdf661791d60f56, 0x111b495b3464ad22}, {0x936b9fcebb25c995, 0xcab10dd900beec35}, {0xb84687c269ef3bfb, 0x3d5d514f40eea743}, {0xe65829b3046b0afa, 0x0cb4a5a3112a5113}, {0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ac}, {0xb3f4e093db73a093, 0x59ed216765690f57}, {0xe0f218b8d25088b8, 0x306869c13ec3532d}, {0x8c974f7383725573, 0x1e414218c73a13fc}, {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, {0xdbac6c247d62a583, 0xdf45f746b74abf3a}, {0x894bc396ce5da772, 0x6b8bba8c328eb784}, {0xab9eb47c81f5114f, 0x066ea92f3f326565}, {0xd686619ba27255a2, 0xc80a537b0efefebe}, {0x8613fd0145877585, 0xbd06742ce95f5f37}, {0xa798fc4196e952e7, 0x2c48113823b73705}, {0xd17f3b51fca3a7a0, 0xf75a15862ca504c6}, {0x82ef85133de648c4, 0x9a984d73dbe722fc}, {0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebbb}, {0xcc963fee10b7d1b3, 0x318df905079926a9}, {0xffbbcfe994e5c61f, 0xfdf17746497f7053}, {0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa634}, {0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc1}, {0xf9bd690a1b68637b, 0x3dfdce7aa3c673b1}, {0x9c1661a651213e2d, 0x06bea10ca65c084f}, {0xc31bfa0fe5698db8, 0x486e494fcff30a63}, {0xf3e2f893dec3f126, 0x5a89dba3c3efccfb}, {0x986ddb5c6b3a76b7, 0xf89629465a75e01d}, {0xbe89523386091465, 0xf6bbb397f1135824}, {0xee2ba6c0678b597f, 0x746aa07ded582e2d}, {0x94db483840b717ef, 0xa8c2a44eb4571cdd}, {0xba121a4650e4ddeb, 0x92f34d62616ce414}, {0xe896a0d7e51e1566, 0x77b020baf9c81d18}, {0x915e2486ef32cd60, 
0x0ace1474dc1d122f}, {0xb5b5ada8aaff80b8, 0x0d819992132456bb}, {0xe3231912d5bf60e6, 0x10e1fff697ed6c6a}, {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, {0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb3}, {0xddd0467c64bce4a0, 0xac7cb3f6d05ddbdf}, {0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96c}, {0xad4ab7112eb3929d, 0x86c16c98d2c953c7}, {0xd89d64d57a607744, 0xe871c7bf077ba8b8}, {0x87625f056c7c4a8b, 0x11471cd764ad4973}, {0xa93af6c6c79b5d2d, 0xd598e40d3dd89bd0}, {0xd389b47879823479, 0x4aff1d108d4ec2c4}, {0x843610cb4bf160cb, 0xcedf722a585139bb}, {0xa54394fe1eedb8fe, 0xc2974eb4ee658829}, {0xce947a3da6a9273e, 0x733d226229feea33}, {0x811ccc668829b887, 0x0806357d5a3f5260}, {0xa163ff802a3426a8, 0xca07c2dcb0cf26f8}, {0xc9bcff6034c13052, 0xfc89b393dd02f0b6}, {0xfc2c3f3841f17c67, 0xbbac2078d443ace3}, {0x9d9ba7832936edc0, 0xd54b944b84aa4c0e}, {0xc5029163f384a931, 0x0a9e795e65d4df12}, {0xf64335bcf065d37d, 0x4d4617b5ff4a16d6}, {0x99ea0196163fa42e, 0x504bced1bf8e4e46}, {0xc06481fb9bcf8d39, 0xe45ec2862f71e1d7}, {0xf07da27a82c37088, 0x5d767327bb4e5a4d}, {0x964e858c91ba2655, 0x3a6a07f8d510f870}, {0xbbe226efb628afea, 0x890489f70a55368c}, {0xeadab0aba3b2dbe5, 0x2b45ac74ccea842f}, {0x92c8ae6b464fc96f, 0x3b0b8bc90012929e}, {0xb77ada0617e3bbcb, 0x09ce6ebb40173745}, {0xe55990879ddcaabd, 0xcc420a6a101d0516}, {0x8f57fa54c2a9eab6, 0x9fa946824a12232e}, {0xb32df8e9f3546564, 0x47939822dc96abfa}, {0xdff9772470297ebd, 0x59787e2b93bc56f8}, {0x8bfbea76c619ef36, 0x57eb4edb3c55b65b}, {0xaefae51477a06b03, 0xede622920b6b23f2}, {0xdab99e59958885c4, 0xe95fab368e45ecee}, {0x88b402f7fd75539b, 0x11dbcb0218ebb415}, {0xaae103b5fcd2a881, 0xd652bdc29f26a11a}, {0xd59944a37c0752a2, 0x4be76d3346f04960}, {0x857fcae62d8493a5, 0x6f70a4400c562ddc}, {0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb953}, {0xd097ad07a71f26b2, 0x7e2000a41346a7a8}, {0x825ecc24c873782f, 0x8ed400668c0c28c9}, {0xa2f67f2dfa90563b, 0x728900802f0f32fb}, {0xcbb41ef979346bca, 0x4f2b40a03ad2ffba}, {0xfea126b7d78186bc, 0xe2f610c84987bfa9}, {0x9f24b832e6b0f436, 0x0dd9ca7d2df4d7ca}, 
{0xc6ede63fa05d3143, 0x91503d1c79720dbc}, {0xf8a95fcf88747d94, 0x75a44c6397ce912b}, {0x9b69dbe1b548ce7c, 0xc986afbe3ee11abb}, {0xc24452da229b021b, 0xfbe85badce996169}, {0xf2d56790ab41c2a2, 0xfae27299423fb9c4}, {0x97c560ba6b0919a5, 0xdccd879fc967d41b}, {0xbdb6b8e905cb600f, 0x5400e987bbc1c921}, {0xed246723473e3813, 0x290123e9aab23b69}, {0x9436c0760c86e30b, 0xf9a0b6720aaf6522}, {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, {0xe7958cb87392c2c2, 0xb60b1d1230b20e05}, {0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c3}, {0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af4}, {0xe2280b6c20dd5232, 0x25c6da63c38de1b1}, {0x8d590723948a535f, 0x579c487e5a38ad0f}, {0xb0af48ec79ace837, 0x2d835a9df0c6d852}, {0xdcdb1b2798182244, 0xf8e431456cf88e66}, {0x8a08f0f8bf0f156b, 0x1b8e9ecb641b5900}, {0xac8b2d36eed2dac5, 0xe272467e3d222f40}, {0xd7adf884aa879177, 0x5b0ed81dcc6abb10}, {0x86ccbb52ea94baea, 0x98e947129fc2b4ea}, {0xa87fea27a539e9a5, 0x3f2398d747b36225}, {0xd29fe4b18e88640e, 0x8eec7f0d19a03aae}, {0x83a3eeeef9153e89, 0x1953cf68300424ad}, {0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd8}, {0xcdb02555653131b6, 0x3792f412cb06794e}, {0x808e17555f3ebf11, 0xe2bbd88bbee40bd1}, {0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec5}, {0xc8de047564d20a8b, 0xf245825a5a445276}, {0xfb158592be068d2e, 0xeed6e2f0f0d56713}, {0x9ced737bb6c4183d, 0x55464dd69685606c}, {0xc428d05aa4751e4c, 0xaa97e14c3c26b887}, {0xf53304714d9265df, 0xd53dd99f4b3066a9}, {0x993fe2c6d07b7fab, 0xe546a8038efe402a}, {0xbf8fdb78849a5f96, 0xde98520472bdd034}, {0xef73d256a5c0f77c, 0x963e66858f6d4441}, {0x95a8637627989aad, 0xdde7001379a44aa9}, {0xbb127c53b17ec159, 0x5560c018580d5d53}, {0xe9d71b689dde71af, 0xaab8f01e6e10b4a7}, {0x9226712162ab070d, 0xcab3961304ca70e9}, {0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d23}, {0xe45c10c42a2b3b05, 0x8cb89a7db77c506b}, {0x8eb98a7a9a5b04e3, 0x77f3608e92adb243}, {0xb267ed1940f1c61c, 0x55f038b237591ed4}, {0xdf01e85f912e37a3, 0x6b6c46dec52f6689}, {0x8b61313bbabce2c6, 0x2323ac4b3b3da016}, {0xae397d8aa96c1b77, 0xabec975e0a0d081b}, {0xd9c7dced53c72255, 
0x96e7bd358c904a22}, {0x881cea14545c7575, 0x7e50d64177da2e55}, {0xaa242499697392d2, 0xdde50bd1d5d0b9ea}, {0xd4ad2dbfc3d07787, 0x955e4ec64b44e865}, {0x84ec3c97da624ab4, 0xbd5af13bef0b113f}, {0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58f}, {0xcfb11ead453994ba, 0x67de18eda5814af3}, {0x81ceb32c4b43fcf4, 0x80eacf948770ced8}, {0xa2425ff75e14fc31, 0xa1258379a94d028e}, {0xcad2f7f5359a3b3e, 0x096ee45813a04331}, {0xfd87b5f28300ca0d, 0x8bca9d6e188853fd}, {0x9e74d1b791e07e48, 0x775ea264cf55347e}, {0xc612062576589dda, 0x95364afe032a819e}, {0xf79687aed3eec551, 0x3a83ddbd83f52205}, {0x9abe14cd44753b52, 0xc4926a9672793543}, {0xc16d9a0095928a27, 0x75b7053c0f178294}, {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, {0x971da05074da7bee, 0xd3f6fc16ebca5e04}, {0xbce5086492111aea, 0x88f4bb1ca6bcf585}, {0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6}, {0x9392ee8e921d5d07, 0x3aff322e62439fd0}, {0xb877aa3236a4b449, 0x09befeb9fad487c3}, {0xe69594bec44de15b, 0x4c2ebe687989a9b4}, {0x901d7cf73ab0acd9, 0x0f9d37014bf60a11}, {0xb424dc35095cd80f, 0x538484c19ef38c95}, {0xe12e13424bb40e13, 0x2865a5f206b06fba}, {0x8cbccc096f5088cb, 0xf93f87b7442e45d4}, {0xafebff0bcb24aafe, 0xf78f69a51539d749}, {0xdbe6fecebdedd5be, 0xb573440e5a884d1c}, {0x89705f4136b4a597, 0x31680a88f8953031}, {0xabcc77118461cefc, 0xfdc20d2b36ba7c3e}, {0xd6bf94d5e57a42bc, 0x3d32907604691b4d}, {0x8637bd05af6c69b5, 0xa63f9a49c2c1b110}, {0xa7c5ac471b478423, 0x0fcf80dc33721d54}, {0xd1b71758e219652b, 0xd3c36113404ea4a9}, {0x83126e978d4fdf3b, 0x645a1cac083126ea}, {0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4}, {0xcccccccccccccccc, 0xcccccccccccccccd}, {0x8000000000000000, 0x0000000000000000}, {0xa000000000000000, 0x0000000000000000}, {0xc800000000000000, 0x0000000000000000}, {0xfa00000000000000, 0x0000000000000000}, {0x9c40000000000000, 0x0000000000000000}, {0xc350000000000000, 0x0000000000000000}, {0xf424000000000000, 0x0000000000000000}, {0x9896800000000000, 0x0000000000000000}, {0xbebc200000000000, 0x0000000000000000}, {0xee6b280000000000, 0x0000000000000000}, 
{0x9502f90000000000, 0x0000000000000000}, {0xba43b74000000000, 0x0000000000000000}, {0xe8d4a51000000000, 0x0000000000000000}, {0x9184e72a00000000, 0x0000000000000000}, {0xb5e620f480000000, 0x0000000000000000}, {0xe35fa931a0000000, 0x0000000000000000}, {0x8e1bc9bf04000000, 0x0000000000000000}, {0xb1a2bc2ec5000000, 0x0000000000000000}, {0xde0b6b3a76400000, 0x0000000000000000}, {0x8ac7230489e80000, 0x0000000000000000}, {0xad78ebc5ac620000, 0x0000000000000000}, {0xd8d726b7177a8000, 0x0000000000000000}, {0x878678326eac9000, 0x0000000000000000}, {0xa968163f0a57b400, 0x0000000000000000}, {0xd3c21bcecceda100, 0x0000000000000000}, {0x84595161401484a0, 0x0000000000000000}, {0xa56fa5b99019a5c8, 0x0000000000000000}, {0xcecb8f27f4200f3a, 0x0000000000000000}, {0x813f3978f8940984, 0x4000000000000000}, {0xa18f07d736b90be5, 0x5000000000000000}, {0xc9f2c9cd04674ede, 0xa400000000000000}, {0xfc6f7c4045812296, 0x4d00000000000000}, {0x9dc5ada82b70b59d, 0xf020000000000000}, {0xc5371912364ce305, 0x6c28000000000000}, {0xf684df56c3e01bc6, 0xc732000000000000}, {0x9a130b963a6c115c, 0x3c7f400000000000}, {0xc097ce7bc90715b3, 0x4b9f100000000000}, {0xf0bdc21abb48db20, 0x1e86d40000000000}, {0x96769950b50d88f4, 0x1314448000000000}, {0xbc143fa4e250eb31, 0x17d955a000000000}, {0xeb194f8e1ae525fd, 0x5dcfab0800000000}, {0x92efd1b8d0cf37be, 0x5aa1cae500000000}, {0xb7abc627050305ad, 0xf14a3d9e40000000}, {0xe596b7b0c643c719, 0x6d9ccd05d0000000}, {0x8f7e32ce7bea5c6f, 0xe4820023a2000000}, {0xb35dbf821ae4f38b, 0xdda2802c8a800000}, {0xe0352f62a19e306e, 0xd50b2037ad200000}, {0x8c213d9da502de45, 0x4526f422cc340000}, {0xaf298d050e4395d6, 0x9670b12b7f410000}, {0xdaf3f04651d47b4c, 0x3c0cdd765f114000}, {0x88d8762bf324cd0f, 0xa5880a69fb6ac800}, {0xab0e93b6efee0053, 0x8eea0d047a457a00}, {0xd5d238a4abe98068, 0x72a4904598d6d880}, {0x85a36366eb71f041, 0x47a6da2b7f864750}, {0xa70c3c40a64e6c51, 0x999090b65f67d924}, {0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d}, {0x82818f1281ed449f, 0xbff8f10e7a8921a5}, {0xa321f2d7226895c7, 
0xaff72d52192b6a0e}, {0xcbea6f8ceb02bb39, 0x9bf4f8a69f764491}, {0xfee50b7025c36a08, 0x02f236d04753d5b5}, {0x9f4f2726179a2245, 0x01d762422c946591}, {0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef6}, {0xf8ebad2b84e0d58b, 0xd2e0898765a7deb3}, {0x9b934c3b330c8577, 0x63cc55f49f88eb30}, {0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fc}, {0xf316271c7fc3908a, 0x8bef464e3945ef7b}, {0x97edd871cfda3a56, 0x97758bf0e3cbb5ad}, {0xbde94e8e43d0c8ec, 0x3d52eeed1cbea318}, {0xed63a231d4c4fb27, 0x4ca7aaa863ee4bde}, {0x945e455f24fb1cf8, 0x8fe8caa93e74ef6b}, {0xb975d6b6ee39e436, 0xb3e2fd538e122b45}, {0xe7d34c64a9c85d44, 0x60dbbca87196b617}, {0x90e40fbeea1d3a4a, 0xbc8955e946fe31ce}, {0xb51d13aea4a488dd, 0x6babab6398bdbe42}, {0xe264589a4dcdab14, 0xc696963c7eed2dd2}, {0x8d7eb76070a08aec, 0xfc1e1de5cf543ca3}, {0xb0de65388cc8ada8, 0x3b25a55f43294bcc}, {0xdd15fe86affad912, 0x49ef0eb713f39ebf}, {0x8a2dbf142dfcc7ab, 0x6e3569326c784338}, {0xacb92ed9397bf996, 0x49c2c37f07965405}, {0xd7e77a8f87daf7fb, 0xdc33745ec97be907}, {0x86f0ac99b4e8dafd, 0x69a028bb3ded71a4}, {0xa8acd7c0222311bc, 0xc40832ea0d68ce0d}, {0xd2d80db02aabd62b, 0xf50a3fa490c30191}, {0x83c7088e1aab65db, 0x792667c6da79e0fb}, {0xa4b8cab1a1563f52, 0x577001b891185939}, {0xcde6fd5e09abcf26, 0xed4c0226b55e6f87}, {0x80b05e5ac60b6178, 0x544f8158315b05b5}, {0xa0dc75f1778e39d6, 0x696361ae3db1c722}, {0xc913936dd571c84c, 0x03bc3a19cd1e38ea}, {0xfb5878494ace3a5f, 0x04ab48a04065c724}, {0x9d174b2dcec0e47b, 0x62eb0d64283f9c77}, {0xc45d1df942711d9a, 0x3ba5d0bd324f8395}, {0xf5746577930d6500, 0xca8f44ec7ee3647a}, {0x9968bf6abbe85f20, 0x7e998b13cf4e1ecc}, {0xbfc2ef456ae276e8, 0x9e3fedd8c321a67f}, {0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101f}, {0x95d04aee3b80ece5, 0xbba1f1d158724a13}, {0xbb445da9ca61281f, 0x2a8a6e45ae8edc98}, {0xea1575143cf97226, 0xf52d09d71a3293be}, {0x924d692ca61be758, 0x593c2626705f9c57}, {0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836d}, {0xe498f455c38b997a, 0x0b6dfb9c0f956448}, {0x8edf98b59a373fec, 0x4724bd4189bd5ead}, {0xb2977ee300c50fe7, 0x58edec91ec2cb658}, 
{0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ee}, {0x8b865b215899f46c, 0xbd79e0d20082ee75}, {0xae67f1e9aec07187, 0xecd8590680a3aa12}, {0xda01ee641a708de9, 0xe80e6f4820cc9496}, {0x884134fe908658b2, 0x3109058d147fdcde}, {0xaa51823e34a7eede, 0xbd4b46f0599fd416}, {0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91b}, {0x850fadc09923329e, 0x03e2cf6bc604ddb1}, {0xa6539930bf6bff45, 0x84db8346b786151d}, {0xcfe87f7cef46ff16, 0xe612641865679a64}, {0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07f}, {0xa26da3999aef7749, 0xe3be5e330f38f09e}, {0xcb090c8001ab551c, 0x5cadf5bfd3072cc6}, {0xfdcb4fa002162a63, 0x73d9732fc7c8f7f7}, {0x9e9f11c4014dda7e, 0x2867e7fddcdd9afb}, {0xc646d63501a1511d, 0xb281e1fd541501b9}, {0xf7d88bc24209a565, 0x1f225a7ca91a4227}, {0x9ae757596946075f, 0x3375788de9b06959}, {0xc1a12d2fc3978937, 0x0052d6b1641c83af}, {0xf209787bb47d6b84, 0xc0678c5dbd23a49b}, {0x9745eb4d50ce6332, 0xf840b7ba963646e1}, {0xbd176620a501fbff, 0xb650e5a93bc3d899}, {0xec5d3fa8ce427aff, 0xa3e51f138ab4cebf}, {0x93ba47c980e98cdf, 0xc66f336c36b10138}, {0xb8a8d9bbe123f017, 0xb80b0047445d4185}, {0xe6d3102ad96cec1d, 0xa60dc059157491e6}, {0x9043ea1ac7e41392, 0x87c89837ad68db30}, {0xb454e4a179dd1877, 0x29babe4598c311fc}, {0xe16a1dc9d8545e94, 0xf4296dd6fef3d67b}, {0x8ce2529e2734bb1d, 0x1899e4a65f58660d}, {0xb01ae745b101e9e4, 0x5ec05dcff72e7f90}, {0xdc21a1171d42645d, 0x76707543f4fa1f74}, {0x899504ae72497eba, 0x6a06494a791c53a9}, {0xabfa45da0edbde69, 0x0487db9d17636893}, {0xd6f8d7509292d603, 0x45a9d2845d3c42b7}, {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b3}, {0xa7f26836f282b732, 0x8e6cac7768d7141f}, {0xd1ef0244af2364ff, 0x3207d795430cd927}, {0x8335616aed761f1f, 0x7f44e6bd49e807b9}, {0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a7}, {0xcd036837130890a1, 0x36dba887c37a8c10}, {0x802221226be55a64, 0xc2494954da2c978a}, {0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6d}, {0xc83553c5c8965d3d, 0x6f92829494e5acc8}, {0xfa42a8b73abbf48c, 0xcb772339ba1f17fa}, {0x9c69a97284b578d7, 0xff2a760414536efc}, {0xc38413cf25e2d70d, 0xfef5138519684abb}, {0xf46518c2ef5b8cd1, 
0x7eb258665fc25d6a}, {0x98bf2f79d5993802, 0xef2f773ffbd97a62}, {0xbeeefb584aff8603, 0xaafb550ffacfd8fb}, {0xeeaaba2e5dbf6784, 0x95ba2a53f983cf39}, {0x952ab45cfa97a0b2, 0xdd945a747bf26184}, {0xba756174393d88df, 0x94f971119aeef9e5}, {0xe912b9d1478ceb17, 0x7a37cd5601aab85e}, {0x91abb422ccb812ee, 0xac62e055c10ab33b}, {0xb616a12b7fe617aa, 0x577b986b314d600a}, {0xe39c49765fdf9d94, 0xed5a7e85fda0b80c}, {0x8e41ade9fbebc27d, 0x14588f13be847308}, {0xb1d219647ae6b31c, 0x596eb2d8ae258fc9}, {0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bc}, {0x8aec23d680043bee, 0x25de7bb9480d5855}, {0xada72ccc20054ae9, 0xaf561aa79a10ae6b}, {0xd910f7ff28069da4, 0x1b2ba1518094da05}, {0x87aa9aff79042286, 0x90fb44d2f05d0843}, {0xa99541bf57452b28, 0x353a1607ac744a54}, {0xd3fa922f2d1675f2, 0x42889b8997915ce9}, {0x847c9b5d7c2e09b7, 0x69956135febada12}, {0xa59bc234db398c25, 0x43fab9837e699096}, {0xcf02b2c21207ef2e, 0x94f967e45e03f4bc}, {0x8161afb94b44f57d, 0x1d1be0eebac278f6}, {0xa1ba1ba79e1632dc, 0x6462d92a69731733}, {0xca28a291859bbf93, 0x7d7b8f7503cfdcff}, {0xfcb2cb35e702af78, 0x5cda735244c3d43f}, {0x9defbf01b061adab, 0x3a0888136afa64a8}, {0xc56baec21c7a1916, 0x088aaa1845b8fdd1}, {0xf6c69a72a3989f5b, 0x8aad549e57273d46}, {0x9a3c2087a63f6399, 0x36ac54e2f678864c}, {0xc0cb28a98fcf3c7f, 0x84576a1bb416a7de}, {0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d6}, {0x969eb7c47859e743, 0x9f644ae5a4b1b326}, {0xbc4665b596706114, 0x873d5d9f0dde1fef}, {0xeb57ff22fc0c7959, 0xa90cb506d155a7eb}, {0x9316ff75dd87cbd8, 0x09a7f12442d588f3}, {0xb7dcbf5354e9bece, 0x0c11ed6d538aeb30}, {0xe5d3ef282a242e81, 0x8f1668c8a86da5fb}, {0x8fa475791a569d10, 0xf96e017d694487bd}, {0xb38d92d760ec4455, 0x37c981dcc395a9ad}, {0xe070f78d3927556a, 0x85bbe253f47b1418}, {0x8c469ab843b89562, 0x93956d7478ccec8f}, {0xaf58416654a6babb, 0x387ac8d1970027b3}, {0xdb2e51bfe9d0696a, 0x06997b05fcc0319f}, {0x88fcf317f22241e2, 0x441fece3bdf81f04}, {0xab3c2fddeeaad25a, 0xd527e81cad7626c4}, {0xd60b3bd56a5586f1, 0x8a71e223d8d3b075}, {0x85c7056562757456, 0xf6872d5667844e4a}, 
{0xa738c6bebb12d16c, 0xb428f8ac016561dc}, {0xd106f86e69d785c7, 0xe13336d701beba53}, {0x82a45b450226b39c, 0xecc0024661173474}, {0xa34d721642b06084, 0x27f002d7f95d0191}, {0xcc20ce9bd35c78a5, 0x31ec038df7b441f5}, {0xff290242c83396ce, 0x7e67047175a15272}, {0x9f79a169bd203e41, 0x0f0062c6e984d387}, {0xc75809c42c684dd1, 0x52c07b78a3e60869}, {0xf92e0c3537826145, 0xa7709a56ccdf8a83}, {0x9bbcc7a142b17ccb, 0x88a66076400bb692}, {0xc2abf989935ddbfe, 0x6acff893d00ea436}, {0xf356f7ebf83552fe, 0x0583f6b8c4124d44}, {0x98165af37b2153de, 0xc3727a337a8b704b}, {0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5d}, {0xeda2ee1c7064130c, 0x1162def06f79df74}, {0x9485d4d1c63e8be7, 0x8addcb5645ac2ba9}, {0xb9a74a0637ce2ee1, 0x6d953e2bd7173693}, {0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0438}, {0x910ab1d4db9914a0, 0x1d9c9892400a22a3}, {0xb54d5e4a127f59c8, 0x2503beb6d00cab4c}, {0xe2a0b5dc971f303a, 0x2e44ae64840fd61e}, {0x8da471a9de737e24, 0x5ceaecfed289e5d3}, {0xb10d8e1456105dad, 0x7425a83e872c5f48}, {0xdd50f1996b947518, 0xd12f124e28f7771a}, {0x8a5296ffe33cc92f, 0x82bd6b70d99aaa70}, {0xace73cbfdc0bfb7b, 0x636cc64d1001550c}, {0xd8210befd30efa5a, 0x3c47f7e05401aa4f}, {0x8714a775e3e95c78, 0x65acfaec34810a72}, {0xa8d9d1535ce3b396, 0x7f1839a741a14d0e}, {0xd31045a8341ca07c, 0x1ede48111209a051}, {0x83ea2b892091e44d, 0x934aed0aab460433}, {0xa4e4b66b68b65d60, 0xf81da84d56178540}, {0xce1de40642e3f4b9, 0x36251260ab9d668f}, {0x80d2ae83e9ce78f3, 0xc1d72b7c6b42601a}, {0xa1075a24e4421730, 0xb24cf65b8612f820}, {0xc94930ae1d529cfc, 0xdee033f26797b628}, {0xfb9b7cd9a4a7443c, 0x169840ef017da3b2}, {0x9d412e0806e88aa5, 0x8e1f289560ee864f}, {0xc491798a08a2ad4e, 0xf1a6f2bab92a27e3}, {0xf5b5d7ec8acb58a2, 0xae10af696774b1dc}, {0x9991a6f3d6bf1765, 0xacca6da1e0a8ef2a}, {0xbff610b0cc6edd3f, 0x17fd090a58d32af4}, {0xeff394dcff8a948e, 0xddfc4b4cef07f5b1}, {0x95f83d0a1fb69cd9, 0x4abdaf101564f98f}, {0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f2}, {0xea53df5fd18d5513, 0x84c86189216dc5ee}, {0x92746b9be2f8552c, 0x32fd3cf5b4e49bb5}, {0xb7118682dbb66a77, 
0x3fbc8c33221dc2a2}, {0xe4d5e82392a40515, 0x0fabaf3feaa5334b}, {0x8f05b1163ba6832d, 0x29cb4d87f2a7400f}, {0xb2c71d5bca9023f8, 0x743e20e9ef511013}, {0xdf78e4b2bd342cf6, 0x914da9246b255417}, {0x8bab8eefb6409c1a, 0x1ad089b6c2f7548f}, {0xae9672aba3d0c320, 0xa184ac2473b529b2}, {0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741f}, {0x8865899617fb1871, 0x7e2fa67c7a658893}, {0xaa7eebfb9df9de8d, 0xddbb901b98feeab8}, {0xd51ea6fa85785631, 0x552a74227f3ea566}, {0x8533285c936b35de, 0xd53a88958f872760}, {0xa67ff273b8460356, 0x8a892abaf368f138}, {0xd01fef10a657842c, 0x2d2b7569b0432d86}, {0x8213f56a67f6b29b, 0x9c3b29620e29fc74}, {0xa298f2c501f45f42, 0x8349f3ba91b47b90}, {0xcb3f2f7642717713, 0x241c70a936219a74}, {0xfe0efb53d30dd4d7, 0xed238cd383aa0111}, {0x9ec95d1463e8a506, 0xf4363804324a40ab}, {0xc67bb4597ce2ce48, 0xb143c6053edcd0d6}, {0xf81aa16fdc1b81da, 0xdd94b7868e94050b}, {0x9b10a4e5e9913128, 0xca7cf2b4191c8327}, {0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f1}, {0xf24a01a73cf2dccf, 0xbc633b39673c8ced}, {0x976e41088617ca01, 0xd5be0503e085d814}, {0xbd49d14aa79dbc82, 0x4b2d8644d8a74e19}, {0xec9c459d51852ba2, 0xddf8e7d60ed1219f}, {0x93e1ab8252f33b45, 0xcabb90e5c942b504}, {0xb8da1662e7b00a17, 0x3d6a751f3b936244}, {0xe7109bfba19c0c9d, 0x0cc512670a783ad5}, {0x906a617d450187e2, 0x27fb2b80668b24c6}, {0xb484f9dc9641e9da, 0xb1f9f660802dedf7}, {0xe1a63853bbd26451, 0x5e7873f8a0396974}, {0x8d07e33455637eb2, 0xdb0b487b6423e1e9}, {0xb049dc016abc5e5f, 0x91ce1a9a3d2cda63}, {0xdc5c5301c56b75f7, 0x7641a140cc7810fc}, {0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9e}, {0xac2820d9623bf429, 0x546345fa9fbdcd45}, {0xd732290fbacaf133, 0xa97c177947ad4096}, {0x867f59a9d4bed6c0, 0x49ed8eabcccc485e}, {0xa81f301449ee8c70, 0x5c68f256bfff5a75}, {0xd226fc195c6a2f8c, 0x73832eec6fff3112}, {0x83585d8fd9c25db7, 0xc831fd53c5ff7eac}, {0xa42e74f3d032f525, 0xba3e7ca8b77f5e56}, {0xcd3a1230c43fb26f, 0x28ce1bd2e55f35ec}, {0x80444b5e7aa7cf85, 0x7980d163cf5b81b4}, {0xa0555e361951c366, 0xd7e105bcc3326220}, {0xc86ab5c39fa63440, 0x8dd9472bf3fefaa8}, 
{0xfa856334878fc150, 0xb14f98f6f0feb952}, {0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d4}, {0xc3b8358109e84f07, 0x0a862f80ec4700c9}, {0xf4a642e14c6262c8, 0xcd27bb612758c0fb}, {0x98e7e9cccfbd7dbd, 0x8038d51cb897789d}, {0xbf21e44003acdd2c, 0xe0470a63e6bd56c4}, {0xeeea5d5004981478, 0x1858ccfce06cac75}, {0x95527a5202df0ccb, 0x0f37801e0c43ebc9}, {0xbaa718e68396cffd, 0xd30560258f54e6bb}, {0xe950df20247c83fd, 0x47c6b82ef32a206a}, {0x91d28b7416cdd27e, 0x4cdc331d57fa5442}, {0xb6472e511c81471d, 0xe0133fe4adf8e953}, {0xe3d8f9e563a198e5, 0x58180fddd97723a7}, {0x8e679c2f5e44ff8f, 0x570f09eaa7ea7649}, {0xb201833b35d63f73, 0x2cd2cc6551e513db}, {0xde81e40a034bcf4f, 0xf8077f7ea65e58d2}, {0x8b112e86420f6191, 0xfb04afaf27faf783}, {0xadd57a27d29339f6, 0x79c5db9af1f9b564}, {0xd94ad8b1c7380874, 0x18375281ae7822bd}, {0x87cec76f1c830548, 0x8f2293910d0b15b6}, {0xa9c2794ae3a3c69a, 0xb2eb3875504ddb23}, {0xd433179d9c8cb841, 0x5fa60692a46151ec}, {0x849feec281d7f328, 0xdbc7c41ba6bcd334}, {0xa5c7ea73224deff3, 0x12b9b522906c0801}, {0xcf39e50feae16bef, 0xd768226b34870a01}, {0x81842f29f2cce375, 0xe6a1158300d46641}, {0xa1e53af46f801c53, 0x60495ae3c1097fd1}, {0xca5e89b18b602368, 0x385bb19cb14bdfc5}, {0xfcf62c1dee382c42, 0x46729e03dd9ed7b6}, {0x9e19db92b4e31ba9, 0x6c07a2c26a8346d2}, {0xc5a05277621be293, 0xc7098b7305241886}, {0xf70867153aa2db38, 0xb8cbee4fc66d1ea8}, {0x9a65406d44a5c903, 0x737f74f1dc043329}, {0xc0fe908895cf3b44, 0x505f522e53053ff3}, {0xf13e34aabb430a15, 0x647726b9e7c68ff0}, {0x96c6e0eab509e64d, 0x5eca783430dc19f6}, {0xbc789925624c5fe0, 0xb67d16413d132073}, {0xeb96bf6ebadf77d8, 0xe41c5bd18c57e890}, {0x933e37a534cbaae7, 0x8e91b962f7b6f15a}, {0xb80dc58e81fe95a1, 0x723627bbb5a4adb1}, {0xe61136f2227e3b09, 0xcec3b1aaa30dd91d}, {0x8fcac257558ee4e6, 0x213a4f0aa5e8a7b2}, {0xb3bd72ed2af29e1f, 0xa988e2cd4f62d19e}, {0xe0accfa875af45a7, 0x93eb1b80a33b8606}, {0x8c6c01c9498d8b88, 0xbc72f130660533c4}, {0xaf87023b9bf0ee6a, 0xeb8fad7c7f8680b5}, {0xdb68c2ca82ed2a05, 0xa67398db9f6820e2}, #else 
{0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, {0x86a8d39ef77164bc, 0xae5dff9c02033198}, {0xd98ddaee19068c76, 0x3badd624dd9b0958}, {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, {0xe55990879ddcaabd, 0xcc420a6a101d0516}, {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, {0x95a8637627989aad, 0xdde7001379a44aa9}, {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, {0xc350000000000000, 0x0000000000000000}, {0x9dc5ada82b70b59d, 0xf020000000000000}, {0xfee50b7025c36a08, 0x02f236d04753d5b5}, {0xcde6fd5e09abcf26, 0xed4c0226b55e6f87}, {0xa6539930bf6bff45, 0x84db8346b786151d}, {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b3}, {0xd910f7ff28069da4, 0x1b2ba1518094da05}, {0xaf58416654a6babb, 0x387ac8d1970027b3}, {0x8da471a9de737e24, 0x5ceaecfed289e5d3}, {0xe4d5e82392a40515, 0x0fabaf3feaa5334b}, {0xb8da1662e7b00a17, 0x3d6a751f3b936244}, {0x95527a5202df0ccb, 0x0f37801e0c43ebc9}, {0xf13e34aabb430a15, 0x647726b9e7c68ff0} #endif }; #if FMT_USE_FULL_CACHE_DRAGONBOX return pow10_significands[k - float_info::min_k]; #else static constexpr const uint64_t powers_of_5_64[] = { 0x0000000000000001, 0x0000000000000005, 0x0000000000000019, 0x000000000000007d, 0x0000000000000271, 0x0000000000000c35, 0x0000000000003d09, 0x000000000001312d, 0x000000000005f5e1, 0x00000000001dcd65, 0x00000000009502f9, 0x0000000002e90edd, 0x000000000e8d4a51, 0x0000000048c27395, 0x000000016bcc41e9, 0x000000071afd498d, 0x0000002386f26fc1, 0x000000b1a2bc2ec5, 0x000003782dace9d9, 0x00001158e460913d, 0x000056bc75e2d631, 0x0001b1ae4d6e2ef5, 0x000878678326eac9, 0x002a5a058fc295ed, 0x00d3c21bcecceda1, 0x0422ca8b0a00a425, 0x14adf4b7320334b9}; static const int compression_ratio = 27; // Compute base index. int cache_index = (k - float_info::min_k) / compression_ratio; int kb = cache_index * compression_ratio + float_info::min_k; int offset = k - kb; // Get base cache. 
uint128_fallback base_cache = pow10_significands[cache_index]; if (offset == 0) return base_cache; // Compute the required amount of bit-shift. int alpha = floor_log2_pow10(kb + offset) - floor_log2_pow10(kb) - offset; FMT_ASSERT(alpha > 0 && alpha < 64, "shifting error detected"); // Try to recover the real cache. uint64_t pow5 = powers_of_5_64[offset]; uint128_fallback recovered_cache = umul128(base_cache.high(), pow5); uint128_fallback middle_low = umul128(base_cache.low(), pow5); recovered_cache += middle_low.high(); uint64_t high_to_middle = recovered_cache.high() << (64 - alpha); uint64_t middle_to_low = recovered_cache.low() << (64 - alpha); recovered_cache = uint128_fallback{(recovered_cache.low() >> alpha) | high_to_middle, ((middle_low.low() >> alpha) | middle_to_low)}; FMT_ASSERT(recovered_cache.low() + 1 != 0, ""); return {recovered_cache.high(), recovered_cache.low() + 1}; #endif } struct compute_mul_result { carrier_uint result; bool is_integer; }; struct compute_mul_parity_result { bool parity; bool is_integer; }; static auto compute_mul(carrier_uint u, const cache_entry_type& cache) noexcept -> compute_mul_result { auto r = umul192_upper128(u, cache); return {r.high(), r.low() == 0}; } static auto compute_delta(cache_entry_type const& cache, int beta) noexcept -> uint32_t { return static_cast(cache.high() >> (64 - 1 - beta)); } static auto compute_mul_parity(carrier_uint two_f, const cache_entry_type& cache, int beta) noexcept -> compute_mul_parity_result { FMT_ASSERT(beta >= 1, ""); FMT_ASSERT(beta < 64, ""); auto r = umul192_lower128(two_f, cache); return {((r.high() >> (64 - beta)) & 1) != 0, ((r.high() << beta) | (r.low() >> (64 - beta))) == 0}; } static auto compute_left_endpoint_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept -> carrier_uint { return (cache.high() - (cache.high() >> (num_significand_bits() + 2))) >> (64 - num_significand_bits() - 1 - beta); } static auto 
compute_right_endpoint_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept -> carrier_uint { return (cache.high() + (cache.high() >> (num_significand_bits() + 1))) >> (64 - num_significand_bits() - 1 - beta); } static auto compute_round_up_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept -> carrier_uint { return ((cache.high() >> (64 - num_significand_bits() - 2 - beta)) + 1) / 2; } }; FMT_FUNC auto get_cached_power(int k) noexcept -> uint128_fallback { return cache_accessor::get_cached_power(k); } // Various integer checks template auto is_left_endpoint_integer_shorter_interval(int exponent) noexcept -> bool { const int case_shorter_interval_left_endpoint_lower_threshold = 2; const int case_shorter_interval_left_endpoint_upper_threshold = 3; return exponent >= case_shorter_interval_left_endpoint_lower_threshold && exponent <= case_shorter_interval_left_endpoint_upper_threshold; } // Remove trailing zeros from n and return the number of zeros removed (float) FMT_INLINE int remove_trailing_zeros(uint32_t& n, int s = 0) noexcept { FMT_ASSERT(n != 0, ""); // Modular inverse of 5 (mod 2^32): (mod_inv_5 * 5) mod 2^32 = 1. constexpr uint32_t mod_inv_5 = 0xcccccccd; constexpr uint32_t mod_inv_25 = 0xc28f5c29; // = mod_inv_5 * mod_inv_5 while (true) { auto q = rotr(n * mod_inv_25, 2); if (q > max_value() / 100) break; n = q; s += 2; } auto q = rotr(n * mod_inv_5, 1); if (q <= max_value() / 10) { n = q; s |= 1; } return s; } // Removes trailing zeros and returns the number of zeros removed (double) FMT_INLINE int remove_trailing_zeros(uint64_t& n) noexcept { FMT_ASSERT(n != 0, ""); // This magic number is ceil(2^90 / 10^8). constexpr uint64_t magic_number = 12379400392853802749ull; auto nm = umul128(n, magic_number); // Is n is divisible by 10^8? if ((nm.high() & ((1ull << (90 - 64)) - 1)) == 0 && nm.low() < magic_number) { // If yes, work with the quotient... auto n32 = static_cast(nm.high() >> (90 - 64)); // ... 
and use the 32 bit variant of the function int s = remove_trailing_zeros(n32, 8); n = n32; return s; } // If n is not divisible by 10^8, work with n itself. constexpr uint64_t mod_inv_5 = 0xcccccccccccccccd; constexpr uint64_t mod_inv_25 = 0x8f5c28f5c28f5c29; // mod_inv_5 * mod_inv_5 int s = 0; while (true) { auto q = rotr(n * mod_inv_25, 2); if (q > max_value() / 100) break; n = q; s += 2; } auto q = rotr(n * mod_inv_5, 1); if (q <= max_value() / 10) { n = q; s |= 1; } return s; } // The main algorithm for shorter interval case template FMT_INLINE decimal_fp shorter_interval_case(int exponent) noexcept { decimal_fp ret_value; // Compute k and beta const int minus_k = floor_log10_pow2_minus_log10_4_over_3(exponent); const int beta = exponent + floor_log2_pow10(-minus_k); // Compute xi and zi using cache_entry_type = typename cache_accessor::cache_entry_type; const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); auto xi = cache_accessor::compute_left_endpoint_for_shorter_interval_case( cache, beta); auto zi = cache_accessor::compute_right_endpoint_for_shorter_interval_case( cache, beta); // If the left endpoint is not an integer, increase it if (!is_left_endpoint_integer_shorter_interval(exponent)) ++xi; // Try bigger divisor ret_value.significand = zi / 10; // If succeed, remove trailing zeros if necessary and return if (ret_value.significand * 10 >= xi) { ret_value.exponent = minus_k + 1; ret_value.exponent += remove_trailing_zeros(ret_value.significand); return ret_value; } // Otherwise, compute the round-up of y ret_value.significand = cache_accessor::compute_round_up_for_shorter_interval_case(cache, beta); ret_value.exponent = minus_k; // When tie occurs, choose one of them according to the rule if (exponent >= float_info::shorter_interval_tie_lower_threshold && exponent <= float_info::shorter_interval_tie_upper_threshold) { ret_value.significand = ret_value.significand % 2 == 0 ? 
ret_value.significand : ret_value.significand - 1; } else if (ret_value.significand < xi) { ++ret_value.significand; } return ret_value; } template auto to_decimal(T x) noexcept -> decimal_fp { // Step 1: integer promotion & Schubfach multiplier calculation. using carrier_uint = typename float_info::carrier_uint; using cache_entry_type = typename cache_accessor::cache_entry_type; auto br = bit_cast(x); // Extract significand bits and exponent bits. const carrier_uint significand_mask = (static_cast(1) << num_significand_bits()) - 1; carrier_uint significand = (br & significand_mask); int exponent = static_cast((br & exponent_mask()) >> num_significand_bits()); if (exponent != 0) { // Check if normal. exponent -= exponent_bias() + num_significand_bits(); // Shorter interval case; proceed like Schubfach. // In fact, when exponent == 1 and significand == 0, the interval is // regular. However, it can be shown that the end-results are anyway same. if (significand == 0) return shorter_interval_case(exponent); significand |= (static_cast(1) << num_significand_bits()); } else { // Subnormal case; the interval is always regular. if (significand == 0) return {0, 0}; exponent = std::numeric_limits::min_exponent - num_significand_bits() - 1; } const bool include_left_endpoint = (significand % 2 == 0); const bool include_right_endpoint = include_left_endpoint; // Compute k and beta. const int minus_k = floor_log10_pow2(exponent) - float_info::kappa; const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); const int beta = exponent + floor_log2_pow10(-minus_k); // Compute zi and deltai. // 10^kappa <= deltai < 10^(kappa + 1) const uint32_t deltai = cache_accessor::compute_delta(cache, beta); const carrier_uint two_fc = significand << 1; // For the case of binary32, the result of integer check is not correct for // 29711844 * 2^-82 // = 6.1442653300000000008655037797566933477355632930994033813476... 
* 10^-18 // and 29711844 * 2^-81 // = 1.2288530660000000001731007559513386695471126586198806762695... * 10^-17, // and they are the unique counterexamples. However, since 29711844 is even, // this does not cause any problem for the endpoints calculations; it can only // cause a problem when we need to perform integer check for the center. // Fortunately, with these inputs, that branch is never executed, so we are // fine. const typename cache_accessor::compute_mul_result z_mul = cache_accessor::compute_mul((two_fc | 1) << beta, cache); // Step 2: Try larger divisor; remove trailing zeros if necessary. // Using an upper bound on zi, we might be able to optimize the division // better than the compiler; we are computing zi / big_divisor here. decimal_fp ret_value; ret_value.significand = divide_by_10_to_kappa_plus_1(z_mul.result); uint32_t r = static_cast(z_mul.result - float_info::big_divisor * ret_value.significand); if (r < deltai) { // Exclude the right endpoint if necessary. if (r == 0 && (z_mul.is_integer & !include_right_endpoint)) { --ret_value.significand; r = float_info::big_divisor; goto small_divisor_case_label; } } else if (r > deltai) { goto small_divisor_case_label; } else { // r == deltai; compare fractional parts. const typename cache_accessor::compute_mul_parity_result x_mul = cache_accessor::compute_mul_parity(two_fc - 1, cache, beta); if (!(x_mul.parity | (x_mul.is_integer & include_left_endpoint))) goto small_divisor_case_label; } ret_value.exponent = minus_k + float_info::kappa + 1; // We may need to remove trailing zeros. ret_value.exponent += remove_trailing_zeros(ret_value.significand); return ret_value; // Step 3: Find the significand with the smaller divisor. 
small_divisor_case_label: ret_value.significand *= 10; ret_value.exponent = minus_k + float_info::kappa; uint32_t dist = r - (deltai / 2) + (float_info::small_divisor / 2); const bool approx_y_parity = ((dist ^ (float_info::small_divisor / 2)) & 1) != 0; // Is dist divisible by 10^kappa? const bool divisible_by_small_divisor = check_divisibility_and_divide_by_pow10::kappa>(dist); // Add dist / 10^kappa to the significand. ret_value.significand += dist; if (!divisible_by_small_divisor) return ret_value; // Check z^(f) >= epsilon^(f). // We have either yi == zi - epsiloni or yi == (zi - epsiloni) - 1, // where yi == zi - epsiloni if and only if z^(f) >= epsilon^(f). // Since there are only 2 possibilities, we only need to care about the // parity. Also, zi and r should have the same parity since the divisor // is an even number. const auto y_mul = cache_accessor::compute_mul_parity(two_fc, cache, beta); // If z^(f) >= epsilon^(f), we might have a tie when z^(f) == epsilon^(f), // or equivalently, when y is an integer. 
if (y_mul.parity != approx_y_parity) --ret_value.significand; else if (y_mul.is_integer & (ret_value.significand % 2 != 0)) --ret_value.significand; return ret_value; } } // namespace dragonbox } // namespace detail template <> struct formatter { FMT_CONSTEXPR auto parse(format_parse_context& ctx) -> format_parse_context::iterator { return ctx.begin(); } auto format(const detail::bigint& n, format_context& ctx) const -> format_context::iterator { auto out = ctx.out(); bool first = true; for (auto i = n.bigits_.size(); i > 0; --i) { auto value = n.bigits_[i - 1u]; if (first) { out = fmt::format_to(out, FMT_STRING("{:x}"), value); first = false; continue; } out = fmt::format_to(out, FMT_STRING("{:08x}"), value); } if (n.exp_ > 0) out = fmt::format_to(out, FMT_STRING("p{}"), n.exp_ * detail::bigint::bigit_bits); return out; } }; FMT_FUNC detail::utf8_to_utf16::utf8_to_utf16(string_view s) { for_each_codepoint(s, [this](uint32_t cp, string_view) { if (cp == invalid_code_point) FMT_THROW(std::runtime_error("invalid utf8")); if (cp <= 0xFFFF) { buffer_.push_back(static_cast(cp)); } else { cp -= 0x10000; buffer_.push_back(static_cast(0xD800 + (cp >> 10))); buffer_.push_back(static_cast(0xDC00 + (cp & 0x3FF))); } return true; }); buffer_.push_back(0); } FMT_FUNC void format_system_error(detail::buffer& out, int error_code, const char* message) noexcept { FMT_TRY { auto ec = std::error_code(error_code, std::generic_category()); detail::write(appender(out), std::system_error(ec, message).what()); return; } FMT_CATCH(...) {} format_error_code(out, error_code, message); } FMT_FUNC void report_system_error(int error_code, const char* message) noexcept { report_error(format_system_error, error_code, message); } FMT_FUNC auto vformat(string_view fmt, format_args args) -> std::string { // Don't optimize the "{}" case to keep the binary size small and because it // can be better optimized in fmt::format anyway. 
auto buffer = memory_buffer(); detail::vformat_to(buffer, fmt, args); return to_string(buffer); } namespace detail { template struct span { T* data; size_t size; }; template auto flockfile(F* f) -> decltype(_lock_file(f)) { _lock_file(f); } template auto funlockfile(F* f) -> decltype(_unlock_file(f)) { _unlock_file(f); } #ifndef getc_unlocked template auto getc_unlocked(F* f) -> decltype(_fgetc_nolock(f)) { return _fgetc_nolock(f); } #endif template struct has_flockfile : std::false_type {}; template struct has_flockfile()))>> : std::true_type {}; // A FILE wrapper. F is FILE defined as a template parameter to make system API // detection work. template class file_base { public: F* file_; public: file_base(F* file) : file_(file) {} operator F*() const { return file_; } // Reads a code unit from the stream. auto get() -> int { int result = getc_unlocked(file_); if (result == EOF && ferror(file_) != 0) FMT_THROW(system_error(errno, FMT_STRING("getc failed"))); return result; } // Puts the code unit back into the stream buffer. void unget(char c) { if (ungetc(c, file_) == EOF) FMT_THROW(system_error(errno, FMT_STRING("ungetc failed"))); } void flush() { fflush(this->file_); } }; // A FILE wrapper for glibc. template class glibc_file : public file_base { private: enum { line_buffered = 0x200, // _IO_LINE_BUF unbuffered = 2 // _IO_UNBUFFERED }; public: using file_base::file_base; auto is_buffered() const -> bool { return (this->file_->_flags & unbuffered) == 0; } void init_buffer() { if (this->file_->_IO_write_ptr) return; // Force buffer initialization by placing and removing a char in a buffer. putc_unlocked(0, this->file_); --this->file_->_IO_write_ptr; } // Returns the file's read buffer. auto get_read_buffer() const -> span { auto ptr = this->file_->_IO_read_ptr; return {ptr, to_unsigned(this->file_->_IO_read_end - ptr)}; } // Returns the file's write buffer. 
auto get_write_buffer() const -> span { auto ptr = this->file_->_IO_write_ptr; return {ptr, to_unsigned(this->file_->_IO_buf_end - ptr)}; } void advance_write_buffer(size_t size) { this->file_->_IO_write_ptr += size; } bool needs_flush() const { if ((this->file_->_flags & line_buffered) == 0) return false; char* end = this->file_->_IO_write_end; return memchr(end, '\n', to_unsigned(this->file_->_IO_write_ptr - end)); } void flush() { fflush_unlocked(this->file_); } }; // A FILE wrapper for Apple's libc. template class apple_file : public file_base { private: enum { line_buffered = 1, // __SNBF unbuffered = 2 // __SLBF }; public: using file_base::file_base; auto is_buffered() const -> bool { return (this->file_->_flags & unbuffered) == 0; } void init_buffer() { if (this->file_->_p) return; // Force buffer initialization by placing and removing a char in a buffer. putc_unlocked(0, this->file_); --this->file_->_p; ++this->file_->_w; } auto get_read_buffer() const -> span { return {reinterpret_cast(this->file_->_p), to_unsigned(this->file_->_r)}; } auto get_write_buffer() const -> span { return {reinterpret_cast(this->file_->_p), to_unsigned(this->file_->_bf._base + this->file_->_bf._size - this->file_->_p)}; } void advance_write_buffer(size_t size) { this->file_->_p += size; this->file_->_w -= size; } bool needs_flush() const { if ((this->file_->_flags & line_buffered) == 0) return false; return memchr(this->file_->_p + this->file_->_w, '\n', to_unsigned(-this->file_->_w)); } }; // A fallback FILE wrapper. template class fallback_file : public file_base { private: char next_; // The next unconsumed character in the buffer. bool has_next_ = false; public: using file_base::file_base; auto is_buffered() const -> bool { return false; } auto needs_flush() const -> bool { return false; } void init_buffer() {} auto get_read_buffer() const -> span { return {&next_, has_next_ ? 
1u : 0u}; } auto get_write_buffer() const -> span { return {nullptr, 0}; } void advance_write_buffer(size_t) {} auto get() -> int { has_next_ = false; return file_base::get(); } void unget(char c) { file_base::unget(c); next_ = c; has_next_ = true; } }; #ifndef FMT_USE_FALLBACK_FILE # define FMT_USE_FALLBACK_FILE 1 #endif template auto get_file(F* f, int) -> apple_file { return f; } template inline auto get_file(F* f, int) -> glibc_file { return f; } inline auto get_file(FILE* f, ...) -> fallback_file { return f; } using file_ref = decltype(get_file(static_cast(nullptr), 0)); template class file_print_buffer : public buffer { public: explicit file_print_buffer(F*) : buffer(nullptr, size_t()) {} }; template class file_print_buffer::value>> : public buffer { private: file_ref file_; static void grow(buffer& base, size_t) { auto& self = static_cast(base); self.file_.advance_write_buffer(self.size()); if (self.file_.get_write_buffer().size == 0) self.file_.flush(); auto buf = self.file_.get_write_buffer(); FMT_ASSERT(buf.size > 0, ""); self.set(buf.data, buf.size); self.clear(); } public: explicit file_print_buffer(F* f) : buffer(grow, size_t()), file_(f) { flockfile(f); file_.init_buffer(); auto buf = file_.get_write_buffer(); set(buf.data, buf.size); } ~file_print_buffer() { file_.advance_write_buffer(size()); bool flush = file_.needs_flush(); F* f = file_; // Make funlockfile depend on the template parameter F funlockfile(f); // for the system API detection to work. 
if (flush) fflush(file_); } }; #if !defined(_WIN32) || defined(FMT_USE_WRITE_CONSOLE) FMT_FUNC auto write_console(int, string_view) -> bool { return false; } #else using dword = conditional_t; extern "C" __declspec(dllimport) int __stdcall WriteConsoleW( // void*, const void*, dword, dword*, void*); FMT_FUNC bool write_console(int fd, string_view text) { auto u16 = utf8_to_utf16(text); return WriteConsoleW(reinterpret_cast(_get_osfhandle(fd)), u16.c_str(), static_cast(u16.size()), nullptr, nullptr) != 0; } #endif #ifdef _WIN32 // Print assuming legacy (non-Unicode) encoding. FMT_FUNC void vprint_mojibake(std::FILE* f, string_view fmt, format_args args, bool newline) { auto buffer = memory_buffer(); detail::vformat_to(buffer, fmt, args); if (newline) buffer.push_back('\n'); fwrite_fully(buffer.data(), buffer.size(), f); } #endif FMT_FUNC void print(std::FILE* f, string_view text) { #if defined(_WIN32) && !defined(FMT_USE_WRITE_CONSOLE) int fd = _fileno(f); if (_isatty(fd)) { std::fflush(f); if (write_console(fd, text)) return; } #endif fwrite_fully(text.data(), text.size(), f); } } // namespace detail FMT_FUNC void vprint_buffered(std::FILE* f, string_view fmt, format_args args) { auto buffer = memory_buffer(); detail::vformat_to(buffer, fmt, args); detail::print(f, {buffer.data(), buffer.size()}); } FMT_FUNC void vprint(std::FILE* f, string_view fmt, format_args args) { if (!detail::file_ref(f).is_buffered() || !detail::has_flockfile<>()) return vprint_buffered(f, fmt, args); auto&& buffer = detail::file_print_buffer<>(f); return detail::vformat_to(buffer, fmt, args); } FMT_FUNC void vprintln(std::FILE* f, string_view fmt, format_args args) { auto buffer = memory_buffer(); detail::vformat_to(buffer, fmt, args); buffer.push_back('\n'); detail::print(f, {buffer.data(), buffer.size()}); } FMT_FUNC void vprint(string_view fmt, format_args args) { vprint(stdout, fmt, args); } namespace detail { struct singleton { unsigned char upper; unsigned char lower_count; }; inline 
auto is_printable(uint16_t x, const singleton* singletons, size_t singletons_size, const unsigned char* singleton_lowers, const unsigned char* normal, size_t normal_size) -> bool { auto upper = x >> 8; auto lower_start = 0; for (size_t i = 0; i < singletons_size; ++i) { auto s = singletons[i]; auto lower_end = lower_start + s.lower_count; if (upper < s.upper) break; if (upper == s.upper) { for (auto j = lower_start; j < lower_end; ++j) { if (singleton_lowers[j] == (x & 0xff)) return false; } } lower_start = lower_end; } auto xsigned = static_cast(x); auto current = true; for (size_t i = 0; i < normal_size; ++i) { auto v = static_cast(normal[i]); auto len = (v & 0x80) != 0 ? (v & 0x7f) << 8 | normal[++i] : v; xsigned -= len; if (xsigned < 0) break; current = !current; } return current; } // This code is generated by support/printable.py. FMT_FUNC auto is_printable(uint32_t cp) -> bool { static constexpr singleton singletons0[] = { {0x00, 1}, {0x03, 5}, {0x05, 6}, {0x06, 3}, {0x07, 6}, {0x08, 8}, {0x09, 17}, {0x0a, 28}, {0x0b, 25}, {0x0c, 20}, {0x0d, 16}, {0x0e, 13}, {0x0f, 4}, {0x10, 3}, {0x12, 18}, {0x13, 9}, {0x16, 1}, {0x17, 5}, {0x18, 2}, {0x19, 3}, {0x1a, 7}, {0x1c, 2}, {0x1d, 1}, {0x1f, 22}, {0x20, 3}, {0x2b, 3}, {0x2c, 2}, {0x2d, 11}, {0x2e, 1}, {0x30, 3}, {0x31, 2}, {0x32, 1}, {0xa7, 2}, {0xa9, 2}, {0xaa, 4}, {0xab, 8}, {0xfa, 2}, {0xfb, 5}, {0xfd, 4}, {0xfe, 3}, {0xff, 9}, }; static constexpr unsigned char singletons0_lower[] = { 0xad, 0x78, 0x79, 0x8b, 0x8d, 0xa2, 0x30, 0x57, 0x58, 0x8b, 0x8c, 0x90, 0x1c, 0x1d, 0xdd, 0x0e, 0x0f, 0x4b, 0x4c, 0xfb, 0xfc, 0x2e, 0x2f, 0x3f, 0x5c, 0x5d, 0x5f, 0xb5, 0xe2, 0x84, 0x8d, 0x8e, 0x91, 0x92, 0xa9, 0xb1, 0xba, 0xbb, 0xc5, 0xc6, 0xc9, 0xca, 0xde, 0xe4, 0xe5, 0xff, 0x00, 0x04, 0x11, 0x12, 0x29, 0x31, 0x34, 0x37, 0x3a, 0x3b, 0x3d, 0x49, 0x4a, 0x5d, 0x84, 0x8e, 0x92, 0xa9, 0xb1, 0xb4, 0xba, 0xbb, 0xc6, 0xca, 0xce, 0xcf, 0xe4, 0xe5, 0x00, 0x04, 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31, 0x34, 0x3a, 0x3b, 0x45, 0x46, 0x49, 0x4a, 
0x5e, 0x64, 0x65, 0x84, 0x91, 0x9b, 0x9d, 0xc9, 0xce, 0xcf, 0x0d, 0x11, 0x29, 0x45, 0x49, 0x57, 0x64, 0x65, 0x8d, 0x91, 0xa9, 0xb4, 0xba, 0xbb, 0xc5, 0xc9, 0xdf, 0xe4, 0xe5, 0xf0, 0x0d, 0x11, 0x45, 0x49, 0x64, 0x65, 0x80, 0x84, 0xb2, 0xbc, 0xbe, 0xbf, 0xd5, 0xd7, 0xf0, 0xf1, 0x83, 0x85, 0x8b, 0xa4, 0xa6, 0xbe, 0xbf, 0xc5, 0xc7, 0xce, 0xcf, 0xda, 0xdb, 0x48, 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49, 0x4e, 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e, 0x8f, 0xb1, 0xb6, 0xb7, 0xbf, 0xc1, 0xc6, 0xc7, 0xd7, 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7, 0xfe, 0xff, 0x80, 0x0d, 0x6d, 0x71, 0xde, 0xdf, 0x0e, 0x0f, 0x1f, 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, 0x7e, 0xae, 0xaf, 0xbb, 0xbc, 0xfa, 0x16, 0x17, 0x1e, 0x1f, 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c, 0x5e, 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc, 0xf0, 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75, 0x96, 0x2f, 0x5f, 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf, 0xc7, 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98, 0x30, 0x8f, 0x1f, 0xc0, 0xc1, 0xce, 0xff, 0x4e, 0x4f, 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27, 0x2f, 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f, 0x42, 0x45, 0x90, 0x91, 0xfe, 0xff, 0x53, 0x67, 0x75, 0xc8, 0xc9, 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff, }; static constexpr singleton singletons1[] = { {0x00, 6}, {0x01, 1}, {0x03, 1}, {0x04, 2}, {0x08, 8}, {0x09, 2}, {0x0a, 5}, {0x0b, 2}, {0x0e, 4}, {0x10, 1}, {0x11, 2}, {0x12, 5}, {0x13, 17}, {0x14, 1}, {0x15, 2}, {0x17, 2}, {0x19, 13}, {0x1c, 5}, {0x1d, 8}, {0x24, 1}, {0x6a, 3}, {0x6b, 2}, {0xbc, 2}, {0xd1, 2}, {0xd4, 12}, {0xd5, 9}, {0xd6, 2}, {0xd7, 2}, {0xda, 1}, {0xe0, 5}, {0xe1, 2}, {0xe8, 2}, {0xee, 32}, {0xf0, 4}, {0xf8, 2}, {0xf9, 2}, {0xfa, 2}, {0xfb, 1}, }; static constexpr unsigned char singletons1_lower[] = { 0x0c, 0x27, 0x3b, 0x3e, 0x4e, 0x4f, 0x8f, 0x9e, 0x9e, 0x9f, 0x06, 0x07, 0x09, 0x36, 0x3d, 0x3e, 0x56, 0xf3, 0xd0, 0xd1, 0x04, 0x14, 0x18, 0x36, 0x37, 0x56, 0x57, 0x7f, 0xaa, 0xae, 0xaf, 0xbd, 0x35, 0xe0, 0x12, 0x87, 0x89, 0x8e, 0x9e, 0x04, 0x0d, 0x0e, 0x11, 0x12, 
0x29, 0x31, 0x34, 0x3a, 0x45, 0x46, 0x49, 0x4a, 0x4e, 0x4f, 0x64, 0x65, 0x5c, 0xb6, 0xb7, 0x1b, 0x1c, 0x07, 0x08, 0x0a, 0x0b, 0x14, 0x17, 0x36, 0x39, 0x3a, 0xa8, 0xa9, 0xd8, 0xd9, 0x09, 0x37, 0x90, 0x91, 0xa8, 0x07, 0x0a, 0x3b, 0x3e, 0x66, 0x69, 0x8f, 0x92, 0x6f, 0x5f, 0xee, 0xef, 0x5a, 0x62, 0x9a, 0x9b, 0x27, 0x28, 0x55, 0x9d, 0xa0, 0xa1, 0xa3, 0xa4, 0xa7, 0xa8, 0xad, 0xba, 0xbc, 0xc4, 0x06, 0x0b, 0x0c, 0x15, 0x1d, 0x3a, 0x3f, 0x45, 0x51, 0xa6, 0xa7, 0xcc, 0xcd, 0xa0, 0x07, 0x19, 0x1a, 0x22, 0x25, 0x3e, 0x3f, 0xc5, 0xc6, 0x04, 0x20, 0x23, 0x25, 0x26, 0x28, 0x33, 0x38, 0x3a, 0x48, 0x4a, 0x4c, 0x50, 0x53, 0x55, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x63, 0x65, 0x66, 0x6b, 0x73, 0x78, 0x7d, 0x7f, 0x8a, 0xa4, 0xaa, 0xaf, 0xb0, 0xc0, 0xd0, 0xae, 0xaf, 0x79, 0xcc, 0x6e, 0x6f, 0x93, }; static constexpr unsigned char normal0[] = { 0x00, 0x20, 0x5f, 0x22, 0x82, 0xdf, 0x04, 0x82, 0x44, 0x08, 0x1b, 0x04, 0x06, 0x11, 0x81, 0xac, 0x0e, 0x80, 0xab, 0x35, 0x28, 0x0b, 0x80, 0xe0, 0x03, 0x19, 0x08, 0x01, 0x04, 0x2f, 0x04, 0x34, 0x04, 0x07, 0x03, 0x01, 0x07, 0x06, 0x07, 0x11, 0x0a, 0x50, 0x0f, 0x12, 0x07, 0x55, 0x07, 0x03, 0x04, 0x1c, 0x0a, 0x09, 0x03, 0x08, 0x03, 0x07, 0x03, 0x02, 0x03, 0x03, 0x03, 0x0c, 0x04, 0x05, 0x03, 0x0b, 0x06, 0x01, 0x0e, 0x15, 0x05, 0x3a, 0x03, 0x11, 0x07, 0x06, 0x05, 0x10, 0x07, 0x57, 0x07, 0x02, 0x07, 0x15, 0x0d, 0x50, 0x04, 0x43, 0x03, 0x2d, 0x03, 0x01, 0x04, 0x11, 0x06, 0x0f, 0x0c, 0x3a, 0x04, 0x1d, 0x25, 0x5f, 0x20, 0x6d, 0x04, 0x6a, 0x25, 0x80, 0xc8, 0x05, 0x82, 0xb0, 0x03, 0x1a, 0x06, 0x82, 0xfd, 0x03, 0x59, 0x07, 0x15, 0x0b, 0x17, 0x09, 0x14, 0x0c, 0x14, 0x0c, 0x6a, 0x06, 0x0a, 0x06, 0x1a, 0x06, 0x59, 0x07, 0x2b, 0x05, 0x46, 0x0a, 0x2c, 0x04, 0x0c, 0x04, 0x01, 0x03, 0x31, 0x0b, 0x2c, 0x04, 0x1a, 0x06, 0x0b, 0x03, 0x80, 0xac, 0x06, 0x0a, 0x06, 0x21, 0x3f, 0x4c, 0x04, 0x2d, 0x03, 0x74, 0x08, 0x3c, 0x03, 0x0f, 0x03, 0x3c, 0x07, 0x38, 0x08, 0x2b, 0x05, 0x82, 0xff, 0x11, 0x18, 0x08, 0x2f, 0x11, 0x2d, 0x03, 0x20, 0x10, 0x21, 0x0f, 0x80, 0x8c, 0x04, 0x82, 
0x97, 0x19, 0x0b, 0x15, 0x88, 0x94, 0x05, 0x2f, 0x05, 0x3b, 0x07, 0x02, 0x0e, 0x18, 0x09, 0x80, 0xb3, 0x2d, 0x74, 0x0c, 0x80, 0xd6, 0x1a, 0x0c, 0x05, 0x80, 0xff, 0x05, 0x80, 0xdf, 0x0c, 0xee, 0x0d, 0x03, 0x84, 0x8d, 0x03, 0x37, 0x09, 0x81, 0x5c, 0x14, 0x80, 0xb8, 0x08, 0x80, 0xcb, 0x2a, 0x38, 0x03, 0x0a, 0x06, 0x38, 0x08, 0x46, 0x08, 0x0c, 0x06, 0x74, 0x0b, 0x1e, 0x03, 0x5a, 0x04, 0x59, 0x09, 0x80, 0x83, 0x18, 0x1c, 0x0a, 0x16, 0x09, 0x4c, 0x04, 0x80, 0x8a, 0x06, 0xab, 0xa4, 0x0c, 0x17, 0x04, 0x31, 0xa1, 0x04, 0x81, 0xda, 0x26, 0x07, 0x0c, 0x05, 0x05, 0x80, 0xa5, 0x11, 0x81, 0x6d, 0x10, 0x78, 0x28, 0x2a, 0x06, 0x4c, 0x04, 0x80, 0x8d, 0x04, 0x80, 0xbe, 0x03, 0x1b, 0x03, 0x0f, 0x0d, }; static constexpr unsigned char normal1[] = { 0x5e, 0x22, 0x7b, 0x05, 0x03, 0x04, 0x2d, 0x03, 0x66, 0x03, 0x01, 0x2f, 0x2e, 0x80, 0x82, 0x1d, 0x03, 0x31, 0x0f, 0x1c, 0x04, 0x24, 0x09, 0x1e, 0x05, 0x2b, 0x05, 0x44, 0x04, 0x0e, 0x2a, 0x80, 0xaa, 0x06, 0x24, 0x04, 0x24, 0x04, 0x28, 0x08, 0x34, 0x0b, 0x01, 0x80, 0x90, 0x81, 0x37, 0x09, 0x16, 0x0a, 0x08, 0x80, 0x98, 0x39, 0x03, 0x63, 0x08, 0x09, 0x30, 0x16, 0x05, 0x21, 0x03, 0x1b, 0x05, 0x01, 0x40, 0x38, 0x04, 0x4b, 0x05, 0x2f, 0x04, 0x0a, 0x07, 0x09, 0x07, 0x40, 0x20, 0x27, 0x04, 0x0c, 0x09, 0x36, 0x03, 0x3a, 0x05, 0x1a, 0x07, 0x04, 0x0c, 0x07, 0x50, 0x49, 0x37, 0x33, 0x0d, 0x33, 0x07, 0x2e, 0x08, 0x0a, 0x81, 0x26, 0x52, 0x4e, 0x28, 0x08, 0x2a, 0x56, 0x1c, 0x14, 0x17, 0x09, 0x4e, 0x04, 0x1e, 0x0f, 0x43, 0x0e, 0x19, 0x07, 0x0a, 0x06, 0x48, 0x08, 0x27, 0x09, 0x75, 0x0b, 0x3f, 0x41, 0x2a, 0x06, 0x3b, 0x05, 0x0a, 0x06, 0x51, 0x06, 0x01, 0x05, 0x10, 0x03, 0x05, 0x80, 0x8b, 0x62, 0x1e, 0x48, 0x08, 0x0a, 0x80, 0xa6, 0x5e, 0x22, 0x45, 0x0b, 0x0a, 0x06, 0x0d, 0x13, 0x39, 0x07, 0x0a, 0x36, 0x2c, 0x04, 0x10, 0x80, 0xc0, 0x3c, 0x64, 0x53, 0x0c, 0x48, 0x09, 0x0a, 0x46, 0x45, 0x1b, 0x48, 0x08, 0x53, 0x1d, 0x39, 0x81, 0x07, 0x46, 0x0a, 0x1d, 0x03, 0x47, 0x49, 0x37, 0x03, 0x0e, 0x08, 0x0a, 0x06, 0x39, 0x07, 0x0a, 0x81, 0x36, 0x19, 0x80, 0xb7, 0x01, 0x0f, 
0x32, 0x0d, 0x83, 0x9b, 0x66, 0x75, 0x0b, 0x80, 0xc4, 0x8a, 0xbc, 0x84, 0x2f, 0x8f, 0xd1, 0x82, 0x47, 0xa1, 0xb9, 0x82, 0x39, 0x07, 0x2a, 0x04, 0x02, 0x60, 0x26, 0x0a, 0x46, 0x0a, 0x28, 0x05, 0x13, 0x82, 0xb0, 0x5b, 0x65, 0x4b, 0x04, 0x39, 0x07, 0x11, 0x40, 0x05, 0x0b, 0x02, 0x0e, 0x97, 0xf8, 0x08, 0x84, 0xd6, 0x2a, 0x09, 0xa2, 0xf7, 0x81, 0x1f, 0x31, 0x03, 0x11, 0x04, 0x08, 0x81, 0x8c, 0x89, 0x04, 0x6b, 0x05, 0x0d, 0x03, 0x09, 0x07, 0x10, 0x93, 0x60, 0x80, 0xf6, 0x0a, 0x73, 0x08, 0x6e, 0x17, 0x46, 0x80, 0x9a, 0x14, 0x0c, 0x57, 0x09, 0x19, 0x80, 0x87, 0x81, 0x47, 0x03, 0x85, 0x42, 0x0f, 0x15, 0x85, 0x50, 0x2b, 0x80, 0xd5, 0x2d, 0x03, 0x1a, 0x04, 0x02, 0x81, 0x70, 0x3a, 0x05, 0x01, 0x85, 0x00, 0x80, 0xd7, 0x29, 0x4c, 0x04, 0x0a, 0x04, 0x02, 0x83, 0x11, 0x44, 0x4c, 0x3d, 0x80, 0xc2, 0x3c, 0x06, 0x01, 0x04, 0x55, 0x05, 0x1b, 0x34, 0x02, 0x81, 0x0e, 0x2c, 0x04, 0x64, 0x0c, 0x56, 0x0a, 0x80, 0xae, 0x38, 0x1d, 0x0d, 0x2c, 0x04, 0x09, 0x07, 0x02, 0x0e, 0x06, 0x80, 0x9a, 0x83, 0xd8, 0x08, 0x0d, 0x03, 0x0d, 0x03, 0x74, 0x0c, 0x59, 0x07, 0x0c, 0x14, 0x0c, 0x04, 0x38, 0x08, 0x0a, 0x06, 0x28, 0x08, 0x22, 0x4e, 0x81, 0x54, 0x0c, 0x15, 0x03, 0x03, 0x05, 0x07, 0x09, 0x19, 0x07, 0x07, 0x09, 0x03, 0x0d, 0x07, 0x29, 0x80, 0xcb, 0x25, 0x0a, 0x84, 0x06, }; auto lower = static_cast(cp); if (cp < 0x10000) { return is_printable(lower, singletons0, sizeof(singletons0) / sizeof(*singletons0), singletons0_lower, normal0, sizeof(normal0)); } if (cp < 0x20000) { return is_printable(lower, singletons1, sizeof(singletons1) / sizeof(*singletons1), singletons1_lower, normal1, sizeof(normal1)); } if (0x2a6de <= cp && cp < 0x2a700) return false; if (0x2b735 <= cp && cp < 0x2b740) return false; if (0x2b81e <= cp && cp < 0x2b820) return false; if (0x2cea2 <= cp && cp < 0x2ceb0) return false; if (0x2ebe1 <= cp && cp < 0x2f800) return false; if (0x2fa1e <= cp && cp < 0x30000) return false; if (0x3134b <= cp && cp < 0xe0100) return false; if (0xe01f0 <= cp && cp < 0x110000) return false; return cp < 
0x110000; } } // namespace detail FMT_END_NAMESPACE #endif // FMT_FORMAT_INL_H_ ================================================ FILE: vendor/fmt/format.h ================================================ /* Formatting library for C++ Copyright (c) 2012 - present, Victor Zverovich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --- Optional exception to the license --- As an exception, if, as a result of your compiling your source code, portions of this Software are embedded into a machine-executable object form of such source code, you may redistribute such embedded portions in such object form without including the above copyright and permission notices. 
*/ #ifndef FMT_FORMAT_H_ #define FMT_FORMAT_H_ #ifndef _LIBCPP_REMOVE_TRANSITIVE_INCLUDES # define _LIBCPP_REMOVE_TRANSITIVE_INCLUDES # define FMT_REMOVE_TRANSITIVE_INCLUDES #endif #include "base.h" #ifndef FMT_MODULE # include // std::signbit # include // uint32_t # include // std::memcpy # include // std::initializer_list # include // std::numeric_limits # if defined(__GLIBCXX__) && !defined(_GLIBCXX_USE_DUAL_ABI) // Workaround for pre gcc 5 libstdc++. # include // std::allocator_traits # endif # include // std::runtime_error # include // std::string # include // std::system_error // Checking FMT_CPLUSPLUS for warning suppression in MSVC. # if FMT_HAS_INCLUDE() && FMT_CPLUSPLUS > 201703L # include // std::bit_cast # endif // libc++ supports string_view in pre-c++17. # if FMT_HAS_INCLUDE() && \ (FMT_CPLUSPLUS >= 201703L || defined(_LIBCPP_VERSION)) # include # define FMT_USE_STRING_VIEW # endif #endif // FMT_MODULE #if defined __cpp_inline_variables && __cpp_inline_variables >= 201606L # define FMT_INLINE_VARIABLE inline #else # define FMT_INLINE_VARIABLE #endif #ifndef FMT_NO_UNIQUE_ADDRESS # if FMT_CPLUSPLUS >= 202002L # if FMT_HAS_CPP_ATTRIBUTE(no_unique_address) # define FMT_NO_UNIQUE_ADDRESS [[no_unique_address]] // VS2019 v16.10 and later except clang-cl (https://reviews.llvm.org/D110485). # elif (FMT_MSC_VERSION >= 1929) && !FMT_CLANG_VERSION # define FMT_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]] # endif # endif #endif #ifndef FMT_NO_UNIQUE_ADDRESS # define FMT_NO_UNIQUE_ADDRESS #endif // Visibility when compiled as a shared library/object. 
#if defined(FMT_LIB_EXPORT) || defined(FMT_SHARED) # define FMT_SO_VISIBILITY(value) FMT_VISIBILITY(value) #else # define FMT_SO_VISIBILITY(value) #endif #ifdef __has_builtin # define FMT_HAS_BUILTIN(x) __has_builtin(x) #else # define FMT_HAS_BUILTIN(x) 0 #endif #if FMT_GCC_VERSION || FMT_CLANG_VERSION # define FMT_NOINLINE __attribute__((noinline)) #else # define FMT_NOINLINE #endif namespace std { template <> struct iterator_traits { using iterator_category = output_iterator_tag; using value_type = char; }; } // namespace std #ifndef FMT_THROW # if FMT_EXCEPTIONS # if FMT_MSC_VERSION || defined(__NVCC__) FMT_BEGIN_NAMESPACE namespace detail { template inline void do_throw(const Exception& x) { // Silence unreachable code warnings in MSVC and NVCC because these // are nearly impossible to fix in a generic code. volatile bool b = true; if (b) throw x; } } // namespace detail FMT_END_NAMESPACE # define FMT_THROW(x) detail::do_throw(x) # else # define FMT_THROW(x) throw x # endif # else # define FMT_THROW(x) \ ::fmt::detail::assert_fail(__FILE__, __LINE__, (x).what()) # endif #endif #ifndef FMT_MAYBE_UNUSED # if FMT_HAS_CPP17_ATTRIBUTE(maybe_unused) # define FMT_MAYBE_UNUSED [[maybe_unused]] # else # define FMT_MAYBE_UNUSED # endif #endif #ifndef FMT_USE_USER_DEFINED_LITERALS // EDG based compilers (Intel, NVIDIA, Elbrus, etc), GCC and MSVC support UDLs. // // GCC before 4.9 requires a space in `operator"" _a` which is invalid in later // compiler versions. # if (FMT_HAS_FEATURE(cxx_user_literals) || FMT_GCC_VERSION >= 409 || \ FMT_MSC_VERSION >= 1900) && \ (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= /* UDL feature */ 480) # define FMT_USE_USER_DEFINED_LITERALS 1 # else # define FMT_USE_USER_DEFINED_LITERALS 0 # endif #endif // Defining FMT_REDUCE_INT_INSTANTIATIONS to 1, will reduce the number of // integer formatter template instantiations to just one by only using the // largest integer type. 
This results in a reduction in binary size but will // cause a decrease in integer formatting performance. #if !defined(FMT_REDUCE_INT_INSTANTIATIONS) # define FMT_REDUCE_INT_INSTANTIATIONS 0 #endif // __builtin_clz is broken in clang with Microsoft CodeGen: // https://github.com/fmtlib/fmt/issues/519. #if !FMT_MSC_VERSION # if FMT_HAS_BUILTIN(__builtin_clz) || FMT_GCC_VERSION || FMT_ICC_VERSION # define FMT_BUILTIN_CLZ(n) __builtin_clz(n) # endif # if FMT_HAS_BUILTIN(__builtin_clzll) || FMT_GCC_VERSION || FMT_ICC_VERSION # define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n) # endif #endif // __builtin_ctz is broken in Intel Compiler Classic on Windows: // https://github.com/fmtlib/fmt/issues/2510. #ifndef __ICL # if FMT_HAS_BUILTIN(__builtin_ctz) || FMT_GCC_VERSION || FMT_ICC_VERSION || \ defined(__NVCOMPILER) # define FMT_BUILTIN_CTZ(n) __builtin_ctz(n) # endif # if FMT_HAS_BUILTIN(__builtin_ctzll) || FMT_GCC_VERSION || \ FMT_ICC_VERSION || defined(__NVCOMPILER) # define FMT_BUILTIN_CTZLL(n) __builtin_ctzll(n) # endif #endif #if FMT_MSC_VERSION # include // _BitScanReverse[64], _BitScanForward[64], _umul128 #endif // Some compilers masquerade as both MSVC and GCC-likes or otherwise support // __builtin_clz and __builtin_clzll, so only define FMT_BUILTIN_CLZ using the // MSVC intrinsics if the clz and clzll builtins are not available. #if FMT_MSC_VERSION && !defined(FMT_BUILTIN_CLZLL) && \ !defined(FMT_BUILTIN_CTZLL) FMT_BEGIN_NAMESPACE namespace detail { // Avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning. 
# if !defined(__clang__) # pragma intrinsic(_BitScanForward) # pragma intrinsic(_BitScanReverse) # if defined(_WIN64) # pragma intrinsic(_BitScanForward64) # pragma intrinsic(_BitScanReverse64) # endif # endif inline auto clz(uint32_t x) -> int { unsigned long r = 0; _BitScanReverse(&r, x); FMT_ASSERT(x != 0, ""); // Static analysis complains about using uninitialized data // "r", but the only way that can happen is if "x" is 0, // which the callers guarantee to not happen. FMT_MSC_WARNING(suppress : 6102) return 31 ^ static_cast(r); } # define FMT_BUILTIN_CLZ(n) detail::clz(n) inline auto clzll(uint64_t x) -> int { unsigned long r = 0; # ifdef _WIN64 _BitScanReverse64(&r, x); # else // Scan the high 32 bits. if (_BitScanReverse(&r, static_cast(x >> 32))) return 63 ^ static_cast(r + 32); // Scan the low 32 bits. _BitScanReverse(&r, static_cast(x)); # endif FMT_ASSERT(x != 0, ""); FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. return 63 ^ static_cast(r); } # define FMT_BUILTIN_CLZLL(n) detail::clzll(n) inline auto ctz(uint32_t x) -> int { unsigned long r = 0; _BitScanForward(&r, x); FMT_ASSERT(x != 0, ""); FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. return static_cast(r); } # define FMT_BUILTIN_CTZ(n) detail::ctz(n) inline auto ctzll(uint64_t x) -> int { unsigned long r = 0; FMT_ASSERT(x != 0, ""); FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. # ifdef _WIN64 _BitScanForward64(&r, x); # else // Scan the low 32 bits. if (_BitScanForward(&r, static_cast(x))) return static_cast(r); // Scan the high 32 bits. 
_BitScanForward(&r, static_cast(x >> 32)); r += 32; # endif return static_cast(r); } # define FMT_BUILTIN_CTZLL(n) detail::ctzll(n) } // namespace detail FMT_END_NAMESPACE #endif FMT_BEGIN_NAMESPACE template struct is_contiguous> : std::true_type {}; namespace detail { FMT_CONSTEXPR inline void abort_fuzzing_if(bool condition) { ignore_unused(condition); #ifdef FMT_FUZZ if (condition) throw std::runtime_error("fuzzing limit reached"); #endif } #if defined(FMT_USE_STRING_VIEW) template using std_string_view = std::basic_string_view; #else template struct std_string_view {}; #endif // Implementation of std::bit_cast for pre-C++20. template FMT_CONSTEXPR20 auto bit_cast(const From& from) -> To { #ifdef __cpp_lib_bit_cast if (is_constant_evaluated()) return std::bit_cast(from); #endif auto to = To(); // The cast suppresses a bogus -Wclass-memaccess on GCC. std::memcpy(static_cast(&to), &from, sizeof(to)); return to; } inline auto is_big_endian() -> bool { #ifdef _WIN32 return false; #elif defined(__BIG_ENDIAN__) return true; #elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) return __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; #else struct bytes { char data[sizeof(int)]; }; return bit_cast(1).data[0] == 0; #endif } class uint128_fallback { private: uint64_t lo_, hi_; public: constexpr uint128_fallback(uint64_t hi, uint64_t lo) : lo_(lo), hi_(hi) {} constexpr uint128_fallback(uint64_t value = 0) : lo_(value), hi_(0) {} constexpr auto high() const noexcept -> uint64_t { return hi_; } constexpr auto low() const noexcept -> uint64_t { return lo_; } template ::value)> constexpr explicit operator T() const { return static_cast(lo_); } friend constexpr auto operator==(const uint128_fallback& lhs, const uint128_fallback& rhs) -> bool { return lhs.hi_ == rhs.hi_ && lhs.lo_ == rhs.lo_; } friend constexpr auto operator!=(const uint128_fallback& lhs, const uint128_fallback& rhs) -> bool { return !(lhs == rhs); } friend constexpr auto operator>(const uint128_fallback& lhs, 
const uint128_fallback& rhs) -> bool { return lhs.hi_ != rhs.hi_ ? lhs.hi_ > rhs.hi_ : lhs.lo_ > rhs.lo_; } friend constexpr auto operator|(const uint128_fallback& lhs, const uint128_fallback& rhs) -> uint128_fallback { return {lhs.hi_ | rhs.hi_, lhs.lo_ | rhs.lo_}; } friend constexpr auto operator&(const uint128_fallback& lhs, const uint128_fallback& rhs) -> uint128_fallback { return {lhs.hi_ & rhs.hi_, lhs.lo_ & rhs.lo_}; } friend constexpr auto operator~(const uint128_fallback& n) -> uint128_fallback { return {~n.hi_, ~n.lo_}; } friend auto operator+(const uint128_fallback& lhs, const uint128_fallback& rhs) -> uint128_fallback { auto result = uint128_fallback(lhs); result += rhs; return result; } friend auto operator*(const uint128_fallback& lhs, uint32_t rhs) -> uint128_fallback { FMT_ASSERT(lhs.hi_ == 0, ""); uint64_t hi = (lhs.lo_ >> 32) * rhs; uint64_t lo = (lhs.lo_ & ~uint32_t()) * rhs; uint64_t new_lo = (hi << 32) + lo; return {(hi >> 32) + (new_lo < lo ? 1 : 0), new_lo}; } friend auto operator-(const uint128_fallback& lhs, uint64_t rhs) -> uint128_fallback { return {lhs.hi_ - (lhs.lo_ < rhs ? 1 : 0), lhs.lo_ - rhs}; } FMT_CONSTEXPR auto operator>>(int shift) const -> uint128_fallback { if (shift == 64) return {0, hi_}; if (shift > 64) return uint128_fallback(0, hi_) >> (shift - 64); return {hi_ >> shift, (hi_ << (64 - shift)) | (lo_ >> shift)}; } FMT_CONSTEXPR auto operator<<(int shift) const -> uint128_fallback { if (shift == 64) return {lo_, 0}; if (shift > 64) return uint128_fallback(lo_, 0) << (shift - 64); return {hi_ << shift | (lo_ >> (64 - shift)), (lo_ << shift)}; } FMT_CONSTEXPR auto operator>>=(int shift) -> uint128_fallback& { return *this = *this >> shift; } FMT_CONSTEXPR void operator+=(uint128_fallback n) { uint64_t new_lo = lo_ + n.lo_; uint64_t new_hi = hi_ + n.hi_ + (new_lo < lo_ ? 
1 : 0); FMT_ASSERT(new_hi >= hi_, ""); lo_ = new_lo; hi_ = new_hi; } FMT_CONSTEXPR void operator&=(uint128_fallback n) { lo_ &= n.lo_; hi_ &= n.hi_; } FMT_CONSTEXPR20 auto operator+=(uint64_t n) noexcept -> uint128_fallback& { if (is_constant_evaluated()) { lo_ += n; hi_ += (lo_ < n ? 1 : 0); return *this; } #if FMT_HAS_BUILTIN(__builtin_addcll) && !defined(__ibmxl__) unsigned long long carry; lo_ = __builtin_addcll(lo_, n, 0, &carry); hi_ += carry; #elif FMT_HAS_BUILTIN(__builtin_ia32_addcarryx_u64) && !defined(__ibmxl__) unsigned long long result; auto carry = __builtin_ia32_addcarryx_u64(0, lo_, n, &result); lo_ = result; hi_ += carry; #elif defined(_MSC_VER) && defined(_M_X64) auto carry = _addcarry_u64(0, lo_, n, &lo_); _addcarry_u64(carry, hi_, 0, &hi_); #else lo_ += n; hi_ += (lo_ < n ? 1 : 0); #endif return *this; } }; using uint128_t = conditional_t; #ifdef UINTPTR_MAX using uintptr_t = ::uintptr_t; #else using uintptr_t = uint128_t; #endif // Returns the largest possible value for type T. Same as // std::numeric_limits::max() but shorter and not affected by the max macro. template constexpr auto max_value() -> T { return (std::numeric_limits::max)(); } template constexpr auto num_bits() -> int { return std::numeric_limits::digits; } // std::numeric_limits::digits may return 0 for 128-bit ints. template <> constexpr auto num_bits() -> int { return 128; } template <> constexpr auto num_bits() -> int { return 128; } template <> constexpr auto num_bits() -> int { return 128; } // A heterogeneous bit_cast used for converting 96-bit long double to uint128_t // and 128-bit pointers to uint128_fallback. 
template sizeof(From))> inline auto bit_cast(const From& from) -> To { constexpr auto size = static_cast(sizeof(From) / sizeof(unsigned)); struct data_t { unsigned value[static_cast(size)]; } data = bit_cast(from); auto result = To(); if (const_check(is_big_endian())) { for (int i = 0; i < size; ++i) result = (result << num_bits()) | data.value[i]; } else { for (int i = size - 1; i >= 0; --i) result = (result << num_bits()) | data.value[i]; } return result; } template FMT_CONSTEXPR20 inline auto countl_zero_fallback(UInt n) -> int { int lz = 0; constexpr UInt msb_mask = static_cast(1) << (num_bits() - 1); for (; (n & msb_mask) == 0; n <<= 1) lz++; return lz; } FMT_CONSTEXPR20 inline auto countl_zero(uint32_t n) -> int { #ifdef FMT_BUILTIN_CLZ if (!is_constant_evaluated()) return FMT_BUILTIN_CLZ(n); #endif return countl_zero_fallback(n); } FMT_CONSTEXPR20 inline auto countl_zero(uint64_t n) -> int { #ifdef FMT_BUILTIN_CLZLL if (!is_constant_evaluated()) return FMT_BUILTIN_CLZLL(n); #endif return countl_zero_fallback(n); } FMT_INLINE void assume(bool condition) { (void)condition; #if FMT_HAS_BUILTIN(__builtin_assume) && !FMT_ICC_VERSION __builtin_assume(condition); #elif FMT_GCC_VERSION if (!condition) __builtin_unreachable(); #endif } // An approximation of iterator_t for pre-C++20 systems. template using iterator_t = decltype(std::begin(std::declval())); template using sentinel_t = decltype(std::end(std::declval())); // A workaround for std::string not having mutable data() until C++17. template inline auto get_data(std::basic_string& s) -> Char* { return &s[0]; } template inline auto get_data(Container& c) -> typename Container::value_type* { return c.data(); } // Attempts to reserve space for n extra characters in the output range. // Returns a pointer to the reserved range or a reference to it. 
template ::value&& is_contiguous::value)> #if FMT_CLANG_VERSION >= 307 && !FMT_ICC_VERSION __attribute__((no_sanitize("undefined"))) #endif inline auto reserve(OutputIt it, size_t n) -> typename OutputIt::value_type* { auto& c = get_container(it); size_t size = c.size(); c.resize(size + n); return get_data(c) + size; } template inline auto reserve(basic_appender it, size_t n) -> basic_appender { buffer& buf = get_container(it); buf.try_reserve(buf.size() + n); return it; } template constexpr auto reserve(Iterator& it, size_t) -> Iterator& { return it; } template using reserve_iterator = remove_reference_t(), 0))>; template constexpr auto to_pointer(OutputIt, size_t) -> T* { return nullptr; } template auto to_pointer(basic_appender it, size_t n) -> T* { buffer& buf = get_container(it); auto size = buf.size(); buf.try_reserve(size + n); if (buf.capacity() < size + n) return nullptr; buf.try_resize(size + n); return buf.data() + size; } template ::value&& is_contiguous::value)> inline auto base_iterator(OutputIt it, typename OutputIt::container_type::value_type*) -> OutputIt { return it; } template constexpr auto base_iterator(Iterator, Iterator it) -> Iterator { return it; } // is spectacularly slow to compile in C++20 so use a simple fill_n // instead (#1998). template FMT_CONSTEXPR auto fill_n(OutputIt out, Size count, const T& value) -> OutputIt { for (Size i = 0; i < count; ++i) *out++ = value; return out; } template FMT_CONSTEXPR20 auto fill_n(T* out, Size count, char value) -> T* { if (is_constant_evaluated()) { return fill_n(out, count, value); } std::memset(out, value, to_unsigned(count)); return out + count; } template FMT_CONSTEXPR FMT_NOINLINE auto copy_noinline(InputIt begin, InputIt end, OutputIt out) -> OutputIt { return copy(begin, end, out); } // A public domain branchless UTF-8 decoder by Christopher Wellons: // https://github.com/skeeto/branchless-utf8 /* Decode the next character, c, from s, reporting errors in e. 
* * Since this is a branchless decoder, four bytes will be read from the * buffer regardless of the actual length of the next character. This * means the buffer _must_ have at least three bytes of zero padding * following the end of the data stream. * * Errors are reported in e, which will be non-zero if the parsed * character was somehow invalid: invalid byte sequence, non-canonical * encoding, or a surrogate half. * * The function returns a pointer to the next character. When an error * occurs, this pointer will be a guess that depends on the particular * error, but it will always advance at least one byte. */ FMT_CONSTEXPR inline auto utf8_decode(const char* s, uint32_t* c, int* e) -> const char* { constexpr const int masks[] = {0x00, 0x7f, 0x1f, 0x0f, 0x07}; constexpr const uint32_t mins[] = {4194304, 0, 128, 2048, 65536}; constexpr const int shiftc[] = {0, 18, 12, 6, 0}; constexpr const int shifte[] = {0, 6, 4, 2, 0}; int len = "\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0\0\0\2\2\2\2\3\3\4" [static_cast(*s) >> 3]; // Compute the pointer to the next character early so that the next // iteration can start working on the next character. Neither Clang // nor GCC figure out this reordering on their own. const char* next = s + len + !len; using uchar = unsigned char; // Assume a four-byte character and load four bytes. Unused bits are // shifted out. *c = uint32_t(uchar(s[0]) & masks[len]) << 18; *c |= uint32_t(uchar(s[1]) & 0x3f) << 12; *c |= uint32_t(uchar(s[2]) & 0x3f) << 6; *c |= uint32_t(uchar(s[3]) & 0x3f) << 0; *c >>= shiftc[len]; // Accumulate the various error conditions. *e = (*c < mins[len]) << 6; // non-canonical encoding *e |= ((*c >> 11) == 0x1b) << 7; // surrogate half? *e |= (*c > 0x10FFFF) << 8; // out of range? *e |= (uchar(s[1]) & 0xc0) >> 2; *e |= (uchar(s[2]) & 0xc0) >> 4; *e |= uchar(s[3]) >> 6; *e ^= 0x2a; // top two bits of each tail byte correct? 
*e >>= shifte[len]; return next; } constexpr FMT_INLINE_VARIABLE uint32_t invalid_code_point = ~uint32_t(); // Invokes f(cp, sv) for every code point cp in s with sv being the string view // corresponding to the code point. cp is invalid_code_point on error. template FMT_CONSTEXPR void for_each_codepoint(string_view s, F f) { auto decode = [f](const char* buf_ptr, const char* ptr) { auto cp = uint32_t(); auto error = 0; auto end = utf8_decode(buf_ptr, &cp, &error); bool result = f(error ? invalid_code_point : cp, string_view(ptr, error ? 1 : to_unsigned(end - buf_ptr))); return result ? (error ? buf_ptr + 1 : end) : nullptr; }; auto p = s.data(); const size_t block_size = 4; // utf8_decode always reads blocks of 4 chars. if (s.size() >= block_size) { for (auto end = p + s.size() - block_size + 1; p < end;) { p = decode(p, p); if (!p) return; } } if (auto num_chars_left = s.data() + s.size() - p) { char buf[2 * block_size - 1] = {}; copy(p, p + num_chars_left, buf); const char* buf_ptr = buf; do { auto end = decode(buf_ptr, p); if (!end) return; p += end - buf_ptr; buf_ptr = end; } while (buf_ptr - buf < num_chars_left); } } template inline auto compute_width(basic_string_view s) -> size_t { return s.size(); } // Computes approximate display width of a UTF-8 string. FMT_CONSTEXPR inline auto compute_width(string_view s) -> size_t { size_t num_code_points = 0; // It is not a lambda for compatibility with C++14. struct count_code_points { size_t* count; FMT_CONSTEXPR auto operator()(uint32_t cp, string_view) const -> bool { *count += detail::to_unsigned( 1 + (cp >= 0x1100 && (cp <= 0x115f || // Hangul Jamo init. consonants cp == 0x2329 || // LEFT-POINTING ANGLE BRACKET cp == 0x232a || // RIGHT-POINTING ANGLE BRACKET // CJK ... 
Yi except IDEOGRAPHIC HALF FILL SPACE: (cp >= 0x2e80 && cp <= 0xa4cf && cp != 0x303f) || (cp >= 0xac00 && cp <= 0xd7a3) || // Hangul Syllables (cp >= 0xf900 && cp <= 0xfaff) || // CJK Compatibility Ideographs (cp >= 0xfe10 && cp <= 0xfe19) || // Vertical Forms (cp >= 0xfe30 && cp <= 0xfe6f) || // CJK Compatibility Forms (cp >= 0xff00 && cp <= 0xff60) || // Fullwidth Forms (cp >= 0xffe0 && cp <= 0xffe6) || // Fullwidth Forms (cp >= 0x20000 && cp <= 0x2fffd) || // CJK (cp >= 0x30000 && cp <= 0x3fffd) || // Miscellaneous Symbols and Pictographs + Emoticons: (cp >= 0x1f300 && cp <= 0x1f64f) || // Supplemental Symbols and Pictographs: (cp >= 0x1f900 && cp <= 0x1f9ff)))); return true; } }; // We could avoid branches by using utf8_decode directly. for_each_codepoint(s, count_code_points{&num_code_points}); return num_code_points; } template inline auto code_point_index(basic_string_view s, size_t n) -> size_t { size_t size = s.size(); return n < size ? n : size; } // Calculates the index of the nth code point in a UTF-8 string. inline auto code_point_index(string_view s, size_t n) -> size_t { size_t result = s.size(); const char* begin = s.begin(); for_each_codepoint(s, [begin, &n, &result](uint32_t, string_view sv) { if (n != 0) { --n; return true; } result = to_unsigned(sv.begin() - begin); return false; }); return result; } template struct is_integral : std::is_integral {}; template <> struct is_integral : std::true_type {}; template <> struct is_integral : std::true_type {}; template using is_signed = std::integral_constant::is_signed || std::is_same::value>; template using is_integer = bool_constant::value && !std::is_same::value && !std::is_same::value && !std::is_same::value>; #ifndef FMT_USE_FLOAT # define FMT_USE_FLOAT 1 #endif #ifndef FMT_USE_DOUBLE # define FMT_USE_DOUBLE 1 #endif #ifndef FMT_USE_LONG_DOUBLE # define FMT_USE_LONG_DOUBLE 1 #endif #if defined(FMT_USE_FLOAT128) // Use the provided definition. 
#elif FMT_CLANG_VERSION && FMT_HAS_INCLUDE() # define FMT_USE_FLOAT128 1 #elif FMT_GCC_VERSION && defined(_GLIBCXX_USE_FLOAT128) && \ !defined(__STRICT_ANSI__) # define FMT_USE_FLOAT128 1 #else # define FMT_USE_FLOAT128 0 #endif #if FMT_USE_FLOAT128 using float128 = __float128; #else using float128 = void; #endif template using is_float128 = std::is_same; template using is_floating_point = bool_constant::value || is_float128::value>; template ::value> struct is_fast_float : bool_constant::is_iec559 && sizeof(T) <= sizeof(double)> {}; template struct is_fast_float : std::false_type {}; template using is_double_double = bool_constant::digits == 106>; #ifndef FMT_USE_FULL_CACHE_DRAGONBOX # define FMT_USE_FULL_CACHE_DRAGONBOX 0 #endif template struct is_locale : std::false_type {}; template struct is_locale> : std::true_type {}; } // namespace detail FMT_BEGIN_EXPORT // The number of characters to store in the basic_memory_buffer object itself // to avoid dynamic memory allocation. enum { inline_buffer_size = 500 }; /** * A dynamically growing memory buffer for trivially copyable/constructible * types with the first `SIZE` elements stored in the object itself. Most * commonly used via the `memory_buffer` alias for `char`. * * **Example**: * * auto out = fmt::memory_buffer(); * fmt::format_to(std::back_inserter(out), "The answer is {}.", 42); * * This will append "The answer is 42." to `out`. The buffer content can be * converted to `std::string` with `to_string(out)`. */ template > class basic_memory_buffer : public detail::buffer { private: T store_[SIZE]; // Don't inherit from Allocator to avoid generating type_info for it. FMT_NO_UNIQUE_ADDRESS Allocator alloc_; // Deallocate memory allocated by the buffer. 
FMT_CONSTEXPR20 void deallocate() { T* data = this->data(); if (data != store_) alloc_.deallocate(data, this->capacity()); } static FMT_CONSTEXPR20 void grow(detail::buffer& buf, size_t size) { detail::abort_fuzzing_if(size > 5000); auto& self = static_cast(buf); const size_t max_size = std::allocator_traits::max_size(self.alloc_); size_t old_capacity = buf.capacity(); size_t new_capacity = old_capacity + old_capacity / 2; if (size > new_capacity) new_capacity = size; else if (new_capacity > max_size) new_capacity = size > max_size ? size : max_size; T* old_data = buf.data(); T* new_data = self.alloc_.allocate(new_capacity); // Suppress a bogus -Wstringop-overflow in gcc 13.1 (#3481). detail::assume(buf.size() <= new_capacity); // The following code doesn't throw, so the raw pointer above doesn't leak. memcpy(new_data, old_data, buf.size() * sizeof(T)); self.set(new_data, new_capacity); // deallocate must not throw according to the standard, but even if it does, // the buffer already uses the new storage and will deallocate it in // destructor. if (old_data != self.store_) self.alloc_.deallocate(old_data, old_capacity); } public: using value_type = T; using const_reference = const T&; FMT_CONSTEXPR20 explicit basic_memory_buffer( const Allocator& alloc = Allocator()) : detail::buffer(grow), alloc_(alloc) { this->set(store_, SIZE); if (detail::is_constant_evaluated()) detail::fill_n(store_, SIZE, T()); } FMT_CONSTEXPR20 ~basic_memory_buffer() { deallocate(); } private: // Move data from other to this buffer. FMT_CONSTEXPR20 void move(basic_memory_buffer& other) { alloc_ = std::move(other.alloc_); T* data = other.data(); size_t size = other.size(), capacity = other.capacity(); if (data == other.store_) { this->set(store_, capacity); detail::copy(other.store_, other.store_ + size, store_); } else { this->set(data, capacity); // Set pointer to the inline array so that delete is not called // when deallocating. 
other.set(other.store_, 0); other.clear(); } this->resize(size); } public: /// Constructs a `basic_memory_buffer` object moving the content of the other /// object to it. FMT_CONSTEXPR20 basic_memory_buffer(basic_memory_buffer&& other) noexcept : detail::buffer(grow) { move(other); } /// Moves the content of the other `basic_memory_buffer` object to this one. auto operator=(basic_memory_buffer&& other) noexcept -> basic_memory_buffer& { FMT_ASSERT(this != &other, ""); deallocate(); move(other); return *this; } // Returns a copy of the allocator associated with this buffer. auto get_allocator() const -> Allocator { return alloc_; } /// Resizes the buffer to contain `count` elements. If T is a POD type new /// elements may not be initialized. FMT_CONSTEXPR20 void resize(size_t count) { this->try_resize(count); } /// Increases the buffer capacity to `new_capacity`. void reserve(size_t new_capacity) { this->try_reserve(new_capacity); } using detail::buffer::append; template void append(const ContiguousRange& range) { append(range.data(), range.data() + range.size()); } }; using memory_buffer = basic_memory_buffer; template struct is_contiguous> : std::true_type { }; FMT_END_EXPORT namespace detail { FMT_API auto write_console(int fd, string_view text) -> bool; FMT_API void print(std::FILE*, string_view); } // namespace detail FMT_BEGIN_EXPORT // Suppress a misleading warning in older versions of clang. #if FMT_CLANG_VERSION # pragma clang diagnostic ignored "-Wweak-vtables" #endif /// An error reported from a formatting function. class FMT_SO_VISIBILITY("default") format_error : public std::runtime_error { public: using std::runtime_error::runtime_error; }; namespace detail_exported { #if FMT_USE_NONTYPE_TEMPLATE_ARGS template struct fixed_string { constexpr fixed_string(const Char (&str)[N]) { detail::copy(static_cast(str), str + N, data); } Char data[N] = {}; }; #endif // Converts a compile-time string to basic_string_view. 
template constexpr auto compile_string_to_view(const Char (&s)[N]) -> basic_string_view { // Remove trailing NUL character if needed. Won't be present if this is used // with a raw character array (i.e. not defined as a string). return {s, N - (std::char_traits::to_int_type(s[N - 1]) == 0 ? 1 : 0)}; } template constexpr auto compile_string_to_view(basic_string_view s) -> basic_string_view { return s; } } // namespace detail_exported // A generic formatting context with custom output iterator and character // (code unit) support. Char is the format string code unit type which can be // different from OutputIt::value_type. template class generic_context { private: OutputIt out_; basic_format_args args_; detail::locale_ref loc_; public: using char_type = Char; using iterator = OutputIt; using parse_context_type = basic_format_parse_context; template using formatter_type = formatter; constexpr generic_context(OutputIt out, basic_format_args ctx_args, detail::locale_ref loc = {}) : out_(out), args_(ctx_args), loc_(loc) {} generic_context(generic_context&&) = default; generic_context(const generic_context&) = delete; void operator=(const generic_context&) = delete; constexpr auto arg(int id) const -> basic_format_arg { return args_.get(id); } auto arg(basic_string_view name) -> basic_format_arg { return args_.get(name); } FMT_CONSTEXPR auto arg_id(basic_string_view name) -> int { return args_.get_id(name); } auto args() const -> const basic_format_args& { return args_; } FMT_CONSTEXPR auto out() -> iterator { return out_; } void advance_to(iterator it) { if (!detail::is_back_insert_iterator()) out_ = it; } FMT_CONSTEXPR auto locale() -> detail::locale_ref { return loc_; } }; class loc_value { private: basic_format_arg value_; public: template ::value)> loc_value(T value) : value_(detail::make_arg(value)) {} template ::value)> loc_value(T) {} template auto visit(Visitor&& vis) -> decltype(vis(0)) { return value_.visit(vis); } }; // A locale facet that formats values in 
UTF-8. // It is parameterized on the locale to avoid the heavy include. template class format_facet : public Locale::facet { private: std::string separator_; std::string grouping_; std::string decimal_point_; protected: virtual auto do_put(appender out, loc_value val, const format_specs& specs) const -> bool; public: static FMT_API typename Locale::id id; explicit format_facet(Locale& loc); explicit format_facet(string_view sep = "", std::initializer_list g = {3}, std::string decimal_point = ".") : separator_(sep.data(), sep.size()), grouping_(g.begin(), g.end()), decimal_point_(decimal_point) {} auto put(appender out, loc_value val, const format_specs& specs) const -> bool { return do_put(out, val, specs); } }; FMT_END_EXPORT namespace detail { // Returns true if value is negative, false otherwise. // Same as `value < 0` but doesn't produce warnings if T is an unsigned type. template ::value)> constexpr auto is_negative(T value) -> bool { return value < 0; } template ::value)> constexpr auto is_negative(T) -> bool { return false; } template FMT_CONSTEXPR auto is_supported_floating_point(T) -> bool { if (std::is_same()) return FMT_USE_FLOAT; if (std::is_same()) return FMT_USE_DOUBLE; if (std::is_same()) return FMT_USE_LONG_DOUBLE; return true; } // Smallest of uint32_t, uint64_t, uint128_t that is large enough to // represent all values of an integral type T. template using uint32_or_64_or_128_t = conditional_t() <= 32 && !FMT_REDUCE_INT_INSTANTIATIONS, uint32_t, conditional_t() <= 64, uint64_t, uint128_t>>; template using uint64_or_128_t = conditional_t() <= 64, uint64_t, uint128_t>; #define FMT_POWERS_OF_10(factor) \ factor * 10, (factor) * 100, (factor) * 1000, (factor) * 10000, \ (factor) * 100000, (factor) * 1000000, (factor) * 10000000, \ (factor) * 100000000, (factor) * 1000000000 // Converts value in the range [0, 100) to a string. constexpr auto digits2(size_t value) -> const char* { // GCC generates slightly better code when value is pointer-size. 
return &"0001020304050607080910111213141516171819" "2021222324252627282930313233343536373839" "4041424344454647484950515253545556575859" "6061626364656667686970717273747576777879" "8081828384858687888990919293949596979899"[value * 2]; } // Sign is a template parameter to workaround a bug in gcc 4.8. template constexpr auto sign(Sign s) -> Char { #if !FMT_GCC_VERSION || FMT_GCC_VERSION >= 604 static_assert(std::is_same::value, ""); #endif return static_cast(((' ' << 24) | ('+' << 16) | ('-' << 8)) >> (s * 8)); } template FMT_CONSTEXPR auto count_digits_fallback(T n) -> int { int count = 1; for (;;) { // Integer division is slow so do it for a group of four digits instead // of for every digit. The idea comes from the talk by Alexandrescu // "Three Optimization Tips for C++". See speed-test for a comparison. if (n < 10) return count; if (n < 100) return count + 1; if (n < 1000) return count + 2; if (n < 10000) return count + 3; n /= 10000u; count += 4; } } #if FMT_USE_INT128 FMT_CONSTEXPR inline auto count_digits(uint128_opt n) -> int { return count_digits_fallback(n); } #endif #ifdef FMT_BUILTIN_CLZLL // It is a separate function rather than a part of count_digits to workaround // the lack of static constexpr in constexpr functions. inline auto do_count_digits(uint64_t n) -> int { // This has comparable performance to the version by Kendall Willets // (https://github.com/fmtlib/format-benchmark/blob/master/digits10) // but uses smaller tables. // Maps bsr(n) to ceil(log10(pow(2, bsr(n) + 1) - 1)). 
static constexpr uint8_t bsr2log10[] = { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20}; auto t = bsr2log10[FMT_BUILTIN_CLZLL(n | 1) ^ 63]; static constexpr const uint64_t zero_or_powers_of_10[] = { 0, 0, FMT_POWERS_OF_10(1U), FMT_POWERS_OF_10(1000000000ULL), 10000000000000000000ULL}; return t - (n < zero_or_powers_of_10[t]); } #endif // Returns the number of decimal digits in n. Leading zeros are not counted // except for n == 0 in which case count_digits returns 1. FMT_CONSTEXPR20 inline auto count_digits(uint64_t n) -> int { #ifdef FMT_BUILTIN_CLZLL if (!is_constant_evaluated()) return do_count_digits(n); #endif return count_digits_fallback(n); } // Counts the number of digits in n. BITS = log2(radix). template FMT_CONSTEXPR auto count_digits(UInt n) -> int { #ifdef FMT_BUILTIN_CLZ if (!is_constant_evaluated() && num_bits() == 32) return (FMT_BUILTIN_CLZ(static_cast(n) | 1) ^ 31) / BITS + 1; #endif // Lambda avoids unreachable code warnings from NVHPC. return [](UInt m) { int num_digits = 0; do { ++num_digits; } while ((m >>= BITS) != 0); return num_digits; }(n); } #ifdef FMT_BUILTIN_CLZ // It is a separate function rather than a part of count_digits to workaround // the lack of static constexpr in constexpr functions. FMT_INLINE auto do_count_digits(uint32_t n) -> int { // An optimization by Kendall Willets from https://bit.ly/3uOIQrB. // This increments the upper 32 bits (log10(T) - 1) when >= T is added. 
# define FMT_INC(T) (((sizeof(#T) - 1ull) << 32) - T) static constexpr uint64_t table[] = { FMT_INC(0), FMT_INC(0), FMT_INC(0), // 8 FMT_INC(10), FMT_INC(10), FMT_INC(10), // 64 FMT_INC(100), FMT_INC(100), FMT_INC(100), // 512 FMT_INC(1000), FMT_INC(1000), FMT_INC(1000), // 4096 FMT_INC(10000), FMT_INC(10000), FMT_INC(10000), // 32k FMT_INC(100000), FMT_INC(100000), FMT_INC(100000), // 256k FMT_INC(1000000), FMT_INC(1000000), FMT_INC(1000000), // 2048k FMT_INC(10000000), FMT_INC(10000000), FMT_INC(10000000), // 16M FMT_INC(100000000), FMT_INC(100000000), FMT_INC(100000000), // 128M FMT_INC(1000000000), FMT_INC(1000000000), FMT_INC(1000000000), // 1024M FMT_INC(1000000000), FMT_INC(1000000000) // 4B }; auto inc = table[FMT_BUILTIN_CLZ(n | 1) ^ 31]; return static_cast((n + inc) >> 32); } #endif // Optional version of count_digits for better performance on 32-bit platforms. FMT_CONSTEXPR20 inline auto count_digits(uint32_t n) -> int { #ifdef FMT_BUILTIN_CLZ if (!is_constant_evaluated()) { return do_count_digits(n); } #endif return count_digits_fallback(n); } template constexpr auto digits10() noexcept -> int { return std::numeric_limits::digits10; } template <> constexpr auto digits10() noexcept -> int { return 38; } template <> constexpr auto digits10() noexcept -> int { return 38; } template struct thousands_sep_result { std::string grouping; Char thousands_sep; }; template FMT_API auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result; template inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { auto result = thousands_sep_impl(loc); return {result.grouping, Char(result.thousands_sep)}; } template <> inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { return thousands_sep_impl(loc); } template FMT_API auto decimal_point_impl(locale_ref loc) -> Char; template inline auto decimal_point(locale_ref loc) -> Char { return Char(decimal_point_impl(loc)); } template <> inline auto decimal_point(locale_ref loc) -> wchar_t { return 
decimal_point_impl(loc); } // Compares two characters for equality. template auto equal2(const Char* lhs, const char* rhs) -> bool { return lhs[0] == Char(rhs[0]) && lhs[1] == Char(rhs[1]); } inline auto equal2(const char* lhs, const char* rhs) -> bool { return memcmp(lhs, rhs, 2) == 0; } // Copies two characters from src to dst. template FMT_CONSTEXPR20 FMT_INLINE void copy2(Char* dst, const char* src) { if (!is_constant_evaluated() && sizeof(Char) == sizeof(char)) { memcpy(dst, src, 2); return; } *dst++ = static_cast(*src++); *dst = static_cast(*src); } template struct format_decimal_result { Iterator begin; Iterator end; }; // Formats a decimal unsigned integer value writing into out pointing to a // buffer of specified size. The caller must ensure that the buffer is large // enough. template FMT_CONSTEXPR20 auto format_decimal(Char* out, UInt value, int size) -> format_decimal_result { FMT_ASSERT(size >= count_digits(value), "invalid digit count"); out += size; Char* end = out; while (value >= 100) { // Integer division is slow so do it for a group of two digits instead // of for every digit. The idea comes from the talk by Alexandrescu // "Three Optimization Tips for C++". See speed-test for a comparison. out -= 2; copy2(out, digits2(static_cast(value % 100))); value /= 100; } if (value < 10) { *--out = static_cast('0' + value); return {out, end}; } out -= 2; copy2(out, digits2(static_cast(value))); return {out, end}; } template >::value)> FMT_CONSTEXPR inline auto format_decimal(Iterator out, UInt value, int size) -> format_decimal_result { // Buffer is large enough to hold all digits (digits10 + 1). Char buffer[digits10() + 1] = {}; auto end = format_decimal(buffer, value, size).end; return {out, detail::copy_noinline(buffer, end, out)}; } template FMT_CONSTEXPR auto format_uint(Char* buffer, UInt value, int num_digits, bool upper = false) -> Char* { buffer += num_digits; Char* end = buffer; do { const char* digits = upper ? 
"0123456789ABCDEF" : "0123456789abcdef"; unsigned digit = static_cast(value & ((1 << BASE_BITS) - 1)); *--buffer = static_cast(BASE_BITS < 4 ? static_cast('0' + digit) : digits[digit]); } while ((value >>= BASE_BITS) != 0); return end; } template FMT_CONSTEXPR inline auto format_uint(It out, UInt value, int num_digits, bool upper = false) -> It { if (auto ptr = to_pointer(out, to_unsigned(num_digits))) { format_uint(ptr, value, num_digits, upper); return out; } // Buffer should be large enough to hold all digits (digits / BASE_BITS + 1). char buffer[num_bits() / BASE_BITS + 1] = {}; format_uint(buffer, value, num_digits, upper); return detail::copy_noinline(buffer, buffer + num_digits, out); } // A converter from UTF-8 to UTF-16. class utf8_to_utf16 { private: basic_memory_buffer buffer_; public: FMT_API explicit utf8_to_utf16(string_view s); operator basic_string_view() const { return {&buffer_[0], size()}; } auto size() const -> size_t { return buffer_.size() - 1; } auto c_str() const -> const wchar_t* { return &buffer_[0]; } auto str() const -> std::wstring { return {&buffer_[0], size()}; } }; enum class to_utf8_error_policy { abort, replace }; // A converter from UTF-16/UTF-32 (host endian) to UTF-8. template class to_utf8 { private: Buffer buffer_; public: to_utf8() {} explicit to_utf8(basic_string_view s, to_utf8_error_policy policy = to_utf8_error_policy::abort) { static_assert(sizeof(WChar) == 2 || sizeof(WChar) == 4, "Expect utf16 or utf32"); if (!convert(s, policy)) FMT_THROW(std::runtime_error(sizeof(WChar) == 2 ? "invalid utf16" : "invalid utf32")); } operator string_view() const { return string_view(&buffer_[0], size()); } auto size() const -> size_t { return buffer_.size() - 1; } auto c_str() const -> const char* { return &buffer_[0]; } auto str() const -> std::string { return std::string(&buffer_[0], size()); } // Performs conversion returning a bool instead of throwing exception on // conversion error. 
This method may still throw in case of memory allocation // error. auto convert(basic_string_view s, to_utf8_error_policy policy = to_utf8_error_policy::abort) -> bool { if (!convert(buffer_, s, policy)) return false; buffer_.push_back(0); return true; } static auto convert(Buffer& buf, basic_string_view s, to_utf8_error_policy policy = to_utf8_error_policy::abort) -> bool { for (auto p = s.begin(); p != s.end(); ++p) { uint32_t c = static_cast(*p); if (sizeof(WChar) == 2 && c >= 0xd800 && c <= 0xdfff) { // Handle a surrogate pair. ++p; if (p == s.end() || (c & 0xfc00) != 0xd800 || (*p & 0xfc00) != 0xdc00) { if (policy == to_utf8_error_policy::abort) return false; buf.append(string_view("\xEF\xBF\xBD")); --p; } else { c = (c << 10) + static_cast(*p) - 0x35fdc00; } } else if (c < 0x80) { buf.push_back(static_cast(c)); } else if (c < 0x800) { buf.push_back(static_cast(0xc0 | (c >> 6))); buf.push_back(static_cast(0x80 | (c & 0x3f))); } else if ((c >= 0x800 && c <= 0xd7ff) || (c >= 0xe000 && c <= 0xffff)) { buf.push_back(static_cast(0xe0 | (c >> 12))); buf.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); buf.push_back(static_cast(0x80 | (c & 0x3f))); } else if (c >= 0x10000 && c <= 0x10ffff) { buf.push_back(static_cast(0xf0 | (c >> 18))); buf.push_back(static_cast(0x80 | ((c & 0x3ffff) >> 12))); buf.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); buf.push_back(static_cast(0x80 | (c & 0x3f))); } else { return false; } } return true; } }; // Computes 128-bit result of multiplication of two 64-bit unsigned integers. 
inline auto umul128(uint64_t x, uint64_t y) noexcept -> uint128_fallback { #if FMT_USE_INT128 auto p = static_cast(x) * static_cast(y); return {static_cast(p >> 64), static_cast(p)}; #elif defined(_MSC_VER) && defined(_M_X64) auto hi = uint64_t(); auto lo = _umul128(x, y, &hi); return {hi, lo}; #else const uint64_t mask = static_cast(max_value()); uint64_t a = x >> 32; uint64_t b = x & mask; uint64_t c = y >> 32; uint64_t d = y & mask; uint64_t ac = a * c; uint64_t bc = b * c; uint64_t ad = a * d; uint64_t bd = b * d; uint64_t intermediate = (bd >> 32) + (ad & mask) + (bc & mask); return {ac + (intermediate >> 32) + (ad >> 32) + (bc >> 32), (intermediate << 32) + (bd & mask)}; #endif } namespace dragonbox { // Computes floor(log10(pow(2, e))) for e in [-2620, 2620] using the method from // https://fmt.dev/papers/Dragonbox.pdf#page=28, section 6.1. inline auto floor_log10_pow2(int e) noexcept -> int { FMT_ASSERT(e <= 2620 && e >= -2620, "too large exponent"); static_assert((-1 >> 1) == -1, "right shift is not arithmetic"); return (e * 315653) >> 20; } inline auto floor_log2_pow10(int e) noexcept -> int { FMT_ASSERT(e <= 1233 && e >= -1233, "too large exponent"); return (e * 1741647) >> 19; } // Computes upper 64 bits of multiplication of two 64-bit unsigned integers. inline auto umul128_upper64(uint64_t x, uint64_t y) noexcept -> uint64_t { #if FMT_USE_INT128 auto p = static_cast(x) * static_cast(y); return static_cast(p >> 64); #elif defined(_MSC_VER) && defined(_M_X64) return __umulh(x, y); #else return umul128(x, y).high(); #endif } // Computes upper 128 bits of multiplication of a 64-bit unsigned integer and a // 128-bit unsigned integer. inline auto umul192_upper128(uint64_t x, uint128_fallback y) noexcept -> uint128_fallback { uint128_fallback r = umul128(x, y.high()); r += umul128_upper64(x, y.low()); return r; } FMT_API auto get_cached_power(int k) noexcept -> uint128_fallback; // Type-specific information that Dragonbox uses. 
template struct float_info; template <> struct float_info { using carrier_uint = uint32_t; static const int exponent_bits = 8; static const int kappa = 1; static const int big_divisor = 100; static const int small_divisor = 10; static const int min_k = -31; static const int max_k = 46; static const int shorter_interval_tie_lower_threshold = -35; static const int shorter_interval_tie_upper_threshold = -35; }; template <> struct float_info { using carrier_uint = uint64_t; static const int exponent_bits = 11; static const int kappa = 2; static const int big_divisor = 1000; static const int small_divisor = 100; static const int min_k = -292; static const int max_k = 341; static const int shorter_interval_tie_lower_threshold = -77; static const int shorter_interval_tie_upper_threshold = -77; }; // An 80- or 128-bit floating point number. template struct float_info::digits == 64 || std::numeric_limits::digits == 113 || is_float128::value>> { using carrier_uint = detail::uint128_t; static const int exponent_bits = 15; }; // A double-double floating point number. template struct float_info::value>> { using carrier_uint = detail::uint128_t; }; template struct decimal_fp { using significand_type = typename float_info::carrier_uint; significand_type significand; int exponent; }; template FMT_API auto to_decimal(T x) noexcept -> decimal_fp; } // namespace dragonbox // Returns true iff Float has the implicit bit which is not stored. template constexpr auto has_implicit_bit() -> bool { // An 80-bit FP number has a 64-bit significand an no implicit bit. return std::numeric_limits::digits != 64; } // Returns the number of significand bits stored in Float. The implicit bit is // not counted since it is not stored. template constexpr auto num_significand_bits() -> int { // std::numeric_limits may not support __float128. return is_float128() ? 112 : (std::numeric_limits::digits - (has_implicit_bit() ? 
1 : 0)); } template constexpr auto exponent_mask() -> typename dragonbox::float_info::carrier_uint { using float_uint = typename dragonbox::float_info::carrier_uint; return ((float_uint(1) << dragonbox::float_info::exponent_bits) - 1) << num_significand_bits(); } template constexpr auto exponent_bias() -> int { // std::numeric_limits may not support __float128. return is_float128() ? 16383 : std::numeric_limits::max_exponent - 1; } // Writes the exponent exp in the form "[+-]d{2,3}" to buffer. template FMT_CONSTEXPR auto write_exponent(int exp, It it) -> It { FMT_ASSERT(-10000 < exp && exp < 10000, "exponent out of range"); if (exp < 0) { *it++ = static_cast('-'); exp = -exp; } else { *it++ = static_cast('+'); } if (exp >= 100) { const char* top = digits2(to_unsigned(exp / 100)); if (exp >= 1000) *it++ = static_cast(top[0]); *it++ = static_cast(top[1]); exp %= 100; } const char* d = digits2(to_unsigned(exp)); *it++ = static_cast(d[0]); *it++ = static_cast(d[1]); return it; } // A floating-point number f * pow(2, e) where F is an unsigned type. template struct basic_fp { F f; int e; static constexpr const int num_significand_bits = static_cast(sizeof(F) * num_bits()); constexpr basic_fp() : f(0), e(0) {} constexpr basic_fp(uint64_t f_val, int e_val) : f(f_val), e(e_val) {} // Constructs fp from an IEEE754 floating-point number. template FMT_CONSTEXPR basic_fp(Float n) { assign(n); } // Assigns n to this and return true iff predecessor is closer than successor. template ::value)> FMT_CONSTEXPR auto assign(Float n) -> bool { static_assert(std::numeric_limits::digits <= 113, "unsupported FP"); // Assume Float is in the format [sign][exponent][significand]. 
using carrier_uint = typename dragonbox::float_info::carrier_uint; const auto num_float_significand_bits = detail::num_significand_bits(); const auto implicit_bit = carrier_uint(1) << num_float_significand_bits; const auto significand_mask = implicit_bit - 1; auto u = bit_cast(n); f = static_cast(u & significand_mask); auto biased_e = static_cast((u & exponent_mask()) >> num_float_significand_bits); // The predecessor is closer if n is a normalized power of 2 (f == 0) // other than the smallest normalized number (biased_e > 1). auto is_predecessor_closer = f == 0 && biased_e > 1; if (biased_e == 0) biased_e = 1; // Subnormals use biased exponent 1 (min exponent). else if (has_implicit_bit()) f += static_cast(implicit_bit); e = biased_e - exponent_bias() - num_float_significand_bits; if (!has_implicit_bit()) ++e; return is_predecessor_closer; } template ::value)> FMT_CONSTEXPR auto assign(Float n) -> bool { static_assert(std::numeric_limits::is_iec559, "unsupported FP"); return assign(static_cast(n)); } }; using fp = basic_fp; // Normalizes the value converted from double and multiplied by (1 << SHIFT). template FMT_CONSTEXPR auto normalize(basic_fp value) -> basic_fp { // Handle subnormals. const auto implicit_bit = F(1) << num_significand_bits(); const auto shifted_implicit_bit = implicit_bit << SHIFT; while ((value.f & shifted_implicit_bit) == 0) { value.f <<= 1; --value.e; } // Subtract 1 to account for hidden bit. const auto offset = basic_fp::num_significand_bits - num_significand_bits() - SHIFT - 1; value.f <<= offset; value.e -= offset; return value; } // Computes lhs * rhs / pow(2, 64) rounded to nearest with half-up tie breaking. FMT_CONSTEXPR inline auto multiply(uint64_t lhs, uint64_t rhs) -> uint64_t { #if FMT_USE_INT128 auto product = static_cast<__uint128_t>(lhs) * rhs; auto f = static_cast(product >> 64); return (static_cast(product) & (1ULL << 63)) != 0 ? f + 1 : f; #else // Multiply 32-bit parts of significands. 
uint64_t mask = (1ULL << 32) - 1; uint64_t a = lhs >> 32, b = lhs & mask; uint64_t c = rhs >> 32, d = rhs & mask; uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d; // Compute mid 64-bit of result and round. uint64_t mid = (bd >> 32) + (ad & mask) + (bc & mask) + (1U << 31); return ac + (ad >> 32) + (bc >> 32) + (mid >> 32); #endif } FMT_CONSTEXPR inline auto operator*(fp x, fp y) -> fp { return {multiply(x.f, y.f), x.e + y.e + 64}; } template () == num_bits()> using convert_float_result = conditional_t::value || doublish, double, T>; template constexpr auto convert_float(T value) -> convert_float_result { return static_cast>(value); } template FMT_NOINLINE FMT_CONSTEXPR auto fill(OutputIt it, size_t n, const fill_t& fill) -> OutputIt { auto fill_size = fill.size(); if (fill_size == 1) return detail::fill_n(it, n, fill.template get()); if (const Char* data = fill.template data()) { for (size_t i = 0; i < n; ++i) it = copy(data, data + fill_size, it); } return it; } // Writes the output of f, padded according to format specifications in specs. // size: output size in code units. // width: output display width in (terminal) column positions. template FMT_CONSTEXPR auto write_padded(OutputIt out, const format_specs& specs, size_t size, size_t width, F&& f) -> OutputIt { static_assert(align == align::left || align == align::right, ""); unsigned spec_width = to_unsigned(specs.width); size_t padding = spec_width > width ? spec_width - width : 0; // Shifts are encoded as string literals because static constexpr is not // supported in constexpr functions. auto* shifts = align == align::left ? 
"\x1f\x1f\x00\x01" : "\x00\x1f\x00\x01"; size_t left_padding = padding >> shifts[specs.align]; size_t right_padding = padding - left_padding; auto it = reserve(out, size + padding * specs.fill.size()); if (left_padding != 0) it = fill(it, left_padding, specs.fill); it = f(it); if (right_padding != 0) it = fill(it, right_padding, specs.fill); return base_iterator(out, it); } template constexpr auto write_padded(OutputIt out, const format_specs& specs, size_t size, F&& f) -> OutputIt { return write_padded(out, specs, size, size, f); } template FMT_CONSTEXPR auto write_bytes(OutputIt out, string_view bytes, const format_specs& specs = {}) -> OutputIt { return write_padded( out, specs, bytes.size(), [bytes](reserve_iterator it) { const char* data = bytes.data(); return copy(data, data + bytes.size(), it); }); } template auto write_ptr(OutputIt out, UIntPtr value, const format_specs* specs) -> OutputIt { int num_digits = count_digits<4>(value); auto size = to_unsigned(num_digits) + size_t(2); auto write = [=](reserve_iterator it) { *it++ = static_cast('0'); *it++ = static_cast('x'); return format_uint<4, Char>(it, value, num_digits); }; return specs ? write_padded(out, *specs, size, write) : base_iterator(out, write(reserve(out, size))); } // Returns true iff the code point cp is printable. 
FMT_API auto is_printable(uint32_t cp) -> bool; inline auto needs_escape(uint32_t cp) -> bool { return cp < 0x20 || cp == 0x7f || cp == '"' || cp == '\\' || !is_printable(cp); } template struct find_escape_result { const Char* begin; const Char* end; uint32_t cp; }; template auto find_escape(const Char* begin, const Char* end) -> find_escape_result { for (; begin != end; ++begin) { uint32_t cp = static_cast>(*begin); if (const_check(sizeof(Char) == 1) && cp >= 0x80) continue; if (needs_escape(cp)) return {begin, begin + 1, cp}; } return {begin, nullptr, 0}; } inline auto find_escape(const char* begin, const char* end) -> find_escape_result { if (!use_utf8()) return find_escape(begin, end); auto result = find_escape_result{end, nullptr, 0}; for_each_codepoint(string_view(begin, to_unsigned(end - begin)), [&](uint32_t cp, string_view sv) { if (needs_escape(cp)) { result = {sv.begin(), sv.end(), cp}; return false; } return true; }); return result; } #define FMT_STRING_IMPL(s, base, explicit) \ [] { \ /* Use the hidden visibility as a workaround for a GCC bug (#1973). */ \ /* Use a macro-like name to avoid shadowing warnings. */ \ struct FMT_VISIBILITY("hidden") FMT_COMPILE_STRING : base { \ using char_type FMT_MAYBE_UNUSED = fmt::remove_cvref_t; \ FMT_MAYBE_UNUSED FMT_CONSTEXPR explicit \ operator fmt::basic_string_view() const { \ return fmt::detail_exported::compile_string_to_view(s); \ } \ }; \ return FMT_COMPILE_STRING(); \ }() /** * Constructs a compile-time format string from a string literal `s`. * * **Example**: * * // A compile-time error because 'd' is an invalid specifier for strings. 
* std::string s = fmt::format(FMT_STRING("{:d}"), "foo"); */ #define FMT_STRING(s) FMT_STRING_IMPL(s, fmt::detail::compile_string, ) template auto write_codepoint(OutputIt out, char prefix, uint32_t cp) -> OutputIt { *out++ = static_cast('\\'); *out++ = static_cast(prefix); Char buf[width]; fill_n(buf, width, static_cast('0')); format_uint<4>(buf, cp, width); return copy(buf, buf + width, out); } template auto write_escaped_cp(OutputIt out, const find_escape_result& escape) -> OutputIt { auto c = static_cast(escape.cp); switch (escape.cp) { case '\n': *out++ = static_cast('\\'); c = static_cast('n'); break; case '\r': *out++ = static_cast('\\'); c = static_cast('r'); break; case '\t': *out++ = static_cast('\\'); c = static_cast('t'); break; case '"': FMT_FALLTHROUGH; case '\'': FMT_FALLTHROUGH; case '\\': *out++ = static_cast('\\'); break; default: if (escape.cp < 0x100) return write_codepoint<2, Char>(out, 'x', escape.cp); if (escape.cp < 0x10000) return write_codepoint<4, Char>(out, 'u', escape.cp); if (escape.cp < 0x110000) return write_codepoint<8, Char>(out, 'U', escape.cp); for (Char escape_char : basic_string_view( escape.begin, to_unsigned(escape.end - escape.begin))) { out = write_codepoint<2, Char>(out, 'x', static_cast(escape_char) & 0xFF); } return out; } *out++ = c; return out; } template auto write_escaped_string(OutputIt out, basic_string_view str) -> OutputIt { *out++ = static_cast('"'); auto begin = str.begin(), end = str.end(); do { auto escape = find_escape(begin, end); out = copy(begin, escape.begin, out); begin = escape.end; if (!begin) break; out = write_escaped_cp(out, escape); } while (begin != end); *out++ = static_cast('"'); return out; } template auto write_escaped_char(OutputIt out, Char v) -> OutputIt { Char v_array[1] = {v}; *out++ = static_cast('\''); if ((needs_escape(static_cast(v)) && v != static_cast('"')) || v == static_cast('\'')) { out = write_escaped_cp(out, find_escape_result{v_array, v_array + 1, static_cast(v)}); } else { 
*out++ = v; } *out++ = static_cast('\''); return out; } template FMT_CONSTEXPR auto write_char(OutputIt out, Char value, const format_specs& specs) -> OutputIt { bool is_debug = specs.type == presentation_type::debug; return write_padded(out, specs, 1, [=](reserve_iterator it) { if (is_debug) return write_escaped_char(it, value); *it++ = value; return it; }); } template FMT_CONSTEXPR auto write(OutputIt out, Char value, const format_specs& specs, locale_ref loc = {}) -> OutputIt { // char is formatted as unsigned char for consistency across platforms. using unsigned_type = conditional_t::value, unsigned char, unsigned>; return check_char_specs(specs) ? write_char(out, value, specs) : write(out, static_cast(value), specs, loc); } // Data for write_int that doesn't depend on output iterator type. It is used to // avoid template code bloat. template struct write_int_data { size_t size; size_t padding; FMT_CONSTEXPR write_int_data(int num_digits, unsigned prefix, const format_specs& specs) : size((prefix >> 24) + to_unsigned(num_digits)), padding(0) { if (specs.align == align::numeric) { auto width = to_unsigned(specs.width); if (width > size) { padding = width - size; size = width; } } else if (specs.precision > num_digits) { size = (prefix >> 24) + to_unsigned(specs.precision); padding = to_unsigned(specs.precision - num_digits); } } }; // Writes an integer in the format // // where are written by write_digits(it). // prefix contains chars in three lower bytes and the size in the fourth byte. template FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, int num_digits, unsigned prefix, const format_specs& specs, W write_digits) -> OutputIt { // Slightly faster check for specs.width == 0 && specs.precision == -1. 
if ((specs.width | (specs.precision + 1)) == 0) { auto it = reserve(out, to_unsigned(num_digits) + (prefix >> 24)); if (prefix != 0) { for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) *it++ = static_cast(p & 0xff); } return base_iterator(out, write_digits(it)); } auto data = write_int_data(num_digits, prefix, specs); return write_padded( out, specs, data.size, [=](reserve_iterator it) { for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) *it++ = static_cast(p & 0xff); it = detail::fill_n(it, data.padding, static_cast('0')); return write_digits(it); }); } template class digit_grouping { private: std::string grouping_; std::basic_string thousands_sep_; struct next_state { std::string::const_iterator group; int pos; }; auto initial_state() const -> next_state { return {grouping_.begin(), 0}; } // Returns the next digit group separator position. auto next(next_state& state) const -> int { if (thousands_sep_.empty()) return max_value(); if (state.group == grouping_.end()) return state.pos += grouping_.back(); if (*state.group <= 0 || *state.group == max_value()) return max_value(); state.pos += *state.group++; return state.pos; } public: explicit digit_grouping(locale_ref loc, bool localized = true) { if (!localized) return; auto sep = thousands_sep(loc); grouping_ = sep.grouping; if (sep.thousands_sep) thousands_sep_.assign(1, sep.thousands_sep); } digit_grouping(std::string grouping, std::basic_string sep) : grouping_(std::move(grouping)), thousands_sep_(std::move(sep)) {} auto has_separator() const -> bool { return !thousands_sep_.empty(); } auto count_separators(int num_digits) const -> int { int count = 0; auto state = initial_state(); while (num_digits > next(state)) ++count; return count; } // Applies grouping to digits and write the output to out. 
template auto apply(Out out, basic_string_view digits) const -> Out { auto num_digits = static_cast(digits.size()); auto separators = basic_memory_buffer(); separators.push_back(0); auto state = initial_state(); while (int i = next(state)) { if (i >= num_digits) break; separators.push_back(i); } for (int i = 0, sep_index = static_cast(separators.size() - 1); i < num_digits; ++i) { if (num_digits - i == separators[sep_index]) { out = copy(thousands_sep_.data(), thousands_sep_.data() + thousands_sep_.size(), out); --sep_index; } *out++ = static_cast(digits[to_unsigned(i)]); } return out; } }; FMT_CONSTEXPR inline void prefix_append(unsigned& prefix, unsigned value) { prefix |= prefix != 0 ? value << 8 : value; prefix += (1u + (value > 0xff ? 1 : 0)) << 24; } // Writes a decimal integer with digit grouping. template auto write_int(OutputIt out, UInt value, unsigned prefix, const format_specs& specs, const digit_grouping& grouping) -> OutputIt { static_assert(std::is_same, UInt>::value, ""); int num_digits = 0; auto buffer = memory_buffer(); switch (specs.type) { default: FMT_ASSERT(false, ""); FMT_FALLTHROUGH; case presentation_type::none: case presentation_type::dec: num_digits = count_digits(value); format_decimal(appender(buffer), value, num_digits); break; case presentation_type::hex: if (specs.alt) prefix_append(prefix, unsigned(specs.upper ? 'X' : 'x') << 8 | '0'); num_digits = count_digits<4>(value); format_uint<4, char>(appender(buffer), value, num_digits, specs.upper); break; case presentation_type::oct: num_digits = count_digits<3>(value); // Octal prefix '0' is counted as a digit, so only add it if precision // is not greater than the number of digits. if (specs.alt && specs.precision <= num_digits && value != 0) prefix_append(prefix, '0'); format_uint<3, char>(appender(buffer), value, num_digits); break; case presentation_type::bin: if (specs.alt) prefix_append(prefix, unsigned(specs.upper ? 
'B' : 'b') << 8 | '0'); num_digits = count_digits<1>(value); format_uint<1, char>(appender(buffer), value, num_digits); break; case presentation_type::chr: return write_char(out, static_cast(value), specs); } unsigned size = (prefix != 0 ? prefix >> 24 : 0) + to_unsigned(num_digits) + to_unsigned(grouping.count_separators(num_digits)); return write_padded( out, specs, size, size, [&](reserve_iterator it) { for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) *it++ = static_cast(p & 0xff); return grouping.apply(it, string_view(buffer.data(), buffer.size())); }); } // Writes a localized value. FMT_API auto write_loc(appender out, loc_value value, const format_specs& specs, locale_ref loc) -> bool; template inline auto write_loc(OutputIt, loc_value, const format_specs&, locale_ref) -> bool { return false; } template struct write_int_arg { UInt abs_value; unsigned prefix; }; template FMT_CONSTEXPR auto make_write_int_arg(T value, sign_t sign) -> write_int_arg> { auto prefix = 0u; auto abs_value = static_cast>(value); if (is_negative(value)) { prefix = 0x01000000 | '-'; abs_value = 0 - abs_value; } else { constexpr const unsigned prefixes[4] = {0, 0, 0x1000000u | '+', 0x1000000u | ' '}; prefix = prefixes[sign]; } return {abs_value, prefix}; } template struct loc_writer { basic_appender out; const format_specs& specs; std::basic_string sep; std::string grouping; std::basic_string decimal_point; template ::value)> auto operator()(T value) -> bool { auto arg = make_write_int_arg(value, specs.sign); write_int(out, static_cast>(arg.abs_value), arg.prefix, specs, digit_grouping(grouping, sep)); return true; } template ::value)> auto operator()(T) -> bool { return false; } }; template FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, write_int_arg arg, const format_specs& specs, locale_ref) -> OutputIt { static_assert(std::is_same>::value, ""); auto abs_value = arg.abs_value; auto prefix = arg.prefix; switch (specs.type) { default: FMT_ASSERT(false, ""); FMT_FALLTHROUGH; 
case presentation_type::none: case presentation_type::dec: { int num_digits = count_digits(abs_value); return write_int( out, num_digits, prefix, specs, [=](reserve_iterator it) { return format_decimal(it, abs_value, num_digits).end; }); } case presentation_type::hex: { if (specs.alt) prefix_append(prefix, unsigned(specs.upper ? 'X' : 'x') << 8 | '0'); int num_digits = count_digits<4>(abs_value); return write_int( out, num_digits, prefix, specs, [=](reserve_iterator it) { return format_uint<4, Char>(it, abs_value, num_digits, specs.upper); }); } case presentation_type::oct: { int num_digits = count_digits<3>(abs_value); // Octal prefix '0' is counted as a digit, so only add it if precision // is not greater than the number of digits. if (specs.alt && specs.precision <= num_digits && abs_value != 0) prefix_append(prefix, '0'); return write_int( out, num_digits, prefix, specs, [=](reserve_iterator it) { return format_uint<3, Char>(it, abs_value, num_digits); }); } case presentation_type::bin: { if (specs.alt) prefix_append(prefix, unsigned(specs.upper ? 'B' : 'b') << 8 | '0'); int num_digits = count_digits<1>(abs_value); return write_int( out, num_digits, prefix, specs, [=](reserve_iterator it) { return format_uint<1, Char>(it, abs_value, num_digits); }); } case presentation_type::chr: return write_char(out, static_cast(abs_value), specs); } } template FMT_CONSTEXPR FMT_NOINLINE auto write_int_noinline(OutputIt out, write_int_arg arg, const format_specs& specs, locale_ref loc) -> OutputIt { return write_int(out, arg, specs, loc); } template ::value && !std::is_same::value && !std::is_same::value)> FMT_CONSTEXPR FMT_INLINE auto write(basic_appender out, T value, const format_specs& specs, locale_ref loc) -> basic_appender { if (specs.localized && write_loc(out, value, specs, loc)) return out; return write_int_noinline(out, make_write_int_arg(value, specs.sign), specs, loc); } // An inlined version of write used in format string compilation. 
template ::value && !std::is_same::value && !std::is_same::value && !std::is_same>::value)> FMT_CONSTEXPR FMT_INLINE auto write(OutputIt out, T value, const format_specs& specs, locale_ref loc) -> OutputIt { if (specs.localized && write_loc(out, value, specs, loc)) return out; return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); } // An output iterator that counts the number of objects written to it and // discards them. class counting_iterator { private: size_t count_; public: using iterator_category = std::output_iterator_tag; using difference_type = std::ptrdiff_t; using pointer = void; using reference = void; FMT_UNCHECKED_ITERATOR(counting_iterator); struct value_type { template FMT_CONSTEXPR void operator=(const T&) {} }; FMT_CONSTEXPR counting_iterator() : count_(0) {} FMT_CONSTEXPR auto count() const -> size_t { return count_; } FMT_CONSTEXPR auto operator++() -> counting_iterator& { ++count_; return *this; } FMT_CONSTEXPR auto operator++(int) -> counting_iterator { auto it = *this; ++*this; return it; } FMT_CONSTEXPR friend auto operator+(counting_iterator it, difference_type n) -> counting_iterator { it.count_ += static_cast(n); return it; } FMT_CONSTEXPR auto operator*() const -> value_type { return {}; } }; template FMT_CONSTEXPR auto write(OutputIt out, basic_string_view s, const format_specs& specs) -> OutputIt { auto data = s.data(); auto size = s.size(); if (specs.precision >= 0 && to_unsigned(specs.precision) < size) size = code_point_index(s, to_unsigned(specs.precision)); bool is_debug = specs.type == presentation_type::debug; size_t width = 0; if (is_debug) size = write_escaped_string(counting_iterator{}, s).count(); if (specs.width != 0) { if (is_debug) width = size; else width = compute_width(basic_string_view(data, size)); } return write_padded(out, specs, size, width, [=](reserve_iterator it) { if (is_debug) return write_escaped_string(it, s); return copy(data, data + size, it); }); } template FMT_CONSTEXPR auto 
write(OutputIt out, basic_string_view> s, const format_specs& specs, locale_ref) -> OutputIt { return write(out, s, specs); } template FMT_CONSTEXPR auto write(OutputIt out, const Char* s, const format_specs& specs, locale_ref) -> OutputIt { if (specs.type == presentation_type::pointer) return write_ptr(out, bit_cast(s), &specs); if (!s) report_error("string pointer is null"); return write(out, basic_string_view(s), specs, {}); } template ::value && !std::is_same::value && !std::is_same::value)> FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { auto abs_value = static_cast>(value); bool negative = is_negative(value); // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer. if (negative) abs_value = ~abs_value + 1; int num_digits = count_digits(abs_value); auto size = (negative ? 1 : 0) + static_cast(num_digits); if (auto ptr = to_pointer(out, size)) { if (negative) *ptr++ = static_cast('-'); format_decimal(ptr, abs_value, num_digits); return out; } if (negative) *out++ = static_cast('-'); return format_decimal(out, abs_value, num_digits).end; } // DEPRECATED! template FMT_CONSTEXPR auto parse_align(const Char* begin, const Char* end, format_specs& specs) -> const Char* { FMT_ASSERT(begin != end, ""); auto align = align::none; auto p = begin + code_point_length(begin); if (end - p <= 0) p = begin; for (;;) { switch (to_ascii(*p)) { case '<': align = align::left; break; case '>': align = align::right; break; case '^': align = align::center; break; } if (align != align::none) { if (p != begin) { auto c = *begin; if (c == '}') return begin; if (c == '{') { report_error("invalid fill character '{'"); return begin; } specs.fill = basic_string_view(begin, to_unsigned(p - begin)); begin = p + 1; } else { ++begin; } break; } else if (p == begin) { break; } p = begin; } specs.align = align; return begin; } // A floating-point presentation format. 
// NOTE(review): this vendored {fmt} section appears extraction-mangled --
// template parameter/argument lists (e.g. `<typename Char>`) look stripped.
// Tokens below are preserved verbatim; only comments/formatting were added.

// The three float presentation families selectable via the format spec type
// ('e', 'f', or default/'g').
enum class float_format : unsigned char {
  general,  // General: exponent notation or fixed point based on magnitude.
  exp,      // Exponent notation with the default precision of 6, e.g. 1.2e-3.
  fixed     // Fixed point with the default precision of 6, e.g. 0.0012.
};

// Parsed floating-point formatting options, packed into bitfields so the
// struct stays cheap to pass by value.
struct float_specs {
  int precision;
  float_format format : 8;
  sign_t sign : 8;
  bool locale : 1;     // use locale-specific decimal point / grouping
  bool binary32 : 1;
  bool showpoint : 1;  // always emit a decimal point ('#' flag)
};

// DEPRECATED! Translates generic format_specs into float_specs.
// '#' (alt) forces showpoint; so does a nonzero precision with 'e'/'f'.
FMT_CONSTEXPR inline auto parse_float_type_spec(const format_specs& specs)
    -> float_specs {
  auto result = float_specs();
  result.showpoint = specs.alt;
  result.locale = specs.localized;
  switch (specs.type) {
  default:
    FMT_FALLTHROUGH;
  case presentation_type::none:
    result.format = float_format::general;
    break;
  case presentation_type::exp:
    result.format = float_format::exp;
    result.showpoint |= specs.precision != 0;
    break;
  case presentation_type::fixed:
    result.format = float_format::fixed;
    result.showpoint |= specs.precision != 0;
    break;
  case presentation_type::general:
    result.format = float_format::general;
    break;
  }
  return result;
}

// Writes "inf"/"nan" (upper-cased when specs.upper is set) with an optional
// sign character, honoring width/alignment from specs.
template FMT_CONSTEXPR20 auto write_nonfinite(OutputIt out, bool isnan,
                                              format_specs specs, sign_t sign)
    -> OutputIt {
  auto str = isnan ? (specs.upper ? "NAN" : "nan")
                   : (specs.upper ? "INF" : "inf");
  constexpr size_t str_size = 3;
  auto size = str_size + (sign ? 1 : 0);
  // Replace '0'-padding with space for non-finite values.
  const bool is_zero_fill =
      specs.fill.size() == 1 && specs.fill.template get() == '0';
  if (is_zero_fill) specs.fill = ' ';
  return write_padded(out, specs, size, [=](reserve_iterator it) {
    if (sign) *it++ = detail::sign(sign);
    return copy(str, str + str_size, it);
  });
}

// A decimal floating-point number significand * pow(10, exp).
// A decimal floating-point number: significand * pow(10, exponent), with the
// significand stored as a digit string.
struct big_decimal_fp {
  const char* significand;
  int significand_size;
  int exponent;
};

// Digit count of a string-form significand (already known).
constexpr auto get_significand_size(const big_decimal_fp& f) -> int {
  return f.significand_size;
}
// Digit count of an integer (dragonbox) significand.
template inline auto get_significand_size(const dragonbox::decimal_fp& f)
    -> int {
  return count_digits(f.significand);
}

// Writes a digit-string significand verbatim.
template constexpr auto write_significand(OutputIt out,
                                          const char* significand,
                                          int significand_size) -> OutputIt {
  return copy(significand, significand + significand_size, out);
}
// Writes an integer significand in decimal.
template inline auto write_significand(OutputIt out, UInt significand,
                                       int significand_size) -> OutputIt {
  return format_decimal(out, significand, significand_size).end;
}

// Writes the significand followed by `exponent` trailing zeros, applying
// digit grouping (thousands separators) when the grouping has one.
template FMT_CONSTEXPR20 auto write_significand(OutputIt out, T significand,
                                                int significand_size,
                                                int exponent,
                                                const Grouping& grouping)
    -> OutputIt {
  if (!grouping.has_separator()) {
    out = write_significand(out, significand, significand_size);
    return detail::fill_n(out, exponent, static_cast('0'));
  }
  // Render into a temporary buffer first so grouping can be applied.
  auto buffer = memory_buffer();
  write_significand(appender(buffer), significand, significand_size);
  detail::fill_n(appender(buffer), exponent, '0');
  return grouping.apply(out, string_view(buffer.data(), buffer.size()));
}

// Pointer-output variant: writes an integer significand with a decimal point
// inserted after `integral_size` digits, emitting digits backwards two at a
// time via the digits2 lookup.
template ::value)> inline auto write_significand(Char* out, UInt significand,
                                                 int significand_size,
                                                 int integral_size,
                                                 Char decimal_point) -> Char* {
  if (!decimal_point)
    return format_decimal(out, significand, significand_size).end;
  out += significand_size + 1;
  Char* end = out;
  int floating_size = significand_size - integral_size;
  for (int i = floating_size / 2; i > 0; --i) {
    out -= 2;
    copy2(out, digits2(static_cast(significand % 100)));
    significand /= 100;
  }
  if (floating_size % 2 != 0) {
    *--out = static_cast('0' + significand % 10);
    significand /= 10;
  }
  *--out = decimal_point;
  format_decimal(out - integral_size, significand, integral_size);
  return end;
}

// Generic-iterator variant: render into a stack buffer, then copy out.
template >::value)> inline auto write_significand(OutputIt out,
                                                  UInt significand,
                                                  int significand_size,
                                                  int integral_size,
                                                  Char decimal_point)
    -> OutputIt {
  // Buffer is large enough to hold digits (digits10 + 1) and a decimal point.
  Char buffer[digits10() + 2];
  auto end = write_significand(buffer, significand, significand_size,
                               integral_size, decimal_point);
  return detail::copy_noinline(buffer, end, out);
}

// Digit-string significand with a decimal point after `integral_size` chars.
template FMT_CONSTEXPR auto write_significand(OutputIt out,
                                              const char* significand,
                                              int significand_size,
                                              int integral_size,
                                              Char decimal_point) -> OutputIt {
  out = detail::copy_noinline(significand, significand + integral_size, out);
  if (!decimal_point) return out;
  *out++ = decimal_point;
  return detail::copy_noinline(significand + integral_size,
                               significand + significand_size, out);
}

// Decimal-point + grouping variant: grouping applies to the integral part
// only; the fractional part is copied through unchanged.
template FMT_CONSTEXPR20 auto write_significand(OutputIt out, T significand,
                                                int significand_size,
                                                int integral_size,
                                                Char decimal_point,
                                                const Grouping& grouping)
    -> OutputIt {
  if (!grouping.has_separator()) {
    return write_significand(out, significand, significand_size, integral_size,
                             decimal_point);
  }
  auto buffer = basic_memory_buffer();
  write_significand(basic_appender(buffer), significand, significand_size,
                    integral_size, decimal_point);
  grouping.apply(
      out, basic_string_view(buffer.data(), to_unsigned(integral_size)));
  return detail::copy_noinline(buffer.data() + integral_size, buffer.end(),
                               out);
}

// Core float writer: chooses exponent vs. fixed notation and emits sign,
// digits, decimal point, zeros, exponent, and padding.
// NOTE(review): this definition continues past this block's last line.
template > FMT_CONSTEXPR20 auto do_write_float(OutputIt out, const DecimalFP& f,
                                               const format_specs& specs,
                                               float_specs fspecs,
                                               locale_ref loc) -> OutputIt {
  auto significand = f.significand;
  int significand_size = get_significand_size(f);
  const Char zero = static_cast('0');
  auto sign = fspecs.sign;
  size_t size = to_unsigned(significand_size) + (sign ? 1 : 0);
  using iterator = reserve_iterator;
  Char decimal_point = fspecs.locale ?
detail::decimal_point(loc) : static_cast('.'); int output_exp = f.exponent + significand_size - 1; auto use_exp_format = [=]() { if (fspecs.format == float_format::exp) return true; if (fspecs.format != float_format::general) return false; // Use the fixed notation if the exponent is in [exp_lower, exp_upper), // e.g. 0.0001 instead of 1e-04. Otherwise use the exponent notation. const int exp_lower = -4, exp_upper = 16; return output_exp < exp_lower || output_exp >= (fspecs.precision > 0 ? fspecs.precision : exp_upper); }; if (use_exp_format()) { int num_zeros = 0; if (fspecs.showpoint) { num_zeros = fspecs.precision - significand_size; if (num_zeros < 0) num_zeros = 0; size += to_unsigned(num_zeros); } else if (significand_size == 1) { decimal_point = Char(); } auto abs_output_exp = output_exp >= 0 ? output_exp : -output_exp; int exp_digits = 2; if (abs_output_exp >= 100) exp_digits = abs_output_exp >= 1000 ? 4 : 3; size += to_unsigned((decimal_point ? 1 : 0) + 2 + exp_digits); char exp_char = specs.upper ? 'E' : 'e'; auto write = [=](iterator it) { if (sign) *it++ = detail::sign(sign); // Insert a decimal point after the first digit and add an exponent. it = write_significand(it, significand, significand_size, 1, decimal_point); if (num_zeros > 0) it = detail::fill_n(it, num_zeros, zero); *it++ = static_cast(exp_char); return write_exponent(output_exp, it); }; return specs.width > 0 ? 
write_padded(out, specs, size, write) : base_iterator(out, write(reserve(out, size))); } int exp = f.exponent + significand_size; if (f.exponent >= 0) { // 1234e5 -> 123400000[.0+] size += to_unsigned(f.exponent); int num_zeros = fspecs.precision - exp; abort_fuzzing_if(num_zeros > 5000); if (fspecs.showpoint) { ++size; if (num_zeros <= 0 && fspecs.format != float_format::fixed) num_zeros = 0; if (num_zeros > 0) size += to_unsigned(num_zeros); } auto grouping = Grouping(loc, fspecs.locale); size += to_unsigned(grouping.count_separators(exp)); return write_padded(out, specs, size, [&](iterator it) { if (sign) *it++ = detail::sign(sign); it = write_significand(it, significand, significand_size, f.exponent, grouping); if (!fspecs.showpoint) return it; *it++ = decimal_point; return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; }); } else if (exp > 0) { // 1234e-2 -> 12.34[0+] int num_zeros = fspecs.showpoint ? fspecs.precision - significand_size : 0; size += 1 + to_unsigned(num_zeros > 0 ? num_zeros : 0); auto grouping = Grouping(loc, fspecs.locale); size += to_unsigned(grouping.count_separators(exp)); return write_padded(out, specs, size, [&](iterator it) { if (sign) *it++ = detail::sign(sign); it = write_significand(it, significand, significand_size, exp, decimal_point, grouping); return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; }); } // 1234e-6 -> 0.001234 int num_zeros = -exp; if (significand_size == 0 && fspecs.precision >= 0 && fspecs.precision < num_zeros) { num_zeros = fspecs.precision; } bool pointy = num_zeros != 0 || significand_size != 0 || fspecs.showpoint; size += 1 + (pointy ? 
1 : 0) + to_unsigned(num_zeros);
  // Emit "0[.000ddd]" for magnitudes below 1 (e.g. 1234e-6 -> 0.001234).
  return write_padded(out, specs, size, [&](iterator it) {
    if (sign) *it++ = detail::sign(sign);
    *it++ = zero;
    if (!pointy) return it;
    *it++ = decimal_point;
    it = detail::fill_n(it, num_zeros, zero);
    return write_significand(it, significand, significand_size);
  });
}

// No-op grouping used in constant-evaluated contexts where locale-dependent
// digit grouping is unavailable.
template class fallback_digit_grouping {
 public:
  constexpr fallback_digit_grouping(locale_ref, bool) {}
  constexpr auto has_separator() const -> bool { return false; }
  constexpr auto count_separators(int) const -> int { return 0; }
  template constexpr auto apply(Out out, basic_string_view) const -> Out {
    return out;
  }
};

// Dispatches to do_write_float, selecting the fallback grouping when the
// call is constant-evaluated.
template FMT_CONSTEXPR20 auto write_float(OutputIt out, const DecimalFP& f,
                                          const format_specs& specs,
                                          float_specs fspecs, locale_ref loc)
    -> OutputIt {
  if (is_constant_evaluated()) {
    return do_write_float>(out, f, specs, fspecs, loc);
  } else {
    return do_write_float(out, f, specs, fspecs, loc);
  }
}

// NaN test via self-comparison, usable for types std::isnan can't handle.
template constexpr auto isnan(T value) -> bool {
  return value != value;  // std::isnan doesn't support __float128.
}

// Trait: does std::isfinite have an overload for T?
template struct has_isfinite : std::false_type {};
template struct has_isfinite> : std::true_type {};

// Finite test: comparisons at compile time (std::isfinite is not constexpr),
// std::isfinite at run time.
template ::value&& has_isfinite::value)> FMT_CONSTEXPR20 auto isfinite(T value)
    -> bool {
  constexpr T inf = T(std::numeric_limits::infinity());
  if (is_constant_evaluated())
    return !detail::isnan(value) && value < inf && value > -inf;
  return std::isfinite(value);
}
// Fallback for types without a std::isfinite overload.
// NOTE(review): this definition continues past this block's last line.
template ::value)> FMT_CONSTEXPR auto isfinite(T value) -> bool {
  T inf = T(std::numeric_limits::infinity());
  // std::isfinite doesn't support __float128.
return !detail::isnan(value) && value < inf && value > -inf;
}

// Sign-bit test usable in constant evaluation for IEC 559 types by inspecting
// the raw bit pattern; falls back to std::signbit at run time.
template ::value)> FMT_INLINE FMT_CONSTEXPR bool signbit(T value) {
  if (is_constant_evaluated()) {
#ifdef __cpp_if_constexpr
    if constexpr (std::numeric_limits::is_iec559) {
      auto bits = detail::bit_cast(static_cast(value));
      return (bits >> (num_bits() - 1)) != 0;
    }
#endif
  }
  return std::signbit(static_cast(value));
}

// Shifts a requested fixed precision by the decimal exponent, guarding
// against int overflow.
inline FMT_CONSTEXPR20 void adjust_precision(int& precision, int exp10) {
  // Adjust fixed precision by exponent because it is relative to decimal
  // point.
  if (exp10 > 0 && precision > max_value() - exp10)
    FMT_THROW(format_error("number is too big"));
  precision += exp10;
}

// Arbitrary-precision unsigned integer used by the Dragon4-style formatter.
// NOTE(review): the class body continues past this block's last line.
class bigint {
 private:
  // A bigint is stored as an array of bigits (big digits), with bigit at index
  // 0 being the least significant one.
  using bigit = uint32_t;
  using double_bigit = uint64_t;
  enum { bigits_capacity = 32 };
  basic_memory_buffer bigits_;
  int exp_;  // count of implicit least-significant zero bigits

  FMT_CONSTEXPR20 auto operator[](int index) const -> bigit {
    return bigits_[to_unsigned(index)];
  }
  FMT_CONSTEXPR20 auto operator[](int index) -> bigit& {
    return bigits_[to_unsigned(index)];
  }

  static constexpr const int bigit_bits = num_bits();

  friend struct formatter;

  // Subtracts `other` plus an incoming borrow from the bigit at `index`,
  // updating `borrow` for the next position.
  FMT_CONSTEXPR20 void subtract_bigits(int index, bigit other, bigit& borrow) {
    auto result = static_cast((*this)[index]) - other - borrow;
    (*this)[index] = static_cast(result);
    borrow = static_cast(result >> (bigit_bits * 2 - 1));
  }

  // Drops high-order zero bigits, keeping at least one bigit.
  FMT_CONSTEXPR20 void remove_leading_zeros() {
    int num_bigits = static_cast(bigits_.size()) - 1;
    while (num_bigits > 0 && (*this)[num_bigits] == 0) --num_bigits;
    bigits_.resize(to_unsigned(num_bigits + 1));
  }

  // Computes *this -= other assuming aligned bigints and *this >= other.
FMT_CONSTEXPR20 void subtract_aligned(const bigint& other) { FMT_ASSERT(other.exp_ >= exp_, "unaligned bigints"); FMT_ASSERT(compare(*this, other) >= 0, ""); bigit borrow = 0; int i = other.exp_ - exp_; for (size_t j = 0, n = other.bigits_.size(); j != n; ++i, ++j) subtract_bigits(i, other.bigits_[j], borrow); while (borrow > 0) subtract_bigits(i, 0, borrow); remove_leading_zeros(); } FMT_CONSTEXPR20 void multiply(uint32_t value) { const double_bigit wide_value = value; bigit carry = 0; for (size_t i = 0, n = bigits_.size(); i < n; ++i) { double_bigit result = bigits_[i] * wide_value + carry; bigits_[i] = static_cast(result); carry = static_cast(result >> bigit_bits); } if (carry != 0) bigits_.push_back(carry); } template ::value || std::is_same::value)> FMT_CONSTEXPR20 void multiply(UInt value) { using half_uint = conditional_t::value, uint64_t, uint32_t>; const int shift = num_bits() - bigit_bits; const UInt lower = static_cast(value); const UInt upper = value >> num_bits(); UInt carry = 0; for (size_t i = 0, n = bigits_.size(); i < n; ++i) { UInt result = lower * bigits_[i] + static_cast(carry); carry = (upper * bigits_[i] << shift) + (result >> bigit_bits) + (carry >> bigit_bits); bigits_[i] = static_cast(result); } while (carry != 0) { bigits_.push_back(static_cast(carry)); carry >>= bigit_bits; } } template ::value || std::is_same::value)> FMT_CONSTEXPR20 void assign(UInt n) { size_t num_bigits = 0; do { bigits_[num_bigits++] = static_cast(n); n >>= bigit_bits; } while (n != 0); bigits_.resize(num_bigits); exp_ = 0; } public: FMT_CONSTEXPR20 bigint() : exp_(0) {} explicit bigint(uint64_t n) { assign(n); } bigint(const bigint&) = delete; void operator=(const bigint&) = delete; FMT_CONSTEXPR20 void assign(const bigint& other) { auto size = other.bigits_.size(); bigits_.resize(size); auto data = other.bigits_.data(); copy(data, data + size, bigits_.data()); exp_ = other.exp_; } template FMT_CONSTEXPR20 void operator=(Int n) { FMT_ASSERT(n > 0, ""); 
assign(uint64_or_128_t(n)); } FMT_CONSTEXPR20 auto num_bigits() const -> int { return static_cast(bigits_.size()) + exp_; } FMT_NOINLINE FMT_CONSTEXPR20 auto operator<<=(int shift) -> bigint& { FMT_ASSERT(shift >= 0, ""); exp_ += shift / bigit_bits; shift %= bigit_bits; if (shift == 0) return *this; bigit carry = 0; for (size_t i = 0, n = bigits_.size(); i < n; ++i) { bigit c = bigits_[i] >> (bigit_bits - shift); bigits_[i] = (bigits_[i] << shift) + carry; carry = c; } if (carry != 0) bigits_.push_back(carry); return *this; } template FMT_CONSTEXPR20 auto operator*=(Int value) -> bigint& { FMT_ASSERT(value > 0, ""); multiply(uint32_or_64_or_128_t(value)); return *this; } friend FMT_CONSTEXPR20 auto compare(const bigint& lhs, const bigint& rhs) -> int { int num_lhs_bigits = lhs.num_bigits(), num_rhs_bigits = rhs.num_bigits(); if (num_lhs_bigits != num_rhs_bigits) return num_lhs_bigits > num_rhs_bigits ? 1 : -1; int i = static_cast(lhs.bigits_.size()) - 1; int j = static_cast(rhs.bigits_.size()) - 1; int end = i - j; if (end < 0) end = 0; for (; i >= end; --i, --j) { bigit lhs_bigit = lhs[i], rhs_bigit = rhs[j]; if (lhs_bigit != rhs_bigit) return lhs_bigit > rhs_bigit ? 1 : -1; } if (i != j) return i > j ? 1 : -1; return 0; } // Returns compare(lhs1 + lhs2, rhs). friend FMT_CONSTEXPR20 auto add_compare(const bigint& lhs1, const bigint& lhs2, const bigint& rhs) -> int { auto minimum = [](int a, int b) { return a < b ? a : b; }; auto maximum = [](int a, int b) { return a > b ? a : b; }; int max_lhs_bigits = maximum(lhs1.num_bigits(), lhs2.num_bigits()); int num_rhs_bigits = rhs.num_bigits(); if (max_lhs_bigits + 1 < num_rhs_bigits) return -1; if (max_lhs_bigits > num_rhs_bigits) return 1; auto get_bigit = [](const bigint& n, int i) -> bigit { return i >= n.exp_ && i < n.num_bigits() ? 
n[i - n.exp_] : 0; }; double_bigit borrow = 0; int min_exp = minimum(minimum(lhs1.exp_, lhs2.exp_), rhs.exp_); for (int i = num_rhs_bigits - 1; i >= min_exp; --i) { double_bigit sum = static_cast(get_bigit(lhs1, i)) + get_bigit(lhs2, i); bigit rhs_bigit = get_bigit(rhs, i); if (sum > rhs_bigit + borrow) return 1; borrow = rhs_bigit + borrow - sum; if (borrow > 1) return -1; borrow <<= bigit_bits; } return borrow != 0 ? -1 : 0; } // Assigns pow(10, exp) to this bigint. FMT_CONSTEXPR20 void assign_pow10(int exp) { FMT_ASSERT(exp >= 0, ""); if (exp == 0) return *this = 1; // Find the top bit. int bitmask = 1; while (exp >= bitmask) bitmask <<= 1; bitmask >>= 1; // pow(10, exp) = pow(5, exp) * pow(2, exp). First compute pow(5, exp) by // repeated squaring and multiplication. *this = 5; bitmask >>= 1; while (bitmask != 0) { square(); if ((exp & bitmask) != 0) *this *= 5; bitmask >>= 1; } *this <<= exp; // Multiply by pow(2, exp) by shifting. } FMT_CONSTEXPR20 void square() { int num_bigits = static_cast(bigits_.size()); int num_result_bigits = 2 * num_bigits; basic_memory_buffer n(std::move(bigits_)); bigits_.resize(to_unsigned(num_result_bigits)); auto sum = uint128_t(); for (int bigit_index = 0; bigit_index < num_bigits; ++bigit_index) { // Compute bigit at position bigit_index of the result by adding // cross-product terms n[i] * n[j] such that i + j == bigit_index. for (int i = 0, j = bigit_index; j >= 0; ++i, --j) { // Most terms are multiplied twice which can be optimized in the future. sum += static_cast(n[i]) * n[j]; } (*this)[bigit_index] = static_cast(sum); sum >>= num_bits(); // Compute the carry. } // Do the same for the top half. 
for (int bigit_index = num_bigits; bigit_index < num_result_bigits; ++bigit_index) { for (int j = num_bigits - 1, i = bigit_index - j; i < num_bigits;) sum += static_cast(n[i++]) * n[j--]; (*this)[bigit_index] = static_cast(sum); sum >>= num_bits(); } remove_leading_zeros(); exp_ *= 2; } // If this bigint has a bigger exponent than other, adds trailing zero to make // exponents equal. This simplifies some operations such as subtraction. FMT_CONSTEXPR20 void align(const bigint& other) { int exp_difference = exp_ - other.exp_; if (exp_difference <= 0) return; int num_bigits = static_cast(bigits_.size()); bigits_.resize(to_unsigned(num_bigits + exp_difference)); for (int i = num_bigits - 1, j = i + exp_difference; i >= 0; --i, --j) bigits_[j] = bigits_[i]; memset(bigits_.data(), 0, to_unsigned(exp_difference) * sizeof(bigit)); exp_ -= exp_difference; } // Divides this bignum by divisor, assigning the remainder to this and // returning the quotient. FMT_CONSTEXPR20 auto divmod_assign(const bigint& divisor) -> int { FMT_ASSERT(this != &divisor, ""); if (compare(*this, divisor) < 0) return 0; FMT_ASSERT(divisor.bigits_[divisor.bigits_.size() - 1u] != 0, ""); align(divisor); int quotient = 0; do { subtract_aligned(divisor); ++quotient; } while (compare(*this, divisor) >= 0); return quotient; } }; // format_dragon flags. enum dragon { predecessor_closer = 1, fixup = 2, // Run fixup to correct exp10 which can be off by one. fixed = 4, }; // Formats a floating-point number using a variation of the Fixed-Precision // Positive Floating-Point Printout ((FPP)^2) algorithm by Steele & White: // https://fmt.dev/papers/p372-steele.pdf. FMT_CONSTEXPR20 inline void format_dragon(basic_fp value, unsigned flags, int num_digits, buffer& buf, int& exp10) { bigint numerator; // 2 * R in (FPP)^2. bigint denominator; // 2 * S in (FPP)^2. // lower and upper are differences between value and corresponding boundaries. bigint lower; // (M^- in (FPP)^2). 
bigint upper_store; // upper's value if different from lower. bigint* upper = nullptr; // (M^+ in (FPP)^2). // Shift numerator and denominator by an extra bit or two (if lower boundary // is closer) to make lower and upper integers. This eliminates multiplication // by 2 during later computations. bool is_predecessor_closer = (flags & dragon::predecessor_closer) != 0; int shift = is_predecessor_closer ? 2 : 1; if (value.e >= 0) { numerator = value.f; numerator <<= value.e + shift; lower = 1; lower <<= value.e; if (is_predecessor_closer) { upper_store = 1; upper_store <<= value.e + 1; upper = &upper_store; } denominator.assign_pow10(exp10); denominator <<= shift; } else if (exp10 < 0) { numerator.assign_pow10(-exp10); lower.assign(numerator); if (is_predecessor_closer) { upper_store.assign(numerator); upper_store <<= 1; upper = &upper_store; } numerator *= value.f; numerator <<= shift; denominator = 1; denominator <<= shift - value.e; } else { numerator = value.f; numerator <<= shift; denominator.assign_pow10(exp10); denominator <<= shift - value.e; lower = 1; if (is_predecessor_closer) { upper_store = 1ULL << 1; upper = &upper_store; } } int even = static_cast((value.f & 1) == 0); if (!upper) upper = &lower; bool shortest = num_digits < 0; if ((flags & dragon::fixup) != 0) { if (add_compare(numerator, *upper, denominator) + even <= 0) { --exp10; numerator *= 10; if (num_digits < 0) { lower *= 10; if (upper != &lower) *upper *= 10; } } if ((flags & dragon::fixed) != 0) adjust_precision(num_digits, exp10 + 1); } // Invariant: value == (numerator / denominator) * pow(10, exp10). if (shortest) { // Generate the shortest representation. num_digits = 0; char* data = buf.data(); for (;;) { int digit = numerator.divmod_assign(denominator); bool low = compare(numerator, lower) - even < 0; // numerator <[=] lower. 
// numerator + upper >[=] pow10: bool high = add_compare(numerator, *upper, denominator) + even > 0; data[num_digits++] = static_cast('0' + digit); if (low || high) { if (!low) { ++data[num_digits - 1]; } else if (high) { int result = add_compare(numerator, numerator, denominator); // Round half to even. if (result > 0 || (result == 0 && (digit % 2) != 0)) ++data[num_digits - 1]; } buf.try_resize(to_unsigned(num_digits)); exp10 -= num_digits - 1; return; } numerator *= 10; lower *= 10; if (upper != &lower) *upper *= 10; } } // Generate the given number of digits. exp10 -= num_digits - 1; if (num_digits <= 0) { auto digit = '0'; if (num_digits == 0) { denominator *= 10; digit = add_compare(numerator, numerator, denominator) > 0 ? '1' : '0'; } buf.push_back(digit); return; } buf.try_resize(to_unsigned(num_digits)); for (int i = 0; i < num_digits - 1; ++i) { int digit = numerator.divmod_assign(denominator); buf[i] = static_cast('0' + digit); numerator *= 10; } int digit = numerator.divmod_assign(denominator); auto result = add_compare(numerator, numerator, denominator); if (result > 0 || (result == 0 && (digit % 2) != 0)) { if (digit == 9) { const auto overflow = '0' + 10; buf[num_digits - 1] = overflow; // Propagate the carry. for (int i = num_digits - 1; i > 0 && buf[i] == overflow; --i) { buf[i] = '0'; ++buf[i - 1]; } if (buf[0] == overflow) { buf[0] = '1'; if ((flags & dragon::fixed) != 0) buf.push_back('0'); else ++exp10; } return; } ++digit; } buf[num_digits - 1] = static_cast('0' + digit); } // Formats a floating-point number using the hexfloat format. template ::value)> FMT_CONSTEXPR20 void format_hexfloat(Float value, format_specs specs, buffer& buf) { // float is passed as double to reduce the number of instantiations and to // simplify implementation. static_assert(!std::is_same::value, ""); using info = dragonbox::float_info; // Assume Float is in the format [sign][exponent][significand]. 
using carrier_uint = typename info::carrier_uint; constexpr auto num_float_significand_bits = detail::num_significand_bits(); basic_fp f(value); f.e += num_float_significand_bits; if (!has_implicit_bit()) --f.e; constexpr auto num_fraction_bits = num_float_significand_bits + (has_implicit_bit() ? 1 : 0); constexpr auto num_xdigits = (num_fraction_bits + 3) / 4; constexpr auto leading_shift = ((num_xdigits - 1) * 4); const auto leading_mask = carrier_uint(0xF) << leading_shift; const auto leading_xdigit = static_cast((f.f & leading_mask) >> leading_shift); if (leading_xdigit > 1) f.e -= (32 - countl_zero(leading_xdigit) - 1); int print_xdigits = num_xdigits - 1; if (specs.precision >= 0 && print_xdigits > specs.precision) { const int shift = ((print_xdigits - specs.precision - 1) * 4); const auto mask = carrier_uint(0xF) << shift; const auto v = static_cast((f.f & mask) >> shift); if (v >= 8) { const auto inc = carrier_uint(1) << (shift + 4); f.f += inc; f.f &= ~(inc - 1); } // Check long double overflow if (!has_implicit_bit()) { const auto implicit_bit = carrier_uint(1) << num_float_significand_bits; if ((f.f & implicit_bit) == implicit_bit) { f.f >>= 4; f.e += 4; } } print_xdigits = specs.precision; } char xdigits[num_bits() / 4]; detail::fill_n(xdigits, sizeof(xdigits), '0'); format_uint<4>(xdigits, f.f, num_xdigits, specs.upper); // Remove zero tail while (print_xdigits > 0 && xdigits[print_xdigits] == '0') --print_xdigits; buf.push_back('0'); buf.push_back(specs.upper ? 'X' : 'x'); buf.push_back(xdigits[0]); if (specs.alt || print_xdigits > 0 || print_xdigits < specs.precision) buf.push_back('.'); buf.append(xdigits + 1, xdigits + 1 + print_xdigits); for (; print_xdigits < specs.precision; ++print_xdigits) buf.push_back('0'); buf.push_back(specs.upper ? 
'P' : 'p'); uint32_t abs_e; if (f.e < 0) { buf.push_back('-'); abs_e = static_cast(-f.e); } else { buf.push_back('+'); abs_e = static_cast(f.e); } format_decimal(appender(buf), abs_e, detail::count_digits(abs_e)); } template ::value)> FMT_CONSTEXPR20 void format_hexfloat(Float value, format_specs specs, buffer& buf) { format_hexfloat(static_cast(value), specs, buf); } constexpr auto fractional_part_rounding_thresholds(int index) -> uint32_t { // For checking rounding thresholds. // The kth entry is chosen to be the smallest integer such that the // upper 32-bits of 10^(k+1) times it is strictly bigger than 5 * 10^k. // It is equal to ceil(2^31 + 2^32/10^(k + 1)). // These are stored in a string literal because we cannot have static arrays // in constexpr functions and non-static ones are poorly optimized. return U"\x9999999a\x828f5c29\x80418938\x80068db9\x8000a7c6\x800010c7" U"\x800001ae\x8000002b"[index]; } template FMT_CONSTEXPR20 auto format_float(Float value, int precision, float_specs specs, buffer& buf) -> int { // float is passed as double to reduce the number of instantiations. static_assert(!std::is_same::value, ""); FMT_ASSERT(value >= 0, "value is negative"); auto converted_value = convert_float(value); const bool fixed = specs.format == float_format::fixed; if (value <= 0) { // <= instead of == to silence a warning. if (precision <= 0 || !fixed) { buf.push_back('0'); return 0; } buf.try_resize(to_unsigned(precision)); fill_n(buf.data(), precision, '0'); return -precision; } int exp = 0; bool use_dragon = true; unsigned dragon_flags = 0; if (!is_fast_float() || is_constant_evaluated()) { const auto inv_log2_10 = 0.3010299956639812; // 1 / log2(10) using info = dragonbox::float_info; const auto f = basic_fp(converted_value); // Compute exp, an approximate power of 10, such that // 10^(exp - 1) <= value < 10^exp or 10^exp <= value < 10^(exp + 1). 
// This is based on log10(value) == log2(value) / log2(10) and approximation // of log2(value) by e + num_fraction_bits idea from double-conversion. auto e = (f.e + count_digits<1>(f.f) - 1) * inv_log2_10 - 1e-10; exp = static_cast(e); if (e > exp) ++exp; // Compute ceil. dragon_flags = dragon::fixup; } else if (precision < 0) { // Use Dragonbox for the shortest format. if (specs.binary32) { auto dec = dragonbox::to_decimal(static_cast(value)); write(appender(buf), dec.significand); return dec.exponent; } auto dec = dragonbox::to_decimal(static_cast(value)); write(appender(buf), dec.significand); return dec.exponent; } else { // Extract significand bits and exponent bits. using info = dragonbox::float_info; auto br = bit_cast(static_cast(value)); const uint64_t significand_mask = (static_cast(1) << num_significand_bits()) - 1; uint64_t significand = (br & significand_mask); int exponent = static_cast((br & exponent_mask()) >> num_significand_bits()); if (exponent != 0) { // Check if normal. exponent -= exponent_bias() + num_significand_bits(); significand |= (static_cast(1) << num_significand_bits()); significand <<= 1; } else { // Normalize subnormal inputs. FMT_ASSERT(significand != 0, "zeros should not appear here"); int shift = countl_zero(significand); FMT_ASSERT(shift >= num_bits() - num_significand_bits(), ""); shift -= (num_bits() - num_significand_bits() - 2); exponent = (std::numeric_limits::min_exponent - num_significand_bits()) - shift; significand <<= shift; } // Compute the first several nonzero decimal significand digits. // We call the number we get the first segment. 
const int k = info::kappa - dragonbox::floor_log10_pow2(exponent); exp = -k; const int beta = exponent + dragonbox::floor_log2_pow10(k); uint64_t first_segment; bool has_more_segments; int digits_in_the_first_segment; { const auto r = dragonbox::umul192_upper128( significand << beta, dragonbox::get_cached_power(k)); first_segment = r.high(); has_more_segments = r.low() != 0; // The first segment can have 18 ~ 19 digits. if (first_segment >= 1000000000000000000ULL) { digits_in_the_first_segment = 19; } else { // When it is of 18-digits, we align it to 19-digits by adding a bogus // zero at the end. digits_in_the_first_segment = 18; first_segment *= 10; } } // Compute the actual number of decimal digits to print. if (fixed) adjust_precision(precision, exp + digits_in_the_first_segment); // Use Dragon4 only when there might be not enough digits in the first // segment. if (digits_in_the_first_segment > precision) { use_dragon = false; if (precision <= 0) { exp += digits_in_the_first_segment; if (precision < 0) { // Nothing to do, since all we have are just leading zeros. buf.try_resize(0); } else { // We may need to round-up. buf.try_resize(1); if ((first_segment | static_cast(has_more_segments)) > 5000000000000000000ULL) { buf[0] = '1'; } else { buf[0] = '0'; } } } // precision <= 0 else { exp += digits_in_the_first_segment - precision; // When precision > 0, we divide the first segment into three // subsegments, each with 9, 9, and 0 ~ 1 digits so that each fits // in 32-bits which usually allows faster calculation than in // 64-bits. Since some compiler (e.g. MSVC) doesn't know how to optimize // division-by-constant for large 64-bit divisors, we do it here // manually. The magic number 7922816251426433760 below is equal to // ceil(2^(64+32) / 10^10). 
const uint32_t first_subsegment = static_cast( dragonbox::umul128_upper64(first_segment, 7922816251426433760ULL) >> 32); const uint64_t second_third_subsegments = first_segment - first_subsegment * 10000000000ULL; uint64_t prod; uint32_t digits; bool should_round_up; int number_of_digits_to_print = precision > 9 ? 9 : precision; // Print a 9-digits subsegment, either the first or the second. auto print_subsegment = [&](uint32_t subsegment, char* buffer) { int number_of_digits_printed = 0; // If we want to print an odd number of digits from the subsegment, if ((number_of_digits_to_print & 1) != 0) { // Convert to 64-bit fixed-point fractional form with 1-digit // integer part. The magic number 720575941 is a good enough // approximation of 2^(32 + 24) / 10^8; see // https://jk-jeon.github.io/posts/2022/12/fixed-precision-formatting/#fixed-length-case // for details. prod = ((subsegment * static_cast(720575941)) >> 24) + 1; digits = static_cast(prod >> 32); *buffer = static_cast('0' + digits); number_of_digits_printed++; } // If we want to print an even number of digits from the // first_subsegment, else { // Convert to 64-bit fixed-point fractional form with 2-digits // integer part. The magic number 450359963 is a good enough // approximation of 2^(32 + 20) / 10^7; see // https://jk-jeon.github.io/posts/2022/12/fixed-precision-formatting/#fixed-length-case // for details. prod = ((subsegment * static_cast(450359963)) >> 20) + 1; digits = static_cast(prod >> 32); copy2(buffer, digits2(digits)); number_of_digits_printed += 2; } // Print all digit pairs. while (number_of_digits_printed < number_of_digits_to_print) { prod = static_cast(prod) * static_cast(100); digits = static_cast(prod >> 32); copy2(buffer + number_of_digits_printed, digits2(digits)); number_of_digits_printed += 2; } }; // Print first subsegment. print_subsegment(first_subsegment, buf.data()); // Perform rounding if the first subsegment is the last subsegment to // print. 
if (precision <= 9) { // Rounding inside the subsegment. // We round-up if: // - either the fractional part is strictly larger than 1/2, or // - the fractional part is exactly 1/2 and the last digit is odd. // We rely on the following observations: // - If fractional_part >= threshold, then the fractional part is // strictly larger than 1/2. // - If the MSB of fractional_part is set, then the fractional part // must be at least 1/2. // - When the MSB of fractional_part is set, either // second_third_subsegments being nonzero or has_more_segments // being true means there are further digits not printed, so the // fractional part is strictly larger than 1/2. if (precision < 9) { uint32_t fractional_part = static_cast(prod); should_round_up = fractional_part >= fractional_part_rounding_thresholds( 8 - number_of_digits_to_print) || ((fractional_part >> 31) & ((digits & 1) | (second_third_subsegments != 0) | has_more_segments)) != 0; } // Rounding at the subsegment boundary. // In this case, the fractional part is at least 1/2 if and only if // second_third_subsegments >= 5000000000ULL, and is strictly larger // than 1/2 if we further have either second_third_subsegments > // 5000000000ULL or has_more_segments == true. else { should_round_up = second_third_subsegments > 5000000000ULL || (second_third_subsegments == 5000000000ULL && ((digits & 1) != 0 || has_more_segments)); } } // Otherwise, print the second subsegment. else { // Compilers are not aware of how to leverage the maximum value of // second_third_subsegments to find out a better magic number which // allows us to eliminate an additional shift. 1844674407370955162 = // ceil(2^64/10) < ceil(2^64*(10^9/(10^10 - 1))). 
const uint32_t second_subsegment = static_cast(dragonbox::umul128_upper64( second_third_subsegments, 1844674407370955162ULL)); const uint32_t third_subsegment = static_cast(second_third_subsegments) - second_subsegment * 10; number_of_digits_to_print = precision - 9; print_subsegment(second_subsegment, buf.data() + 9); // Rounding inside the subsegment. if (precision < 18) { // The condition third_subsegment != 0 implies that the segment was // of 19 digits, so in this case the third segment should be // consisting of a genuine digit from the input. uint32_t fractional_part = static_cast(prod); should_round_up = fractional_part >= fractional_part_rounding_thresholds( 8 - number_of_digits_to_print) || ((fractional_part >> 31) & ((digits & 1) | (third_subsegment != 0) | has_more_segments)) != 0; } // Rounding at the subsegment boundary. else { // In this case, the segment must be of 19 digits, thus // the third subsegment should be consisting of a genuine digit from // the input. should_round_up = third_subsegment > 5 || (third_subsegment == 5 && ((digits & 1) != 0 || has_more_segments)); } } // Round-up if necessary. if (should_round_up) { ++buf[precision - 1]; for (int i = precision - 1; i > 0 && buf[i] > '9'; --i) { buf[i] = '0'; ++buf[i - 1]; } if (buf[0] > '9') { buf[0] = '1'; if (fixed) buf[precision++] = '0'; else ++exp; } } buf.try_resize(to_unsigned(precision)); } } // if (digits_in_the_first_segment > precision) else { // Adjust the exponent for its use in Dragon4. exp += digits_in_the_first_segment - 1; } } if (use_dragon) { auto f = basic_fp(); bool is_predecessor_closer = specs.binary32 ? f.assign(static_cast(value)) : f.assign(converted_value); if (is_predecessor_closer) dragon_flags |= dragon::predecessor_closer; if (fixed) dragon_flags |= dragon::fixed; // Limit precision to the maximum possible number of significant digits in // an IEEE754 double because we don't need to generate zeros. 
const int max_double_digits = 767; if (precision > max_double_digits) precision = max_double_digits; format_dragon(f, dragon_flags, precision, buf, exp); } if (!fixed && !specs.showpoint) { // Remove trailing zeros. auto num_digits = buf.size(); while (num_digits > 0 && buf[num_digits - 1] == '0') { --num_digits; ++exp; } buf.try_resize(num_digits); } return exp; } template FMT_CONSTEXPR20 auto write_float(OutputIt out, T value, format_specs specs, locale_ref loc) -> OutputIt { sign_t sign = specs.sign; if (detail::signbit(value)) { // value < 0 is false for NaN so use signbit. sign = sign::minus; value = -value; } else if (sign == sign::minus) { sign = sign::none; } if (!detail::isfinite(value)) return write_nonfinite(out, detail::isnan(value), specs, sign); if (specs.align == align::numeric && sign) { auto it = reserve(out, 1); *it++ = detail::sign(sign); out = base_iterator(out, it); sign = sign::none; if (specs.width != 0) --specs.width; } memory_buffer buffer; if (specs.type == presentation_type::hexfloat) { if (sign) buffer.push_back(detail::sign(sign)); format_hexfloat(convert_float(value), specs, buffer); return write_bytes(out, {buffer.data(), buffer.size()}, specs); } int precision = specs.precision >= 0 || specs.type == presentation_type::none ? 
specs.precision : 6; if (specs.type == presentation_type::exp) { if (precision == max_value()) report_error("number is too big"); else ++precision; } else if (specs.type != presentation_type::fixed && precision == 0) { precision = 1; } float_specs fspecs = parse_float_type_spec(specs); fspecs.sign = sign; if (const_check(std::is_same())) fspecs.binary32 = true; int exp = format_float(convert_float(value), precision, fspecs, buffer); fspecs.precision = precision; auto f = big_decimal_fp{buffer.data(), static_cast(buffer.size()), exp}; return write_float(out, f, specs, fspecs, loc); } template ::value)> FMT_CONSTEXPR20 auto write(OutputIt out, T value, format_specs specs, locale_ref loc = {}) -> OutputIt { if (const_check(!is_supported_floating_point(value))) return out; return specs.localized && write_loc(out, value, specs, loc) ? out : write_float(out, value, specs, loc); } template ::value)> FMT_CONSTEXPR20 auto write(OutputIt out, T value) -> OutputIt { if (is_constant_evaluated()) return write(out, value, format_specs()); if (const_check(!is_supported_floating_point(value))) return out; auto sign = sign_t::none; if (detail::signbit(value)) { sign = sign::minus; value = -value; } constexpr auto specs = format_specs(); using floaty = conditional_t::value, double, T>; using floaty_uint = typename dragonbox::float_info::carrier_uint; floaty_uint mask = exponent_mask(); if ((bit_cast(value) & mask) == mask) return write_nonfinite(out, std::isnan(value), specs, sign); auto fspecs = float_specs(); fspecs.sign = sign; auto dec = dragonbox::to_decimal(static_cast(value)); return write_float(out, dec, specs, fspecs, {}); } template ::value && !is_fast_float::value)> inline auto write(OutputIt out, T value) -> OutputIt { return write(out, value, format_specs()); } template auto write(OutputIt out, monostate, format_specs = {}, locale_ref = {}) -> OutputIt { FMT_ASSERT(false, ""); return out; } template FMT_CONSTEXPR auto write(OutputIt out, basic_string_view value) -> 
OutputIt { return copy_noinline(value.begin(), value.end(), out); } template ::value)> constexpr auto write(OutputIt out, const T& value) -> OutputIt { return write(out, to_string_view(value)); } // FMT_ENABLE_IF() condition separated to workaround an MSVC bug. template < typename Char, typename OutputIt, typename T, bool check = std::is_enum::value && !std::is_same::value && mapped_type_constant>::value != type::custom_type, FMT_ENABLE_IF(check)> FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { return write(out, static_cast>(value)); } template ::value)> FMT_CONSTEXPR auto write(OutputIt out, T value, const format_specs& specs = {}, locale_ref = {}) -> OutputIt { return specs.type != presentation_type::none && specs.type != presentation_type::string ? write(out, value ? 1 : 0, specs, {}) : write_bytes(out, value ? "true" : "false", specs); } template FMT_CONSTEXPR auto write(OutputIt out, Char value) -> OutputIt { auto it = reserve(out, 1); *it++ = value; return base_iterator(out, it); } template FMT_CONSTEXPR20 auto write(OutputIt out, const Char* value) -> OutputIt { if (value) return write(out, basic_string_view(value)); report_error("string pointer is null"); return out; } template ::value)> auto write(OutputIt out, const T* value, const format_specs& specs = {}, locale_ref = {}) -> OutputIt { return write_ptr(out, bit_cast(value), &specs); } // A write overload that handles implicit conversions. 
template > FMT_CONSTEXPR auto write(OutputIt out, const T& value) -> enable_if_t< std::is_class::value && !has_to_string_view::value && !is_floating_point::value && !std::is_same::value && !std::is_same().map( value))>>::value, OutputIt> { return write(out, arg_mapper().map(value)); } template > FMT_CONSTEXPR auto write(OutputIt out, const T& value) -> enable_if_t::value == type::custom_type && !std::is_fundamental::value, OutputIt> { auto formatter = typename Context::template formatter_type(); auto parse_ctx = typename Context::parse_context_type({}); formatter.parse(parse_ctx); auto ctx = Context(out, {}, {}); return formatter.format(value, ctx); } // An argument visitor that formats the argument and writes it via the output // iterator. It's a class and not a generic lambda for compatibility with C++11. template struct default_arg_formatter { using iterator = basic_appender; using context = buffered_context; iterator out; basic_format_args args; locale_ref loc; template auto operator()(T value) -> iterator { return write(out, value); } auto operator()(typename basic_format_arg::handle h) -> iterator { basic_format_parse_context parse_ctx({}); context format_ctx(out, args, loc); h.format(parse_ctx, format_ctx); return format_ctx.out(); } }; template struct arg_formatter { using iterator = basic_appender; using context = buffered_context; iterator out; const format_specs& specs; locale_ref locale; template FMT_CONSTEXPR FMT_INLINE auto operator()(T value) -> iterator { return detail::write(out, value, specs, locale); } auto operator()(typename basic_format_arg::handle) -> iterator { // User-defined types are handled separately because they require access // to the parse context. 
// Tail of detail::arg_formatter::operator()(handle) from the previous span:
// user-defined types need access to the parse context, so they are not
// formatted here and the iterator is returned unchanged.
return out; } };

// Argument visitor that validates a dynamic width value ("{:{}}" syntax):
// any integer argument type is accepted if non-negative; every other
// argument type reports an error.
// NOTE(review): template parameter lists here and below were stripped by
// extraction (upstream fmt reads
// `template <typename T, FMT_ENABLE_IF(is_integer<T>::value)>`);
// restore from upstream before compiling.
struct width_checker {
  template ::value)>
  FMT_CONSTEXPR auto operator()(T value) -> unsigned long long {
    if (is_negative(value)) report_error("negative width");
    // NOTE(review): upstream casts to unsigned long long here.
    return static_cast(value);
  }
  template ::value)>
  FMT_CONSTEXPR auto operator()(T) -> unsigned long long {
    report_error("width is not integer");
    return 0;
  }
};

// Same validation for a dynamic precision argument ("{:.{}}" syntax).
struct precision_checker {
  template ::value)>
  FMT_CONSTEXPR auto operator()(T value) -> unsigned long long {
    if (is_negative(value)) report_error("negative precision");
    return static_cast(value);
  }
  template ::value)>
  FMT_CONSTEXPR auto operator()(T) -> unsigned long long {
    report_error("precision is not integer");
    return 0;
  }
};

// Runs Handler (width_checker or precision_checker) on `arg` and narrows the
// validated value to int, rejecting values that do not fit.
template
FMT_CONSTEXPR auto get_dynamic_spec(FormatArg arg) -> int {
  unsigned long long value = arg.visit(Handler());
  if (value > to_unsigned(max_value())) report_error("number is too big");
  return static_cast(value);
}

// Looks up argument `id` in the format context; reports an error for an
// out-of-range index or unknown name instead of returning an empty arg.
template
FMT_CONSTEXPR auto get_arg(Context& ctx, ID id) -> decltype(ctx.arg(id)) {
  auto arg = ctx.arg(id);
  if (!arg) report_error("argument not found");
  return arg;
}

// Resolves a dynamic width/precision reference (by index or by name) stored
// in `ref` into a concrete int; leaves `value` untouched when ref is empty.
template
FMT_CONSTEXPR void handle_dynamic_spec(int& value, arg_ref ref, Context& ctx) {
  switch (ref.kind) {
  case arg_id_kind::none:
    break;
  case arg_id_kind::index:
    value = detail::get_dynamic_spec(get_arg(ctx, ref.val.index));
    break;
  case arg_id_kind::name:
    value = detail::get_dynamic_spec(get_arg(ctx, ref.val.name));
    break;
  }
}

#if FMT_USE_USER_DEFINED_LITERALS
# if FMT_USE_NONTYPE_TEMPLATE_ARGS
// Named argument whose name is a compile-time string (C++20 non-type
// template parameter path), produced by the `_a` UDL; `name` has static
// storage duration, `value` is a borrowed reference.
template Str>
struct statically_named_arg : view {
  static constexpr auto name = Str.data;
  const T& value;
  statically_named_arg(const T& v) : value(v) {}
};

template Str>
struct is_named_arg> : std::true_type {};

template Str>
struct is_statically_named_arg> : std::true_type {};

template Str>
struct udl_arg {
  template auto operator=(T&& value) const {
    return statically_named_arg(std::forward(value));
  }
};
# else
// Fallback UDL argument: keeps a pointer to the name string and binds the
// value on assignment ("name"_a = value).
template struct udl_arg {
  const Char* str;

  template auto operator=(T&& value) const -> named_arg {
    return {str,
std::forward(value)}; } }; # endif #endif // FMT_USE_USER_DEFINED_LITERALS template auto vformat(const Locale& loc, basic_string_view fmt, typename detail::vformat_args::type args) -> std::basic_string { auto buf = basic_memory_buffer(); detail::vformat_to(buf, fmt, args, detail::locale_ref(loc)); return {buf.data(), buf.size()}; } using format_func = void (*)(detail::buffer&, int, const char*); FMT_API void format_error_code(buffer& out, int error_code, string_view message) noexcept; using fmt::report_error; FMT_API void report_error(format_func func, int error_code, const char* message) noexcept; } // namespace detail FMT_BEGIN_EXPORT FMT_API auto vsystem_error(int error_code, string_view format_str, format_args args) -> std::system_error; /** * Constructs `std::system_error` with a message formatted with * `fmt::format(fmt, args...)`. * `error_code` is a system error code as given by `errno`. * * **Example**: * * // This throws std::system_error with the description * // cannot open file 'madeup': No such file or directory * // or similar (system message may vary). * const char* filename = "madeup"; * std::FILE* file = std::fopen(filename, "r"); * if (!file) * throw fmt::system_error(errno, "cannot open file '{}'", filename); */ template auto system_error(int error_code, format_string fmt, T&&... args) -> std::system_error { return vsystem_error(error_code, fmt, fmt::make_format_args(args...)); } /** * Formats an error message for an error returned by an operating system or a * language runtime, for example a file opening error, and writes it to `out`. * The format is the same as the one used by `std::system_error(ec, message)` * where `ec` is `std::error_code(error_code, std::generic_category())`. * It is implementation-defined but normally looks like: * * : * * where `` is the passed message and `` is the system * message corresponding to the error code. * `error_code` is a system error code as given by `errno`. 
*/
FMT_API void format_system_error(detail::buffer& out, int error_code,
                                 const char* message) noexcept;

// Reports a system error without throwing an exception.
// Can be used to report errors from destructors.
FMT_API void report_system_error(int error_code, const char* message) noexcept;

/// A fast integer formatter: formats on construction into an internal
/// stack buffer, writing digits back to front; no heap allocation.
/// NOTE(review): template parameter lists and the arguments of
/// std::numeric_limits / the uint helpers below were stripped by extraction
/// (upstream: `std::numeric_limits<unsigned long long>::digits10`,
/// `detail::uint32_or_64_or_128_t<Int>`); restore from upstream fmt.
class format_int {
 private:
  // Buffer should be large enough to hold all digits (digits10 + 1),
  // a sign and a null character.
  enum { buffer_size = std::numeric_limits::digits10 + 3 };
  mutable char buffer_[buffer_size];  // mutable so c_str() can null-terminate
  char* str_;  // points at the first written character inside buffer_

  // Writes `value` in decimal into the tail of buffer_ and returns a pointer
  // to the first character written.
  template FMT_CONSTEXPR20 auto format_unsigned(UInt value) -> char* {
    auto n = static_cast>(value);
    return detail::format_decimal(buffer_, n, buffer_size - 1).begin;
  }

  // Handles the sign, then delegates to format_unsigned. Negation is done on
  // the unsigned value (0 - abs_value) so the minimum signed value does not
  // overflow.
  template FMT_CONSTEXPR20 auto format_signed(Int value) -> char* {
    auto abs_value = static_cast>(value);
    bool negative = value < 0;
    if (negative) abs_value = 0 - abs_value;
    auto begin = format_unsigned(abs_value);
    if (negative) *--begin = '-';
    return begin;
  }

 public:
  explicit FMT_CONSTEXPR20 format_int(int value) : str_(format_signed(value)) {}
  explicit FMT_CONSTEXPR20 format_int(long value) : str_(format_signed(value)) {}
  explicit FMT_CONSTEXPR20 format_int(long long value) : str_(format_signed(value)) {}
  explicit FMT_CONSTEXPR20 format_int(unsigned value) : str_(format_unsigned(value)) {}
  explicit FMT_CONSTEXPR20 format_int(unsigned long value) : str_(format_unsigned(value)) {}
  explicit FMT_CONSTEXPR20 format_int(unsigned long long value) : str_(format_unsigned(value)) {}

  /// Returns the number of characters written to the output buffer.
  FMT_CONSTEXPR20 auto size() const -> size_t {
    return detail::to_unsigned(buffer_ - str_ + buffer_size - 1);
  }

  /// Returns a pointer to the output buffer content. No terminating null
  /// character is appended.
  FMT_CONSTEXPR20 auto data() const -> const char* { return str_; }

  /// Returns a pointer to the output buffer content with terminating null
  /// character appended.
FMT_CONSTEXPR20 auto c_str() const -> const char* {
  // Null-terminate lazily; the terminator slot was reserved in buffer_size.
  buffer_[buffer_size - 1] = '\0';
  return str_;
}

/// Returns the content of the output buffer as an `std::string`.
auto str() const -> std::string { return std::string(str_, size()); }
};

// Formatter for any type with an ADL-found format_as(): formats the value
// format_as returns, reusing that value's formatter.
// NOTE(review): template heads and specialization arguments below were
// stripped by extraction; upstream reads
// `formatter<T, Char, enable_if_t<detail::has_format_as<T>::value>>`.
template
struct formatter::value>> : formatter, Char> {
  template
  auto format(const T& value, FormatContext& ctx) const -> decltype(ctx.out()) {
    auto&& val = format_as(value);  // Make an lvalue reference for format.
    return formatter, Char>::format(val, ctx);
  }
};

// Defines a formatter for Type that delegates to the formatter of Base
// (e.g. short is formatted exactly like int).
#define FMT_FORMAT_AS(Type, Base) \
  template \
  struct formatter : formatter { \
    template \
    auto format(Type value, FormatContext& ctx) const -> decltype(ctx.out()) { \
      return formatter::format(value, ctx); \
    } \
  }

FMT_FORMAT_AS(signed char, int);
FMT_FORMAT_AS(unsigned char, unsigned);
FMT_FORMAT_AS(short, int);
FMT_FORMAT_AS(unsigned short, unsigned);
FMT_FORMAT_AS(long, detail::long_type);
FMT_FORMAT_AS(unsigned long, detail::ulong_type);
FMT_FORMAT_AS(Char*, const Char*);
FMT_FORMAT_AS(std::nullptr_t, const void*);
FMT_FORMAT_AS(detail::std_string_view, basic_string_view);
FMT_FORMAT_AS(void*, const void*);

template
class formatter, Char> : public formatter, Char> {};

template struct formatter : formatter, Char> {};

/**
 * Converts `p` to `const void*` for pointer formatting.
 *
 * **Example**:
 *
 *     auto s = fmt::format("{}", fmt::ptr(p));
 */
template auto ptr(T p) -> const void* {
  static_assert(std::is_pointer::value, "");
  return detail::bit_cast(p);
}

/**
 * Converts `e` to the underlying type.
* * **Example**: * * enum class color { red, green, blue }; * auto s = fmt::format("{}", fmt::underlying(color::red)); */ template constexpr auto underlying(Enum e) noexcept -> underlying_t { return static_cast>(e); } namespace enums { template ::value)> constexpr auto format_as(Enum e) noexcept -> underlying_t { return static_cast>(e); } } // namespace enums class bytes { private: string_view data_; friend struct formatter; public: explicit bytes(string_view data) : data_(data) {} }; template <> struct formatter { private: detail::dynamic_format_specs<> specs_; public: template FMT_CONSTEXPR auto parse(ParseContext& ctx) -> const char* { return parse_format_specs(ctx.begin(), ctx.end(), specs_, ctx, detail::type::string_type); } template auto format(bytes b, FormatContext& ctx) const -> decltype(ctx.out()) { auto specs = specs_; detail::handle_dynamic_spec(specs.width, specs.width_ref, ctx); detail::handle_dynamic_spec( specs.precision, specs.precision_ref, ctx); return detail::write_bytes(ctx.out(), b.data_, specs); } }; // group_digits_view is not derived from view because it copies the argument. template struct group_digits_view { T value; }; /** * Returns a view that formats an integer value using ',' as a * locale-independent thousands separator. 
* * **Example**: * * fmt::print("{}", fmt::group_digits(12345)); * // Output: "12,345" */ template auto group_digits(T value) -> group_digits_view { return {value}; } template struct formatter> : formatter { private: detail::dynamic_format_specs<> specs_; public: template FMT_CONSTEXPR auto parse(ParseContext& ctx) -> const char* { return parse_format_specs(ctx.begin(), ctx.end(), specs_, ctx, detail::type::int_type); } template auto format(group_digits_view t, FormatContext& ctx) const -> decltype(ctx.out()) { auto specs = specs_; detail::handle_dynamic_spec(specs.width, specs.width_ref, ctx); detail::handle_dynamic_spec( specs.precision, specs.precision_ref, ctx); auto arg = detail::make_write_int_arg(t.value, specs.sign); return detail::write_int( ctx.out(), static_cast>(arg.abs_value), arg.prefix, specs, detail::digit_grouping("\3", ",")); } }; template struct nested_view { const formatter* fmt; const T* value; }; template struct formatter, Char> { template FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { return ctx.begin(); } template auto format(nested_view view, FormatContext& ctx) const -> decltype(ctx.out()) { return view.fmt->format(*view.value, ctx); } }; template struct nested_formatter { private: int width_; detail::fill_t fill_; align_t align_ : 4; formatter formatter_; public: constexpr nested_formatter() : width_(0), align_(align_t::none) {} FMT_CONSTEXPR auto parse(basic_format_parse_context& ctx) -> decltype(ctx.begin()) { auto specs = detail::dynamic_format_specs(); auto it = parse_format_specs(ctx.begin(), ctx.end(), specs, ctx, detail::type::none_type); width_ = specs.width; fill_ = specs.fill; align_ = specs.align; ctx.advance_to(it); return formatter_.parse(ctx); } template auto write_padded(FormatContext& ctx, F write) const -> decltype(ctx.out()) { if (width_ == 0) return write(ctx.out()); auto buf = basic_memory_buffer(); write(basic_appender(buf)); auto specs = format_specs(); specs.width = width_; specs.fill = 
fill_; specs.align = align_; return detail::write( ctx.out(), basic_string_view(buf.data(), buf.size()), specs); } auto nested(const T& value) const -> nested_view { return nested_view{&formatter_, &value}; } }; /** * Converts `value` to `std::string` using the default format for type `T`. * * **Example**: * * std::string answer = fmt::to_string(42); */ template ::value && !detail::has_format_as::value)> inline auto to_string(const T& value) -> std::string { auto buffer = memory_buffer(); detail::write(appender(buffer), value); return {buffer.data(), buffer.size()}; } template ::value)> FMT_NODISCARD inline auto to_string(T value) -> std::string { // The buffer should be large enough to store the number including the sign // or "false" for bool. constexpr int max_size = detail::digits10() + 2; char buffer[max_size > 5 ? static_cast(max_size) : 5]; char* begin = buffer; return std::string(begin, detail::write(begin, value)); } template FMT_NODISCARD auto to_string(const basic_memory_buffer& buf) -> std::basic_string { auto size = buf.size(); detail::assume(size < std::basic_string().max_size()); return std::basic_string(buf.data(), size); } template ::value && detail::has_format_as::value)> inline auto to_string(const T& value) -> std::string { return to_string(format_as(value)); } FMT_END_EXPORT namespace detail { template void vformat_to(buffer& buf, basic_string_view fmt, typename vformat_args::type args, locale_ref loc) { auto out = basic_appender(buf); if (fmt.size() == 2 && equal2(fmt.data(), "{}")) { auto arg = args.get(0); if (!arg) report_error("argument not found"); arg.visit(default_arg_formatter{out, args, loc}); return; } struct format_handler { basic_format_parse_context parse_context; buffered_context context; format_handler(basic_appender p_out, basic_string_view str, basic_format_args> p_args, locale_ref p_loc) : parse_context(str), context(p_out, p_args, p_loc) {} void on_text(const Char* begin, const Char* end) { auto text = 
basic_string_view(begin, to_unsigned(end - begin)); context.advance_to(write(context.out(), text)); } FMT_CONSTEXPR auto on_arg_id() -> int { return parse_context.next_arg_id(); } FMT_CONSTEXPR auto on_arg_id(int id) -> int { parse_context.check_arg_id(id); return id; } FMT_CONSTEXPR auto on_arg_id(basic_string_view id) -> int { parse_context.check_arg_id(id); int arg_id = context.arg_id(id); if (arg_id < 0) report_error("argument not found"); return arg_id; } FMT_INLINE void on_replacement_field(int id, const Char*) { auto arg = get_arg(context, id); context.advance_to(arg.visit(default_arg_formatter{ context.out(), context.args(), context.locale()})); } auto on_format_specs(int id, const Char* begin, const Char* end) -> const Char* { auto arg = get_arg(context, id); // Not using a visitor for custom types gives better codegen. if (arg.format_custom(begin, parse_context, context)) return parse_context.begin(); auto specs = detail::dynamic_format_specs(); begin = parse_format_specs(begin, end, specs, parse_context, arg.type()); detail::handle_dynamic_spec( specs.width, specs.width_ref, context); detail::handle_dynamic_spec( specs.precision, specs.precision_ref, context); if (begin == end || *begin != '}') report_error("missing '}' in format string"); context.advance_to(arg.visit( arg_formatter{context.out(), specs, context.locale()})); return begin; } FMT_NORETURN void on_error(const char* message) { report_error(message); } }; detail::parse_format_string(fmt, format_handler(out, fmt, args, loc)); } FMT_BEGIN_EXPORT #ifndef FMT_HEADER_ONLY extern template FMT_API void vformat_to(buffer&, string_view, typename vformat_args<>::type, locale_ref); extern template FMT_API auto thousands_sep_impl(locale_ref) -> thousands_sep_result; extern template FMT_API auto thousands_sep_impl(locale_ref) -> thousands_sep_result; extern template FMT_API auto decimal_point_impl(locale_ref) -> char; extern template FMT_API auto decimal_point_impl(locale_ref) -> wchar_t; #endif // 
FMT_HEADER_ONLY FMT_END_EXPORT template template FMT_CONSTEXPR FMT_INLINE auto native_formatter::format( const T& val, FormatContext& ctx) const -> decltype(ctx.out()) { if (specs_.width_ref.kind == arg_id_kind::none && specs_.precision_ref.kind == arg_id_kind::none) { return write(ctx.out(), val, specs_, ctx.locale()); } auto specs = specs_; handle_dynamic_spec(specs.width, specs.width_ref, ctx); handle_dynamic_spec(specs.precision, specs.precision_ref, ctx); return write(ctx.out(), val, specs, ctx.locale()); } } // namespace detail FMT_BEGIN_EXPORT template struct formatter : detail::native_formatter {}; #if FMT_USE_USER_DEFINED_LITERALS inline namespace literals { /** * User-defined literal equivalent of `fmt::arg`. * * **Example**: * * using namespace fmt::literals; * fmt::print("The answer is {answer}.", "answer"_a=42); */ # if FMT_USE_NONTYPE_TEMPLATE_ARGS template constexpr auto operator""_a() { using char_t = remove_cvref_t; return detail::udl_arg(); } # else constexpr auto operator""_a(const char* s, size_t) -> detail::udl_arg { return {s}; } # endif } // namespace literals #endif // FMT_USE_USER_DEFINED_LITERALS FMT_API auto vformat(string_view fmt, format_args args) -> std::string; /** * Formats `args` according to specifications in `fmt` and returns the result * as a string. * * **Example**: * * #include * std::string message = fmt::format("The answer is {}.", 42); */ template FMT_NODISCARD FMT_INLINE auto format(format_string fmt, T&&... args) -> std::string { return vformat(fmt, fmt::make_format_args(args...)); } template ::value)> inline auto vformat(const Locale& loc, string_view fmt, format_args args) -> std::string { return detail::vformat(loc, fmt, args); } template ::value)> inline auto format(const Locale& loc, format_string fmt, T&&... 
args) -> std::string { return fmt::vformat(loc, string_view(fmt), fmt::make_format_args(args...)); } template ::value&& detail::is_locale::value)> auto vformat_to(OutputIt out, const Locale& loc, string_view fmt, format_args args) -> OutputIt { using detail::get_buffer; auto&& buf = get_buffer(out); detail::vformat_to(buf, fmt, args, detail::locale_ref(loc)); return detail::get_iterator(buf, out); } template ::value&& detail::is_locale::value)> FMT_INLINE auto format_to(OutputIt out, const Locale& loc, format_string fmt, T&&... args) -> OutputIt { return vformat_to(out, loc, fmt, fmt::make_format_args(args...)); } template ::value)> FMT_NODISCARD FMT_INLINE auto formatted_size(const Locale& loc, format_string fmt, T&&... args) -> size_t { auto buf = detail::counting_buffer<>(); detail::vformat_to(buf, fmt, fmt::make_format_args(args...), detail::locale_ref(loc)); return buf.count(); } FMT_END_EXPORT FMT_END_NAMESPACE #ifdef FMT_HEADER_ONLY # define FMT_FUNC inline # include "format-inl.h" #else # define FMT_FUNC #endif // Restore _LIBCPP_REMOVE_TRANSITIVE_INCLUDES. #ifdef FMT_REMOVE_TRANSITIVE_INCLUDES # undef _LIBCPP_REMOVE_TRANSITIVE_INCLUDES #endif #endif // FMT_FORMAT_H_ ================================================ FILE: vendor/format.cc ================================================ // Formatting library for C++ // // Copyright (c) 2012 - 2016, Victor Zverovich // All rights reserved. // // For the license information refer to format.h. #include "fmt/format-inl.h" FMT_BEGIN_NAMESPACE namespace detail { template FMT_API auto dragonbox::to_decimal(float x) noexcept -> dragonbox::decimal_fp; template FMT_API auto dragonbox::to_decimal(double x) noexcept -> dragonbox::decimal_fp; #ifndef FMT_STATIC_THOUSANDS_SEPARATOR template FMT_API locale_ref::locale_ref(const std::locale& loc); template FMT_API auto locale_ref::get() const -> std::locale; #endif // Explicit instantiations for char. 
template FMT_API auto thousands_sep_impl(locale_ref) -> thousands_sep_result; template FMT_API auto decimal_point_impl(locale_ref) -> char; template FMT_API void buffer::append(const char*, const char*); template FMT_API void vformat_to(buffer&, string_view, typename vformat_args<>::type, locale_ref); // Explicit instantiations for wchar_t. template FMT_API auto thousands_sep_impl(locale_ref) -> thousands_sep_result; template FMT_API auto decimal_point_impl(locale_ref) -> wchar_t; template FMT_API void buffer::append(const wchar_t*, const wchar_t*); } // namespace detail FMT_END_NAMESPACE ================================================ FILE: vendor/json.hpp ================================================ // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT /****************************************************************************\ * Note on documentation: The source files contain links to the online * * documentation of the public API at https://json.nlohmann.me. This URL * * contains the most recent documentation and should also be applicable to * * previous versions; documentation for deprecated functions is not * * removed, but marked deprecated. See "Generate documentation" section in * * file docs/README.md. 
* \****************************************************************************/ #ifndef INCLUDE_NLOHMANN_JSON_HPP_ #define INCLUDE_NLOHMANN_JSON_HPP_ #include // all_of, find, for_each #include // nullptr_t, ptrdiff_t, size_t #include // hash, less #include // initializer_list #ifndef JSON_NO_IO #include // istream, ostream #endif // JSON_NO_IO #include // random_access_iterator_tag #include // unique_ptr #include // string, stoi, to_string #include // declval, forward, move, pair, swap #include // vector // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT // This file contains all macro definitions affecting or depending on the ABI #ifndef JSON_SKIP_LIBRARY_VERSION_CHECK #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH) #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3 #warning "Already included a different version of the library!" 
#endif #endif #endif #define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum) #define NLOHMANN_JSON_VERSION_MINOR 11 // NOLINT(modernize-macro-to-enum) #define NLOHMANN_JSON_VERSION_PATCH 3 // NOLINT(modernize-macro-to-enum) #ifndef JSON_DIAGNOSTICS #define JSON_DIAGNOSTICS 0 #endif #ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0 #endif #if JSON_DIAGNOSTICS #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag #else #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS #endif #if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp #else #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON #endif #ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0 #endif // Construct the namespace ABI tags component #define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b #define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \ NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) #define NLOHMANN_JSON_ABI_TAGS \ NLOHMANN_JSON_ABI_TAGS_CONCAT( \ NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \ NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON) // Construct the namespace version component #define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \ _v ## major ## _ ## minor ## _ ## patch #define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \ NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) #if NLOHMANN_JSON_NAMESPACE_NO_VERSION #define NLOHMANN_JSON_NAMESPACE_VERSION #else #define NLOHMANN_JSON_NAMESPACE_VERSION \ NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \ NLOHMANN_JSON_VERSION_MINOR, \ NLOHMANN_JSON_VERSION_PATCH) #endif // Combine namespace components #define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b #define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \ NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) #ifndef NLOHMANN_JSON_NAMESPACE #define NLOHMANN_JSON_NAMESPACE \ 
nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \ NLOHMANN_JSON_ABI_TAGS, \ NLOHMANN_JSON_NAMESPACE_VERSION) #endif #ifndef NLOHMANN_JSON_NAMESPACE_BEGIN #define NLOHMANN_JSON_NAMESPACE_BEGIN \ namespace nlohmann \ { \ inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \ NLOHMANN_JSON_ABI_TAGS, \ NLOHMANN_JSON_NAMESPACE_VERSION) \ { #endif #ifndef NLOHMANN_JSON_NAMESPACE_END #define NLOHMANN_JSON_NAMESPACE_END \ } /* namespace (inline namespace) NOLINT(readability/namespace) */ \ } // namespace nlohmann #endif // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // transform #include // array #include // forward_list #include // inserter, front_inserter, end #include // map #include // string #include // tuple, make_tuple #include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible #include // unordered_map #include // pair, declval #include // valarray // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // nullptr_t #include // exception #if JSON_DIAGNOSTICS #include // accumulate #endif #include // runtime_error #include // to_string #include // vector // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // array #include // size_t #include // uint8_t #include // string // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| 
https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // declval, pair // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT // #include NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { template struct make_void { using type = void; }; template using void_t = typename make_void::type; } // namespace detail NLOHMANN_JSON_NAMESPACE_END NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { // https://en.cppreference.com/w/cpp/experimental/is_detected struct nonesuch { nonesuch() = delete; ~nonesuch() = delete; nonesuch(nonesuch const&) = delete; nonesuch(nonesuch const&&) = delete; void operator=(nonesuch const&) = delete; void operator=(nonesuch&&) = delete; }; template class Op, class... Args> struct detector { using value_t = std::false_type; using type = Default; }; template class Op, class... Args> struct detector>, Op, Args...> { using value_t = std::true_type; using type = Op; }; template class Op, class... Args> using is_detected = typename detector::value_t; template class Op, class... Args> struct is_detected_lazy : is_detected { }; template class Op, class... Args> using detected_t = typename detector::type; template class Op, class... Args> using detected_or = detector; template class Op, class... Args> using detected_or_t = typename detected_or::type; template class Op, class... Args> using is_detected_exact = std::is_same>; template class Op, class... 
Args> using is_detected_convertible = std::is_convertible, To>; } // namespace detail NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-FileCopyrightText: 2016-2021 Evan Nemerson // SPDX-License-Identifier: MIT /* Hedley - https://nemequ.github.io/hedley * Created by Evan Nemerson */ #if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 15) #if defined(JSON_HEDLEY_VERSION) #undef JSON_HEDLEY_VERSION #endif #define JSON_HEDLEY_VERSION 15 #if defined(JSON_HEDLEY_STRINGIFY_EX) #undef JSON_HEDLEY_STRINGIFY_EX #endif #define JSON_HEDLEY_STRINGIFY_EX(x) #x #if defined(JSON_HEDLEY_STRINGIFY) #undef JSON_HEDLEY_STRINGIFY #endif #define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x) #if defined(JSON_HEDLEY_CONCAT_EX) #undef JSON_HEDLEY_CONCAT_EX #endif #define JSON_HEDLEY_CONCAT_EX(a,b) a##b #if defined(JSON_HEDLEY_CONCAT) #undef JSON_HEDLEY_CONCAT #endif #define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b) #if defined(JSON_HEDLEY_CONCAT3_EX) #undef JSON_HEDLEY_CONCAT3_EX #endif #define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c #if defined(JSON_HEDLEY_CONCAT3) #undef JSON_HEDLEY_CONCAT3 #endif #define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c) #if defined(JSON_HEDLEY_VERSION_ENCODE) #undef JSON_HEDLEY_VERSION_ENCODE #endif #define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision)) #if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR) #undef JSON_HEDLEY_VERSION_DECODE_MAJOR #endif #define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000) #if defined(JSON_HEDLEY_VERSION_DECODE_MINOR) #undef JSON_HEDLEY_VERSION_DECODE_MINOR #endif #define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000) #if defined(JSON_HEDLEY_VERSION_DECODE_REVISION) #undef 
JSON_HEDLEY_VERSION_DECODE_REVISION #endif #define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000) #if defined(JSON_HEDLEY_GNUC_VERSION) #undef JSON_HEDLEY_GNUC_VERSION #endif #if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__) #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) #elif defined(__GNUC__) #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0) #endif #if defined(JSON_HEDLEY_GNUC_VERSION_CHECK) #undef JSON_HEDLEY_GNUC_VERSION_CHECK #endif #if defined(JSON_HEDLEY_GNUC_VERSION) #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_MSVC_VERSION) #undef JSON_HEDLEY_MSVC_VERSION #endif #if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000) && !defined(__ICL) #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100) #elif defined(_MSC_FULL_VER) && !defined(__ICL) #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10) #elif defined(_MSC_VER) && !defined(__ICL) #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0) #endif #if defined(JSON_HEDLEY_MSVC_VERSION_CHECK) #undef JSON_HEDLEY_MSVC_VERSION_CHECK #endif #if !defined(JSON_HEDLEY_MSVC_VERSION) #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0) #elif defined(_MSC_VER) && (_MSC_VER >= 1400) #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch))) #elif defined(_MSC_VER) && (_MSC_VER >= 1200) #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch))) 
#else #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor))) #endif #if defined(JSON_HEDLEY_INTEL_VERSION) #undef JSON_HEDLEY_INTEL_VERSION #endif #if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && !defined(__ICL) #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE) #elif defined(__INTEL_COMPILER) && !defined(__ICL) #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0) #endif #if defined(JSON_HEDLEY_INTEL_VERSION_CHECK) #undef JSON_HEDLEY_INTEL_VERSION_CHECK #endif #if defined(JSON_HEDLEY_INTEL_VERSION) #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_INTEL_CL_VERSION) #undef JSON_HEDLEY_INTEL_CL_VERSION #endif #if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && defined(__ICL) #define JSON_HEDLEY_INTEL_CL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER, __INTEL_COMPILER_UPDATE, 0) #endif #if defined(JSON_HEDLEY_INTEL_CL_VERSION_CHECK) #undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK #endif #if defined(JSON_HEDLEY_INTEL_CL_VERSION) #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_CL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_PGI_VERSION) #undef JSON_HEDLEY_PGI_VERSION #endif #if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__) #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__) #endif #if defined(JSON_HEDLEY_PGI_VERSION_CHECK) #undef JSON_HEDLEY_PGI_VERSION_CHECK #endif #if defined(JSON_HEDLEY_PGI_VERSION) #define 
JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_SUNPRO_VERSION) #undef JSON_HEDLEY_SUNPRO_VERSION #endif #if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000) #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10) #elif defined(__SUNPRO_C) #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf) #elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000) #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10) #elif defined(__SUNPRO_CC) #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf) #endif #if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK) #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK #endif #if defined(JSON_HEDLEY_SUNPRO_VERSION) #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) #undef JSON_HEDLEY_EMSCRIPTEN_VERSION #endif #if defined(__EMSCRIPTEN__) #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__) #endif #if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK) #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK #endif #if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= 
JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_ARM_VERSION) #undef JSON_HEDLEY_ARM_VERSION #endif #if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION) #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100) #elif defined(__CC_ARM) && defined(__ARMCC_VERSION) #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100) #endif #if defined(JSON_HEDLEY_ARM_VERSION_CHECK) #undef JSON_HEDLEY_ARM_VERSION_CHECK #endif #if defined(JSON_HEDLEY_ARM_VERSION) #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_IBM_VERSION) #undef JSON_HEDLEY_IBM_VERSION #endif #if defined(__ibmxl__) #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__) #elif defined(__xlC__) && defined(__xlC_ver__) #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff) #elif defined(__xlC__) #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0) #endif #if defined(JSON_HEDLEY_IBM_VERSION_CHECK) #undef JSON_HEDLEY_IBM_VERSION_CHECK #endif #if defined(JSON_HEDLEY_IBM_VERSION) #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_TI_VERSION) #undef JSON_HEDLEY_TI_VERSION #endif #if \ defined(__TI_COMPILER_VERSION__) && \ ( \ defined(__TMS470__) || defined(__TI_ARM__) || \ 
defined(__MSP430__) || \ defined(__TMS320C2000__) \ ) #if (__TI_COMPILER_VERSION__ >= 16000000) #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) #endif #endif #if defined(JSON_HEDLEY_TI_VERSION_CHECK) #undef JSON_HEDLEY_TI_VERSION_CHECK #endif #if defined(JSON_HEDLEY_TI_VERSION) #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_TI_CL2000_VERSION) #undef JSON_HEDLEY_TI_CL2000_VERSION #endif #if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__) #define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) #endif #if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK) #undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK #endif #if defined(JSON_HEDLEY_TI_CL2000_VERSION) #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_TI_CL430_VERSION) #undef JSON_HEDLEY_TI_CL430_VERSION #endif #if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__) #define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) #endif #if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK) #undef JSON_HEDLEY_TI_CL430_VERSION_CHECK #endif #if defined(JSON_HEDLEY_TI_CL430_VERSION) #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0) 
#endif #if defined(JSON_HEDLEY_TI_ARMCL_VERSION) #undef JSON_HEDLEY_TI_ARMCL_VERSION #endif #if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__)) #define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) #endif #if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK) #undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK #endif #if defined(JSON_HEDLEY_TI_ARMCL_VERSION) #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_TI_CL6X_VERSION) #undef JSON_HEDLEY_TI_CL6X_VERSION #endif #if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__) #define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) #endif #if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK) #undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK #endif #if defined(JSON_HEDLEY_TI_CL6X_VERSION) #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_TI_CL7X_VERSION) #undef JSON_HEDLEY_TI_CL7X_VERSION #endif #if defined(__TI_COMPILER_VERSION__) && defined(__C7000__) #define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) #endif #if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK) #undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK #endif #if defined(JSON_HEDLEY_TI_CL7X_VERSION) #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, 
minor, patch)) #else #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_TI_CLPRU_VERSION) #undef JSON_HEDLEY_TI_CLPRU_VERSION #endif #if defined(__TI_COMPILER_VERSION__) && defined(__PRU__) #define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) #endif #if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK) #undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK #endif #if defined(JSON_HEDLEY_TI_CLPRU_VERSION) #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_CRAY_VERSION) #undef JSON_HEDLEY_CRAY_VERSION #endif #if defined(_CRAYC) #if defined(_RELEASE_PATCHLEVEL) #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL) #else #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0) #endif #endif #if defined(JSON_HEDLEY_CRAY_VERSION_CHECK) #undef JSON_HEDLEY_CRAY_VERSION_CHECK #endif #if defined(JSON_HEDLEY_CRAY_VERSION) #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_IAR_VERSION) #undef JSON_HEDLEY_IAR_VERSION #endif #if defined(__IAR_SYSTEMS_ICC__) #if __VER__ > 1000 #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000)) #else #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0) #endif #endif #if defined(JSON_HEDLEY_IAR_VERSION_CHECK) #undef JSON_HEDLEY_IAR_VERSION_CHECK #endif #if defined(JSON_HEDLEY_IAR_VERSION) #define 
JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_TINYC_VERSION) #undef JSON_HEDLEY_TINYC_VERSION #endif #if defined(__TINYC__) #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100) #endif #if defined(JSON_HEDLEY_TINYC_VERSION_CHECK) #undef JSON_HEDLEY_TINYC_VERSION_CHECK #endif #if defined(JSON_HEDLEY_TINYC_VERSION) #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_DMC_VERSION) #undef JSON_HEDLEY_DMC_VERSION #endif #if defined(__DMC__) #define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf) #endif #if defined(JSON_HEDLEY_DMC_VERSION_CHECK) #undef JSON_HEDLEY_DMC_VERSION_CHECK #endif #if defined(JSON_HEDLEY_DMC_VERSION) #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_COMPCERT_VERSION) #undef JSON_HEDLEY_COMPCERT_VERSION #endif #if defined(__COMPCERT_VERSION__) #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100) #endif #if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK) #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK #endif #if defined(JSON_HEDLEY_COMPCERT_VERSION) #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0) #endif #if 
defined(JSON_HEDLEY_PELLES_VERSION) #undef JSON_HEDLEY_PELLES_VERSION #endif #if defined(__POCC__) #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0) #endif #if defined(JSON_HEDLEY_PELLES_VERSION_CHECK) #undef JSON_HEDLEY_PELLES_VERSION_CHECK #endif #if defined(JSON_HEDLEY_PELLES_VERSION) #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_MCST_LCC_VERSION) #undef JSON_HEDLEY_MCST_LCC_VERSION #endif #if defined(__LCC__) && defined(__LCC_MINOR__) #define JSON_HEDLEY_MCST_LCC_VERSION JSON_HEDLEY_VERSION_ENCODE(__LCC__ / 100, __LCC__ % 100, __LCC_MINOR__) #endif #if defined(JSON_HEDLEY_MCST_LCC_VERSION_CHECK) #undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK #endif #if defined(JSON_HEDLEY_MCST_LCC_VERSION) #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_MCST_LCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_GCC_VERSION) #undef JSON_HEDLEY_GCC_VERSION #endif #if \ defined(JSON_HEDLEY_GNUC_VERSION) && \ !defined(__clang__) && \ !defined(JSON_HEDLEY_INTEL_VERSION) && \ !defined(JSON_HEDLEY_PGI_VERSION) && \ !defined(JSON_HEDLEY_ARM_VERSION) && \ !defined(JSON_HEDLEY_CRAY_VERSION) && \ !defined(JSON_HEDLEY_TI_VERSION) && \ !defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \ !defined(JSON_HEDLEY_TI_CL430_VERSION) && \ !defined(JSON_HEDLEY_TI_CL2000_VERSION) && \ !defined(JSON_HEDLEY_TI_CL6X_VERSION) && \ !defined(JSON_HEDLEY_TI_CL7X_VERSION) && \ !defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \ !defined(__COMPCERT__) && \ !defined(JSON_HEDLEY_MCST_LCC_VERSION) #define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION #endif #if defined(JSON_HEDLEY_GCC_VERSION_CHECK) #undef JSON_HEDLEY_GCC_VERSION_CHECK #endif #if 
defined(JSON_HEDLEY_GCC_VERSION) #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) #else #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0) #endif #if defined(JSON_HEDLEY_HAS_ATTRIBUTE) #undef JSON_HEDLEY_HAS_ATTRIBUTE #endif #if \ defined(__has_attribute) && \ ( \ (!defined(JSON_HEDLEY_IAR_VERSION) || JSON_HEDLEY_IAR_VERSION_CHECK(8,5,9)) \ ) # define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute) #else # define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE) #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE #endif #if defined(__has_attribute) #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) #else #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE) #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE #endif #if defined(__has_attribute) #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) #else #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE) #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE #endif #if \ defined(__has_cpp_attribute) && \ defined(__cplusplus) && \ (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute) #else #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0) #endif #if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS) #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS #endif #if !defined(__cplusplus) || !defined(__has_cpp_attribute) #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) #elif \ !defined(JSON_HEDLEY_PGI_VERSION) && \ !defined(JSON_HEDLEY_IAR_VERSION) && \ (!defined(JSON_HEDLEY_SUNPRO_VERSION) || 
JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \ (!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0)) #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute) #else #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE) #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE #endif #if defined(__has_cpp_attribute) && defined(__cplusplus) #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) #else #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE) #undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE #endif #if defined(__has_cpp_attribute) && defined(__cplusplus) #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) #else #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_HAS_BUILTIN) #undef JSON_HEDLEY_HAS_BUILTIN #endif #if defined(__has_builtin) #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin) #else #define JSON_HEDLEY_HAS_BUILTIN(builtin) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN) #undef JSON_HEDLEY_GNUC_HAS_BUILTIN #endif #if defined(__has_builtin) #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) #else #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_BUILTIN) #undef JSON_HEDLEY_GCC_HAS_BUILTIN #endif #if defined(__has_builtin) #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) #else #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_HAS_FEATURE) #undef 
JSON_HEDLEY_HAS_FEATURE #endif #if defined(__has_feature) #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature) #else #define JSON_HEDLEY_HAS_FEATURE(feature) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_FEATURE) #undef JSON_HEDLEY_GNUC_HAS_FEATURE #endif #if defined(__has_feature) #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) #else #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_FEATURE) #undef JSON_HEDLEY_GCC_HAS_FEATURE #endif #if defined(__has_feature) #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) #else #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_HAS_EXTENSION) #undef JSON_HEDLEY_HAS_EXTENSION #endif #if defined(__has_extension) #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension) #else #define JSON_HEDLEY_HAS_EXTENSION(extension) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION) #undef JSON_HEDLEY_GNUC_HAS_EXTENSION #endif #if defined(__has_extension) #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) #else #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_EXTENSION) #undef JSON_HEDLEY_GCC_HAS_EXTENSION #endif #if defined(__has_extension) #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) #else #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE) #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE #endif #if defined(__has_declspec_attribute) #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute) #else #define 
JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE) #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE #endif #if defined(__has_declspec_attribute) #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) #else #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE) #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE #endif #if defined(__has_declspec_attribute) #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) #else #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_HAS_WARNING) #undef JSON_HEDLEY_HAS_WARNING #endif #if defined(__has_warning) #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning) #else #define JSON_HEDLEY_HAS_WARNING(warning) (0) #endif #if defined(JSON_HEDLEY_GNUC_HAS_WARNING) #undef JSON_HEDLEY_GNUC_HAS_WARNING #endif #if defined(__has_warning) #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) #else #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_GCC_HAS_WARNING) #undef JSON_HEDLEY_GCC_HAS_WARNING #endif #if defined(__has_warning) #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) #else #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) #endif #if \ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ defined(__clang__) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ 
JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \ (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR)) #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value) #elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) #define JSON_HEDLEY_PRAGMA(value) __pragma(value) #else #define JSON_HEDLEY_PRAGMA(value) #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH) #undef JSON_HEDLEY_DIAGNOSTIC_PUSH #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_POP) #undef JSON_HEDLEY_DIAGNOSTIC_POP #endif #if defined(__clang__) #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") #elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") #elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push)) #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop)) #elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push") #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop") #elif \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ 
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push") #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop") #elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") #else #define JSON_HEDLEY_DIAGNOSTIC_PUSH #define JSON_HEDLEY_DIAGNOSTIC_POP #endif /* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for HEDLEY INTERNAL USE ONLY. API subject to change without notice. */ #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_) #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ #endif #if defined(__cplusplus) # if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat") # if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions") # if JSON_HEDLEY_HAS_WARNING("-Wc++1z-extensions") # define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \ _Pragma("clang diagnostic ignored \"-Wc++1z-extensions\"") \ xpr \ JSON_HEDLEY_DIAGNOSTIC_POP # else # define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \ xpr \ JSON_HEDLEY_DIAGNOSTIC_POP # endif # else # define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ xpr \ JSON_HEDLEY_DIAGNOSTIC_POP # endif # endif #endif #if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x #endif #if defined(JSON_HEDLEY_CONST_CAST) #undef JSON_HEDLEY_CONST_CAST #endif #if defined(__cplusplus) # define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast(expr)) #elif \ 
JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) # define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \ ((T) (expr)); \ JSON_HEDLEY_DIAGNOSTIC_POP \ })) #else # define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr)) #endif #if defined(JSON_HEDLEY_REINTERPRET_CAST) #undef JSON_HEDLEY_REINTERPRET_CAST #endif #if defined(__cplusplus) #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast(expr)) #else #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr)) #endif #if defined(JSON_HEDLEY_STATIC_CAST) #undef JSON_HEDLEY_STATIC_CAST #endif #if defined(__cplusplus) #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast(expr)) #else #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr)) #endif #if defined(JSON_HEDLEY_CPP_CAST) #undef JSON_HEDLEY_CPP_CAST #endif #if defined(__cplusplus) # if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast") # define JSON_HEDLEY_CPP_CAST(T, expr) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \ ((T) (expr)) \ JSON_HEDLEY_DIAGNOSTIC_POP # elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0) # define JSON_HEDLEY_CPP_CAST(T, expr) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("diag_suppress=Pe137") \ JSON_HEDLEY_DIAGNOSTIC_POP # else # define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr)) # endif #else # define JSON_HEDLEY_CPP_CAST(T, expr) (expr) #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED) #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED #endif #if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations") #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") #elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)") #elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED 
__pragma(warning(disable:1478 1786)) #elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1216,1444,1445") #elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") #elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") #elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996)) #elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") #elif \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718") #elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)") #elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)") #elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED 
_Pragma("diag_suppress=Pe1444,Pe1215") #elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)") #else #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS) #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS #endif #if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"") #elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)") #elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:161)) #elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675") #elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") #elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068)) #elif \ JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") #elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") #elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161") #elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 161") #else #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES) 
#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES #endif #if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes") #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"") #elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") #elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)") #elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:1292)) #elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030)) #elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097,1098") #elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") #elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)") #elif \ JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173") #elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097") #elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") #else #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL) #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL #endif #if 
JSON_HEDLEY_HAS_WARNING("-Wcast-qual") #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"") #elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)") #elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") #else #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL #endif #if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION) #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION #endif #if JSON_HEDLEY_HAS_WARNING("-Wunused-function") #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("clang diagnostic ignored \"-Wunused-function\"") #elif JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("GCC diagnostic ignored \"-Wunused-function\"") #elif JSON_HEDLEY_MSVC_VERSION_CHECK(1,0,0) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION __pragma(warning(disable:4505)) #elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("diag_suppress 3142") #else #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION #endif #if defined(JSON_HEDLEY_DEPRECATED) #undef JSON_HEDLEY_DEPRECATED #endif #if defined(JSON_HEDLEY_DEPRECATED_FOR) #undef JSON_HEDLEY_DEPRECATED_FOR #endif #if \ JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since)) #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement)) #elif \ (JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \ 
JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since))) #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement))) #elif defined(__cplusplus) && (__cplusplus >= 201402L) #define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]]) #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]]) #elif \ JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__)) #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__)) #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ 
JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated) #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated) #elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated") #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated") #else #define JSON_HEDLEY_DEPRECATED(since) #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) #endif #if defined(JSON_HEDLEY_UNAVAILABLE) #undef JSON_HEDLEY_UNAVAILABLE #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since))) #else #define JSON_HEDLEY_UNAVAILABLE(available_since) #endif #if defined(JSON_HEDLEY_WARN_UNUSED_RESULT) #undef JSON_HEDLEY_WARN_UNUSED_RESULT #endif #if defined(JSON_HEDLEY_WARN_UNUSED_RESULT_MSG) #undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ 
(JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__)) #elif (JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L) #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]]) #elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) #elif defined(_Check_return_) /* SAL */ #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_ #else #define JSON_HEDLEY_WARN_UNUSED_RESULT #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) #endif #if defined(JSON_HEDLEY_SENTINEL) #undef JSON_HEDLEY_SENTINEL #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position))) #else #define JSON_HEDLEY_SENTINEL(position) #endif #if defined(JSON_HEDLEY_NO_RETURN) #undef JSON_HEDLEY_NO_RETURN #endif #if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) #define JSON_HEDLEY_NO_RETURN __noreturn #elif \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) #elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L #define JSON_HEDLEY_NO_RETURN _Noreturn #elif defined(__cplusplus) && 
(__cplusplus >= 201103L) #define JSON_HEDLEY_NO_RETURN JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]]) #elif \ JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) #elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) #define JSON_HEDLEY_NO_RETURN _Pragma("does_not_return") #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) #elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;") #elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) #define JSON_HEDLEY_NO_RETURN __attribute((noreturn)) #elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) #else #define JSON_HEDLEY_NO_RETURN #endif #if defined(JSON_HEDLEY_NO_ESCAPE) #undef JSON_HEDLEY_NO_ESCAPE #endif #if JSON_HEDLEY_HAS_ATTRIBUTE(noescape) #define JSON_HEDLEY_NO_ESCAPE __attribute__((__noescape__)) #else #define JSON_HEDLEY_NO_ESCAPE #endif #if 
defined(JSON_HEDLEY_UNREACHABLE) #undef JSON_HEDLEY_UNREACHABLE #endif #if defined(JSON_HEDLEY_UNREACHABLE_RETURN) #undef JSON_HEDLEY_UNREACHABLE_RETURN #endif #if defined(JSON_HEDLEY_ASSUME) #undef JSON_HEDLEY_ASSUME #endif #if \ JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_ASSUME(expr) __assume(expr) #elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume) #define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr) #elif \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) #if defined(__cplusplus) #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr) #else #define JSON_HEDLEY_ASSUME(expr) _nassert(expr) #endif #endif #if \ (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ JSON_HEDLEY_PGI_VERSION_CHECK(18,10,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5) || \ JSON_HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable() #elif defined(JSON_HEDLEY_ASSUME) #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) #endif #if !defined(JSON_HEDLEY_ASSUME) #if defined(JSON_HEDLEY_UNREACHABLE) #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, ((expr) ? 
1 : (JSON_HEDLEY_UNREACHABLE(), 1))) #else #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, expr) #endif #endif #if defined(JSON_HEDLEY_UNREACHABLE) #if \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (JSON_HEDLEY_STATIC_CAST(void, JSON_HEDLEY_ASSUME(0)), (value)) #else #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE() #endif #else #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (value) #endif #if !defined(JSON_HEDLEY_UNREACHABLE) #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) #endif JSON_HEDLEY_DIAGNOSTIC_PUSH #if JSON_HEDLEY_HAS_WARNING("-Wpedantic") #pragma clang diagnostic ignored "-Wpedantic" #endif #if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus) #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #endif #if JSON_HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0) #if defined(__clang__) #pragma clang diagnostic ignored "-Wvariadic-macros" #elif defined(JSON_HEDLEY_GCC_VERSION) #pragma GCC diagnostic ignored "-Wvariadic-macros" #endif #endif #if defined(JSON_HEDLEY_NON_NULL) #undef JSON_HEDLEY_NON_NULL #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__))) #else #define JSON_HEDLEY_NON_NULL(...) 
#endif JSON_HEDLEY_DIAGNOSTIC_POP #if defined(JSON_HEDLEY_PRINTF_FORMAT) #undef JSON_HEDLEY_PRINTF_FORMAT #endif #if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO) #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check))) #elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO) #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check))) #elif \ JSON_HEDLEY_HAS_ATTRIBUTE(format) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check))) #elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0) #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check)) #else #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) #endif #if defined(JSON_HEDLEY_CONSTEXPR) #undef JSON_HEDLEY_CONSTEXPR #endif #if defined(__cplusplus) #if 
__cplusplus >= 201103L #define JSON_HEDLEY_CONSTEXPR JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr) #endif #endif #if !defined(JSON_HEDLEY_CONSTEXPR) #define JSON_HEDLEY_CONSTEXPR #endif #if defined(JSON_HEDLEY_PREDICT) #undef JSON_HEDLEY_PREDICT #endif #if defined(JSON_HEDLEY_LIKELY) #undef JSON_HEDLEY_LIKELY #endif #if defined(JSON_HEDLEY_UNLIKELY) #undef JSON_HEDLEY_UNLIKELY #endif #if defined(JSON_HEDLEY_UNPREDICTABLE) #undef JSON_HEDLEY_UNPREDICTABLE #endif #if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable) #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr)) #endif #if \ (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(JSON_HEDLEY_PGI_VERSION)) || \ JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) # define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability)) # define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability)) # define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability)) # define JSON_HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 ) # define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 ) #elif \ (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ 
JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) # define JSON_HEDLEY_PREDICT(expr, expected, probability) \ (((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (JSON_HEDLEY_STATIC_CAST(void, expected), (expr))) # define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \ (__extension__ ({ \ double hedley_probability_ = (probability); \ ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \ })) # define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \ (__extension__ ({ \ double hedley_probability_ = (probability); \ ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \ })) # define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) # define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) #else # define JSON_HEDLEY_PREDICT(expr, expected, probability) (JSON_HEDLEY_STATIC_CAST(void, expected), (expr)) # define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr)) # define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr)) # define JSON_HEDLEY_LIKELY(expr) (!!(expr)) # define JSON_HEDLEY_UNLIKELY(expr) (!!(expr)) #endif #if !defined(JSON_HEDLEY_UNPREDICTABLE) #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5) #endif #if defined(JSON_HEDLEY_MALLOC) #undef JSON_HEDLEY_MALLOC #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && 
defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_MALLOC __attribute__((__malloc__)) #elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) #define JSON_HEDLEY_MALLOC _Pragma("returns_new_memory") #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_MALLOC __declspec(restrict) #else #define JSON_HEDLEY_MALLOC #endif #if defined(JSON_HEDLEY_PURE) #undef JSON_HEDLEY_PURE #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \ JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) # define JSON_HEDLEY_PURE __attribute__((__pure__)) #elif 
JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) # define JSON_HEDLEY_PURE _Pragma("does_not_write_global_data") #elif defined(__cplusplus) && \ ( \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \ ) # define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;") #else # define JSON_HEDLEY_PURE #endif #if defined(JSON_HEDLEY_CONST) #undef JSON_HEDLEY_CONST #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(const) || \ JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_CONST __attribute__((__const__)) #elif \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) #define JSON_HEDLEY_CONST _Pragma("no_side_effect") #else #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE #endif #if defined(JSON_HEDLEY_RESTRICT) #undef JSON_HEDLEY_RESTRICT #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus) #define JSON_HEDLEY_RESTRICT restrict #elif \ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ 
JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \ JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ defined(__clang__) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_RESTRICT __restrict #elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus) #define JSON_HEDLEY_RESTRICT _Restrict #else #define JSON_HEDLEY_RESTRICT #endif #if defined(JSON_HEDLEY_INLINE) #undef JSON_HEDLEY_INLINE #endif #if \ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ (defined(__cplusplus) && (__cplusplus >= 199711L)) #define JSON_HEDLEY_INLINE inline #elif \ defined(JSON_HEDLEY_GCC_VERSION) || \ JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0) #define JSON_HEDLEY_INLINE __inline__ #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_INLINE __inline #else #define JSON_HEDLEY_INLINE #endif #if defined(JSON_HEDLEY_ALWAYS_INLINE) #undef JSON_HEDLEY_ALWAYS_INLINE #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ 
JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) # define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) # define JSON_HEDLEY_ALWAYS_INLINE __forceinline #elif defined(__cplusplus) && \ ( \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \ ) # define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;") #elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) # define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced") #else # define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE #endif #if defined(JSON_HEDLEY_NEVER_INLINE) #undef JSON_HEDLEY_NEVER_INLINE #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ 
(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__)) #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) #elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0) #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline") #elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;") #elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never") #elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline)) #elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) #else #define JSON_HEDLEY_NEVER_INLINE #endif #if defined(JSON_HEDLEY_PRIVATE) #undef JSON_HEDLEY_PRIVATE #endif #if defined(JSON_HEDLEY_PUBLIC) #undef JSON_HEDLEY_PUBLIC #endif #if defined(JSON_HEDLEY_IMPORT) #undef JSON_HEDLEY_IMPORT #endif #if defined(_WIN32) || defined(__CYGWIN__) # define JSON_HEDLEY_PRIVATE # define JSON_HEDLEY_PUBLIC __declspec(dllexport) # define JSON_HEDLEY_IMPORT __declspec(dllimport) #else # if \ JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ 
JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ ( \ defined(__TI_EABI__) && \ ( \ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \ ) \ ) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) # define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden"))) # define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default"))) # else # define JSON_HEDLEY_PRIVATE # define JSON_HEDLEY_PUBLIC # endif # define JSON_HEDLEY_IMPORT extern #endif #if defined(JSON_HEDLEY_NO_THROW) #undef JSON_HEDLEY_NO_THROW #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__)) #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) #define JSON_HEDLEY_NO_THROW __declspec(nothrow) #else #define JSON_HEDLEY_NO_THROW #endif #if defined(JSON_HEDLEY_FALL_THROUGH) #undef JSON_HEDLEY_FALL_THROUGH #endif #if \ JSON_HEDLEY_HAS_ATTRIBUTE(fallthrough) || \ JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__)) #elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough) #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]]) #elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough) #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]]) #elif defined(__fallthrough) /* SAL */ #define JSON_HEDLEY_FALL_THROUGH __fallthrough #else #define JSON_HEDLEY_FALL_THROUGH #endif #if defined(JSON_HEDLEY_RETURNS_NON_NULL) #undef JSON_HEDLEY_RETURNS_NON_NULL #endif 
#if \ JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__)) #elif defined(_Ret_notnull_) /* SAL */ #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_ #else #define JSON_HEDLEY_RETURNS_NON_NULL #endif #if defined(JSON_HEDLEY_ARRAY_PARAM) #undef JSON_HEDLEY_ARRAY_PARAM #endif #if \ defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \ !defined(__STDC_NO_VLA__) && \ !defined(__cplusplus) && \ !defined(JSON_HEDLEY_PGI_VERSION) && \ !defined(JSON_HEDLEY_TINYC_VERSION) #define JSON_HEDLEY_ARRAY_PARAM(name) (name) #else #define JSON_HEDLEY_ARRAY_PARAM(name) #endif #if defined(JSON_HEDLEY_IS_CONSTANT) #undef JSON_HEDLEY_IS_CONSTANT #endif #if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR) #undef JSON_HEDLEY_REQUIRE_CONSTEXPR #endif /* JSON_HEDLEY_IS_CONSTEXPR_ is for HEDLEY INTERNAL USE ONLY. API subject to change without notice. */ #if defined(JSON_HEDLEY_IS_CONSTEXPR_) #undef JSON_HEDLEY_IS_CONSTEXPR_ #endif #if \ JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr) #endif #if !defined(__cplusplus) # if \ JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24) #if defined(__INTPTR_TYPE__) 
#define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*) #else #include #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*) #endif # elif \ ( \ defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \ !defined(JSON_HEDLEY_SUNPRO_VERSION) && \ !defined(JSON_HEDLEY_PGI_VERSION) && \ !defined(JSON_HEDLEY_IAR_VERSION)) || \ (JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \ JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0) #if defined(__INTPTR_TYPE__) #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0) #else #include #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) * 0) : (int*) 0), int*: 1, void*: 0) #endif # elif \ defined(JSON_HEDLEY_GCC_VERSION) || \ defined(JSON_HEDLEY_INTEL_VERSION) || \ defined(JSON_HEDLEY_TINYC_VERSION) || \ defined(JSON_HEDLEY_TI_ARMCL_VERSION) || \ JSON_HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \ defined(JSON_HEDLEY_TI_CL2000_VERSION) || \ defined(JSON_HEDLEY_TI_CL6X_VERSION) || \ defined(JSON_HEDLEY_TI_CL7X_VERSION) || \ defined(JSON_HEDLEY_TI_CLPRU_VERSION) || \ defined(__clang__) # define JSON_HEDLEY_IS_CONSTEXPR_(expr) ( \ sizeof(void) != \ sizeof(*( \ 1 ? \ ((void*) ((expr) * 0L) ) : \ ((struct { char v[sizeof(void) * 2]; } *) 1) \ ) \ ) \ ) # endif #endif #if defined(JSON_HEDLEY_IS_CONSTEXPR_) #if !defined(JSON_HEDLEY_IS_CONSTANT) #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY_IS_CONSTEXPR_(expr) #endif #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY_IS_CONSTEXPR_(expr) ? 
(expr) : (-1)) #else #if !defined(JSON_HEDLEY_IS_CONSTANT) #define JSON_HEDLEY_IS_CONSTANT(expr) (0) #endif #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr) #endif #if defined(JSON_HEDLEY_BEGIN_C_DECLS) #undef JSON_HEDLEY_BEGIN_C_DECLS #endif #if defined(JSON_HEDLEY_END_C_DECLS) #undef JSON_HEDLEY_END_C_DECLS #endif #if defined(JSON_HEDLEY_C_DECL) #undef JSON_HEDLEY_C_DECL #endif #if defined(__cplusplus) #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" { #define JSON_HEDLEY_END_C_DECLS } #define JSON_HEDLEY_C_DECL extern "C" #else #define JSON_HEDLEY_BEGIN_C_DECLS #define JSON_HEDLEY_END_C_DECLS #define JSON_HEDLEY_C_DECL #endif #if defined(JSON_HEDLEY_STATIC_ASSERT) #undef JSON_HEDLEY_STATIC_ASSERT #endif #if \ !defined(__cplusplus) && ( \ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \ (JSON_HEDLEY_HAS_FEATURE(c_static_assert) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ defined(_Static_assert) \ ) # define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message) #elif \ (defined(__cplusplus) && (__cplusplus >= 201103L)) || \ JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) # define JSON_HEDLEY_STATIC_ASSERT(expr, message) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message)) #else # define JSON_HEDLEY_STATIC_ASSERT(expr, message) #endif #if defined(JSON_HEDLEY_NULL) #undef JSON_HEDLEY_NULL #endif #if defined(__cplusplus) #if __cplusplus >= 201103L #define JSON_HEDLEY_NULL JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr) #elif defined(NULL) #define JSON_HEDLEY_NULL NULL #else #define JSON_HEDLEY_NULL JSON_HEDLEY_STATIC_CAST(void*, 0) #endif #elif defined(NULL) #define JSON_HEDLEY_NULL NULL #else #define JSON_HEDLEY_NULL ((void*) 0) #endif #if defined(JSON_HEDLEY_MESSAGE) #undef JSON_HEDLEY_MESSAGE #endif #if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") # define 
JSON_HEDLEY_MESSAGE(msg) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ JSON_HEDLEY_PRAGMA(message msg) \ JSON_HEDLEY_DIAGNOSTIC_POP #elif \ JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) # define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg) #elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) # define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg) #elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) # define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) #elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0) # define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) #else # define JSON_HEDLEY_MESSAGE(msg) #endif #if defined(JSON_HEDLEY_WARNING) #undef JSON_HEDLEY_WARNING #endif #if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") # define JSON_HEDLEY_WARNING(msg) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ JSON_HEDLEY_PRAGMA(clang warning msg) \ JSON_HEDLEY_DIAGNOSTIC_POP #elif \ JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \ JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) # define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg) #elif \ JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) # define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg)) #else # define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg) #endif #if defined(JSON_HEDLEY_REQUIRE) #undef JSON_HEDLEY_REQUIRE #endif #if defined(JSON_HEDLEY_REQUIRE_MSG) #undef JSON_HEDLEY_REQUIRE_MSG #endif #if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if) # if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat") # define JSON_HEDLEY_REQUIRE(expr) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ __attribute__((diagnose_if(!(expr), #expr, "error"))) \ JSON_HEDLEY_DIAGNOSTIC_POP # define JSON_HEDLEY_REQUIRE_MSG(expr,msg) \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored 
\"-Wgcc-compat\"") \ __attribute__((diagnose_if(!(expr), msg, "error"))) \ JSON_HEDLEY_DIAGNOSTIC_POP # else # define JSON_HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error"))) # define JSON_HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error"))) # endif #else # define JSON_HEDLEY_REQUIRE(expr) # define JSON_HEDLEY_REQUIRE_MSG(expr,msg) #endif #if defined(JSON_HEDLEY_FLAGS) #undef JSON_HEDLEY_FLAGS #endif #if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum) && (!defined(__cplusplus) || JSON_HEDLEY_HAS_WARNING("-Wbitfield-enum-conversion")) #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__)) #else #define JSON_HEDLEY_FLAGS #endif #if defined(JSON_HEDLEY_FLAGS_CAST) #undef JSON_HEDLEY_FLAGS_CAST #endif #if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0) # define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \ JSON_HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("warning(disable:188)") \ ((T) (expr)); \ JSON_HEDLEY_DIAGNOSTIC_POP \ })) #else # define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr) #endif #if defined(JSON_HEDLEY_EMPTY_BASES) #undef JSON_HEDLEY_EMPTY_BASES #endif #if \ (JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !JSON_HEDLEY_MSVC_VERSION_CHECK(20,0,0)) || \ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) #define JSON_HEDLEY_EMPTY_BASES __declspec(empty_bases) #else #define JSON_HEDLEY_EMPTY_BASES #endif /* Remaining macros are deprecated. 
*/ #if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK) #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK #endif #if defined(__clang__) #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0) #else #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) #endif #if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE) #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE #endif #define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) #if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE) #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE #endif #define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) #if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN) #undef JSON_HEDLEY_CLANG_HAS_BUILTIN #endif #define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin) #if defined(JSON_HEDLEY_CLANG_HAS_FEATURE) #undef JSON_HEDLEY_CLANG_HAS_FEATURE #endif #define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature) #if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION) #undef JSON_HEDLEY_CLANG_HAS_EXTENSION #endif #define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension) #if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE) #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE #endif #define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) #if defined(JSON_HEDLEY_CLANG_HAS_WARNING) #undef JSON_HEDLEY_CLANG_HAS_WARNING #endif #define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning) #endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */ // This file contains all internal macro definitions (except those affecting ABI) // You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them // #include // exclude unsupported compilers #if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) #if defined(__clang__) #if (__clang_major__ * 
10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" #endif #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" #endif #endif #endif // C++ language standard detection // if the user manually specified the used c++ version this is skipped #if !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) #if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) #define JSON_HAS_CPP_20 #define JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 #elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 #define JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 #elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) #define JSON_HAS_CPP_14 #endif // the cpp 11 flag is always specified because it is the minimal required version #define JSON_HAS_CPP_11 #endif #ifdef __has_include #if __has_include() #include #endif #endif #if !defined(JSON_HAS_FILESYSTEM) && !defined(JSON_HAS_EXPERIMENTAL_FILESYSTEM) #ifdef JSON_HAS_CPP_17 #if defined(__cpp_lib_filesystem) #define JSON_HAS_FILESYSTEM 1 #elif defined(__cpp_lib_experimental_filesystem) #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 #elif !defined(__has_include) #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 #elif __has_include() #define JSON_HAS_FILESYSTEM 1 #elif __has_include() #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 #endif // std::filesystem does not work on MinGW GCC 8: https://sourceforge.net/p/mingw-w64/bugs/737/ #if defined(__MINGW32__) && defined(__GNUC__) && __GNUC__ == 8 #undef JSON_HAS_FILESYSTEM #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM #endif // no 
filesystem support before GCC 8: https://en.cppreference.com/w/cpp/compiler_support #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 8 #undef JSON_HAS_FILESYSTEM #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM #endif // no filesystem support before Clang 7: https://en.cppreference.com/w/cpp/compiler_support #if defined(__clang_major__) && __clang_major__ < 7 #undef JSON_HAS_FILESYSTEM #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM #endif // no filesystem support before MSVC 19.14: https://en.cppreference.com/w/cpp/compiler_support #if defined(_MSC_VER) && _MSC_VER < 1914 #undef JSON_HAS_FILESYSTEM #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM #endif // no filesystem support before iOS 13 #if defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 130000 #undef JSON_HAS_FILESYSTEM #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM #endif // no filesystem support before macOS Catalina #if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 101500 #undef JSON_HAS_FILESYSTEM #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM #endif #endif #endif #ifndef JSON_HAS_EXPERIMENTAL_FILESYSTEM #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 0 #endif #ifndef JSON_HAS_FILESYSTEM #define JSON_HAS_FILESYSTEM 0 #endif #ifndef JSON_HAS_THREE_WAY_COMPARISON #if defined(__cpp_impl_three_way_comparison) && __cpp_impl_three_way_comparison >= 201907L \ && defined(__cpp_lib_three_way_comparison) && __cpp_lib_three_way_comparison >= 201907L #define JSON_HAS_THREE_WAY_COMPARISON 1 #else #define JSON_HAS_THREE_WAY_COMPARISON 0 #endif #endif #ifndef JSON_HAS_RANGES // ranges header shipping in GCC 11.1.0 (released 2021-04-27) has syntax error #if defined(__GLIBCXX__) && __GLIBCXX__ == 20210427 #define JSON_HAS_RANGES 0 #elif defined(__cpp_lib_ranges) #define JSON_HAS_RANGES 1 #else #define JSON_HAS_RANGES 0 #endif #endif #ifndef JSON_HAS_STATIC_RTTI #if !defined(_HAS_STATIC_RTTI) || _HAS_STATIC_RTTI != 0 #define JSON_HAS_STATIC_RTTI 1 #else #define JSON_HAS_STATIC_RTTI 0 #endif 
#endif #ifdef JSON_HAS_CPP_17 #define JSON_INLINE_VARIABLE inline #else #define JSON_INLINE_VARIABLE #endif #if JSON_HEDLEY_HAS_ATTRIBUTE(no_unique_address) #define JSON_NO_UNIQUE_ADDRESS [[no_unique_address]] #else #define JSON_NO_UNIQUE_ADDRESS #endif // disable documentation warnings on clang #if defined(__clang__) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdocumentation" #pragma clang diagnostic ignored "-Wdocumentation-unknown-command" #endif // allow disabling exceptions #if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) #define JSON_THROW(exception) throw exception #define JSON_TRY try #define JSON_CATCH(exception) catch(exception) #define JSON_INTERNAL_CATCH(exception) catch(exception) #else #include #define JSON_THROW(exception) std::abort() #define JSON_TRY if(true) #define JSON_CATCH(exception) if(false) #define JSON_INTERNAL_CATCH(exception) if(false) #endif // override exception macros #if defined(JSON_THROW_USER) #undef JSON_THROW #define JSON_THROW JSON_THROW_USER #endif #if defined(JSON_TRY_USER) #undef JSON_TRY #define JSON_TRY JSON_TRY_USER #endif #if defined(JSON_CATCH_USER) #undef JSON_CATCH #define JSON_CATCH JSON_CATCH_USER #undef JSON_INTERNAL_CATCH #define JSON_INTERNAL_CATCH JSON_CATCH_USER #endif #if defined(JSON_INTERNAL_CATCH_USER) #undef JSON_INTERNAL_CATCH #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER #endif // allow overriding assert #if !defined(JSON_ASSERT) #include // assert #define JSON_ASSERT(x) assert(x) #endif // allow to access some private functions (needed by the test suite) #if defined(JSON_TESTS_PRIVATE) #define JSON_PRIVATE_UNLESS_TESTED public #else #define JSON_PRIVATE_UNLESS_TESTED private #endif /*! @brief macro to briefly define a mapping between an enum and JSON @def NLOHMANN_JSON_SERIALIZE_ENUM @since version 3.4.0 */ #define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) 
\ template \ inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ { \ static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ static const std::pair m[] = __VA_ARGS__; \ auto it = std::find_if(std::begin(m), std::end(m), \ [e](const std::pair& ej_pair) -> bool \ { \ return ej_pair.first == e; \ }); \ j = ((it != std::end(m)) ? it : std::begin(m))->second; \ } \ template \ inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ { \ static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ static const std::pair m[] = __VA_ARGS__; \ auto it = std::find_if(std::begin(m), std::end(m), \ [&j](const std::pair& ej_pair) -> bool \ { \ return ej_pair.second == j; \ }); \ e = ((it != std::end(m)) ? it : std::begin(m))->first; \ } // Ugly macros to avoid uglier copy-paste when specializing basic_json. They // may be removed in the future once the class is split. #define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ template class ObjectType, \ template class ArrayType, \ class StringType, class BooleanType, class NumberIntegerType, \ class NumberUnsignedType, class NumberFloatType, \ template class AllocatorType, \ template class JSONSerializer, \ class BinaryType, \ class CustomBaseClass> #define NLOHMANN_BASIC_JSON_TPL \ basic_json // Macros to simplify conversion from/to types #define NLOHMANN_JSON_EXPAND( x ) x #define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME #define NLOHMANN_JSON_PASTE(...) 
NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \ NLOHMANN_JSON_PASTE64, \ NLOHMANN_JSON_PASTE63, \ NLOHMANN_JSON_PASTE62, \ NLOHMANN_JSON_PASTE61, \ NLOHMANN_JSON_PASTE60, \ NLOHMANN_JSON_PASTE59, \ NLOHMANN_JSON_PASTE58, \ NLOHMANN_JSON_PASTE57, \ NLOHMANN_JSON_PASTE56, \ NLOHMANN_JSON_PASTE55, \ NLOHMANN_JSON_PASTE54, \ NLOHMANN_JSON_PASTE53, \ NLOHMANN_JSON_PASTE52, \ NLOHMANN_JSON_PASTE51, \ NLOHMANN_JSON_PASTE50, \ NLOHMANN_JSON_PASTE49, \ NLOHMANN_JSON_PASTE48, \ NLOHMANN_JSON_PASTE47, \ NLOHMANN_JSON_PASTE46, \ NLOHMANN_JSON_PASTE45, \ NLOHMANN_JSON_PASTE44, \ NLOHMANN_JSON_PASTE43, \ NLOHMANN_JSON_PASTE42, \ NLOHMANN_JSON_PASTE41, \ NLOHMANN_JSON_PASTE40, \ NLOHMANN_JSON_PASTE39, \ NLOHMANN_JSON_PASTE38, \ NLOHMANN_JSON_PASTE37, \ NLOHMANN_JSON_PASTE36, \ NLOHMANN_JSON_PASTE35, \ NLOHMANN_JSON_PASTE34, \ NLOHMANN_JSON_PASTE33, \ NLOHMANN_JSON_PASTE32, \ NLOHMANN_JSON_PASTE31, \ NLOHMANN_JSON_PASTE30, \ NLOHMANN_JSON_PASTE29, \ NLOHMANN_JSON_PASTE28, \ NLOHMANN_JSON_PASTE27, \ NLOHMANN_JSON_PASTE26, \ NLOHMANN_JSON_PASTE25, \ NLOHMANN_JSON_PASTE24, \ NLOHMANN_JSON_PASTE23, \ NLOHMANN_JSON_PASTE22, \ NLOHMANN_JSON_PASTE21, \ NLOHMANN_JSON_PASTE20, \ NLOHMANN_JSON_PASTE19, \ NLOHMANN_JSON_PASTE18, \ NLOHMANN_JSON_PASTE17, \ NLOHMANN_JSON_PASTE16, \ NLOHMANN_JSON_PASTE15, \ NLOHMANN_JSON_PASTE14, \ NLOHMANN_JSON_PASTE13, \ NLOHMANN_JSON_PASTE12, \ NLOHMANN_JSON_PASTE11, \ NLOHMANN_JSON_PASTE10, \ NLOHMANN_JSON_PASTE9, \ NLOHMANN_JSON_PASTE8, \ NLOHMANN_JSON_PASTE7, \ NLOHMANN_JSON_PASTE6, \ NLOHMANN_JSON_PASTE5, \ NLOHMANN_JSON_PASTE4, \ NLOHMANN_JSON_PASTE3, \ NLOHMANN_JSON_PASTE2, \ NLOHMANN_JSON_PASTE1)(__VA_ARGS__)) #define NLOHMANN_JSON_PASTE2(func, v1) func(v1) #define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2) #define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3) #define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) 
NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4) #define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5) #define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6) #define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7) #define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8) #define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9) #define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10) #define NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) #define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) #define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) #define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) #define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) #define 
NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) #define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) #define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) #define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) #define NLOHMANN_JSON_PASTE21(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) #define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) #define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) #define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) 
NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) #define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) #define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) #define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) #define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) #define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) #define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) 
NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) #define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) #define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) #define NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) #define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) #define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, 
v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) #define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) #define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) #define NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) #define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) #define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, 
v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) #define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) #define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) #define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) #define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, 
v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) #define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) #define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) #define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) #define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, 
v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) #define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) #define NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) #define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, 
v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) #define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) #define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) #define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) #define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, 
v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) #define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) #define NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) #define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, 
v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) #define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) #define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) #define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, 
v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) #define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) #define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) #define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, 
v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) #define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; #define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); #define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1); /*! @brief macro @def NLOHMANN_DEFINE_TYPE_INTRUSIVE @since version 3.9.0 */ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \ friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } #define NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Type, ...) \ friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } #define NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } /*! 
@brief macro @def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE @since version 3.9.0 */ #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \ inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, ...) \ inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } // inspired from https://stackoverflow.com/a/26745591 // allows to call any std function as if (e.g. 
with begin): // using std::begin; begin(x); // // it allows using the detected idiom to retrieve the return type // of such an expression #define NLOHMANN_CAN_CALL_STD_FUNC_IMPL(std_name) \ namespace detail { \ using std::std_name; \ \ template \ using result_of_##std_name = decltype(std_name(std::declval()...)); \ } \ \ namespace detail2 { \ struct std_name##_tag \ { \ }; \ \ template \ std_name##_tag std_name(T&&...); \ \ template \ using result_of_##std_name = decltype(std_name(std::declval()...)); \ \ template \ struct would_call_std_##std_name \ { \ static constexpr auto const value = ::nlohmann::detail:: \ is_detected_exact::value; \ }; \ } /* namespace detail2 */ \ \ template \ struct would_call_std_##std_name : detail2::would_call_std_##std_name \ { \ } #ifndef JSON_USE_IMPLICIT_CONVERSIONS #define JSON_USE_IMPLICIT_CONVERSIONS 1 #endif #if JSON_USE_IMPLICIT_CONVERSIONS #define JSON_EXPLICIT #else #define JSON_EXPLICIT explicit #endif #ifndef JSON_DISABLE_ENUM_SERIALIZATION #define JSON_DISABLE_ENUM_SERIALIZATION 0 #endif #ifndef JSON_USE_GLOBAL_UDLS #define JSON_USE_GLOBAL_UDLS 1 #endif #if JSON_HAS_THREE_WAY_COMPARISON #include // partial_ordering #endif NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { /////////////////////////// // JSON type enumeration // /////////////////////////// /*! @brief the JSON type enumeration This enumeration collects the different JSON types. It is internally used to distinguish the stored values, and the functions @ref basic_json::is_null(), @ref basic_json::is_object(), @ref basic_json::is_array(), @ref basic_json::is_string(), @ref basic_json::is_boolean(), @ref basic_json::is_number() (with @ref basic_json::is_number_integer(), @ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()), @ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and @ref basic_json::is_structured() rely on it. 
@note There are three enumeration entries (number_integer, number_unsigned, and number_float), because the library distinguishes these three types for numbers: @ref basic_json::number_unsigned_t is used for unsigned integers, @ref basic_json::number_integer_t is used for signed integers, and @ref basic_json::number_float_t is used for floating-point numbers or to approximate integers which do not fit in the limits of their respective type. @sa see @ref basic_json::basic_json(const value_t value_type) -- create a JSON value with the default value for a given type @since version 1.0.0 */ enum class value_t : std::uint8_t { null, ///< null value object, ///< object (unordered set of name/value pairs) array, ///< array (ordered collection of values) string, ///< string value boolean, ///< boolean value number_integer, ///< number value (signed integer) number_unsigned, ///< number value (unsigned integer) number_float, ///< number value (floating-point) binary, ///< binary array (ordered collection of bytes) discarded ///< discarded by the parser callback function }; /*! @brief comparison operator for JSON types Returns an ordering that is similar to Python: - order: null < boolean < number < object < array < string < binary - furthermore, each type is not smaller than itself - discarded values are not comparable - binary is represented as a b"" string in python and directly comparable to a string; however, making a binary array directly comparable with a string would be surprising behavior in a JSON file. 
@since version 1.0.0 */ #if JSON_HAS_THREE_WAY_COMPARISON inline std::partial_ordering operator<=>(const value_t lhs, const value_t rhs) noexcept // *NOPAD* #else inline bool operator<(const value_t lhs, const value_t rhs) noexcept #endif { static constexpr std::array order = {{ 0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */, 1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */, 6 /* binary */ } }; const auto l_index = static_cast(lhs); const auto r_index = static_cast(rhs); #if JSON_HAS_THREE_WAY_COMPARISON if (l_index < order.size() && r_index < order.size()) { return order[l_index] <=> order[r_index]; // *NOPAD* } return std::partial_ordering::unordered; #else return l_index < order.size() && r_index < order.size() && order[l_index] < order[r_index]; #endif } // GCC selects the built-in operator< over an operator rewritten from // a user-defined spaceship operator // Clang, MSVC, and ICC select the rewritten candidate // (see GCC bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105200) #if JSON_HAS_THREE_WAY_COMPARISON && defined(__GNUC__) inline bool operator<(const value_t lhs, const value_t rhs) noexcept { return std::is_lt(lhs <=> rhs); // *NOPAD* } #endif } // namespace detail NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT // #include NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { /*! @brief replace all occurrences of a substring by another string @param[in,out] s the string to manipulate; changed so that all occurrences of @a f are replaced with @a t @param[in] f the substring to replace with @a t @param[in] t the string to replace @a f @pre The search string @a f must not be empty. 
**This precondition is enforced with an assertion.** @since version 2.0.0 */ template inline void replace_substring(StringType& s, const StringType& f, const StringType& t) { JSON_ASSERT(!f.empty()); for (auto pos = s.find(f); // find first occurrence of f pos != StringType::npos; // make sure f was found s.replace(pos, f.size(), t), // replace with t, and pos = s.find(f, pos + t.size())) // find next occurrence of f {} } /*! * @brief string escaping as described in RFC 6901 (Sect. 4) * @param[in] s string to escape * @return escaped string * * Note the order of escaping "~" to "~0" and "/" to "~1" is important. */ template inline StringType escape(StringType s) { replace_substring(s, StringType{"~"}, StringType{"~0"}); replace_substring(s, StringType{"/"}, StringType{"~1"}); return s; } /*! * @brief string unescaping as described in RFC 6901 (Sect. 4) * @param[in] s string to unescape * @return unescaped string * * Note the order of escaping "~1" to "/" and "~0" to "~" is important. */ template static void unescape(StringType& s) { replace_substring(s, StringType{"~1"}, StringType{"/"}); replace_substring(s, StringType{"~0"}, StringType{"~"}); } } // namespace detail NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // size_t // #include NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { /// struct to capture the start position of the current token struct position_t { /// the total number of characters read std::size_t chars_read_total = 0; /// the number of characters read in the current line std::size_t chars_read_current_line = 0; /// the number of lines read std::size_t lines_read = 0; /// conversion to size_t to preserve SAX interface constexpr operator size_t() const { return chars_read_total; } }; } // namespace 
detail NLOHMANN_JSON_NAMESPACE_END // #include // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-FileCopyrightText: 2018 The Abseil Authors // SPDX-License-Identifier: MIT #include // array #include // size_t #include // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type #include // index_sequence, make_index_sequence, index_sequence_for // #include NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { template using uncvref_t = typename std::remove_cv::type>::type; #ifdef JSON_HAS_CPP_14 // the following utilities are natively available in C++14 using std::enable_if_t; using std::index_sequence; using std::make_index_sequence; using std::index_sequence_for; #else // alias templates to reduce boilerplate template using enable_if_t = typename std::enable_if::type; // The following code is taken from https://github.com/abseil/abseil-cpp/blob/10cb35e459f5ecca5b2ff107635da0bfa41011b4/absl/utility/utility.h // which is part of Google Abseil (https://github.com/abseil/abseil-cpp), licensed under the Apache License 2.0. //// START OF CODE FROM GOOGLE ABSEIL // integer_sequence // // Class template representing a compile-time integer sequence. An instantiation // of `integer_sequence` has a sequence of integers encoded in its // type through its template arguments (which is a common need when // working with C++11 variadic templates). `absl::integer_sequence` is designed // to be a drop-in replacement for C++14's `std::integer_sequence`. // // Example: // // template< class T, T... Ints > // void user_function(integer_sequence); // // int main() // { // // user_function's `T` will be deduced to `int` and `Ints...` // // will be deduced to `0, 1, 2, 3, 4`. 
// user_function(make_integer_sequence()); // } template struct integer_sequence { using value_type = T; static constexpr std::size_t size() noexcept { return sizeof...(Ints); } }; // index_sequence // // A helper template for an `integer_sequence` of `size_t`, // `absl::index_sequence` is designed to be a drop-in replacement for C++14's // `std::index_sequence`. template using index_sequence = integer_sequence; namespace utility_internal { template struct Extend; // Note that SeqSize == sizeof...(Ints). It's passed explicitly for efficiency. template struct Extend, SeqSize, 0> { using type = integer_sequence < T, Ints..., (Ints + SeqSize)... >; }; template struct Extend, SeqSize, 1> { using type = integer_sequence < T, Ints..., (Ints + SeqSize)..., 2 * SeqSize >; }; // Recursion helper for 'make_integer_sequence'. // 'Gen::type' is an alias for 'integer_sequence'. template struct Gen { using type = typename Extend < typename Gen < T, N / 2 >::type, N / 2, N % 2 >::type; }; template struct Gen { using type = integer_sequence; }; } // namespace utility_internal // Compile-time sequences of integers // make_integer_sequence // // This template alias is equivalent to // `integer_sequence`, and is designed to be a drop-in // replacement for C++14's `std::make_integer_sequence`. template using make_integer_sequence = typename utility_internal::Gen::type; // make_index_sequence // // This template alias is equivalent to `index_sequence<0, 1, ..., N-1>`, // and is designed to be a drop-in replacement for C++14's // `std::make_index_sequence`. 
template using make_index_sequence = make_integer_sequence; // index_sequence_for // // Converts a typename pack into an index sequence of the same length, and // is designed to be a drop-in replacement for C++14's // `std::index_sequence_for()` template using index_sequence_for = make_index_sequence; //// END OF CODE FROM GOOGLE ABSEIL #endif // dispatch utility (taken from ranges-v3) template struct priority_tag : priority_tag < N - 1 > {}; template<> struct priority_tag<0> {}; // taken from ranges-v3 template struct static_const { static JSON_INLINE_VARIABLE constexpr T value{}; }; #ifndef JSON_HAS_CPP_17 template constexpr T static_const::value; #endif template inline constexpr std::array make_array(Args&& ... args) { return std::array {{static_cast(std::forward(args))...}}; } } // namespace detail NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // numeric_limits #include // false_type, is_constructible, is_integral, is_same, true_type #include // declval #include // tuple #include // char_traits // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #include // random_access_iterator_tag // #include // #include // #include NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { template struct iterator_types {}; template struct iterator_types < It, void_t> { using difference_type = typename It::difference_type; using value_type = typename It::value_type; using pointer = typename It::pointer; using reference = typename It::reference; using iterator_category = typename It::iterator_category; }; // This is required as some 
compilers implement std::iterator_traits in a way that // doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341. template struct iterator_traits { }; template struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> : iterator_types { }; template struct iterator_traits::value>> { using iterator_category = std::random_access_iterator_tag; using value_type = T; using difference_type = ptrdiff_t; using pointer = T*; using reference = T&; }; } // namespace detail NLOHMANN_JSON_NAMESPACE_END // #include // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT // #include NLOHMANN_JSON_NAMESPACE_BEGIN NLOHMANN_CAN_CALL_STD_FUNC_IMPL(begin); NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT // #include NLOHMANN_JSON_NAMESPACE_BEGIN NLOHMANN_CAN_CALL_STD_FUNC_IMPL(end); NLOHMANN_JSON_NAMESPACE_END // #include // #include // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ // | | |__ | | | | | | version 3.11.3 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2013-2023 Niels Lohmann // SPDX-License-Identifier: MIT #ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ #define INCLUDE_NLOHMANN_JSON_FWD_HPP_ #include // int64_t, uint64_t #include // map #include // allocator #include // string #include // vector // #include /*! @brief namespace for Niels Lohmann @see https://github.com/nlohmann @since version 1.0.0 */ NLOHMANN_JSON_NAMESPACE_BEGIN /*! 
@brief default JSONSerializer template argument This serializer ignores the template arguments and uses ADL ([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) for serialization. */ template struct adl_serializer; /// a class to store JSON values /// @sa https://json.nlohmann.me/api/basic_json/ template class ObjectType = std::map, template class ArrayType = std::vector, class StringType = std::string, class BooleanType = bool, class NumberIntegerType = std::int64_t, class NumberUnsignedType = std::uint64_t, class NumberFloatType = double, template class AllocatorType = std::allocator, template class JSONSerializer = adl_serializer, class BinaryType = std::vector, // cppcheck-suppress syntaxError class CustomBaseClass = void> class basic_json; /// @brief JSON Pointer defines a string syntax for identifying a specific value within a JSON document /// @sa https://json.nlohmann.me/api/json_pointer/ template class json_pointer; /*! @brief default specialization @sa https://json.nlohmann.me/api/json/ */ using json = basic_json<>; /// @brief a minimal map-like container that preserves insertion order /// @sa https://json.nlohmann.me/api/ordered_map/ template struct ordered_map; /// @brief specialization that maintains the insertion order of object keys /// @sa https://json.nlohmann.me/api/ordered_json/ using ordered_json = basic_json; NLOHMANN_JSON_NAMESPACE_END #endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_ NLOHMANN_JSON_NAMESPACE_BEGIN /*! @brief detail namespace with internal helper functions This namespace collects functions that should not be exposed, implementations of some @ref basic_json methods, and meta-programming helpers. @since version 2.1.0 */ namespace detail { ///////////// // helpers // ///////////// // Note to maintainers: // // Every trait in this file expects a non CV-qualified type. // The only exceptions are in the 'aliases for detected' section // (i.e. 
those of the form: decltype(T::member_function(std::declval()))) // // In this case, T has to be properly CV-qualified to constraint the function arguments // (e.g. to_json(BasicJsonType&, const T&)) template struct is_basic_json : std::false_type {}; NLOHMANN_BASIC_JSON_TPL_DECLARATION struct is_basic_json : std::true_type {}; // used by exceptions create() member functions // true_type for pointer to possibly cv-qualified basic_json or std::nullptr_t // false_type otherwise template struct is_basic_json_context : std::integral_constant < bool, is_basic_json::type>::type>::value || std::is_same::value > {}; ////////////////////// // json_ref helpers // ////////////////////// template class json_ref; template struct is_json_ref : std::false_type {}; template struct is_json_ref> : std::true_type {}; ////////////////////////// // aliases for detected // ////////////////////////// template using mapped_type_t = typename T::mapped_type; template using key_type_t = typename T::key_type; template using value_type_t = typename T::value_type; template using difference_type_t = typename T::difference_type; template using pointer_t = typename T::pointer; template using reference_t = typename T::reference; template using iterator_category_t = typename T::iterator_category; template using to_json_function = decltype(T::to_json(std::declval()...)); template using from_json_function = decltype(T::from_json(std::declval()...)); template using get_template_function = decltype(std::declval().template get()); // trait checking if JSONSerializer::from_json(json const&, udt&) exists template struct has_from_json : std::false_type {}; // trait checking if j.get is valid // use this trait instead of std::is_constructible or std::is_convertible, // both rely on, or make use of implicit conversions, and thus fail when T // has several constructors/operator= (see https://github.com/nlohmann/json/issues/958) template struct is_getable { static constexpr bool value = is_detected::value; }; 
template struct has_from_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> { using serializer = typename BasicJsonType::template json_serializer; static constexpr bool value = is_detected_exact::value; }; // This trait checks if JSONSerializer::from_json(json const&) exists // this overload is used for non-default-constructible user-defined-types template struct has_non_default_from_json : std::false_type {}; template struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> { using serializer = typename BasicJsonType::template json_serializer; static constexpr bool value = is_detected_exact::value; }; // This trait checks if BasicJsonType::json_serializer::to_json exists // Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion. template struct has_to_json : std::false_type {}; template struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> { using serializer = typename BasicJsonType::template json_serializer; static constexpr bool value = is_detected_exact::value; }; template using detect_key_compare = typename T::key_compare; template struct has_key_compare : std::integral_constant::value> {}; // obtains the actual object key comparator template struct actual_object_comparator { using object_t = typename BasicJsonType::object_t; using object_comparator_t = typename BasicJsonType::default_object_comparator_t; using type = typename std::conditional < has_key_compare::value, typename object_t::key_compare, object_comparator_t>::type; }; template using actual_object_comparator_t = typename actual_object_comparator::type; ///////////////// // char_traits // ///////////////// // Primary template of char_traits calls std char_traits template struct char_traits : std::char_traits {}; // Explicitly define char traits for unsigned char since it is not standard template<> struct char_traits : std::char_traits { using char_type = unsigned char; using 
int_type = uint64_t; // Redefine to_int_type function static int_type to_int_type(char_type c) noexcept { return static_cast(c); } static char_type to_char_type(int_type i) noexcept { return static_cast(i); } static constexpr int_type eof() noexcept { return static_cast(EOF); } }; // Explicitly define char traits for signed char since it is not standard template<> struct char_traits : std::char_traits { using char_type = signed char; using int_type = uint64_t; // Redefine to_int_type function static int_type to_int_type(char_type c) noexcept { return static_cast(c); } static char_type to_char_type(int_type i) noexcept { return static_cast(i); } static constexpr int_type eof() noexcept { return static_cast(EOF); } }; /////////////////// // is_ functions // /////////////////// // https://en.cppreference.com/w/cpp/types/conjunction template struct conjunction : std::true_type { }; template struct conjunction : B { }; template struct conjunction : std::conditional(B::value), conjunction, B>::type {}; // https://en.cppreference.com/w/cpp/types/negation template struct negation : std::integral_constant < bool, !B::value > { }; // Reimplementation of is_constructible and is_default_constructible, due to them being broken for // std::pair and std::tuple until LWG 2367 fix (see https://cplusplus.github.io/LWG/lwg-defects.html#2367). // This causes compile errors in e.g. clang 3.5 or gcc 4.9. 
template struct is_default_constructible : std::is_default_constructible {}; template struct is_default_constructible> : conjunction, is_default_constructible> {}; template struct is_default_constructible> : conjunction, is_default_constructible> {}; template struct is_default_constructible> : conjunction...> {}; template struct is_default_constructible> : conjunction...> {}; template struct is_constructible : std::is_constructible {}; template struct is_constructible> : is_default_constructible> {}; template struct is_constructible> : is_default_constructible> {}; template struct is_constructible> : is_default_constructible> {}; template struct is_constructible> : is_default_constructible> {}; template struct is_iterator_traits : std::false_type {}; template struct is_iterator_traits> { private: using traits = iterator_traits; public: static constexpr auto value = is_detected::value && is_detected::value && is_detected::value && is_detected::value && is_detected::value; }; template struct is_range { private: using t_ref = typename std::add_lvalue_reference::type; using iterator = detected_t; using sentinel = detected_t; // to be 100% correct, it should use https://en.cppreference.com/w/cpp/iterator/input_or_output_iterator // and https://en.cppreference.com/w/cpp/iterator/sentinel_for // but reimplementing these would be too much work, as a lot of other concepts are used underneath static constexpr auto is_iterator_begin = is_iterator_traits>::value; public: static constexpr bool value = !std::is_same::value && !std::is_same::value && is_iterator_begin; }; template using iterator_t = enable_if_t::value, result_of_begin())>>; template using range_value_t = value_type_t>>; // The following implementation of is_complete_type is taken from // https://blogs.msdn.microsoft.com/vcblog/2015/12/02/partial-support-for-expression-sfinae-in-vs-2015-update-1/ // and is written by Xiang Fan who agreed to using it in this library. 
template struct is_complete_type : std::false_type {}; template struct is_complete_type : std::true_type {}; template struct is_compatible_object_type_impl : std::false_type {}; template struct is_compatible_object_type_impl < BasicJsonType, CompatibleObjectType, enable_if_t < is_detected::value&& is_detected::value >> { using object_t = typename BasicJsonType::object_t; // macOS's is_constructible does not play well with nonesuch... static constexpr bool value = is_constructible::value && is_constructible::value; }; template struct is_compatible_object_type : is_compatible_object_type_impl {}; template struct is_constructible_object_type_impl : std::false_type {}; template struct is_constructible_object_type_impl < BasicJsonType, ConstructibleObjectType, enable_if_t < is_detected::value&& is_detected::value >> { using object_t = typename BasicJsonType::object_t; static constexpr bool value = (is_default_constructible::value && (std::is_move_assignable::value || std::is_copy_assignable::value) && (is_constructible::value && std::is_same < typename object_t::mapped_type, typename ConstructibleObjectType::mapped_type >::value)) || (has_from_json::value || has_non_default_from_json < BasicJsonType, typename ConstructibleObjectType::mapped_type >::value); }; template struct is_constructible_object_type : is_constructible_object_type_impl {}; template struct is_compatible_string_type { static constexpr auto value = is_constructible::value; }; template struct is_constructible_string_type { // launder type through decltype() to fix compilation failure on ICPC #ifdef __INTEL_COMPILER using laundered_type = decltype(std::declval()); #else using laundered_type = ConstructibleStringType; #endif static constexpr auto value = conjunction < is_constructible, is_detected_exact>::value; }; template struct is_compatible_array_type_impl : std::false_type {}; template struct is_compatible_array_type_impl < BasicJsonType, CompatibleArrayType, enable_if_t < is_detected::value&& 
is_iterator_traits>>::value&& // special case for types like std::filesystem::path whose iterator's value_type are themselves // c.f. https://github.com/nlohmann/json/pull/3073 !std::is_same>::value >> { static constexpr bool value = is_constructible>::value; }; template struct is_compatible_array_type : is_compatible_array_type_impl {}; template struct is_constructible_array_type_impl : std::false_type {}; template struct is_constructible_array_type_impl < BasicJsonType, ConstructibleArrayType, enable_if_t::value >> : std::true_type {}; template struct is_constructible_array_type_impl < BasicJsonType, ConstructibleArrayType, enable_if_t < !std::is_same::value&& !is_compatible_string_type::value&& is_default_constructible::value&& (std::is_move_assignable::value || std::is_copy_assignable::value)&& is_detected::value&& is_iterator_traits>>::value&& is_detected::value&& // special case for types like std::filesystem::path whose iterator's value_type are themselves // c.f. https://github.com/nlohmann/json/pull/3073 !std::is_same>::value&& is_complete_type < detected_t>::value >> { using value_type = range_value_t; static constexpr bool value = std::is_same::value || has_from_json::value || has_non_default_from_json < BasicJsonType, value_type >::value; }; template struct is_constructible_array_type : is_constructible_array_type_impl {}; template struct is_compatible_integer_type_impl : std::false_type {}; template struct is_compatible_integer_type_impl < RealIntegerType, CompatibleNumberIntegerType, enable_if_t < std::is_integral::value&& std::is_integral::value&& !std::is_same::value >> { // is there an assert somewhere on overflows? 
using RealLimits = std::numeric_limits; using CompatibleLimits = std::numeric_limits; static constexpr auto value = is_constructible::value && CompatibleLimits::is_integer && RealLimits::is_signed == CompatibleLimits::is_signed; }; template struct is_compatible_integer_type : is_compatible_integer_type_impl {}; template struct is_compatible_type_impl: std::false_type {}; template struct is_compatible_type_impl < BasicJsonType, CompatibleType, enable_if_t::value >> { static constexpr bool value = has_to_json::value; }; template struct is_compatible_type : is_compatible_type_impl {}; template struct is_constructible_tuple : std::false_type {}; template struct is_constructible_tuple> : conjunction...> {}; template struct is_json_iterator_of : std::false_type {}; template struct is_json_iterator_of : std::true_type {}; template struct is_json_iterator_of : std::true_type {}; // checks if a given type T is a template specialization of Primary template