Repository: BBC-Esq/ChromaDB-Plugin-for-LM-Studio
Branch: main
Commit: d99ae7577a79
Files: 90
Total size: 2.8 MB

Directory structure:
gitextract_gve6w__6/
├── .gitignore
├── Assets/
│   ├── SentenceTransformer.py
│   ├── core.py
│   ├── user_manual_consolidated.md
│   └── vision_model_table.html
├── CSS/
│   └── template.css
├── README.md
├── Tokenizer/
│   ├── special_tokens_map.json
│   ├── tokenizer.json
│   ├── tokenizer.model
│   └── tokenizer_config.json
├── __main__.py
├── charts/
│   ├── __init__.py
│   ├── all_gpus.py
│   ├── gpu_info.py
│   ├── models_chat.py
│   ├── models_vector.py
│   └── models_vision.py
├── chat/
│   ├── __init__.py
│   ├── base.py
│   ├── jeeves.py
│   ├── kobold.py
│   ├── lm_studio.py
│   ├── local_model.py
│   ├── minimax.py
│   └── openai.py
├── core/
│   ├── __init__.py
│   ├── chatgpt_settings.py
│   ├── config.py
│   ├── constants.py
│   ├── extract_metadata.py
│   ├── initialize.py
│   └── utilities.py
├── db/
│   ├── __init__.py
│   ├── choose_documents.py
│   ├── create_symlinks.py
│   ├── cuda_manager.py
│   ├── database_interactions.py
│   ├── document_processor.py
│   ├── embedding_models.py
│   ├── process_manager.py
│   ├── sqlite_operations.py
│   ├── stage_extract.py
│   ├── stage_split.py
│   └── stage_tokenize.py
├── gui/
│   ├── __init__.py
│   ├── credentials.py
│   ├── dialogs/
│   │   ├── __init__.py
│   │   ├── ai_backends_dialog.py
│   │   ├── chatgpt_tab.py
│   │   ├── kobold_tab.py
│   │   ├── lm_studio_tab.py
│   │   └── minimax_tab.py
│   ├── download_model.py
│   ├── main_window.py
│   ├── metrics_bar.py
│   ├── tabs.py
│   ├── tabs_databases/
│   │   ├── __init__.py
│   │   ├── create.py
│   │   ├── manage.py
│   │   └── query.py
│   ├── tabs_models/
│   │   ├── __init__.py
│   │   └── models.py
│   ├── tabs_settings/
│   │   ├── __init__.py
│   │   ├── database_create.py
│   │   ├── database_query.py
│   │   ├── settings.py
│   │   ├── tts.py
│   │   └── vision.py
│   └── tabs_tools/
│       ├── __init__.py
│       ├── misc.py
│       ├── ocr.py
│       ├── scrape.py
│       ├── tools.py
│       ├── transcribe.py
│       └── vision.py
├── gui.py
├── modules/
│   ├── __init__.py
│   ├── kokoro.py
│   ├── ocr.py
│   ├── process_images.py
│   ├── scraper.py
│   ├── transcribe.py
│   ├── tts.py
│   └── voice_recorder.py
├── setup_windows.py
└── tools/
    ├── __init__.py
    ├── check_packages.py
    ├── chunk_userguide.py
    └── replace_sourcecode.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
# Virtual environment
Lib/
Scripts/
Include/
pyvenv.cfg

# Python
__pycache__/
*.pyc
*.pyo

# Models (large binary files)
Models/

# User data
Vector_DB/
Vector_DB_Backup/
Docs_for_DB/
Scraped_Documentation/

# Test files
TEST - FILES/

# Build artifacts
*.egg-info/

# Config (contains user API keys)
config.yaml

# Share (installed by tessdata pip packages)
share/

# Misc
ffmpeg.exe
push_to_github.ps1
metadata.txt
chat_history.txt
*.log
.claude/
.lock

# Tests (local only)
tests/

================================================
FILE: Assets/SentenceTransformer.py
================================================
# modified from sentence-transformers 4.1.0: customized the "_text_length" method and added debugging
from __future__ import annotations

import copy
import importlib
import json
import logging
import math
import os
import queue
import shutil
import sys
import tempfile
import traceback
import warnings
from collections import OrderedDict
from collections.abc import Iterable, Iterator
from contextlib import contextmanager
from multiprocessing import Queue
from pathlib import Path
from typing import Any, Callable, Literal, overload

import numpy as np
import numpy.typing as npt
import torch
import torch.multiprocessing as mp
import transformers
from huggingface_hub import HfApi
from packaging import version
from torch import Tensor, device, nn
from tqdm.autonotebook import trange
from transformers import is_torch_npu_available
from transformers.dynamic_module_utils import get_class_from_dynamic_module, get_relative_import_files

from sentence_transformers.model_card import SentenceTransformerModelCardData, generate_model_card
from sentence_transformers.similarity_functions import SimilarityFunction

from . import __MODEL_HUB_ORGANIZATION__, __version__
from .evaluation import SentenceEvaluator
from .fit_mixin import FitMixin
from .models import Normalize, Pooling, Transformer
from .peft_mixin import PeftAdapterMixin
from .quantization import quantize_embeddings
from .util import (
    batch_to_device,
    get_device_name,
    import_from_string,
    is_sentence_transformer_model,
    load_dir_path,
    load_file_path,
    save_to_hub_args_decorator,
    truncate_embeddings,
)

logger = logging.getLogger(__name__)


class SentenceTransformer(nn.Sequential, FitMixin, PeftAdapterMixin):
    """
    Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.

    Args:
        model_name_or_path (str, optional): If it is a filepath on disc, it loads the model from that path. If it is not a path,
            it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model
            from the Hugging Face Hub with that name.
        modules (Iterable[nn.Module], optional): A list of torch Modules that should be called sequentially, can be used to create
            custom SentenceTransformer models from scratch.
        device (str, optional): Device (like "cuda", "cpu", "mps", "npu") that should be used for computation. If None, checks if
            a GPU can be used.
        prompts (Dict[str, str], optional): A dictionary with prompts for the model. The key is the prompt name, the value is the
            prompt text. The prompt text will be prepended before any text to encode. For example:
            `{"query": "query: ", "passage": "passage: "}` or `{"clustering": "Identify the main category based on the titles in "}`.
        default_prompt_name (str, optional): The name of the prompt that should be used by default. If not set,
            no prompt will be applied.
        similarity_fn_name (str or SimilarityFunction, optional): The name of the similarity function to use. Valid options are
            "cosine", "dot", "euclidean", and "manhattan". If not set, it is automatically set to "cosine" if `similarity` or
            `similarity_pairwise` are called while `model.similarity_fn_name` is still `None`.
        cache_folder (str, optional): Path to store models. Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable.
        trust_remote_code (bool, optional): Whether or not to allow for custom models defined on the Hub in their own modeling files.
            This option should only be set to True for repositories you trust and in which you have read the code, as it
            will execute code present on the Hub on your local machine.
        revision (str, optional): The specific model version to use. It can be a branch name, a tag name, or a commit id,
            for a stored model on Hugging Face.
        local_files_only (bool, optional): Whether or not to only look at local files (i.e., do not try to download the model).
        token (bool or str, optional): Hugging Face authentication token to download private models.
        use_auth_token (bool or str, optional): Deprecated argument. Please use `token` instead.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. `None` does no truncation. Truncation is only applicable during inference when :meth:`SentenceTransformer.encode` is called. model_kwargs (Dict[str, Any], optional): Additional model configuration parameters to be passed to the Hugging Face Transformers model. Particularly useful options are: - ``torch_dtype``: Override the default `torch.dtype` and load the model under a specific `dtype`. The different options are: 1. ``torch.float16``, ``torch.bfloat16`` or ``torch.float``: load in a specified ``dtype``, ignoring the model's ``config.torch_dtype`` if one exists. If not specified - the model will get loaded in ``torch.float`` (fp32). 2. ``"auto"`` - A ``torch_dtype`` entry in the ``config.json`` file of the model will be attempted to be used. If this entry isn't found then next check the ``dtype`` of the first weight in the checkpoint that's of a floating point type and use that as ``dtype``. This will load the model using the ``dtype`` it was saved in at the end of the training. It can't be used as an indicator of how the model was trained. Since it could be trained in one of half precision dtypes, but saved in fp32. - ``attn_implementation``: The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using `F.scaled_dot_product_attention `_), or `"flash_attention_2"` (using `Dao-AILab/flash-attention `_). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation. - ``provider``: If backend is "onnx", this is the provider to use for inference, for example "CPUExecutionProvider", "CUDAExecutionProvider", etc. See https://onnxruntime.ai/docs/execution-providers/ for all ONNX execution providers. - ``file_name``: If backend is "onnx" or "openvino", this is the file name to load, useful for loading optimized or quantized ONNX or OpenVINO models. - ``export``: If backend is "onnx" or "openvino", then this is a boolean flag specifying whether this model should be exported to the backend. If not specified, the model will be exported only if the model repository or directory does not already contain an exported model. See the `PreTrainedModel.from_pretrained `_ documentation for more details. tokenizer_kwargs (Dict[str, Any], optional): Additional tokenizer configuration parameters to be passed to the Hugging Face Transformers tokenizer. See the `AutoTokenizer.from_pretrained `_ documentation for more details. config_kwargs (Dict[str, Any], optional): Additional model configuration parameters to be passed to the Hugging Face Transformers config. See the `AutoConfig.from_pretrained `_ documentation for more details. model_card_data (:class:`~sentence_transformers.model_card.SentenceTransformerModelCardData`, optional): A model card data object that contains information about the model. This is used to generate a model card when saving the model. If not set, a default model card data object is created. backend (str): The backend to use for inference. Can be one of "torch" (default), "onnx", or "openvino". See https://sbert.net/docs/sentence_transformer/usage/efficiency.html for benchmarking information on the different backends. 
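    For instance, ``model_kwargs`` can be used to load the underlying Hugging Face Transformers model in half
    precision (a minimal sketch; "all-mpnet-base-v2" is only an example model name)::

        import torch
        from sentence_transformers import SentenceTransformer

        model = SentenceTransformer(
            "all-mpnet-base-v2",
            model_kwargs={"torch_dtype": torch.float16},
        )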
Example: :: from sentence_transformers import SentenceTransformer # Load a pre-trained SentenceTransformer model model = SentenceTransformer('all-mpnet-base-v2') # Encode some texts sentences = [ "The weather is lovely today.", "It's so sunny outside!", "He drove to the stadium.", ] embeddings = model.encode(sentences) print(embeddings.shape) # (3, 768) # Get the similarity scores between all sentences similarities = model.similarity(embeddings, embeddings) print(similarities) # tensor([[1.0000, 0.6817, 0.0492], # [0.6817, 1.0000, 0.0421], # [0.0492, 0.0421, 1.0000]]) """ def __init__( self, model_name_or_path: str | None = None, modules: Iterable[nn.Module] | None = None, device: str | None = None, prompts: dict[str, str] | None = None, default_prompt_name: str | None = None, similarity_fn_name: str | SimilarityFunction | None = None, cache_folder: str | None = None, trust_remote_code: bool = False, revision: str | None = None, local_files_only: bool = False, token: bool | str | None = None, use_auth_token: bool | str | None = None, truncate_dim: int | None = None, model_kwargs: dict[str, Any] | None = None, tokenizer_kwargs: dict[str, Any] | None = None, config_kwargs: dict[str, Any] | None = None, model_card_data: SentenceTransformerModelCardData | None = None, backend: Literal["torch", "onnx", "openvino"] = "torch", ) -> None: # Note: self._load_sbert_model can also update `self.prompts` and `self.default_prompt_name` self.prompts = prompts or {} self.default_prompt_name = default_prompt_name self.similarity_fn_name = similarity_fn_name self.trust_remote_code = trust_remote_code self.truncate_dim = truncate_dim self.model_card_data = model_card_data or SentenceTransformerModelCardData() self.module_kwargs = None self._model_card_vars = {} self._model_card_text = None self._model_config = {} self.backend = backend if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4 of SentenceTransformers.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) token = use_auth_token if cache_folder is None: cache_folder = os.getenv("SENTENCE_TRANSFORMERS_HOME") if device is None: device = get_device_name() logger.info(f"Use pytorch device_name: {device}") if device == "hpu" and importlib.util.find_spec("optimum") is not None: from optimum.habana.transformers.modeling_utils import adapt_transformers_to_gaudi adapt_transformers_to_gaudi() if model_name_or_path is not None and model_name_or_path != "": logger.info(f"Load pretrained SentenceTransformer: {model_name_or_path}") # Old models that don't belong to any organization basic_transformer_models = [ "albert-base-v1", "albert-base-v2", "albert-large-v1", "albert-large-v2", "albert-xlarge-v1", "albert-xlarge-v2", "albert-xxlarge-v1", "albert-xxlarge-v2", "bert-base-cased-finetuned-mrpc", "bert-base-cased", "bert-base-chinese", "bert-base-german-cased", "bert-base-german-dbmdz-cased", "bert-base-german-dbmdz-uncased", "bert-base-multilingual-cased", "bert-base-multilingual-uncased", "bert-base-uncased", "bert-large-cased-whole-word-masking-finetuned-squad", "bert-large-cased-whole-word-masking", "bert-large-cased", "bert-large-uncased-whole-word-masking-finetuned-squad", "bert-large-uncased-whole-word-masking", "bert-large-uncased", "camembert-base", "ctrl", "distilbert-base-cased-distilled-squad", "distilbert-base-cased", "distilbert-base-german-cased", "distilbert-base-multilingual-cased", "distilbert-base-uncased-distilled-squad", "distilbert-base-uncased-finetuned-sst-2-english", "distilbert-base-uncased", "distilgpt2", "distilroberta-base", "gpt2-large", "gpt2-medium", "gpt2-xl", "gpt2", "openai-gpt", "roberta-base-openai-detector", "roberta-base", "roberta-large-mnli", "roberta-large-openai-detector", "roberta-large", "t5-11b", "t5-3b", "t5-base", "t5-large", "t5-small", "transfo-xl-wt103", "xlm-clm-ende-1024", "xlm-clm-enfr-1024", "xlm-mlm-100-1280", "xlm-mlm-17-1280", "xlm-mlm-en-2048", "xlm-mlm-ende-1024", "xlm-mlm-enfr-1024", "xlm-mlm-enro-1024", "xlm-mlm-tlm-xnli15-1024", "xlm-mlm-xnli15-1024", "xlm-roberta-base", "xlm-roberta-large-finetuned-conll02-dutch", "xlm-roberta-large-finetuned-conll02-spanish", "xlm-roberta-large-finetuned-conll03-english", "xlm-roberta-large-finetuned-conll03-german", "xlm-roberta-large", "xlnet-base-cased", "xlnet-large-cased", ] if not os.path.exists(model_name_or_path): # Not a path, load from hub if "\\" in model_name_or_path or model_name_or_path.count("/") > 1: raise FileNotFoundError(f"Path {model_name_or_path} not found") if "/" not in model_name_or_path and model_name_or_path.lower() not in basic_transformer_models: # A model from sentence-transformers model_name_or_path = __MODEL_HUB_ORGANIZATION__ + "/" + model_name_or_path if is_sentence_transformer_model( model_name_or_path, token, cache_folder=cache_folder, revision=revision, local_files_only=local_files_only, ): modules, self.module_kwargs = self._load_sbert_model( model_name_or_path, token=token, cache_folder=cache_folder, revision=revision, trust_remote_code=trust_remote_code, local_files_only=local_files_only, model_kwargs=model_kwargs, tokenizer_kwargs=tokenizer_kwargs, config_kwargs=config_kwargs, ) else: modules = self._load_auto_model( model_name_or_path, token=token, cache_folder=cache_folder, revision=revision, trust_remote_code=trust_remote_code, local_files_only=local_files_only, model_kwargs=model_kwargs, tokenizer_kwargs=tokenizer_kwargs, config_kwargs=config_kwargs, ) if modules is not None and not isinstance(modules, OrderedDict): modules = OrderedDict([(str(idx), module) for 
idx, module in enumerate(modules)]) super().__init__(modules) # Ensure all tensors in the model are of the same dtype as the first tensor # This is necessary if the first module has been given a lower precision via # model_kwargs["torch_dtype"]. The rest of the model should be loaded in the same dtype # See #2887 for more details try: dtype = next(self.parameters()).dtype self.to(dtype) except StopIteration: pass self.to(device) self.is_hpu_graph_enabled = False if self.default_prompt_name is not None and self.default_prompt_name not in self.prompts: raise ValueError( f"Default prompt name '{self.default_prompt_name}' not found in the configured prompts " f"dictionary with keys {list(self.prompts.keys())!r}." ) if self.prompts: logger.info(f"{len(self.prompts)} prompts are loaded, with the keys: {list(self.prompts.keys())}") if self.default_prompt_name: logger.warning( f"Default prompt name is set to '{self.default_prompt_name}'. " "This prompt will be applied to all `encode()` calls, except if `encode()` " "is called with `prompt` or `prompt_name` parameters." ) # Ideally, INSTRUCTOR models should set `include_prompt=False` in their pooling configuration, but # that would be a breaking change for users currently using the InstructorEmbedding project. # So, instead we hardcode setting it for the main INSTRUCTOR models, and otherwise give a warning if we # suspect the user is using an INSTRUCTOR model. if model_name_or_path in ("hkunlp/instructor-base", "hkunlp/instructor-large", "hkunlp/instructor-xl"): self.set_pooling_include_prompt(include_prompt=False) elif ( model_name_or_path and "/" in model_name_or_path and "instructor" in model_name_or_path.split("/")[1].lower() ): if any([module.include_prompt for module in self if isinstance(module, Pooling)]): logger.warning( "Instructor models require `include_prompt=False` in the pooling configuration. " "Either update the model configuration or call `model.set_pooling_include_prompt(False)` after loading the model." ) # Pass the model to the model card data for later use in generating a model card upon saving this model self.model_card_data.register_model(self) def get_backend(self) -> Literal["torch", "onnx", "openvino"]: """Return the backend used for inference, which can be one of "torch", "onnx", or "openvino". Returns: str: The backend used for inference. """ return self.backend # Return a single tensor because we're passing a single sentence. @overload def encode( self, sentences: str, prompt_name: str | None = ..., prompt: str | None = ..., batch_size: int = ..., show_progress_bar: bool | None = ..., output_value: Literal["sentence_embedding", "token_embeddings"] = ..., precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = ..., convert_to_numpy: Literal[False] = ..., convert_to_tensor: bool = ..., device: str | None = ..., normalize_embeddings: bool = ..., **kwargs, ) -> Tensor: ... # Return a single array, because convert_to_numpy is True # and "sentence_embeddings" is passed @overload def encode( self, sentences: str | list[str] | np.ndarray, prompt_name: str | None = ..., prompt: str | None = ..., batch_size: int = ..., show_progress_bar: bool | None = ..., output_value: Literal["sentence_embedding"] = ..., precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = ..., convert_to_numpy: Literal[True] = ..., convert_to_tensor: Literal[False] = ..., device: str | None = ..., normalize_embeddings: bool = ..., **kwargs, ) -> np.ndarray: ... 
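    # Quick reference (illustrative sketch, assuming a loaded model bound to the name `model`):
    # how the overload combinations above map to concrete return types.
    #
    #     model.encode("single sentence")                      -> np.ndarray of shape (dim,)
    #     model.encode(["a", "b"])                             -> np.ndarray of shape (2, dim)
    #     model.encode(["a", "b"], convert_to_tensor=True)     -> torch.Tensor of shape (2, dim)
    #     model.encode(["a", "b"], convert_to_numpy=False)     -> list of torch.Tensor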
# Return a single tensor, because convert_to_tensor is True # and "sentence_embeddings" is passed @overload def encode( self, sentences: str | list[str] | np.ndarray, prompt_name: str | None = ..., prompt: str | None = ..., batch_size: int = ..., show_progress_bar: bool | None = ..., output_value: Literal["sentence_embedding"] = ..., precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = ..., convert_to_numpy: bool = ..., convert_to_tensor: Literal[True] = ..., device: str | None = ..., normalize_embeddings: bool = ..., **kwargs, ) -> Tensor: ... # Return a list of tensors. Value of convert_ doesn't matter. @overload def encode( self, sentences: list[str] | np.ndarray, prompt_name: str | None = ..., prompt: str | None = ..., batch_size: int = ..., show_progress_bar: bool | None = ..., output_value: Literal["sentence_embedding", "token_embeddings"] = ..., precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = ..., convert_to_numpy: bool = ..., convert_to_tensor: bool = ..., device: str | None = ..., normalize_embeddings: bool = ..., **kwargs, ) -> list[Tensor]: ... # Return a list of dict of features, ignore the conversion args. @overload def encode( self, sentences: list[str] | np.ndarray, prompt_name: str | None = ..., prompt: str | None = ..., batch_size: int = ..., show_progress_bar: bool | None = ..., output_value: None = ..., precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = ..., convert_to_numpy: bool = ..., convert_to_tensor: bool = ..., device: str | None = ..., normalize_embeddings: bool = ..., **kwargs, ) -> list[dict[str, Tensor]]: ... # Return a dict of features, ignore the conversion args. @overload def encode( self, sentences: str, prompt_name: str | None = ..., prompt: str | None = ..., batch_size: int = ..., show_progress_bar: bool | None = ..., output_value: None = ..., precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = ..., convert_to_numpy: bool = ..., convert_to_tensor: bool = ..., device: str | None = ..., normalize_embeddings: bool = ..., **kwargs, ) -> dict[str, Tensor]: ... # If "token_embeddings" is True, then the output is a single tensor. @overload def encode( self, sentences: str, prompt_name: str | None = ..., prompt: str | None = ..., batch_size: int = ..., show_progress_bar: bool | None = ..., output_value: Literal["token_embeddings"] = ..., precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = ..., convert_to_numpy: bool = ..., convert_to_tensor: bool = ..., device: str | None = ..., normalize_embeddings: bool = ..., **kwargs, ) -> Tensor: ... def encode( self, sentences: str | list[str] | np.ndarray, prompt_name: str | None = None, prompt: str | None = None, batch_size: int = 32, show_progress_bar: bool | None = None, output_value: Literal["sentence_embedding", "token_embeddings"] | None = "sentence_embedding", precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = "float32", convert_to_numpy: bool = True, convert_to_tensor: bool = False, device: str | None = None, normalize_embeddings: bool = False, **kwargs, ) -> list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: """ Computes sentence embeddings. Args: sentences (Union[str, List[str]]): The sentences to embed. prompt_name (Optional[str], optional): The name of the prompt to use for encoding. Must be a key in the `prompts` dictionary, which is either set in the constructor or loaded from the model configuration. 
For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ", ...}, then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?" because the sentence is appended to the prompt. If ``prompt`` is also set, this argument is ignored. Defaults to None. prompt (Optional[str], optional): The prompt to use for encoding. For example, if the prompt is "query: ", then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?" because the sentence is appended to the prompt. If ``prompt`` is set, ``prompt_name`` is ignored. Defaults to None. batch_size (int, optional): The batch size used for the computation. Defaults to 32. show_progress_bar (bool, optional): Whether to output a progress bar when encode sentences. Defaults to None. output_value (Optional[Literal["sentence_embedding", "token_embeddings"]], optional): The type of embeddings to return: "sentence_embedding" to get sentence embeddings, "token_embeddings" to get wordpiece token embeddings, and `None`, to get all output values. Defaults to "sentence_embedding". precision (Literal["float32", "int8", "uint8", "binary", "ubinary"], optional): The precision to use for the embeddings. Can be "float32", "int8", "uint8", "binary", or "ubinary". All non-float32 precisions are quantized embeddings. Quantized embeddings are smaller in size and faster to compute, but may have a lower accuracy. They are useful for reducing the size of the embeddings of a corpus for semantic search, among other tasks. Defaults to "float32". convert_to_numpy (bool, optional): Whether the output should be a list of numpy vectors. If False, it is a list of PyTorch tensors. Defaults to True. convert_to_tensor (bool, optional): Whether the output should be one large tensor. Overwrites `convert_to_numpy`. Defaults to False. device (str, optional): Which :class:`torch.device` to use for the computation. Defaults to None. normalize_embeddings (bool, optional): Whether to normalize returned vectors to have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used. Defaults to False. Returns: Union[List[Tensor], ndarray, Tensor]: By default, a 2d numpy array with shape [num_inputs, output_dimension] is returned. If only one string input is provided, then the output is a 1d array with shape [output_dimension]. If ``convert_to_tensor``, a torch Tensor is returned instead. If ``self.truncate_dim <= output_dimension`` then output_dimension is ``self.truncate_dim``. 
Example: :: from sentence_transformers import SentenceTransformer # Load a pre-trained SentenceTransformer model model = SentenceTransformer('all-mpnet-base-v2') # Encode some texts sentences = [ "The weather is lovely today.", "It's so sunny outside!", "He drove to the stadium.", ] embeddings = model.encode(sentences) print(embeddings.shape) # (3, 768) """ if self.device.type == "hpu" and not self.is_hpu_graph_enabled: import habana_frameworks.torch as ht if hasattr(ht, "hpu") and hasattr(ht.hpu, "wrap_in_hpu_graph"): ht.hpu.wrap_in_hpu_graph(self, disable_tensor_cache=True) self.is_hpu_graph_enabled = True self.eval() if show_progress_bar is None: show_progress_bar = logger.getEffectiveLevel() in (logging.INFO, logging.DEBUG) if convert_to_tensor: convert_to_numpy = False if output_value != "sentence_embedding": convert_to_tensor = False convert_to_numpy = False input_was_string = False if isinstance(sentences, str) or not hasattr( sentences, "__len__" ): # Cast an individual sentence to a list with length 1 sentences = [sentences] input_was_string = True if prompt is None: if prompt_name is not None: try: prompt = self.prompts[prompt_name] except KeyError: raise ValueError( f"Prompt name '{prompt_name}' not found in the configured prompts dictionary with keys {list(self.prompts.keys())!r}." ) elif self.default_prompt_name is not None: prompt = self.prompts.get(self.default_prompt_name, None) else: if prompt_name is not None: logger.warning( "Encode with either a `prompt`, a `prompt_name`, or neither, but not both. " "Ignoring the `prompt_name` in favor of `prompt`." ) extra_features = {} if prompt is not None: sentences = [prompt + sentence for sentence in sentences] # Some models (e.g. INSTRUCTOR, GRIT) require removing the prompt before pooling # Tracking the prompt length allow us to remove the prompt during pooling tokenized_prompt = self.tokenize([prompt]) if "input_ids" in tokenized_prompt: extra_features["prompt_length"] = tokenized_prompt["input_ids"].shape[-1] - 1 if device is None: device = self.device self.to(device) all_embeddings = [] length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences]) sentences_sorted = [sentences[idx] for idx in length_sorted_idx] for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar): sentences_batch = sentences_sorted[start_index : start_index + batch_size] # #==DEBUG================================================================================================ # print(f"\n=== DEBUG: Before tokenization ===") # print(f"Batch size: {len(sentences_batch)}") # print(f"Sentences in batch:") # for i, sent in enumerate(sentences_batch): # print(f" [{i}] Type: {type(sent)}, Length: {len(sent) if hasattr(sent, '__len__') else 'no len'}") # print(f" Content: {repr(sent)}...") # #==DEBUG================================================================================================ features = self.tokenize(sentences_batch) # #==DEBUG================================================================================================ # print(f"\n=== DEBUG: After tokenization (features dict) ===") # print(f"Features keys: {list(features.keys())}") # for key, value in features.items(): # print(f" {key}:") # print(f" Type: {type(value)}") # if hasattr(value, 'shape'): # print(f" Shape: {value.shape}") # elif hasattr(value, '__len__'): # print(f" Length: {len(value)}") # if isinstance(value, (list, tuple)) and len(value) > 0: # print(f" First element type: {type(value[0])}") # if hasattr(value[0], 
'__len__'): # print(f" First element length: {len(value[0])}") # print(f" Sample content: {value}") # First 2 elements # print(f" Content preview: {str(value)}...") # #==DEBUG================================================================================================ # print( # f"SentenceTransformer.py - DEBUG: batch {start_index // batch_size} padded_side={self.tokenizer.padding_side if hasattr(self, 'tokenizer') else 'n/a'} " # f"max_len={self.tokenizer.model_max_length if hasattr(self, 'tokenizer') else 'n/a'} " # f"seq_lens={[len(ids) for ids in features['input_ids'].tolist()] if 'input_ids' in features else 'n/a'}" # ) if self.device.type == "hpu": if "input_ids" in features: curr_tokenize_len = features["input_ids"].shape additional_pad_len = 2 ** math.ceil(math.log2(curr_tokenize_len[1])) - curr_tokenize_len[1] features["input_ids"] = torch.cat( ( features["input_ids"], torch.ones((curr_tokenize_len[0], additional_pad_len), dtype=torch.int8), ), -1, ) features["attention_mask"] = torch.cat( ( features["attention_mask"], torch.zeros((curr_tokenize_len[0], additional_pad_len), dtype=torch.int8), ), -1, ) if "token_type_ids" in features: features["token_type_ids"] = torch.cat( ( features["token_type_ids"], torch.zeros((curr_tokenize_len[0], additional_pad_len), dtype=torch.int8), ), -1, ) features = batch_to_device(features, device) features.update(extra_features) with torch.no_grad(): out_features = self.forward(features, **kwargs) if self.device.type == "hpu": out_features = copy.deepcopy(out_features) out_features["sentence_embedding"] = truncate_embeddings( out_features["sentence_embedding"], self.truncate_dim ) if output_value == "token_embeddings": embeddings = [] for token_emb, attention in zip(out_features[output_value], out_features["attention_mask"]): last_mask_id = len(attention) - 1 while last_mask_id > 0 and attention[last_mask_id].item() == 0: last_mask_id -= 1 embeddings.append(token_emb[0 : last_mask_id + 1]) elif output_value is None: # Return all outputs embeddings = [] for idx in range(len(out_features["sentence_embedding"])): batch_item = {} for name, value in out_features.items(): try: batch_item[name] = value[idx] except TypeError: # Handle non-indexable values (like prompt_length) batch_item[name] = value embeddings.append(batch_item) else: # Sentence embeddings embeddings = out_features[output_value] embeddings = embeddings.detach() if normalize_embeddings: embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) # fixes for #522 and #487 to avoid oom problems on gpu with large datasets if convert_to_numpy: embeddings = embeddings.cpu() all_embeddings.extend(embeddings) all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)] if precision and precision != "float32": all_embeddings = quantize_embeddings(all_embeddings, precision=precision) if convert_to_tensor: if len(all_embeddings): if isinstance(all_embeddings, np.ndarray): all_embeddings = torch.from_numpy(all_embeddings) else: all_embeddings = torch.stack(all_embeddings) else: all_embeddings = torch.Tensor() elif convert_to_numpy: if not isinstance(all_embeddings, np.ndarray): if all_embeddings and all_embeddings[0].dtype == torch.bfloat16: all_embeddings = np.asarray([emb.float().numpy() for emb in all_embeddings]) else: all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings]) elif isinstance(all_embeddings, np.ndarray): all_embeddings = [torch.from_numpy(embedding) for embedding in all_embeddings] if input_was_string: all_embeddings = all_embeddings[0] 
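        # Summary of the post-processing above: embeddings were computed in length-sorted
        # order for efficient batching, indexing by np.argsort(length_sorted_idx) restored
        # the caller's original input order, optional quantization and numpy/tensor
        # conversion were applied, and a single string input was unwrapped to one embedding.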
return all_embeddings def forward(self, input: dict[str, Tensor], **kwargs) -> dict[str, Tensor]: if self.module_kwargs is None: return super().forward(input) for module_name, module in self.named_children(): module_kwarg_keys = self.module_kwargs.get(module_name, []) module_kwargs = {key: value for key, value in kwargs.items() if key in module_kwarg_keys} input = module(input, **module_kwargs) return input @property def similarity_fn_name(self) -> Literal["cosine", "dot", "euclidean", "manhattan"]: """Return the name of the similarity function used by :meth:`SentenceTransformer.similarity` and :meth:`SentenceTransformer.similarity_pairwise`. Returns: Optional[str]: The name of the similarity function. Can be None if not set, in which case it will default to "cosine" when first called. Example: >>> model = SentenceTransformer("multi-qa-mpnet-base-dot-v1") >>> model.similarity_fn_name 'dot' """ if self._similarity_fn_name is None: self.similarity_fn_name = SimilarityFunction.COSINE return self._similarity_fn_name @similarity_fn_name.setter def similarity_fn_name( self, value: Literal["cosine", "dot", "euclidean", "manhattan"] | SimilarityFunction ) -> None: if isinstance(value, SimilarityFunction): value = value.value self._similarity_fn_name = value if value is not None: self._similarity = SimilarityFunction.to_similarity_fn(value) self._similarity_pairwise = SimilarityFunction.to_similarity_pairwise_fn(value) @overload def similarity(self, embeddings1: Tensor, embeddings2: Tensor) -> Tensor: ... @overload def similarity(self, embeddings1: npt.NDArray[np.float32], embeddings2: npt.NDArray[np.float32]) -> Tensor: ... @property def similarity(self) -> Callable[[Tensor | npt.NDArray[np.float32], Tensor | npt.NDArray[np.float32]], Tensor]: """ Compute the similarity between two collections of embeddings. The output will be a matrix with the similarity scores between all embeddings from the first parameter and all embeddings from the second parameter. This differs from `similarity_pairwise` which computes the similarity between each pair of embeddings. This method supports only embeddings with fp32 precision and does not accommodate quantized embeddings. Args: embeddings1 (Union[Tensor, ndarray]): [num_embeddings_1, embedding_dim] or [embedding_dim]-shaped numpy array or torch tensor. embeddings2 (Union[Tensor, ndarray]): [num_embeddings_2, embedding_dim] or [embedding_dim]-shaped numpy array or torch tensor. Returns: Tensor: A [num_embeddings_1, num_embeddings_2]-shaped torch tensor with similarity scores. Example: :: >>> model = SentenceTransformer("all-mpnet-base-v2") >>> sentences = [ ... "The weather is so nice!", ... "It's so sunny outside.", ... "He's driving to the movie theater.", ... "She's going to the cinema.", ... ] >>> embeddings = model.encode(sentences, normalize_embeddings=True) >>> model.similarity(embeddings, embeddings) tensor([[1.0000, 0.7235, 0.0290, 0.1309], [0.7235, 1.0000, 0.0613, 0.1129], [0.0290, 0.0613, 1.0000, 0.5027], [0.1309, 0.1129, 0.5027, 1.0000]]) >>> model.similarity_fn_name "cosine" >>> model.similarity_fn_name = "euclidean" >>> model.similarity(embeddings, embeddings) tensor([[-0.0000, -0.7437, -1.3935, -1.3184], [-0.7437, -0.0000, -1.3702, -1.3320], [-1.3935, -1.3702, -0.0000, -0.9973], [-1.3184, -1.3320, -0.9973, -0.0000]]) """ if self.similarity_fn_name is None: self.similarity_fn_name = SimilarityFunction.COSINE return self._similarity @overload def similarity_pairwise(self, embeddings1: Tensor, embeddings2: Tensor) -> Tensor: ... 
@overload def similarity_pairwise( self, embeddings1: npt.NDArray[np.float32], embeddings2: npt.NDArray[np.float32] ) -> Tensor: ... @property def similarity_pairwise( self, ) -> Callable[[Tensor | npt.NDArray[np.float32], Tensor | npt.NDArray[np.float32]], Tensor]: """ Compute the similarity between two collections of embeddings. The output will be a vector with the similarity scores between each pair of embeddings. This method supports only embeddings with fp32 precision and does not accommodate quantized embeddings. Args: embeddings1 (Union[Tensor, ndarray]): [num_embeddings, embedding_dim] or [embedding_dim]-shaped numpy array or torch tensor. embeddings2 (Union[Tensor, ndarray]): [num_embeddings, embedding_dim] or [embedding_dim]-shaped numpy array or torch tensor. Returns: Tensor: A [num_embeddings]-shaped torch tensor with pairwise similarity scores. Example: :: >>> model = SentenceTransformer("all-mpnet-base-v2") >>> sentences = [ ... "The weather is so nice!", ... "It's so sunny outside.", ... "He's driving to the movie theater.", ... "She's going to the cinema.", ... ] >>> embeddings = model.encode(sentences, normalize_embeddings=True) >>> model.similarity_pairwise(embeddings[::2], embeddings[1::2]) tensor([0.7235, 0.5027]) >>> model.similarity_fn_name "cosine" >>> model.similarity_fn_name = "euclidean" >>> model.similarity_pairwise(embeddings[::2], embeddings[1::2]) tensor([-0.7437, -0.9973]) """ if self.similarity_fn_name is None: self.similarity_fn_name = SimilarityFunction.COSINE return self._similarity_pairwise def start_multi_process_pool( self, target_devices: list[str] = None ) -> dict[Literal["input", "output", "processes"], Any]: """ Starts a multi-process pool to process the encoding with several independent processes via :meth:`SentenceTransformer.encode_multi_process `. This method is recommended if you want to encode on multiple GPUs or CPUs. It is advised to start only one process per GPU. This method works together with encode_multi_process and stop_multi_process_pool. Args: target_devices (List[str], optional): PyTorch target devices, e.g. ["cuda:0", "cuda:1", ...], ["npu:0", "npu:1", ...], or ["cpu", "cpu", "cpu", "cpu"]. If target_devices is None and CUDA/NPU is available, then all available CUDA/NPU devices will be used. If target_devices is None and CUDA/NPU is not available, then 4 CPU devices will be used. Returns: Dict[str, Any]: A dictionary with the target processes, an input queue, and an output queue. """ if target_devices is None: if torch.cuda.is_available(): target_devices = [f"cuda:{i}" for i in range(torch.cuda.device_count())] elif is_torch_npu_available(): target_devices = [f"npu:{i}" for i in range(torch.npu.device_count())] else: logger.info("CUDA/NPU is not available. Starting 4 CPU workers") target_devices = ["cpu"] * 4 logger.info("Start multi-process pool on devices: {}".format(", ".join(map(str, target_devices)))) self.to("cpu") self.share_memory() ctx = mp.get_context("spawn") input_queue = ctx.Queue() output_queue = ctx.Queue() processes = [] for device_id in target_devices: p = ctx.Process( target=SentenceTransformer._encode_multi_process_worker, args=(device_id, self, input_queue, output_queue), daemon=True, ) p.start() processes.append(p) return {"input": input_queue, "output": output_queue, "processes": processes} @staticmethod def stop_multi_process_pool(pool: dict[Literal["input", "output", "processes"], Any]) -> None: """ Stops all processes started with start_multi_process_pool. 
Args: pool (Dict[str, object]): A dictionary containing the input queue, output queue, and process list. Returns: None """ for p in pool["processes"]: p.terminate() for p in pool["processes"]: p.join() p.close() pool["input"].close() pool["output"].close() def encode_multi_process( self, sentences: list[str], pool: dict[Literal["input", "output", "processes"], Any], prompt_name: str | None = None, prompt: str | None = None, batch_size: int = 32, chunk_size: int = None, show_progress_bar: bool | None = None, precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] = "float32", normalize_embeddings: bool = False, ) -> np.ndarray: """ Encodes a list of sentences using multiple processes and GPUs via :meth:`SentenceTransformer.encode `. The sentences are chunked into smaller packages and sent to individual processes, which encode them on different GPUs or CPUs. This method is only suitable for encoding large sets of sentences. Args: sentences (List[str]): List of sentences to encode. pool (Dict[Literal["input", "output", "processes"], Any]): A pool of workers started with :meth:`SentenceTransformer.start_multi_process_pool `. prompt_name (Optional[str], optional): The name of the prompt to use for encoding. Must be a key in the `prompts` dictionary, which is either set in the constructor or loaded from the model configuration. For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ", ...}, then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?" because the sentence is appended to the prompt. If ``prompt`` is also set, this argument is ignored. Defaults to None. prompt (Optional[str], optional): The prompt to use for encoding. For example, if the prompt is "query: ", then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?" because the sentence is appended to the prompt. If ``prompt`` is set, ``prompt_name`` is ignored. Defaults to None. batch_size (int): Encode sentences with batch size. (default: 32) chunk_size (int): Sentences are chunked and sent to the individual processes. If None, it determines a sensible size. Defaults to None. show_progress_bar (bool, optional): Whether to output a progress bar when encode sentences. Defaults to None. precision (Literal["float32", "int8", "uint8", "binary", "ubinary"]): The precision to use for the embeddings. Can be "float32", "int8", "uint8", "binary", or "ubinary". All non-float32 precisions are quantized embeddings. Quantized embeddings are smaller in size and faster to compute, but may have lower accuracy. They are useful for reducing the size of the embeddings of a corpus for semantic search, among other tasks. Defaults to "float32". normalize_embeddings (bool): Whether to normalize returned vectors to have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used. Defaults to False. Returns: np.ndarray: A 2D numpy array with shape [num_inputs, output_dimension]. 
Example: :: from sentence_transformers import SentenceTransformer def main(): model = SentenceTransformer("all-mpnet-base-v2") sentences = ["The weather is so nice!", "It's so sunny outside.", "He's driving to the movie theater.", "She's going to the cinema."] * 1000 pool = model.start_multi_process_pool() embeddings = model.encode_multi_process(sentences, pool) model.stop_multi_process_pool(pool) print(embeddings.shape) # => (4000, 768) if __name__ == "__main__": main() """ if chunk_size is None: chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000) if show_progress_bar is None: show_progress_bar = logger.getEffectiveLevel() in (logging.INFO, logging.DEBUG) # print(f"Chunk data into {math.ceil(len(sentences) / chunk_size)} packages of size {chunk_size}") input_queue = pool["input"] last_chunk_id = 0 chunk = [] for sentence in sentences: chunk.append(sentence) if len(chunk) >= chunk_size: input_queue.put( [last_chunk_id, batch_size, chunk, prompt_name, prompt, precision, normalize_embeddings] ) last_chunk_id += 1 chunk = [] if len(chunk) > 0: input_queue.put([last_chunk_id, batch_size, chunk, prompt_name, prompt, precision, normalize_embeddings]) last_chunk_id += 1 output_queue = pool["output"] results_list = sorted( [output_queue.get() for _ in trange(last_chunk_id, desc="Chunks", disable=not show_progress_bar)], key=lambda x: x[0], ) embeddings = np.concatenate([result[1] for result in results_list]) return embeddings @staticmethod def _encode_multi_process_worker( target_device: str, model: SentenceTransformer, input_queue: Queue, results_queue: Queue ) -> None: """ Internal working process to encode sentences in multi-process setup """ while True: try: chunk_id, batch_size, sentences, prompt_name, prompt, precision, normalize_embeddings = ( input_queue.get() ) embeddings = model.encode( sentences, prompt_name=prompt_name, prompt=prompt, device=target_device, show_progress_bar=False, precision=precision, convert_to_numpy=True, batch_size=batch_size, normalize_embeddings=normalize_embeddings, ) results_queue.put([chunk_id, embeddings]) except queue.Empty: break def set_pooling_include_prompt(self, include_prompt: bool) -> None: """ Sets the `include_prompt` attribute in the pooling layer in the model, if there is one. This is useful for INSTRUCTOR models, as the prompt should be excluded from the pooling strategy for these models. Args: include_prompt (bool): Whether to include the prompt in the pooling layer. Returns: None """ for module in self: if isinstance(module, Pooling): module.include_prompt = include_prompt break def get_max_seq_length(self) -> int | None: """ Returns the maximal sequence length that the model accepts. Longer inputs will be truncated. Returns: Optional[int]: The maximal sequence length that the model accepts, or None if it is not defined. """ if hasattr(self._first_module(), "max_seq_length"): return self._first_module().max_seq_length return None def tokenize(self, texts: list[str] | list[dict] | list[tuple[str, str]]) -> dict[str, Tensor]: """ Tokenizes the texts. Args: texts (Union[List[str], List[Dict], List[Tuple[str, str]]]): A list of texts to be tokenized. Returns: Dict[str, Tensor]: A dictionary of tensors with the tokenized texts. Common keys are "input_ids", "attention_mask", and "token_type_ids". 
""" # print(f"SentenceTransformer.py - DEBUG: tokenize(): got {len(texts)} texts") return self._first_module().tokenize(texts) def get_sentence_features(self, *features) -> dict[Literal["sentence_embedding"], Tensor]: return self._first_module().get_sentence_features(*features) def get_sentence_embedding_dimension(self) -> int | None: """ Returns the number of dimensions in the output of :meth:`SentenceTransformer.encode `. Returns: Optional[int]: The number of dimensions in the output of `encode`. If it's not known, it's `None`. """ output_dim = None for mod in reversed(self._modules.values()): sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None) if callable(sent_embedding_dim_method): output_dim = sent_embedding_dim_method() break if self.truncate_dim is not None: # The user requested truncation. If they set it to a dim greater than output_dim, # no truncation will actually happen. So return output_dim instead of self.truncate_dim return min(output_dim or np.inf, self.truncate_dim) return output_dim @contextmanager def truncate_sentence_embeddings(self, truncate_dim: int | None) -> Iterator[None]: """ In this context, :meth:`SentenceTransformer.encode ` outputs sentence embeddings truncated at dimension ``truncate_dim``. This may be useful when you are using the same model for different applications where different dimensions are needed. Args: truncate_dim (int, optional): The dimension to truncate sentence embeddings to. ``None`` does no truncation. Example: :: from sentence_transformers import SentenceTransformer model = SentenceTransformer("all-mpnet-base-v2") with model.truncate_sentence_embeddings(truncate_dim=16): embeddings_truncated = model.encode(["hello there", "hiya"]) assert embeddings_truncated.shape[-1] == 16 """ original_output_dim = self.truncate_dim try: self.truncate_dim = truncate_dim yield finally: self.truncate_dim = original_output_dim def _first_module(self) -> torch.nn.Module: """Returns the first module of this sequential embedder""" return self._modules[next(iter(self._modules))] def _last_module(self) -> torch.nn.Module: """Returns the last module of this sequential embedder""" return self._modules[next(reversed(self._modules))] def save( self, path: str, model_name: str | None = None, create_model_card: bool = True, train_datasets: list[str] | None = None, safe_serialization: bool = True, ) -> None: """ Saves a model and its configuration files to a directory, so that it can be loaded with ``SentenceTransformer(path)`` again. Args: path (str): Path on disc where the model will be saved. model_name (str, optional): Optional model name. create_model_card (bool, optional): If True, create a README.md with basic information about this model. train_datasets (List[str], optional): Optional list with the names of the datasets used to train the model. safe_serialization (bool, optional): If True, save the model using safetensors. If False, save the model the traditional (but unsafe) PyTorch way. 
""" if path is None: return os.makedirs(path, exist_ok=True) logger.info(f"Save model to {path}") modules_config = [] # Save some model info self._model_config["__version__"] = { "sentence_transformers": __version__, "transformers": transformers.__version__, "pytorch": torch.__version__, } with open(os.path.join(path, "config_sentence_transformers.json"), "w") as fOut: config = self._model_config.copy() config["prompts"] = self.prompts config["default_prompt_name"] = self.default_prompt_name config["similarity_fn_name"] = self.similarity_fn_name json.dump(config, fOut, indent=2) # Save modules for idx, name in enumerate(self._modules): module = self._modules[name] if idx == 0 and hasattr(module, "save_in_root"): # Save first module in the main folder model_path = path + "/" else: model_path = os.path.join(path, str(idx) + "_" + type(module).__name__) os.makedirs(model_path, exist_ok=True) # Try to save with safetensors, but fall back to the traditional PyTorch way if the module doesn't support it try: module.save(model_path, safe_serialization=safe_serialization) except TypeError: module.save(model_path) # "module" only works for Sentence Transformers as the modules have the same names as the classes class_ref = type(module).__module__ # For remote modules, we want to remove "transformers_modules.{repo_name}": if class_ref.startswith("transformers_modules."): class_file = sys.modules[class_ref].__file__ # Save the custom module file dest_file = Path(model_path) / (Path(class_file).name) shutil.copy(class_file, dest_file) # Save all files importeed in the custom module file for needed_file in get_relative_import_files(class_file): dest_file = Path(model_path) / (Path(needed_file).name) shutil.copy(needed_file, dest_file) # For remote modules, we want to ignore the "transformers_modules.{repo_id}" part, # i.e. we only want the filename class_ref = f"{class_ref.split('.')[-1]}.{type(module).__name__}" # For other cases, we want to add the class name: elif not class_ref.startswith("sentence_transformers."): class_ref = f"{class_ref}.{type(module).__name__}" module_config = {"idx": idx, "name": name, "path": os.path.basename(model_path), "type": class_ref} if self.module_kwargs and name in self.module_kwargs and (module_kwargs := self.module_kwargs[name]): module_config["kwargs"] = module_kwargs modules_config.append(module_config) with open(os.path.join(path, "modules.json"), "w") as fOut: json.dump(modules_config, fOut, indent=2) # Create model card if create_model_card: self._create_model_card(path, model_name, train_datasets) def save_pretrained( self, path: str, model_name: str | None = None, create_model_card: bool = True, train_datasets: list[str] | None = None, safe_serialization: bool = True, ) -> None: """ Saves a model and its configuration files to a directory, so that it can be loaded with ``SentenceTransformer(path)`` again. Args: path (str): Path on disc where the model will be saved. model_name (str, optional): Optional model name. create_model_card (bool, optional): If True, create a README.md with basic information about this model. train_datasets (List[str], optional): Optional list with the names of the datasets used to train the model. safe_serialization (bool, optional): If True, save the model using safetensors. If False, save the model the traditional (but unsafe) PyTorch way. 
""" self.save( path, model_name=model_name, create_model_card=create_model_card, train_datasets=train_datasets, safe_serialization=safe_serialization, ) def _create_model_card( self, path: str, model_name: str | None = None, train_datasets: list[str] | None = "deprecated" ) -> None: """ Create an automatic model and stores it in the specified path. If no training was done and the loaded model was a Sentence Transformer model already, then its model card is reused. Args: path (str): The path where the model card will be stored. model_name (Optional[str], optional): The name of the model. Defaults to None. train_datasets (Optional[List[str]], optional): Deprecated argument. Defaults to "deprecated". Returns: None """ if model_name: model_path = Path(model_name) if not model_path.exists() and not self.model_card_data.model_id: self.model_card_data.model_id = model_name # If we loaded a Sentence Transformer model from the Hub, and no training was done, then # we don't generate a new model card, but reuse the old one instead. if self._model_card_text and "generated_from_trainer" not in self.model_card_data.tags: model_card = self._model_card_text if self.model_card_data.model_id: # If the original model card was saved without a model_id, we replace the model_id with the new model_id model_card = model_card.replace( 'model = SentenceTransformer("sentence_transformers_model_id"', f'model = SentenceTransformer("{self.model_card_data.model_id}"', ) else: try: model_card = generate_model_card(self) except Exception: logger.error( f"Error while generating model card:\n{traceback.format_exc()}" "Consider opening an issue on https://github.com/UKPLab/sentence-transformers/issues with this traceback.\n" "Skipping model card creation." ) return with open(os.path.join(path, "README.md"), "w", encoding="utf8") as fOut: fOut.write(model_card) @save_to_hub_args_decorator def save_to_hub( self, repo_id: str, organization: str | None = None, token: str | None = None, private: bool | None = None, safe_serialization: bool = True, commit_message: str = "Add new SentenceTransformer model.", local_model_path: str | None = None, exist_ok: bool = False, replace_model_card: bool = False, train_datasets: list[str] | None = None, ) -> str: """ DEPRECATED, use `push_to_hub` instead. Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository. Args: repo_id (str): Repository name for your model in the Hub, including the user or organization. token (str, optional): An authentication token (See https://huggingface.co/settings/token) private (bool, optional): Set to true, for hosting a private model safe_serialization (bool, optional): If true, save the model using safetensors. If false, save the model the traditional PyTorch way commit_message (str, optional): Message to commit while pushing. local_model_path (str, optional): Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded exist_ok (bool, optional): If true, saving to an existing repository is OK. If false, saving only to a new repository is possible replace_model_card (bool, optional): If true, replace an existing model card in the hub with the automatically created model card train_datasets (List[str], optional): Datasets used to train the model. If set, the datasets will be added to the model card in the Hub. Returns: str: The url of the commit of your model in the repository on the Hugging Face Hub. 
""" logger.warning( "The `save_to_hub` method is deprecated and will be removed in a future version of SentenceTransformers." " Please use `push_to_hub` instead for future model uploads." ) if organization: if "/" not in repo_id: logger.warning( f'Providing an `organization` to `save_to_hub` is deprecated, please use `repo_id="{organization}/{repo_id}"` instead.' ) repo_id = f"{organization}/{repo_id}" elif repo_id.split("/")[0] != organization: raise ValueError( "Providing an `organization` to `save_to_hub` is deprecated, please only use `repo_id`." ) else: logger.warning( f'Providing an `organization` to `save_to_hub` is deprecated, please only use `repo_id="{repo_id}"` instead.' ) return self.push_to_hub( repo_id=repo_id, token=token, private=private, safe_serialization=safe_serialization, commit_message=commit_message, local_model_path=local_model_path, exist_ok=exist_ok, replace_model_card=replace_model_card, train_datasets=train_datasets, ) def push_to_hub( self, repo_id: str, token: str | None = None, private: bool | None = None, safe_serialization: bool = True, commit_message: str | None = None, local_model_path: str | None = None, exist_ok: bool = False, replace_model_card: bool = False, train_datasets: list[str] | None = None, revision: str | None = None, create_pr: bool = False, ) -> str: """ Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository. Args: repo_id (str): Repository name for your model in the Hub, including the user or organization. token (str, optional): An authentication token (See https://huggingface.co/settings/token) private (bool, optional): Set to true, for hosting a private model safe_serialization (bool, optional): If true, save the model using safetensors. If false, save the model the traditional PyTorch way commit_message (str, optional): Message to commit while pushing. local_model_path (str, optional): Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded exist_ok (bool, optional): If true, saving to an existing repository is OK. If false, saving only to a new repository is possible replace_model_card (bool, optional): If true, replace an existing model card in the hub with the automatically created model card train_datasets (List[str], optional): Datasets used to train the model. If set, the datasets will be added to the model card in the Hub. revision (str, optional): Branch to push the uploaded files to create_pr (bool, optional): If True, create a pull request instead of pushing directly to the main branch Returns: str: The url of the commit of your model in the repository on the Hugging Face Hub. """ api = HfApi(token=token) repo_url = api.create_repo( repo_id=repo_id, private=private, repo_type=None, exist_ok=exist_ok or create_pr, ) repo_id = repo_url.repo_id # Update the repo_id in case the old repo_id didn't contain a user or organization self.model_card_data.set_model_id(repo_id) if revision is not None: api.create_branch(repo_id=repo_id, branch=revision, exist_ok=True) if commit_message is None: backend = self.get_backend() if backend == "torch": commit_message = "Add new SentenceTransformer model" else: commit_message = f"Add new SentenceTransformer model with an {backend} backend" commit_description = "" if create_pr: commit_description = f"""\ Hello! 
*This pull request has been automatically generated from the [`push_to_hub`](https://sbert.net/docs/package_reference/sentence_transformer/SentenceTransformer.html#sentence_transformers.SentenceTransformer.push_to_hub) method from the Sentence Transformers library.* ## Full Model Architecture: ``` {self} ``` ## Tip: Consider testing this pull request before merging by loading the model from this PR with the `revision` argument: ```python from sentence_transformers import SentenceTransformer # TODO: Fill in the PR number pr_number = 2 model = SentenceTransformer( "{repo_id}", revision=f"refs/pr/{{pr_number}}", backend="{self.get_backend()}", ) # Verify that everything works as expected embeddings = model.encode(["The weather is lovely today.", "It's so sunny outside!", "He drove to the stadium."]) print(embeddings.shape) similarities = model.similarity(embeddings, embeddings) print(similarities) ``` """ if local_model_path: folder_url = api.upload_folder( repo_id=repo_id, folder_path=local_model_path, commit_message=commit_message, commit_description=commit_description, revision=revision, create_pr=create_pr, ) else: with tempfile.TemporaryDirectory() as tmp_dir: create_model_card = replace_model_card or not os.path.exists(os.path.join(tmp_dir, "README.md")) self.save_pretrained( tmp_dir, model_name=repo_url.repo_id, create_model_card=create_model_card, train_datasets=train_datasets, safe_serialization=safe_serialization, ) folder_url = api.upload_folder( repo_id=repo_id, folder_path=tmp_dir, commit_message=commit_message, commit_description=commit_description, revision=revision, create_pr=create_pr, ) if create_pr: return folder_url.pr_url return folder_url.commit_url # def _text_length(self, text: list[int] | list[list[int]]) -> int: # """ # Help function to get the length for the input text. Text can be either # a list of ints (which means a single text as input), or a tuple of list of ints # (representing several text inputs to the model). # """ # if isinstance(text, dict): # {key: value} case # return len(next(iter(text.values()))) # elif not hasattr(text, "__len__"): # Object has no len() method # return 1 # elif len(text) == 0 or isinstance(text[0], int): # Empty string or list of ints # return len(text) # else: # return sum([len(t) for t in text]) # Sum of length of individual strings # custom method that's more flexible and expansive def _text_length(self, text: str | list[int] | list[list[int]]) -> int: """ Help function to get the length for the input text. Text can be either a list of ints (which means a single text as input), or a tuple of list of ints (representing several text inputs to the model). """ if isinstance(text, str): # Handle string input directly return len(text) elif isinstance(text, dict): # {key: value} case return len(next(iter(text.values()))) elif not hasattr(text, "__len__"): # Object has no len() method return 1 elif len(text) == 0 or isinstance(text[0], int): # Empty string or list of ints return len(text) else: return sum([len(t) for t in text]) # Sum of length of individual strings def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None) -> dict[str, float] | float: """ Evaluate the model based on an evaluator Args: evaluator (SentenceEvaluator): The evaluator used to evaluate the model. output_path (str, optional): The path where the evaluator can write the results. Defaults to None. Returns: The evaluation results. 
""" if output_path is not None: os.makedirs(output_path, exist_ok=True) return evaluator(self, output_path) def _load_auto_model( self, model_name_or_path: str, token: bool | str | None, cache_folder: str | None, revision: str | None = None, trust_remote_code: bool = False, local_files_only: bool = False, model_kwargs: dict[str, Any] | None = None, tokenizer_kwargs: dict[str, Any] | None = None, config_kwargs: dict[str, Any] | None = None, ) -> list[nn.Module]: """ Creates a simple Transformer + Mean Pooling model and returns the modules Args: model_name_or_path (str): The name or path of the pre-trained model. token (Optional[Union[bool, str]]): The token to use for the model. cache_folder (Optional[str]): The folder to cache the model. revision (Optional[str], optional): The revision of the model. Defaults to None. trust_remote_code (bool, optional): Whether to trust remote code. Defaults to False. local_files_only (bool, optional): Whether to use only local files. Defaults to False. model_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for the model. Defaults to None. tokenizer_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for the tokenizer. Defaults to None. config_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for the config. Defaults to None. Returns: List[nn.Module]: A list containing the transformer model and the pooling model. """ logger.warning( f"No sentence-transformers model found with name {model_name_or_path}. Creating a new one with mean pooling." ) shared_kwargs = { "token": token, "trust_remote_code": trust_remote_code, "revision": revision, "local_files_only": local_files_only, } model_kwargs = shared_kwargs if model_kwargs is None else {**shared_kwargs, **model_kwargs} tokenizer_kwargs = shared_kwargs if tokenizer_kwargs is None else {**shared_kwargs, **tokenizer_kwargs} config_kwargs = shared_kwargs if config_kwargs is None else {**shared_kwargs, **config_kwargs} transformer_model = Transformer( model_name_or_path, cache_dir=cache_folder, model_args=model_kwargs, tokenizer_args=tokenizer_kwargs, config_args=config_kwargs, backend=self.backend, ) pooling_model = Pooling(transformer_model.get_word_embedding_dimension(), "mean") if not local_files_only: self.model_card_data.set_base_model(model_name_or_path, revision=revision) return [transformer_model, pooling_model] def _load_module_class_from_ref( self, class_ref: str, model_name_or_path: str, trust_remote_code: bool, revision: str | None, model_kwargs: dict[str, Any] | None, ) -> nn.Module: # If the class is from sentence_transformers, we can directly import it, # otherwise, we try to import it dynamically, and if that fails, we fall back to the default import if class_ref.startswith("sentence_transformers."): return import_from_string(class_ref) if trust_remote_code or os.path.exists(model_name_or_path): code_revision = model_kwargs.pop("code_revision", None) if model_kwargs else None try: return get_class_from_dynamic_module( class_ref, model_name_or_path, revision=revision, code_revision=code_revision, ) except (OSError, ValueError): # Ignore the error if 1) the file does not exist, or 2) the class_ref is not correctly formatted/found pass return import_from_string(class_ref) def _load_sbert_model( self, model_name_or_path: str, token: bool | str | None, cache_folder: str | None, revision: str | None = None, trust_remote_code: bool = False, local_files_only: bool = False, model_kwargs: dict[str, Any] | None = None, tokenizer_kwargs: 
dict[str, Any] | None = None, config_kwargs: dict[str, Any] | None = None, ) -> dict[str, nn.Module]: """ Loads a full SentenceTransformer model using the modules.json file. Args: model_name_or_path (str): The name or path of the pre-trained model. token (Optional[Union[bool, str]]): The token to use for the model. cache_folder (Optional[str]): The folder to cache the model. revision (Optional[str], optional): The revision of the model. Defaults to None. trust_remote_code (bool, optional): Whether to trust remote code. Defaults to False. local_files_only (bool, optional): Whether to use only local files. Defaults to False. model_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for the model. Defaults to None. tokenizer_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for the tokenizer. Defaults to None. config_kwargs (Optional[Dict[str, Any]], optional): Additional keyword arguments for the config. Defaults to None. Returns: OrderedDict[str, nn.Module]: An ordered dictionary containing the modules of the model. """ # Check if the config_sentence_transformers.json file exists (exists since v2 of the framework) config_sentence_transformers_json_path = load_file_path( model_name_or_path, "config_sentence_transformers.json", token=token, cache_folder=cache_folder, revision=revision, local_files_only=local_files_only, ) if config_sentence_transformers_json_path is not None: with open(config_sentence_transformers_json_path) as fIn: self._model_config = json.load(fIn) if ( "__version__" in self._model_config and "sentence_transformers" in self._model_config["__version__"] and version.parse(self._model_config["__version__"]["sentence_transformers"]) > version.parse(__version__) ): logger.warning( f'You are trying to use a model that was created with Sentence Transformers version {self._model_config["__version__"]["sentence_transformers"]}, ' f"but you're currently using version {__version__}. This might cause unexpected behavior or errors. " "In that case, try to update to the latest version." ) # Set score functions & prompts if not already overridden by the __init__ calls if self._similarity_fn_name is None: self.similarity_fn_name = self._model_config.get("similarity_fn_name", None) if not self.prompts: self.prompts = self._model_config.get("prompts", {}) if not self.default_prompt_name: self.default_prompt_name = self._model_config.get("default_prompt_name", None) # Check if a readme exists model_card_path = load_file_path( model_name_or_path, "README.md", token=token, cache_folder=cache_folder, revision=revision, local_files_only=local_files_only, ) if model_card_path is not None: try: with open(model_card_path, encoding="utf8") as fIn: self._model_card_text = fIn.read() except Exception: pass # Load the modules of sentence transformer modules_json_path = load_file_path( model_name_or_path, "modules.json", token=token, cache_folder=cache_folder, revision=revision, local_files_only=local_files_only, ) with open(modules_json_path) as fIn: modules_config = json.load(fIn) modules = OrderedDict() module_kwargs = OrderedDict() for module_config in modules_config: class_ref = module_config["type"] module_class = self._load_module_class_from_ref( class_ref, model_name_or_path, trust_remote_code, revision, model_kwargs ) # For Transformer, don't load the full directory, rely on `transformers` instead # But, do load the config file first. 
if module_config["path"] == "": kwargs = {} for config_name in [ "sentence_bert_config.json", "sentence_roberta_config.json", "sentence_distilbert_config.json", "sentence_camembert_config.json", "sentence_albert_config.json", "sentence_xlm-roberta_config.json", "sentence_xlnet_config.json", ]: config_path = load_file_path( model_name_or_path, config_name, token=token, cache_folder=cache_folder, revision=revision, local_files_only=local_files_only, ) if config_path is not None: with open(config_path) as fIn: kwargs = json.load(fIn) # Don't allow configs to set trust_remote_code if "model_args" in kwargs and "trust_remote_code" in kwargs["model_args"]: kwargs["model_args"].pop("trust_remote_code") if "tokenizer_args" in kwargs and "trust_remote_code" in kwargs["tokenizer_args"]: kwargs["tokenizer_args"].pop("trust_remote_code") if "config_args" in kwargs and "trust_remote_code" in kwargs["config_args"]: kwargs["config_args"].pop("trust_remote_code") break hub_kwargs = { "token": token, "trust_remote_code": trust_remote_code, "revision": revision, "local_files_only": local_files_only, } # 3rd priority: config file if "model_args" not in kwargs: kwargs["model_args"] = {} if "tokenizer_args" not in kwargs: kwargs["tokenizer_args"] = {} if "config_args" not in kwargs: kwargs["config_args"] = {} # 2nd priority: hub_kwargs kwargs["model_args"].update(hub_kwargs) kwargs["tokenizer_args"].update(hub_kwargs) kwargs["config_args"].update(hub_kwargs) # 1st priority: kwargs passed to SentenceTransformer if model_kwargs: kwargs["model_args"].update(model_kwargs) if tokenizer_kwargs: kwargs["tokenizer_args"].update(tokenizer_kwargs) if config_kwargs: kwargs["config_args"].update(config_kwargs) # Try to initialize the module with a lot of kwargs, but only if the module supports them # Otherwise we fall back to the load method try: module = module_class(model_name_or_path, cache_dir=cache_folder, backend=self.backend, **kwargs) except TypeError: module = module_class.load(model_name_or_path) else: # Normalize does not require any files to be loaded if module_class == Normalize: module_path = None else: module_path = load_dir_path( model_name_or_path, module_config["path"], token=token, cache_folder=cache_folder, revision=revision, local_files_only=local_files_only, ) module = module_class.load(module_path) modules[module_config["name"]] = module module_kwargs[module_config["name"]] = module_config.get("kwargs", []) if revision is None: path_parts = Path(modules_json_path) if len(path_parts.parts) >= 2: revision_path_part = Path(modules_json_path).parts[-2] if len(revision_path_part) == 40: revision = revision_path_part if not local_files_only: self.model_card_data.set_base_model(model_name_or_path, revision=revision) return modules, module_kwargs @staticmethod def load(input_path) -> SentenceTransformer: return SentenceTransformer(input_path) @property def device(self) -> device: """ Get torch.device from module, assuming that the whole module has one device. In case there are no PyTorch parameters, fall back to CPU. 
""" if isinstance(self[0], Transformer): return self[0].auto_model.device try: return next(self.parameters()).device except StopIteration: # For nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: nn.Module) -> list[tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = self._named_members(get_members_fn=find_tensor_attributes) try: first_tuple = next(gen) return first_tuple[1].device except StopIteration: return torch.device("cpu") @property def tokenizer(self) -> Any: """ Property to get the tokenizer that is used by this model """ return self._first_module().tokenizer @tokenizer.setter def tokenizer(self, value) -> None: """ Property to set the tokenizer that should be used by this model """ self._first_module().tokenizer = value @property def max_seq_length(self) -> int: """ Returns the maximal input sequence length for the model. Longer inputs will be truncated. Returns: int: The maximal input sequence length. Example: :: from sentence_transformers import SentenceTransformer model = SentenceTransformer("all-mpnet-base-v2") print(model.max_seq_length) # => 384 """ return self._first_module().max_seq_length @max_seq_length.setter def max_seq_length(self, value) -> None: """ Property to set the maximal input sequence length for the model. Longer inputs will be truncated. """ self._first_module().max_seq_length = value @property def _target_device(self) -> torch.device: logger.warning( "`SentenceTransformer._target_device` has been deprecated, please use `SentenceTransformer.device` instead.", ) return self.device @_target_device.setter def _target_device(self, device: int | str | torch.device | None = None) -> None: self.to(device) @property def _no_split_modules(self) -> list[str]: try: return self._first_module()._no_split_modules except AttributeError: return [] @property def _keys_to_ignore_on_save(self) -> list[str]: try: return self._first_module()._keys_to_ignore_on_save except AttributeError: return [] def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None) -> None: # Propagate the gradient checkpointing to the transformer model for module in self: if isinstance(module, Transformer): return module.auto_model.gradient_checkpointing_enable(gradient_checkpointing_kwargs) ================================================ FILE: Assets/core.py ================================================ # custom code compatible with chattts 0.2.4 # adds the "local_dir" parameter import os import re import logging import tempfile from dataclasses import dataclass, asdict from typing import Literal, Optional, List, Tuple, Dict, Union from json import load from pathlib import Path import numpy as np import torch from vocos import Vocos from vocos.pretrained import instantiate_class from huggingface_hub import snapshot_download from .config import Config from .model import DVAE, Embed, GPT, gen_logits, Tokenizer, Speaker from .utils import ( load_safetensors, check_all_assets, download_all_assets, select_device, get_latest_modified_file, del_all, ) from .utils import logger as utils_logger from .norm import Normalizer class Chat: def __init__(self, logger=logging.getLogger(__name__)): self.logger = logger utils_logger.set_logger(logger) self.config = Config() self.normalizer = Normalizer( os.path.join(os.path.dirname(__file__), "res", "homophones_map.json"), logger, ) with open( os.path.join(os.path.dirname(__file__), "res", "sha256_map.json") ) as f: self.sha256_map: Dict[str, str] = load(f) 
self.context = GPT.Context() def has_loaded(self, use_decoder=False): not_finish = False check_list = ["vocos", "gpt", "tokenizer", "embed"] if use_decoder: check_list.append("decoder") else: check_list.append("dvae") for module in check_list: if not hasattr(self, module): self.logger.warning(f"{module} not initialized.") not_finish = True return not not_finish def download_models( self, source: Literal["huggingface", "local", "custom"] = "local", force_redownload=False, custom_path: Optional[torch.serialization.FILE_LIKE] = None, cache_dir: Optional[str] = None, local_dir: Optional[str] = None, ) -> Optional[str]: if source == "local": download_path = local_dir if local_dir else (custom_path if custom_path is not None else os.getcwd()) if ( not check_all_assets(Path(download_path), self.sha256_map, update=True) or force_redownload ): with tempfile.TemporaryDirectory() as tmp: download_all_assets(tmpdir=tmp, homedir=download_path) if not check_all_assets( Path(download_path), self.sha256_map, update=False ): self.logger.error( "download to local path %s failed.", download_path ) return None elif source == "huggingface": try: if local_dir: download_path = snapshot_download( repo_id="2Noise/ChatTTS", allow_patterns=["*.yaml", "*.json", "*.safetensors", "spk_stat.pt", "tokenizer.pt"], local_dir=local_dir, force_download=force_redownload ) if not check_all_assets(Path(download_path), self.sha256_map, update=False): self.logger.error("Model verification failed") return None elif cache_dir: download_path = snapshot_download( repo_id="2Noise/ChatTTS", allow_patterns=["*.yaml", "*.json", "*.safetensors", "spk_stat.pt", "tokenizer.pt"], cache_dir=cache_dir, force_download=force_redownload ) if not check_all_assets(Path(download_path), self.sha256_map, update=False): self.logger.error("Model verification failed") return None else: try: download_path = ( get_latest_modified_file( os.path.join( os.getenv( "HF_HOME", os.path.expanduser("~/.cache/huggingface") ), "hub/models--2Noise--ChatTTS/snapshots", ) ) if custom_path is None else get_latest_modified_file( os.path.join(custom_path, "models--2Noise--ChatTTS/snapshots") ) ) except: download_path = None if download_path is None or force_redownload: self.logger.log( logging.INFO, f"download from HF: https://huggingface.co/2Noise/ChatTTS", ) try: download_path = snapshot_download( repo_id="2Noise/ChatTTS", allow_patterns=["*.yaml", "*.json", "*.safetensors", "spk_stat.pt", "tokenizer.pt"], cache_dir=custom_path, force_download=force_redownload, ) except: download_path = None else: self.logger.log( logging.INFO, f"load latest snapshot from cache: {download_path}", ) except Exception as e: self.logger.error(f"Failed to download models: {str(e)}") download_path = None elif source == "custom": self.logger.log(logging.INFO, f"try to load from local: {custom_path}") if not check_all_assets(Path(custom_path), self.sha256_map, update=False): self.logger.error("check models in custom path %s failed.", custom_path) return None download_path = custom_path if download_path is None: self.logger.error("Model download failed") return None return download_path def load( self, source: Literal["huggingface", "local", "custom"] = "local", force_redownload=False, compile: bool = False, custom_path: Optional[torch.serialization.FILE_LIKE] = None, device: Optional[torch.device] = None, coef: Optional[torch.Tensor] = None, use_flash_attn=False, use_vllm=False, experimental: bool = False, cache_dir: Optional[str] = None, local_dir: Optional[str] = None, ) -> bool: download_path 
= self.download_models( source, force_redownload, custom_path, cache_dir, local_dir ) if download_path is None: return False return self._load( device=device, compile=compile, coef=coef, use_flash_attn=use_flash_attn, use_vllm=use_vllm, experimental=experimental, **{ k: os.path.join(download_path, v) for k, v in asdict(self.config.path).items() }, ) def unload(self): logger = self.logger self.normalizer.destroy() del self.normalizer del self.sha256_map del_list = ["vocos", "gpt", "decoder", "dvae", "tokenizer", "embed"] for module in del_list: if hasattr(self, module): delattr(self, module) self.__init__(logger) def sample_random_speaker(self) -> str: return self.speaker.sample_random() def sample_audio_speaker(self, wav: Union[np.ndarray, torch.Tensor]) -> str: return self.speaker.encode_prompt(self.dvae.sample_audio(wav)) @dataclass(repr=False, eq=False) class RefineTextParams: prompt: str = "" top_P: float = 0.7 top_K: int = 20 temperature: float = 0.7 repetition_penalty: float = 1.0 max_new_token: int = 384 min_new_token: int = 0 show_tqdm: bool = True ensure_non_empty: bool = True manual_seed: Optional[int] = None @dataclass(repr=False, eq=False) class InferCodeParams(RefineTextParams): prompt: str = "[speed_5]" spk_emb: Optional[str] = None spk_smp: Optional[str] = None txt_smp: Optional[str] = None temperature: float = 0.3 repetition_penalty: float = 1.05 max_new_token: int = 2048 stream_batch: int = 24 stream_speed: int = 12000 pass_first_n_batches: int = 2 def infer( self, text, stream=False, lang=None, skip_refine_text=False, refine_text_only=False, use_decoder=True, do_text_normalization=True, do_homophone_replacement=True, split_text=True, max_split_batch=4, params_refine_text=RefineTextParams(), params_infer_code=InferCodeParams(), ): self.context.set(False) if split_text and isinstance(text, str): if "\n" in text: text = text.split("\n") else: text = re.split(r"(?<=。)|(?<=\.\s)", text) nt = [] if isinstance(text, list): for t in text: if t: nt.append(t) text = nt else: text = [text] self.logger.info("split text into %d parts", len(text)) self.logger.debug("%s", str(text)) if len(text) == 0: return [] res_gen = self._infer( text, stream, lang, skip_refine_text, refine_text_only, use_decoder, do_text_normalization, do_homophone_replacement, split_text, max_split_batch, params_refine_text, params_infer_code, ) if stream: return res_gen elif not refine_text_only: stripped_wavs = [] thr = np.float32(1e-5) for wavs in res_gen: for wav in wavs: stripped_wavs.append(wav[np.abs(wav) > thr]) if split_text: return [np.concatenate(stripped_wavs)] return stripped_wavs else: return next(res_gen) def interrupt(self): self.context.set(True) @torch.no_grad() def _load( self, vocos_ckpt_path: str = None, dvae_ckpt_path: str = None, gpt_ckpt_path: str = None, embed_path: str = None, decoder_ckpt_path: str = None, tokenizer_path: str = None, device: Optional[torch.device] = None, compile: bool = False, coef: Optional[str] = None, use_flash_attn=False, use_vllm=False, experimental: bool = False, ): if device is None: device = select_device(experimental=experimental) self.logger.info("use device %s", str(device)) self.device = device self.device_gpt = device if "mps" not in str(device) else torch.device("cpu") self.compile = compile feature_extractor = instantiate_class( args=(), init=asdict(self.config.vocos.feature_extractor) ) backbone = instantiate_class(args=(), init=asdict(self.config.vocos.backbone)) head = instantiate_class(args=(), init=asdict(self.config.vocos.head)) vocos = ( 
Vocos(feature_extractor=feature_extractor, backbone=backbone, head=head) .to( # Vocos on mps will crash, use cpu fallback. # Plus, complex dtype used in the decode process of Vocos is not supported in torch_npu now, # so we put this calculation of data on CPU instead of NPU. "cpu" if "mps" in str(device) or "npu" in str(device) else device ) .eval() ) assert vocos_ckpt_path, "vocos_ckpt_path should not be None" vocos.load_state_dict(load_safetensors(vocos_ckpt_path)) self.vocos = vocos self.logger.log(logging.INFO, "vocos loaded.") # computation of MelSpectrogram on npu is not support now, use cpu fallback. dvae_device = torch.device("cpu") if "npu" in str(self.device) else device dvae = DVAE( decoder_config=asdict(self.config.dvae.decoder), encoder_config=asdict(self.config.dvae.encoder), vq_config=asdict(self.config.dvae.vq), dim=self.config.dvae.decoder.idim, coef=coef, device=dvae_device, ) coef = str(dvae) assert dvae_ckpt_path, "dvae_ckpt_path should not be None" dvae.load_pretrained(dvae_ckpt_path, dvae_device) self.dvae = dvae.eval() self.logger.log(logging.INFO, "dvae loaded.") embed = Embed( self.config.embed.hidden_size, self.config.embed.num_audio_tokens, self.config.embed.num_text_tokens, self.config.embed.num_vq, ) embed.load_pretrained(embed_path, device=device) self.embed = embed.to(device) self.logger.log(logging.INFO, "embed loaded.") gpt = GPT( gpt_config=asdict(self.config.gpt), embed=self.embed, use_flash_attn=use_flash_attn, use_vllm=use_vllm, device=device, device_gpt=self.device_gpt, logger=self.logger, ).eval() assert gpt_ckpt_path, "gpt_ckpt_path should not be None" gpt.load_pretrained(gpt_ckpt_path, embed_path, experimental=experimental) gpt.prepare(compile=compile and "cuda" in str(device)) self.gpt = gpt self.logger.log(logging.INFO, "gpt loaded.") self.speaker = Speaker( self.config.gpt.hidden_size, self.config.spk_stat, device ) self.logger.log(logging.INFO, "speaker loaded.") decoder = DVAE( decoder_config=asdict(self.config.decoder), dim=self.config.decoder.idim, coef=coef, device=device, ) coef = str(decoder) assert decoder_ckpt_path, "decoder_ckpt_path should not be None" decoder.load_pretrained(decoder_ckpt_path, device) self.decoder = decoder.eval() self.logger.log(logging.INFO, "decoder loaded.") if tokenizer_path: self.tokenizer = Tokenizer(tokenizer_path) self.logger.log(logging.INFO, "tokenizer loaded.") self.coef = coef return self.has_loaded() def _infer( self, text: Union[List[str], str], stream=False, lang=None, skip_refine_text=False, refine_text_only=False, use_decoder=True, do_text_normalization=True, do_homophone_replacement=True, split_text=True, max_split_batch=4, params_refine_text=RefineTextParams(), params_infer_code=InferCodeParams(), ): assert self.has_loaded(use_decoder=use_decoder) if not isinstance(text, list): text = [text] text = [ self.normalizer( t, do_text_normalization, do_homophone_replacement, lang, ) for t in text ] self.logger.debug("normed texts %s", str(text)) if not skip_refine_text: refined = self._refine_text( text, self.device, params_refine_text, ) text_tokens = refined.ids text_tokens = [i[i.less(self.tokenizer.break_0_ids)] for i in text_tokens] text = self.tokenizer.decode(text_tokens) refined.destroy() if refine_text_only: if split_text and isinstance(text, list): text = "\n".join(text) yield text return if split_text and len(text) > 1 and params_infer_code.spk_smp is None: refer_text = text[0] result = next( self._infer_code( refer_text, False, self.device, use_decoder, params_infer_code, ) ) wavs = 
self._decode_to_wavs( result.hiddens if use_decoder else result.ids, use_decoder, ) result.destroy() assert len(wavs), 1 params_infer_code.spk_smp = self.sample_audio_speaker(wavs[0]) params_infer_code.txt_smp = refer_text if stream: length = 0 pass_batch_count = 0 if split_text: n = len(text) // max_split_batch if len(text) % max_split_batch: n += 1 else: n = 1 max_split_batch = len(text) for i in range(n): text_remain = text[i * max_split_batch :] if len(text_remain) > max_split_batch: text_remain = text_remain[:max_split_batch] if split_text: self.logger.info( "infer split %d~%d", i * max_split_batch, i * max_split_batch + len(text_remain), ) for result in self._infer_code( text_remain, stream, self.device, use_decoder, params_infer_code, ): wavs = self._decode_to_wavs( result.hiddens if use_decoder else result.ids, use_decoder, ) result.destroy() if stream: pass_batch_count += 1 if pass_batch_count <= params_infer_code.pass_first_n_batches: continue a = length b = a + params_infer_code.stream_speed if b > wavs.shape[1]: b = wavs.shape[1] new_wavs = wavs[:, a:b] length = b yield new_wavs else: yield wavs if stream: new_wavs = wavs[:, length:] keep_cols = np.sum(np.abs(new_wavs) > 1e-5, axis=0) > 0 yield new_wavs[:][:, keep_cols] @torch.inference_mode() def _vocos_decode(self, spec: torch.Tensor) -> np.ndarray: if "mps" in str(self.device) or "npu" in str(self.device): return self.vocos.decode(spec.cpu()).cpu().numpy() else: return self.vocos.decode(spec).cpu().numpy() @torch.inference_mode() def _decode_to_wavs( self, result_list: List[torch.Tensor], use_decoder: bool, ): decoder = self.decoder if use_decoder else self.dvae max_x_len = -1 if len(result_list) == 0: return np.array([], dtype=np.float32) for result in result_list: if result.size(0) > max_x_len: max_x_len = result.size(0) batch_result = torch.zeros( (len(result_list), result_list[0].size(1), max_x_len), dtype=result_list[0].dtype, device=result_list[0].device, ) for i in range(len(result_list)): src = result_list[i] batch_result[i].narrow(1, 0, src.size(0)).copy_(src.permute(1, 0)) del src del_all(result_list) mel_specs = decoder(batch_result) del batch_result wavs = self._vocos_decode(mel_specs) del mel_specs return wavs @torch.no_grad() def _infer_code( self, text: Tuple[List[str], str], stream: bool, device: torch.device, return_hidden: bool, params: InferCodeParams, ): gpt = self.gpt if not isinstance(text, list): text = [text] assert len(text), "text should not be empty" if not isinstance(params.temperature, list): temperature = [params.temperature] * self.config.gpt.num_vq else: temperature = params.temperature input_ids, attention_mask, text_mask = self.tokenizer.encode( self.speaker.decorate_code_prompts( text, params.prompt, params.txt_smp, params.spk_emb, ), self.config.gpt.num_vq, prompt=( self.speaker.decode_prompt(params.spk_smp) if params.spk_smp is not None else None ), device=self.device_gpt, ) start_idx = input_ids.shape[-2] num_code = self.config.gpt.num_audio_tokens - 1 logits_warpers, logits_processors = gen_logits( num_code=num_code, top_P=params.top_P, top_K=params.top_K, repetition_penalty=params.repetition_penalty, ) if gpt.is_vllm: from .model.velocity import SamplingParams sample_params = SamplingParams( temperature=temperature, max_new_token=params.max_new_token, max_tokens=8192, min_new_token=params.min_new_token, logits_processors=(logits_processors, logits_warpers), eos_token=num_code, infer_text=False, start_idx=start_idx, ) input_ids = [i.tolist() for i in input_ids] result = 
gpt.llm.generate( None, sample_params, input_ids, ) token_ids = [] hidden_states = [] for i in result: token_ids.append(torch.tensor(i.outputs[0].token_ids)) hidden_states.append( i.outputs[0].hidden_states.to(torch.float32).to(self.device) ) del text_mask, input_ids return [ GPT.GenerationOutputs( ids=token_ids, hiddens=hidden_states, attentions=[], ), ] emb = self.embed(input_ids, text_mask) del text_mask if params.spk_emb is not None: self.speaker.apply( emb, params.spk_emb, input_ids, self.tokenizer.spk_emb_ids, self.gpt.device_gpt, ) result = gpt.generate( emb, input_ids, temperature=torch.tensor(temperature, device=device), eos_token=num_code, attention_mask=attention_mask, max_new_token=params.max_new_token, min_new_token=params.min_new_token, logits_processors=(*logits_processors, *logits_warpers), infer_text=False, return_hidden=return_hidden, stream=stream, show_tqdm=params.show_tqdm, ensure_non_empty=params.ensure_non_empty, stream_batch=params.stream_batch, manual_seed=params.manual_seed, context=self.context, ) del emb, input_ids return result @torch.no_grad() def _refine_text( self, text: str, device: torch.device, params: RefineTextParams, ): gpt = self.gpt if not isinstance(text, list): text = [text] input_ids, attention_mask, text_mask = self.tokenizer.encode( self.speaker.decorate_text_prompts(text, params.prompt), self.config.gpt.num_vq, device=self.device_gpt, ) logits_warpers, logits_processors = gen_logits( num_code=self.tokenizer.len, top_P=params.top_P, top_K=params.top_K, repetition_penalty=params.repetition_penalty, ) if gpt.is_vllm: from .model.velocity import SamplingParams sample_params = SamplingParams( repetition_penalty=params.repetition_penalty, temperature=params.temperature, top_p=params.top_P, top_k=params.top_K, max_new_token=params.max_new_token, max_tokens=8192, min_new_token=params.min_new_token, logits_processors=(logits_processors, logits_warpers), eos_token=self.tokenizer.eos_token, infer_text=True, start_idx=input_ids.shape[-2], ) input_ids_list = [i.tolist() for i in input_ids] del input_ids result = gpt.llm.generate( None, sample_params, input_ids_list, params.show_tqdm ) token_ids = [] hidden_states = [] for i in result: token_ids.append(torch.tensor(i.outputs[0].token_ids)) hidden_states.append(i.outputs[0].hidden_states) del text_mask, input_ids_list, result return GPT.GenerationOutputs( ids=token_ids, hiddens=hidden_states, attentions=[], ) emb = self.embed(input_ids, text_mask) del text_mask result = next( gpt.generate( emb, input_ids, temperature=torch.tensor([params.temperature], device=device), eos_token=self.tokenizer.eos_token, attention_mask=attention_mask, max_new_token=params.max_new_token, min_new_token=params.min_new_token, logits_processors=(*logits_processors, *logits_warpers), infer_text=True, stream=False, show_tqdm=params.show_tqdm, ensure_non_empty=params.ensure_non_empty, manual_seed=params.manual_seed, context=self.context, ) ) del emb, input_ids return result ================================================ FILE: Assets/user_manual_consolidated.md ================================================ ### What is the VectorDB-Plugin and what can it do? VectorDB-Plugin is a program that lets you build a vector database from your documents (text files, PDFs, images, etc.) and use it with a large language model for more accurate answers. 
This approach is known as Retrieval Augmented Generation (RAG) – the software finds relevant pieces of your data (embeddings) and feeds them into an AI chat model so the answers are based on your own content. In simple terms, VectorDB-Plugin "supercharges" a language model by giving it a memory of your files, which improves the factual accuracy of responses. You can search your database by asking questions in plain language, and the program will retrieve matching chunks from your data and have the chat model incorporate them into its answer.

### What are the system requirements and prerequisites?

System requirements for VectorDB-Plugin include a Windows operating system (Windows 10 or 11) and Python (version 3.11 or 3.12 is recommended). You should also have Git installed (with Git LFS for handling large model files) and Pandoc (a document converter). If you plan to use GPU acceleration or certain models, you'll need a suitable C++ compiler and possibly Visual Studio build tools on Windows. An NVIDIA GPU is optional but can greatly speed up embedding and model inference (the program will also work on CPU, just more slowly). Make sure you have sufficient disk space for storing models and databases – vector models and chat models can be several hundred MBs to a few GBs each.

### Why is Visual Studio required to run this program?

Visual Studio is required to run this program because some of the libraries that it relies on must be compiled before they can be installed. A common error that you will receive if you have not installed Visual Studio states that "Microsoft Visual C++ 14.0 or greater is required," which makes it clear that Visual Studio is missing or was not installed correctly. Moreover, when installing Visual Studio you must also install the "Build Tools" or select certain features. For example, when installing Visual Studio Build Tools 2022 you must choose the "Desktop development with C++" workload and, on the right-hand side, check the boxes for "MSVC v143 – VS 2022 C++ x64/x86 build tools...", either "Windows 10 SDK (10.0.19041.0 or later)" or "Windows 11 SDK (10.0.22621.0)", "C++ CMake tools for Windows," "C++ AddressSanitizer," and potentially others.

### How do I install and launch the VectorDB-Plugin?

Download the latest release from the GitHub repository (look for a ZIP file under Releases) and extract the ZIP archive to a folder of your choice. First, create a virtual environment by opening a command prompt within the "src" directory of the extracted files and running the command "python -m venv .". Second, activate the virtual environment by running the command ".\Scripts\activate". Third, run the setup script with the command "python setup_windows.py". It is important to note that this program is only supported on Windows at this time. Lastly, run the program with the command "python gui.py". A window should open with this program's graphical user interface.

### How do I download or add embedding models?

The Models Tab lets you browse and download embedding models. Models are grouped by provider with properties listed for each embedding model. To download a model, click the radio button next to the model you want to download and then click "Download Selected Model". This will save the necessary model files to the "Models/Vector/" folder if you want to inspect them.

The Original Precision of an embedding model is the original floating point format that the model was saved in by its creator - e.g. float32, float16, etc.
The Parameters of an embedding model refers to how many parameters a particular model has - e.g. 109m means 109 million parameters. The Dimensions of an embedding model refers to how complex the embeddings it creates are - for example, 768 or 1024 dimensions. More dimensions generally mean higher quality within the same embedding model family. The Max Sequence of an embedding model refers to the maximum number of tokens that the model can process at one time. The Size of a model refers to its size on disk.

### How do I query the database for answers?

Select the database you want to query from the dropdown menu. Choose a backend model for answering: Local Models (built-in AI), Kobold, LM Studio, or ChatGPT; each option uses a different AI system to generate responses. Enter your question in natural language in the text box, for example "what does the quarterly report say about revenue". If you only want to see the retrieved information without AI processing, check the "chunks only" box. Click Submit Question; the system searches your database for relevant content using semantic similarity. The results will display both the retrieved chunks, so you can verify sources, and a complete answer generated by your chosen AI model based on those chunks. You can continue with follow-up questions or new queries as needed.

### Which chat backend should I use?

The program offers four options for generating answers from your database content. The Local Models backend uses chat models downloaded directly from Huggingface and does not rely on any external program. The Kobold backend connects to a Kobold server that has already loaded a chat model. You must download Kobold prior to using this backend and set it up correctly. The LM Studio backend is similar in that it requires downloading an external program and setting it up correctly before use. The ChatGPT backend uses the API from OpenAI and connects to one of several models. You must first create an account with OpenAI and get an API key, which must then be entered into this program from the menu at the top. Unlike the other backends, the ChatGPT backend cannot run without an Internet connection.

### What is LM Studio chat model backend?

LM Studio is an application that allows users to run and interact with local language models on their own hardware. This program integrates with LM Studio, and the GitHub repository contains detailed instructions for setup and usage. When you query the vector database within the Query Database tab you can choose LM Studio as the backend that ultimately receives the query (along with the contexts from the vector database) and provides a response to your question. LM Studio can be downloaded from this website: https://lmstudio.ai/. The documentation regarding how to properly set up the program is here: https://lmstudio.ai/docs/app.
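Under the hood, the LM Studio backend simply sends your question (plus the retrieved contexts) to LM Studio's local, OpenAI-compatible server. The following is only a rough sketch of that interaction, not this program's exact code; the port (1234 is LM Studio's default) and the model name are assumptions you would adjust to your own setup:

```python
# Illustrative sketch only: querying a running LM Studio server over its
# OpenAI-compatible endpoint. Assumes LM Studio is serving on its default port 1234
# and that a chat model has already been loaded in the LM Studio application.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")

contexts = "...chunks retrieved from the vector database..."
question = "What does the quarterly report say about revenue?"

response = client.chat.completions.create(
    model="local-model",  # LM Studio answers with whatever model it currently has loaded
    messages=[
        {"role": "system", "content": "Answer using only the provided contexts."},
        {"role": "user", "content": f"Contexts:\n{contexts}\n\nQuestion: {question}"},
    ],
)
print(response.choices[0].message.content)
```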
### What is Kobold chat model backend?

Kobold is an application that allows users to run and interact with local language models on their own hardware. This program integrates with Kobold, and the GitHub repository contains detailed instructions for setup and usage. When you query the vector database within the Query Database tab you can choose Kobold as the backend that ultimately receives the query (along with the contexts from the vector database) and provides a response to your question. You can get the latest release of Kobold from this website: https://github.com/LostRuins/koboldcpp.

On Windows machines, it is crucial that you do two things before using Kobold. First, right-click on the file and check the "Unblock" checkbox near the bottom. Second, you must click the "Compatibility" tab and check the box that says "Run this program as an administrator." Without these steps it will likely fail. The documentation regarding how to use Kobold is here: https://github.com/LostRuins/koboldcpp/wiki.

### What is the OpenAI GPT Chat Model Backend?

The ChatGPT backend allows you to send queries directly to OpenAI and get a response. To do so you must first have an API key. To get an API key for accessing OpenAI's large language models, first create an account by visiting OpenAI's signup page and completing the registration. Once logged in, go to the API keys page, click "Create new secret key," optionally name it, and then click "Create secret key" to generate it. Make sure to copy and store the key securely, as it won't be shown again. To activate the key, visit the Billing section and add your payment details. For a more detailed walkthrough, you can refer to this step-by-step tutorial.

### What local chat models are available and how can I use them?

The "local models" option within the Query Database Tab downloads chat models directly from Huggingface and requires no external program. You can select a local model from the pulldown menu; the first time you use it the model will be downloaded automatically, and it can then be used for subsequent queries. Please note that certain models are "gated," which means that you must first enter a Huggingface access token. You can create an access token on Huggingface's website and then enter it within the "File" menu of this program in the upper left. You must do this before trying to use certain "gated" local models. To get a Huggingface access token you must create a Huggingface account and then go to your profile. On the left-hand side will be an "Access Tokens" option. Then in the upper right is a "Create new token" button. Check the box that says "Read access to contents of all public gated repos you can access" then click "Create token."

### How do I get a huggingface access token?

Some chat models in this program are "gated" and require a Huggingface access token. If a model is gated and you haven't provided an access token this program will notify you. To obtain an access token you must create a Huggingface account and then go to your profile. On the left-hand side will be an "Access Tokens" option. Once clicked, in the upper right is a "Create new token" button. Check the box that says "Read access to contents of all public gated repos you can access" then click "Create token." You can then enter the access token in this program by going to the "File" menu and selecting "Huggingface Access Token." You can subsequently change your access token within this program by repeating the same steps.
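If you want to verify a token outside the GUI, the standard huggingface_hub library (which handles model downloads) accepts it programmatically. This is only an illustration; within this program you should still enter the token through the File menu, and the repository name below is a placeholder for a gated model:

```python
# Illustration only: supplying a Huggingface access token with the huggingface_hub
# library. The token and repo_id are placeholders; a gated repo without a valid
# token raises an authorization error instead of downloading.
from huggingface_hub import login, snapshot_download

login(token="hf_your_token_here")  # placeholder token

local_path = snapshot_download(repo_id="some-org/some-gated-model")  # hypothetical gated repo
print(local_path)
```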
### What is a context limit or maximum sequence length?

The phrase "context limit" refers to the maximum number of tokens that a model can handle at once. With chat models the phrase "context limit" is usually used, and with embedding models it is customary to use the phrase "maximum sequence length." Regardless, it refers to the same thing. When you choose a chunk size in this program it is important to make sure that the chunk size does not exceed the maximum sequence length of the embedding model. You can see each model's limit in the Models Tab.

Remember, these limits are given in tokens whereas the chunk size setting is in characters. This is because the text extraction and splitting operates in terms of characters. On average, one token is three to four characters, so you will need to do some rough math when setting the chunk size to make sure that it does not exceed the embedding model's maximum sequence length. For example, a chunk size of 1,200 characters works out to roughly 300-400 tokens, which fits comfortably within a 512-token maximum sequence length.

### What happens if I exceed the maximum sequence length of an embedding model?

If the chunks you create exceed the embedding model's maximum sequence length they will be truncated, leading to suboptimal search results. In other words, if a chunk is too long the end will be cut off before the embeddings are created in order to ensure that the chunk stays within the maximum sequence length. This leads to suboptimal search results because some meaning is lost. You can check the maximum sequence length of every embedding model that this program uses by inspecting the model within the Models Tab. It is very important that you know the maximum sequence length before using an embedding model.

### How many contexts should I retrieve when querying the vector database?

For simple question-answer use cases, 3-6 chunks should suffice. For a typical book, a chunk size of 1200 characters with an overlap of 600 characters can return up to 6 contexts. Advanced embedding models are often capable of retrieving the most relevant context in the first or second result. If you are not getting relevant results in the first three to six results then you desperately need to revise your queries, because the issue is not with the number of contexts being returned. The type of query and how you phrase it can be even more important than the actual number of chunks returned. With that said, there are use cases for returning a lot of chunks in more complex scenarios, especially now that many chat models have extended context limits. To give one example, say that you embed a lot of court cases and then ask "What are the exceptions to the hearsay rule of evidence?" It might be reasonable to request 20-30 contexts, which are then fed to the chat model for a synthesized response.

### What does the chunks only checkbox do?

Typically when you submit a query within the Query Database Tab it connects to your chosen backend to get a response from a chat model. However, if you check the "chunks only" checkbox it will only return the chunks retrieved from the vector database. This is good for seeing verbatim what would be sent to the chat model backend in case you need that level of detail, but the primary purpose is to let users see the quality of the chunks that they are creating. For example, it gives you an idea of whether the chunk size setting you chose is sufficient, or whether a particular embedding model is creating high enough quality embeddings for your particular use case.

### What are embedding or vector models?

Embedding models, which are sometimes referred to as vector models, are large language models specifically trained to convert a chunk of text into an array of numbers that represents the meaning of that text. This array, referred to as an "embedding" or "vector," can then be entered into a database and searched for similar vectors.
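As a concrete, purely illustrative example of what an embedding model does, here is a minimal sketch using the sentence-transformers library this program is built on; the model name is just an example and is not necessarily one of the models offered in the Models Tab:

```python
# Minimal sketch: turn text chunks into embeddings and compare them to a query.
# The model name is an example only; any sentence-transformers embedding model works.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("BAAI/bge-small-en-v1.5")

chunks = [
    "Revenue grew 12% in the third quarter.",
    "The cafeteria menu changes every Monday.",
]
query = "What did the quarterly report say about revenue?"

chunk_vectors = model.encode(chunks)      # one array of numbers (vector) per chunk
query_vector = model.encode([query])      # same for the query

# Cosine similarity between the query and each chunk; the first chunk scores highest,
# which is how the database decides which chunks to return.
print(model.similarity(query_vector, chunk_vectors))
```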
### Which embedding or vector model should I choose?

There are several considerations when choosing which embedding model to use, and they are important to understand because it can take significant time and compute resources to create a vector database. First, the size of the embedding model and how much VRAM it uses is a factor. In general, the larger the model and the more compute resources it requires, the higher quality embeddings it will produce. Also, the maximum sequence length of the model can be a factor. Most embedding models have traditionally had a 512 token limit, but modern models now have limits of 8192 tokens or even higher. Thirdly, some embedding models are trained on specific languages like English while others are multilingual. All of these characteristics can be viewed within the Models Tab, which also provides hyperlinks to the repository for each model so you can read more about it.

### What are the dimensions of a vector or embedding model?

The dimensions of a vector model refer to the level of detail of the embeddings that the model will create. More dimensions mean a greater level of detail and a higher quality embedding, but require more time and compute resources to create. Technically speaking, the number of dimensions refers to the size of the array of numbers that is the "embedding," which, as described previously, represents the semantic meaning of a chunk of text. For example, the array might contain 384 numbers because the embedding model has 384 dimensions.

### What are some general tips for choosing an embedding model?

Try to use as high a quality of embedding model as your system resources will allow. Although there are exceptions for newer embedding models, embedding models typically do not use as much VRAM as typical chat models, so the real limitation when choosing an embedding model is how much compute time you are willing to spend before the vector database is created. It is highly recommended to choose as high a quality of embedding model as possible. Also, if compute resources are limited, make sure to check the "half" checkbox within the Settings Tab. This will run the embedding model in either bfloat16 or float16 (commonly referred to as half precision). Studies show that there is very little loss in quality between full precision and half precision. Lastly, always use "cuda" within the Settings Tab when creating embeddings if you have a GPU.

### What Are Vision Models?

Vision models are a category of large language models trained to understand what is in an image. For purposes of this program, they are used to generate a summary of what is in an image, which can then be put into the vector database. This program allows you to choose from multiple vision models within the Settings Tab. Before you take a lot of time to process a lot of images it is highly recommended that you test the various vision models within the Tools Tab to find one that suits you.

### What vision models are available in this program?

The vision models that you can use in this program can be seen within the Settings Tab in the pulldown menu where you select the vision model you want to use. Each of these vision models can be researched on the Huggingface website if you need more details. Also, you can Ask Jeeves for more information about a specific family of models. In general, the vision models are arranged within this pulldown menu from smallest at the top to largest at the bottom.
A larger model generally means higher quality results, but not always. Smaller vision models that are newer sometimes outperform larger but older vision models. Also, some vision models excel at certain types of images over others. The best strategy for choosing an appropriate vision model before committing to processing a large number of images is to go to the Tools Tab and test the various vision models. You can Ask Jeeves for details on how to do this.

### Do you have any tips for choosing a vision model?

When choosing a vision model it is recommended to choose the highest quality model that your system can run, taking into consideration the amount of compute time you are willing to spend. Each vision model requires a certain amount of VRAM, which is typically much higher than for embedding models. It is highly recommended to test all the models on a single image, which you can do within the Tools Tab, or, if you already know your VRAM limitations, only test the vision models you know you have the resources to run. The Tools Tab allows you to test a particular vision model on multiple images or multiple vision models on a single image. Either way it's important to get a feel for the vision models' quality and the compute resources required before committing to processing a lot of images that will be put into a vector database.

### What is whisper and how does this program use voice recording or transcribing an audio file?

Whisper is an advanced speech recognition model developed by OpenAI that transcribes audio into text. This program uses whisper models in two ways. First, to allow users to record their voice into the question box when querying the vector database. This can be done within the Query Database Tab; simply click the "Voice Recorder" button, record your question, and it will be output to the query box. Secondly, whisper models are used to create transcriptions of audio files that can subsequently be entered into a vector database. You can create these transcriptions within the Tools Tab. This will create a transcript of an audio file, which you will see within the Create Database Tab before creating the vector database.

### How can I record my question for the vector database query?

To transcribe a spoken question, go to the "Query Database" tab, click the "Voice Recorder" button to begin recording, and then speak clearly. Click the button again to stop recording, and the transcribed text will appear in the question box.

### How can I transcribe an audio file to be put into the vector database?

To transcribe an audio file, navigate to the Tools Tab, select an audio file (most file formats are supported, such as .mp3, .wav, .m4a, .ogg, .wma, and .flac) and click the Transcribe button. After the transcription is complete you can see it in the "Create Database" tab and it will be entered into the vector database when you create it. The transcribing functionality uses the powerful `WhisperS2T` library with the `Ctranslate2` backend (a rough sketch of that library call appears below). Make sure to adjust the "Batch" setting when transcribing an audio file depending on the size of the whisper model you choose. Increasing the batch size can improve speed but demands more VRAM, so care should be taken not to exceed your GPU's capacity.

### What are the distil variants of the whisper models when transcribing an audio file?

Distil variants of Whisper models use approximately 70% of the resources of their full counterparts and are faster with very little loss in quality.
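For reference, here is a rough sketch of the kind of WhisperS2T call that the transcription feature described above is built around. It is not this program's exact code; the model size, batch size, and file name are assumptions, and argument names may differ slightly between WhisperS2T versions, so consult its documentation:

```python
# Rough sketch: transcribing an audio file with WhisperS2T on the CTranslate2 backend.
# Model identifier, batch size, and file name are placeholders; adjust for your hardware.
import whisper_s2t

model = whisper_s2t.load_model(model_identifier="large-v2", backend="CTranslate2")

out = model.transcribe_with_vad(
    ["my_audio.mp3"],
    lang_codes=["en"],
    tasks=["transcribe"],
    initial_prompts=[None],
    batch_size=16,  # higher is faster but uses more VRAM
)
# out[0] holds the utterances for the first (and only) file
print(" ".join(segment["text"] for segment in out[0]))
```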
### What whisper model should I choose to transcribe a file?

When transcribing an audio file in order to put it into a vector database, it is generally recommended to use as high a quality of whisper model as your hardware will support. The quality of a whisper model is determined by a few factors. Firstly, its size is the most important factor - e.g. large versus medium versus small. Secondly, the precision of the model that you use: this program allows you to choose float32 for the highest quality, or bfloat16 or float16 (i.e. half precision). In general, using half precision gives about 95% of the quality of float32 for half the compute resources. Lastly, some of the whisper models come in "distil" variants that have certain layers of the model removed. Again, this typically gives approximately 95% of the quality of the non-distil variant for half the compute resources. It is highly recommended to test the various whisper models on a small audio file before committing to transcribing a large one, which can be done within the Tools Tab.

### What are floating point formats, precision, and quantization?

Understanding floating point formats is key when making decisions about model selection and quantization. Floating point formats represent real numbers in binary using a combination of sign, exponent, and fraction (mantissa) bits. The sign bit indicates whether the number is positive or negative. The exponent bits determine the range or magnitude of the value. The fraction or mantissa bits control the precision of the value.

### What are the common floating point formats?

float32: 32-bit floating point with 1 sign bit, 8 exponent bits, and 23 fraction bits. This format provides high precision and a wide range, making it a standard choice for many computing tasks.

float16: 16-bit floating point comprising 1 sign bit, 5 exponent bits, and 10 fraction bits. float16 offers reduced precision and range but uses less memory and computational power.

bfloat16 (brain floating point): this format features 1 sign bit, 8 exponent bits, and 7 fraction bits. It has the same range as float32 but with lower precision, making it particularly useful for deep learning applications.

Range and precision comparison:

| Format | Approximate range | Precision (decimal digits) |
|--------|-------------------|----------------------------|
| float32 | ±1.4 × 10^-45 to ±3.4 × 10^38 | 6-9 |
| float16 | ±6.1 × 10^-5 to ±6.5 × 10^4 | 3-4 |
| bfloat16 | ±1.2 × 10^-38 to ±3.4 × 10^38 | 2-3 |

### What are precision and range regarding floating point formats and which should I use?

The choice of floating point format has several key implications. Precision affects the detail and accuracy of computations, while range determines the scale of values that can be represented. Trade-offs arise when opting for lower precision formats: they reduce memory usage and increase processing speed but may slightly reduce accuracy.
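To make these trade-offs tangible, here is a tiny, purely illustrative check you can run yourself (it assumes PyTorch is installed, which this program already uses):

```python
# Illustrative only: how the same value survives float32, float16, and bfloat16.
import torch

pi = torch.tensor(3.141592653589793, dtype=torch.float32)
print(pi.item())                     # ~3.1415927 (float32 keeps 6-9 digits)
print(pi.to(torch.float16).item())   # 3.140625   (float16 keeps 3-4 digits)
print(pi.to(torch.bfloat16).item())  # 3.140625   (bfloat16 keeps 2-3 digits)

# Range: 70,000 overflows float16 (max ~65,504) but fits in bfloat16 and float32.
big = torch.tensor(70000.0, dtype=torch.float32)
print(big.to(torch.float16).item())   # inf
print(big.to(torch.bfloat16).item())  # ~70144 (representable, just less precise)
```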
### What is Quantization?

Quantization reduces the precision of the numbers used to represent a model's parameters, which results in smaller models and lower computational requirements. The main goals of quantization are to improve model speed, reduce memory usage (RAM or VRAM), and enable models to run on resource-constrained hardware. There are two main methods of quantization: post-training quantization is applied after the model is trained, while quantization-aware training incorporates quantization during the training process to minimize accuracy loss. Common quantization levels include int8 (8-bit integer), which significantly reduces model size but may introduce quantization errors, and float16 or bfloat16, which reduce size with minimal impact on accuracy.

### What are the aspects or effects of quantization?

- Model size reduction: smaller data types take up less storage.
- Performance increase: reduced data size speeds up computation.
- Potential accuracy loss: reduced precision may introduce errors, though these are often negligible for many applications.

## What settings are available in this program and how can I adjust them?

The "Settings" Tab contains most of the settings for LM Studio, querying the database, creating the database, the text to speech functionality, and the vision models. Please ask me a question about the specific setting or group of settings you're interested in.

### What are the LM Studio Server settings?

When using LM Studio as the chat model backend you can adjust a few settings from within the Settings Tab. In general, however, the LM Studio program itself has all the settings that you should adjust. For purposes of this program you can adjust the port to match what you set within LM Studio. Also, there is a checkbox you can check to see the thinking process if the model you are running within LM Studio uses chain of thought.

### What are the database creation settings?

The Device setting allows you to choose either CPU or CUDA when creating a vector database. It is always recommended to choose CUDA if available. The Chunk Size setting determines the size of the chunks of text that your documents will be broken into before being turned into embeddings. It is crucial to remember that this setting is in number of characters, not tokens, and that you must keep the chunks within the maximum sequence length of the embedding model you are using, which is expressed in tokens and which you can see within the Models Tab. Remember, each token is approximately 3-4 characters. The Overlap setting refers to how many characters at the beginning of a chunk are from the preceding chunk. When a document is processed it is sometimes split in the middle of an important concept, and this setting ensures that there is an overlap to avoid losing meaning. A good rule of thumb is to set the Overlap setting to 30-49 percent of the Chunk Size setting. The half-precision setting, if checked, will run the embedding model in half precision, resulting in a slight reduction in quality but half the compute resources.
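As a concrete illustration of the character-versus-token arithmetic described above, the sketch below estimates a safe Chunk Size for a hypothetical embedding model with a 512-token maximum sequence length and derives an Overlap from it. The 3.5 characters-per-token figure and the 30 percent overlap are illustrative values, not settings pulled from the program.

```python
# Rough sizing of Chunk Size / Overlap (both measured in characters).
# Assumes ~3.5 characters per token and a hypothetical embedding model
# with a 512-token maximum sequence length.
MAX_SEQ_LEN_TOKENS = 512
CHARS_PER_TOKEN = 3.5          # rough average for English text

max_chunk_chars = int(MAX_SEQ_LEN_TOKENS * CHARS_PER_TOKEN)   # ~1792
chunk_size = 1500              # stay comfortably under the limit
overlap = int(chunk_size * 0.30)                              # ~450 characters

print(f"Upper bound on chunk size: ~{max_chunk_chars} characters")
print(f"Chosen Chunk Size: {chunk_size}, Overlap: {overlap}")
```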
### What are the database query settings?

Within the Settings Tab you can adjust several settings when searching a vector database. The Device setting allows you to choose between CPU and CUDA. In contrast to creating a vector database, it is recommended to always use CPU. The Similarity setting sets a threshold of relevance that a chunk of text must meet before it will be returned as a result. You can set a value between zero and 1. A higher value will result in more chunks being returned, but you should never use 1. The Contexts setting determines the maximum number of chunks that will be returned, again, subject to the Similarity setting. The Search Term Filter will require that any chunks returned include the specified term. The File Type setting allows you to only search for chunks of text that originated from a particular file type.

### How does the Contexts setting work exactly?

Within the Settings Tab, the Contexts setting when searching a vector database will return up to that many chunks of text, assuming they all meet the Similarity setting that you choose. In other words, it sets the upper limit. If there are not that many chunks that also meet the Similarity setting, it is possible to receive fewer chunks than the Contexts setting.

### What is the similarity setting?

Within the Settings Tab the Similarity setting controls the requisite relevance of a chunk related to your query in order for it to possibly be returned. I say "possibly" because even though a chunk might meet the Similarity setting it might not be returned if, for example, your Contexts setting limits the number of chunks that will be returned. By default, this program will return chunks in order from highest relevance to lowest. It will return the most relevant chunks that meet the Similarity setting up to the maximum number of chunks specified in the Contexts setting. A higher Similarity setting means that more chunks will possibly be returned. A good default value is .8, but do not go above 1.

### What is the search term filter setting?

Within the Settings Tab the Search Term Filter setting allows you to require that any chunks returned contain the specified search term. It is not case-sensitive, but it does require an exact match. For example, if you specify "child" it will only return chunks that include the term "child" somewhere in them. This would not include chunks that have the word "children" in them, however, since it requires a verbatim match. With that said, since it is not case-sensitive it would also include chunks with "Child" in them. This setting is especially useful when you know that a relevant chunk has a certain key word in it; otherwise, it is best to leave this blank. Click the Clear Filter button to clear any filters. Lastly, it is important to understand that this setting only applies after both the Similarity and Contexts settings. Therefore, if you set those settings too low you might not receive any chunks with your specified search term.

### What is the File Type setting?

Within the Settings Tab the File Type setting allows you to limit the chunks that are returned based on whether they originated from a particular type of file. Current options include images, documents, audio, or all files. It is best to use the all files option unless you are sure that the chunks you are looking for originated from a particular type of file.

### What are text to speech models (aka TTS models) and how are they used in this program?

Text to speech (TTS) models are large language models that were specifically trained to take text as input and output audio in a spoken voice format. This program allows you to use TTS models to speak the response that you get after querying the vector database.

### What text to speech models are available to use in this program?

You can choose various text to speech models within the Settings Tab. The current options are Bark, WhisperSpeech, ChatTTS, and Google TTS. The Bark backend has a Normal size that produces slightly higher quality and a Small version that uses fewer resources.
With Bark you can choose different speaker voices such as v2/en_speaker_6, which is usually considered the highest quality, or v2/en_speaker_9, which is the only female voice. Using Bark requires a GPU, however. The WhisperSpeech backend consists of two models that you choose within the Settings Tab, both of which determine the quality. Experiment with both to find a setting that works with your hardware. WhisperSpeech, like Bark, requires a GPU but is generally less compute intensive than Bark at roughly the same quality. The ChatTTS backend is also a good option that can be run on either GPU or CPU. It produces audio of slightly lower quality than Bark or WhisperSpeech. Lastly, the Google TTS backend is the least compute intensive; it does not require a GPU and instead connects to a free online Google service that provides TTS.

### What is the Bark text to speech?

Bark TTS by Suno AI is a fully generative, open-source text-to-audio model that produces highly expressive and realistic speech, even capable of non-verbal vocalizations like laughter or sighs. Unlike traditional TTS systems that strictly follow input text, Bark can "freestyle," deviating for prosodic expressiveness or ambient cues, which makes it especially useful for creative applications like character dialogue, storytelling, and game development. It supports over 100 built-in speaker presets and auto-detects more than a dozen languages, although English remains the most polished. Bark uses EnCodec and a GPT-style transformer under the hood, trading speed for quality, and typically requires GPU acceleration. Despite its occasional unpredictability, its rich emotional output and open MIT license make it a standout for experimental and expressive use cases.

### What is the WhisperSpeech text to speech?

WhisperSpeech by Collabora is a cutting-edge open-source project that "reverses" OpenAI's Whisper speech-to-text model to synthesize speech from semantic audio tokens, offering an exciting glimpse into the future of modular, multilingual TTS. Inspired by Google's SPEAR-TTS, WhisperSpeech leverages Whisper's deep linguistic understanding and language-neutral token representations to build a multilingual, speaker-aware system that supports voice cloning and polyglot speech (e.g. the same voice speaking in multiple languages). Though still under heavy development, early results show surprisingly natural and expressive audio, particularly given the open model's small size. It's not yet plug-and-play like Bark or ChatTTS, but its transparency, voice customization potential, and strong multilingual foundation make it a compelling choice for developers interested in training their own flexible, high-quality TTS pipeline.

### What is the ChatTTS text to speech?

ChatTTS is an open-source conversational TTS model specifically designed for dialogue generation, with a focus on natural prosody, expressive timing, and multi-speaker interactions. Trained on over 100,000 hours of English and Chinese speech, it delivers highly realistic and emotionally resonant voices tailored for chatbots and AI companions. Unlike many TTS engines, ChatTTS includes conversational structure like speaker turns and can even insert interjections like laughter using special tokens. While it lacks a large preset voice library like Bark, it can produce distinct speakers and supports fine-tuning on custom data.
It runs efficiently on consumer GPUs and offers Python bindings, making it one of the most practical and expressive TTS options for developers aiming to build natural, back-and-forth conversational agents in English or Mandarin.

### What is the Google TTS text to speech?

Google TTS (Free Tier) offers industry-leading neural speech synthesis via a cloud API, backed by WaveNet and Neural2 models that produce ultra-clear, stable, and emotionally nuanced voices across 380+ voices and 50+ languages. Although not open-source, it provides a generous free tier (up to 4 million characters/month for standard voices and 1 million for WaveNet), making it highly accessible for small-scale use. Developers can fine-tune pronunciation and pacing using SSML, and even select expressive "Newscaster" or "Lively" voice styles. With near real-time performance via a fast cloud API and seamless Python integration, Google TTS is the gold standard for high-quality, multilingual TTS — ideal for production-ready applications where speech quality, reliability, and global language support outweigh the need for open-source control.

### What is the Chatterbox text to speech?

Chatterbox, developed by Resemble AI, is a cutting-edge open-source text-to-speech (TTS) model that sets a new standard in voice synthesis. Released under the permissive MIT license, it offers developers and creators unparalleled freedom to use, modify, and distribute the software. Chatterbox's standout features include zero-shot voice cloning, allowing it to mimic any voice with just a few seconds of reference audio, and emotion exaggeration control. Its alignment-informed inference ensures ultra-stable and natural-sounding speech, making it ideal for real-time applications like voice assistants and interactive media. In blind evaluations, Chatterbox has been consistently preferred over proprietary models like ElevenLabs, highlighting its superior performance in generating high-quality, expressive speech. With its combination of advanced features, open-source accessibility, and exceptional speech synthesis quality, Chatterbox stands out as a powerful tool for developers seeking a versatile and ethical TTS solution.

### Which text to speech backend or models should I use?

Generally it's recommended to experiment with each to your liking. However, in general Bark and WhisperSpeech produce the highest quality results, Chat TTS and Chatterbox are below them but can be run on GPU as well as CPU, and Google TTS is comparable to Chat TTS in terms of quality but requires an Internet connection.

### Can I back up or restore my databases and are they backed up automatically?

When you create a vector database it is automatically backed up. However, if you want to manually back up all databases you can go to the "Tools" tab and click the Backup All Databases button. Likewise, you can restore all backed up databases within the Tools Tab.

### What happens if I lose a configuration file and can I restore it?

This program cannot function without the config.yaml file. If you lose it accidentally or it gets corrupted for some reason, you can restore a default version: copy the original config.yaml from the Assets folder to the main directory and, if necessary, delete the old files and folders in Vector_DB and Vector_DB_Backup to prevent conflicts.

### What are some good tips for searching a vector database?

To improve your search results when searching a vector database it is important to understand the relationship between the various settings within the Settings Tab. When a vector database is searched it will first identify candidate chunks to return that meet the Similarity setting. Once it does that, it will return the most relevant chunks up to the limit of the number of chunks that you set with the Contexts setting. After that, it will apply the Search Term Filter setting to remove any chunks that do not contain the verbatim search term (remember, this is case-insensitive however). Finally, these chunks are sent to the chat model along with your initial query to get a response.
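To make the ordering of those steps concrete, here is a small, self-contained sketch of that filtering pipeline in plain Python. It is not the program's actual query code; the chunk structure, the 0-to-1 relevance scores, and the helper name are hypothetical and exist only to illustrate Similarity, then Contexts, then the Search Term Filter being applied in that order.

```python
# Hypothetical sketch of the query pipeline order described above:
#   1) keep chunks meeting the Similarity setting,
#   2) keep the most relevant ones up to the Contexts limit,
#   3) apply the case-insensitive, whole-word Search Term Filter.
import re
from dataclasses import dataclass

@dataclass
class Chunk:
    text: str
    relevance: float  # assumed 0.0-1.0 relevance score

def select_contexts(chunks, similarity=0.8, contexts=6, search_term=""):
    candidates = [c for c in chunks if c.relevance >= similarity]      # Similarity
    candidates.sort(key=lambda c: c.relevance, reverse=True)
    candidates = candidates[:contexts]                                  # Contexts
    if search_term:                                                     # Search Term Filter
        pattern = re.compile(rf"\b{re.escape(search_term)}\b", re.IGNORECASE)
        candidates = [c for c in candidates if pattern.search(c.text)]
    return candidates

chunks = [
    Chunk("The Child custody statute provides...", 0.91),
    Chunk("Children of divorced parents may...", 0.85),
    Chunk("Unrelated boilerplate paragraph.", 0.42),
]
for c in select_contexts(chunks, contexts=2, search_term="child"):
    print(f"{c.relevance:.2f}  {c.text}")
```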
### General VRAM Considerations

To conserve VRAM, disconnect secondary monitors from the GPU and, if available, use motherboard graphics ports instead. This requires enabling integrated graphics in the BIOS, which is often disabled by default when a dedicated GPU is installed. This can be particularly useful if your CPU has integrated graphics, such as Intel CPUs without an "F" suffix, which support motherboard graphics ports.

### How can I manage VRAM?

For optimal performance, ensure that the entire LLM is loaded into VRAM. If only part of the model is loaded, performance can be significantly degraded. It's also important to manage VRAM efficiently by ejecting unused models when creating the vector database and reloading the LLM after the database creation is complete. When querying the vector database, using the CPU instead of the GPU is recommended to conserve VRAM for the LLM, as querying is less resource-intensive and can be effectively handled by the CPU.

### What are the speed and VRAM requirements for the various chat models?

You can always check the VRAM and speed for local models within the Tools Tab by clicking the "Chat Models" button, which will display a nice chart. In general, however, smaller models like Qwen 3 - 0.6b deliver exceptional speed at over 200 characters per second while requiring minimal VRAM (1.3GB), while mid-range models in the 2-9 billion parameter range offer a sweet spot for most users, with speeds ranging from 150-400 characters per second and VRAM usage between 2.5-9.5GB. Notable standouts include the GLM4-Z1 - 9b, which achieves an impressive 395 CPS while using under 10GB VRAM, and the Exaone models, which consistently deliver faster performance than similarly-sized alternatives. For users with high-end GPUs, the larger 24-32 billion parameter models provide enhanced reasoning capabilities at the cost of reduced speed (95-140 CPS) and substantial VRAM requirements (15-20GB).

### What are the speed and VRAM requirements for the various vision models?

Vision models demonstrate a clear inverse relationship between speed and model size, with smaller models delivering significantly faster image processing while larger models provide enhanced accuracy at the cost of reduced throughput. The fastest performers are models like Ovis2 - 2b at 312 characters per second (CPS) and InternVL2.5 - 1b (289 CPS), with relatively low VRAM usage of 2.3-5.8GB. Florence-2 models, which can be run on a CPU, showcase interesting trade-offs. For example, Florence-2-Base achieves an impressive 971 CPS on GPU with only 2.6GB VRAM, while CPU-only operation drops performance to 157 CPS. Mid-range models like Granite Vision - 2b (218 CPS, 4.1GB) and THUDM glm4v - 9b (201 CPS, 9.8GB) offer balanced performance for most use cases. The largest models such as Qwen VL - 7b (174 CPS, 9.6GB) require more resources.

### What are maximum context length and maximum sequence length and how do they relate?

Each embedding model has a maximum sequence length, and exceeding this limit can result in truncation. To avoid this, regularly check the maximum sequence length of the model and adjust your settings accordingly. Reducing chunk size or the number of contexts can help stay within these limits. Maximum "context length" refers to chat models and is very similar to maximum sequence length. The key thing to understand is that the chunks you put into the vector database should be within the max sequence length of the vector or embedding model you choose, and the maximum number of contexts (chunks) you retrieve from the vector database multiplied by their length should stay within the chat model's context length limit. Also make sure to leave enough context for a response.
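The following back-of-the-envelope calculation illustrates that relationship. The numbers (a 1,200-character chunk size, 6 contexts, an 8,192-token chat model context window, and roughly 3.5 characters per token) are hypothetical; substitute your own settings.

```python
# Rough token budget check for a query, using the assumed figures above.
CHARS_PER_TOKEN = 3.5

chunk_size_chars = 1200          # Chunk Size setting (characters)
contexts = 6                     # Contexts setting
question_tokens = 200            # rough size of your question plus prompt
response_headroom = 1500         # tokens reserved for the model's answer
chat_context_limit = 8192        # hypothetical chat model context window

context_tokens = int(contexts * chunk_size_chars / CHARS_PER_TOKEN)  # ~2057
total_needed = context_tokens + question_tokens + response_headroom

print(f"Retrieved contexts: ~{context_tokens} tokens")
print(f"Total needed: ~{total_needed} of {chat_context_limit} tokens")
print("Within the limit." if total_needed <= chat_context_limit
      else "Reduce Contexts or Chunk Size.")
```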
### What is the scrape documentation feature?

Within the Tools tab you can select multiple Python libraries and scrape their documentation. Multiple .html files will be downloaded and you can subsequently create a vector database out of them. Larger, more complex libraries can take a significant amount of time to scrape, so make sure you have a stable Internet connection.

### Which vector or embedding models are available in this program?

All of the embedding models that this program uses are listed on the Models Tab. You can click on a hyperlink for each one to find out more information. The embedding models sometimes change as different versions of this program are released and newer and better embedding models become available. This program vets all embedding models, however, before including them for usage.

### What is the Manage Databases Tab?

The Manage Databases Tab allows you to see all of the vector databases that you have created thus far and what documents are in them. Select the database you want to view from the pulldown menu and you can see the files that have been embedded. Also, you can double-click any of the files to open it in your system's default program. When a vector database is created the location of the original file is saved as metadata. As long as you haven't moved the original file on your computer, this metadata will be used to locate the file and open it in the default program on your system.

### How can I create a vector database?

Go to the Create Database tab and choose the files that you want to add to the vector database. If you select any file types that are not supported, the program will let you know and give you an option to automatically exclude them. Remember, you can repeat this process as many times as you wish. Also, you can choose whether to select all of the files in a particular directory or simply choose individual files. To add audio transcriptions to the database you must first transcribe audio files individually, which can only be done within the Tools Tab. To input descriptions of images into the vector database, choose an appropriate vision model from the Settings Tab. Any images you select will then automatically be processed by that vision model when you create the database. Remember to adjust the database creation settings within the Settings Tab before creating the database.

### Can I use images and audio files in my database?

You can use both images and audio in your vector database. Images: When you add image files (like PNG, JPG, BMP), the selected vision model creates a text description of each image, which is then embedded like a regular text document.
For example, a chart might be described as "A line graph showing revenue over time with an upward trend." You can then search with queries like "What does the revenue trend look like?" and retrieve the image. Make sure you choose a vision model in the Settings Tab first and use the Test Vision Models tool within the Tools Tab to preview captions before using a particular model. Audio: You can't add audio files directly, but you can use the Transcribe File tool (powered by OpenAI's Whisper model) to convert audio to text. This transcript can then be added like any other document during database creation. If you try to upload audio directly, the program will prompt you to transcribe it first. By converting images and audio to text, the system supports rich, multi-modal queries — as long as content is processed correctly.

### What chat models are available with the local models option?

Within the Query Database Tab, if you choose the local models option you can use a selection of chat models that will be downloaded directly from the Huggingface website. All of these models have been specifically chosen for their strength in question answering using contexts provided by a vector database. Please ask about a particular family of chat models for more information, or you can visit the repository for the various chat models on Huggingface for more detailed information. The available chat models that this program uses sometimes change as newer models come out with higher capabilities. All chat models that are added or removed will be noted in the release notes on GitHub for the record.

### What are the Qwen 3 Chat Models?

Qwen3 is the latest release in the Qwen family of large language models. They come in six sizes ranging from 0.6 billion to 32 billion parameters and can be used under the Apache 2.0 license. A key innovation with the Qwen3 series is the hybrid "thinking" versus "non-thinking" modes that are available. This program has opted to use the thinking mode for all Qwen3 models as it tends to produce the best results for retrieval augmented generation purposes. The Qwen3 models are multilingual and are touted as supporting up to 119 languages. They were trained on approximately 36 trillion tokens, which is double the amount used for Qwen 2.5. Qwen has consistently created some of the best open source and free models available and they are a staple of this program.

### What are the Granite 3.3 Chat Models?

The Granite 3.3 chat models are the latest in the Granite series developed by IBM and are released under the Apache 2.0 license. They are "thinking" or "reasoning" models and have improved upon prior iterations in this regard. The Granite models were trained on synthetically generated datasets for long-context tasks and are good for retrieval augmented generation purposes. Version 3.3 of the models exceeds the performance of Granite 3.1 and 3.2 by a significant margin.

### What are the GLM-Z1 Chat Models?

The Z1 family of chat models are created by THUDM and demonstrate strong performance across a wide range of tasks, including retrieval augmented generation. The benchmarks show that they are particularly strong in general-purpose question answering across a wide range of domains - e.g. science, math, and other areas. They come in 9 billion and 32 billion parameter variants and are a staple of this program due to their high quality on question answering tasks.

### What is the Mistral Small Chat Model?
The Mistral Small chat model is the third iteration of Mistral models and has 24 billion parameters. It is released under the Apache 2.0 license for liberal usage. Compared to larger models such as LLaMA 3.3 with 70 billion parameters and Qwen 2.5 with 32 billion parameters, the Mistral Small 3 model achieves comparable quality results across a wide range of benchmarks. What is unique about the Mistral Small 3 model is its size of 24 billion parameters, which often sits in the sweet spot for VRAM usage for users with 24 gigabytes of VRAM. Larger models with 32 billion parameters will sometimes exceed the available VRAM with longer contexts, but Mistral Small 3 leaves sufficient VRAM available in such circumstances. Benchmark results also show that it excels at reasoning, coding, math, and instruction following, oftentimes producing more succinct answers than other similarly sized models.

### What are the MiniCPM-4 chat models?

The MiniCPM-4 chat models are ultra-efficient, open-source LLMs built by the OpenBMB team for edge devices, offered in lightweight 0.5 B-parameter and 8 B-parameter versions. The 8b variant achieves comparable performance to Qwen3-8B using only 22% of the training data. The 0.5B variant, despite having fewer parameters, significantly outperforms Qwen3-0.6B, Llama3.2-1B, and Gemma3-1B. The 8b variant matches Qwen3-8B, outperforms GLM4-9B, exceeds larger models such as Gemma3-12B, and approaches Phi4-14B. The major advantage is on 128k context sequences, where it achieves 7x faster decoding than Qwen3-8b because it only attends to ~6k tokens out of 128k (5% sparsity). These chat models focus on the English and Chinese languages.

### What is the Deepseek R1 Chat Model?

The Deepseek R1 chat model was previously removed from this program, but it has been re-added since Deepseek released a newer and improved version in late May 2025. This new version, based on the Qwen3 architecture, has significantly improved both its reasoning and generic response tasks and is an excellent choice for retrieval augmented generation. It claims to rival much larger open source models such as Qwen 3 32b and Phi-4 14b, and even claims to outperform closed-source models such as OpenAI's o3-mini (medium) and Google's Gemini 2.5 Flash, which is quite impressive.

### What are the BGE Embedding Models?

The BGE family of embedding models was created by BAAI and has long been a staple within the embedding community and this program in particular. They are well-respected as producing high quality embeddings for reasonable compute resources. Although they are over a year old now, they are still regarded as producing quality embeddings for a reasonable compute cost for most use cases. At the time of their release they were state of the art for open source and free embedding models.

### What are the Granite Embedding Models?

The Granite family of embedding models was created by IBM. They are lightweight embedding models based on the RoBERTa architecture, as opposed to the BERT architecture used by most other embedding models. IBM touts these models as being suitable for "enterprise" use cases, and they come in 30.3 and 125 million parameter sizes. Along with the Snowflake Arctic embedding models, they are one of the fastest embedding models that this program offers when considered in relation to the quality of embeddings that they produce.
In contrast to the Snowflake Arctic embedding models, however, they do not rely upon the Xformers library (which is not supported by all graphics cards) to achieve this. The Granite embedding models were released in early 2025 under the liberal Apache-2.0 license. This program only uses the English-trained variations of the models.

### What are the Intfloat Embedding Models?

Similar to the BGE embedding models produced by BAAI, the Intfloat embedding models have long been a staple of high quality embedding models in the community and this program. They include "small," "base," and "large" variants for your particular use case. They offer high quality embeddings for the compute resources required and often go head-to-head in comparison with the "bge" models from BAAI. Although they are well over a year old now, they still offer high quality embeddings for a reasonable compute cost, and many other embedding models have been built upon the e5 family of models.

### What are the Arctic Embedding Models?

Snowflake's Arctic-embed models are retrieval-optimized text embedding models built on the E5-small and E5-large embedding models created by Intfloat. Despite their relatively modest sizes, these models outperformed larger competitors on several benchmarks. They are also significantly faster than similarly sized models due to their reliance on the Xformers library. These models can, however, be run with or without the Xformers library depending on whether a user's hardware supports it. The Snowflake Arctic embedding models are also unique in that they have a maximum sequence length of 8192 tokens, which is far greater than the typical 512 token limit of other embedding models.

### What are the Qwen3 Embedding Models?

Released in June 2025, Alibaba's Qwen 3 Embedding family delivers state-of-the-art text embeddings while staying friendly to everyday hardware. They are based on the popular Qwen 3 chat models but have special training to make them suitable for generating embeddings. As of June 2025, they hold the top three ranked spots on the Huggingface leaderboard. They are primarily trained on English and Chinese data, but a fair amount of their training data is also from numerous other languages, so they can be reliably used for multilingual embedding tasks as well. They are released under the liberal Apache-2.0 license. The Qwen 3 family of embedding models comes in three practical sizes: "small" (0.6 B parameters), "base" (4 B), and "large" (8 B). Even the 0.6 B version outperforms older 7 B embedding models, which is a phenomenal accomplishment, while the 8 B model often edges out commercial offerings. All variants support long contexts (up to 32 k tokens).

### What is the Scrape Documentation tool?

Scrape Documentation automatically downloads documentation from online sources to build vector databases without manual copy-pasting. Located in the Tools tab, simply select a documentation source from the dropdown menu (many common libraries are pre-configured) and click "Scrape." The program will fetch all relevant pages, showing progress as it works. Scraped content is stored in src/Scraped_Documentation//. Once complete, you'll need to add these files to a vector database through the Create Database tab - the scraper only retrieves and saves the docs but doesn't vectorize them. If documentation has been previously scraped, the entry appears in red, and you'll be warned before overwriting existing data.
This feature is particularly useful for creating searchable knowledge bases from official documentation for technical Q&A using the VectorDB-Plugin.

### How do I test vision models on images?

The Test Vision Models tool in the Tools tab lets you preview how vision models describe your images before adding them to a database. It offers two main options. (1) Multiple Files + One Vision Model tests one vision model on multiple images. First, select image files in the Create Database tab, then choose your vision model in Settings. Return to Tools and click "Multiple Files + One Vision Model – Process." The tool generates descriptions for all images without creating a database, showing average description length to help you evaluate the model's performance. (2) Single Image + All Vision Models compares multiple vision models on one image. Click this option, select an image, then choose which vision models to test from the dialog (they're listed with VRAM requirements). The tool will sequentially process your image through each model and produce a comparison showing each model's description and processing time. This helps you balance quality versus speed when selecting a vision model.

### What is Optical Character Recognition?

Optical character recognition (aka OCR) refers to whether a .pdf file has a text layer embedded within it representing the actual text in the document. The exact structure of the .pdf file format is beyond the scope of this tutorial, but generally a .pdf will have a "glyph" layer that contains the visual representations of text as we commonly understand them, rendered in different fonts or other styles. The "text layer" refers to a text representation of these glyphs that a .pdf may or may not have, which is unseen but which is ultimately extracted when text is extracted from a .pdf document. If a .pdf does not have this text layer then text cannot be extracted from it unless OCR has been performed on it, which you can do with this program. To do so, go to the Tools Tab, select a .pdf, and perform OCR. You can Ask Jeeves for more details regarding this if need be.

### How can I extract text from PDFs or images with OCR?

The OCR tool, found in the Tools tab, converts image-based documents into searchable text using the built-in Tesseract engine. To use it: (1) Go to the "OPTICAL CHARACTER RECOGNITION" section in the Tools tab. (2) Ensure "Tesseract" is selected from the dropdown (it's usually pre-selected). (3) Click "Choose PDF" to upload your scanned PDF or image file. (4) Click "Process" to start extracting text. Once processing is complete, the tool generates two outputs: (1) a new PDF file with an "_OCR" suffix that includes the original document along with an invisible, searchable text layer, and (2) a plain text file containing all the recognized text, including page markers like [[page1]]. You can then upload either the OCR-enhanced PDF or the plain text file to your vector database using the Create Database tab. The tool works best with PDFs, including multi-page ones, but it also supports image files. OCR accuracy varies depending on the clarity and quality of the input, so it's important to review the results carefully when accuracy is critical.

### What other features does the Misc tab have?

In addition to backup and restore, the Misc tab includes three visualization tools: GPU Comparison Chart: Click the "GPUs" button to open a chart that compares graphics cards based on performance and memory.
You can filter results by VRAM range (e.g., 4–6 GB, 8 GB, 10–12 GB), making it easier to evaluate which GPUs are suitable for running various models. Chat Models Comparison: Selecting "Chat Models" brings up a chart comparing local chat models, displaying estimated VRAM usage and token generation speeds. Models are typically color-coded by category (e.g., general use vs. coding), giving you a clear picture of which ones align with your GPU capabilities. Vision Models Comparison: Clicking "Vision Models" launches a comparison of available vision captioning models, highlighting their size, VRAM requirements, and performance benchmarks such as processing time per image. All visualizations open in separate windows using matplotlib. These tools are purely informational, aimed at helping users make informed choices about model compatibility and system requirements. To return to the application, simply close the chart window. ### What is Ask Jeeves and how do I use it? Ask Jeeves is an integrated help assistant built into the VectorDB-Plugin, designed to serve as an in-app guide or Q&A tool. You can access it from the menu bar—look for the "Ask Jeeves" option. When launched, it opens a new window where you can type in questions about using the program. For instance, you might ask, “How do I add a PDF to my database?” or “What does chunk overlap mean?” Ask Jeeves will respond with helpful answers sourced from the documentation. Ask Jeeves is ideal for getting quick guidance while actively using the program, without needing to leave the interface or consult external resources. If the feature doesn’t respond or appears broken, users are encouraged to report the issue on GitHub, as it may indicate a problem with loading the help content. Think of Ask Jeeves as your on-demand tutor—just click it, type a plain-English question about the VectorDB-Plugin, and get clear explanations or step-by-step instructions. And yes, the name is a playful reference to the classic “Ask Jeeves” search engine, suggesting you can ask it anything! ### What are the InternVL3 Vision Models? InternVL3, released in April 2025, is an advanced open-source multimodal LLM series trained natively on interleaved text, image, and video data. It follows a ViT-MLP-LLM architecture with vision encoders up to 6B parameters and integrates with LLMs like InternLM 3 and Qwen2.5. A major innovation is Variable Visual Position Encoding (V2PE), which enhances long-context visual reasoning by using finer positional increments for visual tokens. The model employs Native Multimodal re-Training, combining language and vision learning in one stage, improving performance without separate alignment stages. InternVL3 also introduces Mixed Preference Optimization and uses dynamic image tiling, JPEG compression, and over 300K instruction-following samples for training. A Visual Process Reward Model improves inference via best-of-N reasoning chains. Empirically, InternVL3 achieves top scores across benchmarks like MMMU, MathVista, and OCRBench, outperforming previous models at all scales. It extends capabilities beyond traditional multimodal reasoning to tool use, 3D perception, GUI interaction, and industrial analysis. ### What are the Ovis2 Vision Models? Ovis2 launched in January 2025 as a second-generation multimodal large language model optimized for compact sizes (1B and 2B). It integrates Apple’s AIMv2 vision transformer and supports Qwen2.5 or InternLM 2.5 as its language backend. 
A key innovation is its visual embedding table, which structurally aligns image patches with textual tokens using a shared embedding strategy, improving coherence across modalities. Unlike traditional connector-based MLLMs, Ovis2 maps visual inputs into probabilistic tokens that interact with a large visual vocabulary (131,072 visual words), allowing for sparse, efficient visual representation. The model is instruction-tuned on diverse multimodal data, including videos, multilingual OCR, and charts, boosting chain-of-thought reasoning. Though not trained with quantization, 4-bit GPTQ versions were made available in March 2025. Ovis2 achieves state-of-the-art results across various benchmarks, including 89.1 on OCRBench and 83.6 on MMBench-V1.1 for the 8B version. Overall, Ovis2's architectural advancements enable high performance on vision-language tasks while maintaining efficiency in smaller model sizes.

### What are the Florence-2 Vision Models?

Florence-2, released by Microsoft in June 2024, comes in two sizes—Base (232M parameters) and Large (771M)—and uses a sequence-to-sequence architecture built on DaViT and Transformer layers. The model is trained on FLD-5B, a dataset with 5.4 billion annotations across 126 million images, created by the automated Florence data engine. Florence-2 integrates visual inputs with textual prompts and excels in zero-shot tasks, outperforming much larger models like Flamingo-80B on benchmarks such as COCO captioning and TextVQA. It performs well across multiple levels of granularity, from full images to specific regions and pixels, enabling state-of-the-art performance in various tasks. Its design allows for multitask learning without the need for separate modules, improving efficiency and simplifying deployment. Fine-tuning on public datasets further boosts its accuracy and robustness in real-world applications. Unlike traditional dual-encoder models like CLIP, Florence-2 uses a single Transformer stack with joint vision-text training, accepting both images and text prompts as input and producing outputs in text or structured formats.

### What are the Granite Vision Models?

Granite Vision is IBM's enterprise-focused vision-language model optimized for visual document understanding, released in February 2025. It has around 3 billion parameters and uses a SigLIP vision encoder, a two-layer GELU-activated MLP connector, and the granite-3.1-2b-instruct language model. It was trained on 13 million images and 80 million instructions using public and synthetic data. Granite Vision excels at layout parsing, text recognition, and UI analysis, especially for charts and tables, achieving up to 95% accuracy in chart extraction. It matches or surpasses models like Phi3.5v and InternVL2 on document benchmarks such as DocVQA, ChartQA, and TextVQA. Unique features include sparse attention-based safety mechanisms and multi-layer feature extraction. The model, based on the LLaVA architecture, is open-source under the Apache 2.0 license and supports commercial use. Granite Vision consistently outperforms or matches Phi3.5v and InternVL2 across key benchmarks, highlighting its strong advantage in document-focused vision-language tasks.

### What are the Qwen2.5VL Vision Models?

Qwen2.5-VL is the latest vision-language model in the Qwen family. It excels in visual understanding tasks like object recognition, text and chart analysis, and document parsing.
The model features a streamlined ViT-based vision encoder with window attention, SwiGLU activations, RMSNorm, and dynamic resolution/frame rate training for video, enhanced by mRoPE in the time dimension. These architectural updates allow precise visual localization and robust multimodal reasoning. Qwen2.5-VL-7B outperforms peers like InternVL2.5-8B, MiniCPM-o 2.6, and GPT-4o-mini in multiple benchmarks:

- Document QA: DocVQA 95.7%, InfoVQA 82.6%, ChartQA 87.3%
- Text recognition: TextVQA 84.9%, OCRBench 864, CC_OCR 77.8%
- General VLU: MMBench 82.6%, MMVet 67.1%
- Math reasoning: MathVista 68.2%, MathVision 25.07%

It also resists hallucination better than GPT-4o-mini (HallBench: 52.9% vs. 46.1%). The model integrates tightly with the Qwen2.5 LLM, sharing its tokenizer and text processing, while extending it with specialized vision-language handling and support for flexible image resolutions.

### What is the GLM-4V-9B Vision Model?

GLM-4V-9B, developed by Zhipu AI and Tsinghua University, is a 9B-parameter bilingual (Chinese/English) multimodal model released in mid-2024 as part of the GLM (OpenGLM) series. It integrates vision into the pretrained GLM-4 LLM, supporting high-resolution inputs up to 1120×1120 and enabling general vision-language tasks like image QA, captioning, and reasoning. The model uses standard attention and likely linear patch embeddings, with training on large multilingual image-text datasets. GLM-4V-9B incorporates Mixed Preference Optimization (MPO) to enhance chain-of-thought alignment, similar to InternVL. It supports FP16 precision and an 8K context window, though quantization is not emphasized. Benchmarks show strong performance: it scored 81.1 on English MMBench and 786 on OCRBench, outperforming many open models and reportedly rivaling or exceeding GPT-4-turbo and Gemini 1.0 Pro on several vision tasks.

### What is the Molmo-D-0924 Vision Model?

Molmo-D-0924 is a 7–8B parameter open-source vision-language model released by the Allen Institute (AI2) in September 2024, as part of the larger Molmo project. It combines Qwen2-7B as the language backbone with OpenAI's CLIP-ViT as the vision encoder and is trained on a proprietary PixMo dataset of 1M high-quality image–text pairs. A key innovation is its support for multi-turn "pointing" in images via a special OLMo module, allowing the model to interactively highlight regions in response to queries—moving beyond standard text-only outputs. The model is decoder-only, optimized for interactive use, and runs efficiently on commodity GPUs with FP16 or bfloat16 precision. While users can't fine-tune quality knobs beyond image size, it offers real-time responsiveness. On benchmarks, Molmo-7B-D performs between GPT-4V and GPT-4o and achieves state-of-the-art results among similarly sized open models, as confirmed by academic and human evaluations.

================================================ FILE: Assets/vision_model_table.html ================================================ Vision Model Table
| Model Name | Characters/s (GPU) | Memory (GPU) | # Characters in Summary (Low) | # Characters in Summary (High) |
| --- | --- | --- | --- | --- |
| InternVL2.5 - 1b | 291.18 | 2.4 GB | TBD | TBD |
| Florence2-Base - 223m | 766.49 | 2.6 GB | 350 | 660 |
| InternVL2.5 - 4b | 173.57 | 3.1 GB | TBD | TBD |
| Moondream2 - 2b | 344.97 | 4.5 GB | 299 | 644 |
| Florence2-Large - 772m | 564.86 | 5.3 GB | 450 | 650 |
| Mississippi - 2b | 320.00 | 5.3 GB | TBD | TBD |
| Ovis1.6-Llama3.2 - 3b | 321.79 | 9.6 GB | TBD | TBD |
| GLM-4v - 9b | 140.65 | 10.4 GB | TBD | TBD |
| llava 1.6 vicuna - 13b | 120.98 | 11.2 GB | 501 | 1045 |
| Molmo-D-0924 - 8b | 146.60 | 12.4 GB | TBD | TBD |
================================================ FILE: CSS/template.css ================================================ DocQA_GUI { background-color: $bg_window; } QWidget { border: none; } QPushButton { background-color: $bg_control; color: $text_primary; font: 10pt "Segoe UI Historic"; border-radius: 5px; padding: 5px; min-width: 60px; border: 1px solid transparent; } QPushButton:hover { background-color: $bg_control_hover; border: 1px solid $border_focus; color: $text_primary; } QLabel { color: $text_primary; } QComboBox { background-color: $bg_control; color: $text_primary; border: 1px solid $bg_window; border-radius: 5px; padding: 3px; } QComboBox:hover, QComboBox:focus { background-color: $bg_control_hover; color: $text_primary; border: 1px solid $bg_window; } QComboBox QAbstractItemView { background-color: $bg_surface; color: $text_primary; border: 1px solid $bg_window; border-radius: 5px; } QComboBox QAbstractItemView::item:hover { background-color: $bg_list_hover; color: $text_primary; } QLineEdit { background-color: $bg_window; color: $text_input; border: 1px solid transparent; border-radius: 5px; padding: 3px; } QLineEdit:hover, QLineEdit:focus { border: 1px solid $border_focus; } QLineEdit::placeholder { color: $text_placeholder; } QRadioButton { color: $text_primary; } QGroupBox { border: 1px solid $bg_surface; border-radius: 5px; color: $text_primary; font-size: 12pt; padding: 10px; } DownloadModelDialog { background-color: $bg_window; } QFrame { background-color: $bg_window; } QTextEdit[readOnly="true"] { background-color: $bg_surface; color: $text_primary; border: 1px solid $bg_control; border-radius: 5px; selection-background-color: $selection_bg; selection-color: $selection_fg; font: 14pt "Segoe UI Historic"; } QTextEdit[readOnly="false"] { background-color: $bg_surface; color: $text_primary; border: 1px solid $bg_control; border-radius: 5px; selection-background-color: $selection_bg; selection-color: $selection_fg; font: 14pt "Segoe UI Historic"; } QTabWidget { background-color: $bg_window; border: none; } QTabWidget, QTabWidget::pane { margin: 0px; padding: 0px; border: none; } QTabBar::tab { background-color: $bg_tab; color: $text_primary; border-bottom-left-radius: 3px; border-bottom-right-radius: 3px; margin: 3px; padding: 5px 5px; } QTabBar::tab:selected { background-color: $bg_tab_selected; border-bottom: 3px solid $border_focus; } QTabBar::tab:hover { background-color: $bg_tab_hover; } QSplitter::handle { background-color: $bg_splitter; height: 5px; } QTreeView { color: $text_primary; } QHeaderView::section { background-color: $bg_control; color: $text_primary; border-radius: 5px; } QMenuBar { color: $text_primary; } QMenuBar::item { background: transparent; } QMenuBar::item:selected { background: $bg_menu_selected; } QCheckBox { color: $text_primary; } QCheckBox::indicator:unchecked:hover, QCheckBox::indicator:checked:hover { border: 1px solid $border_focus; border-radius: 5px; } QMessageBox { background-color: $bg_window; } QMessageBox QLabel { color: $text_primary; } QMessageBox QPushButton { background-color: $bg_dialog_button; color: $text_primary; border-radius: 5px; padding: 5px; border: none; } QMessageBox QPushButton:hover, QMessageBox QPushButton:pressed { background-color: $bg_control; } QAbstractItemView { background-color: $bg_surface; color: $text_primary; border: 1px solid $bg_control; border-radius: 5px; } QAbstractItemView::item:hover { background-color: $bg_list_hover; color: $text_primary; } QInputDialog { background-color: $bg_window; } 
QInputDialog QLabel { color: $text_primary; } QInputDialog QComboBox { background-color: $bg_control; color: $text_primary; border: 1px solid $bg_window; border-radius: 5px; padding: 3px; } QInputDialog QComboBox:hover { background-color: $bg_control_hover; border: 1px solid $border_focus; } QInputDialog QPushButton { background-color: $bg_dialog_button; color: $text_primary; border-radius: 5px; padding: 5px; min-width: 60px; border: none; } QInputDialog QPushButton:hover { background-color: $bg_control_hover; border: 1px solid $border_focus; } QDialog { background-color: $bg_window; } QDialog QLabel { color: $text_primary; } QDialog QLineEdit { background-color: $bg_surface; color: $text_primary; border: 1px solid $bg_control; border-radius: 5px; padding: 3px; } QDialog QLineEdit:hover, QDialog QLineEdit:focus { border: 1px solid $border_focus; } QDialog QDialogButtonBox QPushButton { background-color: $bg_dialog_button; color: $text_primary; border-radius: 5px; padding: 5px; min-width: 60px; border: none; } QDialog QDialogButtonBox QPushButton:hover { background-color: $bg_control_hover; border: 1px solid $border_focus; } ================================================ FILE: README.md ================================================
### Create and search a vector database from a wide variety of file types and get more reliable [responses from an LLM](https://www.youtube.com/watch?v=8-ZAYI4MvtA). This is commonly referred to as ["retrieval augmented generation."](https://medium.com/@vici0549/search-images-with-vector-database-retrieval-augmented-generation-rag-3d5a48881de5)

Requirements

| Tool | Purpose |
| ---- | ------- |
| 🪟 Microsoft Windows | **Only** for Windows but open to pull requests |
| 🐍 [Python 3.11–3.13](https://www.python.org/downloads/) | Run the application |
| 🌿 [Git](https://git-scm.com/downloads) | Clone / manage the repository |
| 🧲 [Git LFS](https://git-lfs.com/) | Handle large model files |
| 📄 [Pandoc](https://github.com/jgm/pandoc/releases) | Document parsing support |
| 🛠️ [Visual C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/) | Required for compiling dependencies |
Or you can try running these commands in PowerShell on Windows:

### Install:

```powershell
winget install Microsoft.VisualStudio.2022.BuildTools --silent --accept-source-agreements --accept-package-agreements --override "--wait --quiet --add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 --add Microsoft.VisualStudio.Component.Windows11SDK.22621"
```

### Verify installation:

```powershell
Test-Path "C:\Program Files\Microsoft Visual Studio\2022\BuildTools\VC\Tools\MSVC"
```

Installation

### Download the latest "release," extract, navigate to the `src` folder, and run the following commands:

```
python -m venv .
```
```
.\Scripts\activate
```
```
python setup_windows.py
```
```
python gui.py
```
### Inputs → Processing → Vector Database

| | |
| -------------- | ------------------------------------------------------------ |
| 📂 **Ingest**  | 📄 `.pdf`, `.docx`, `.txt`, `.html`, `.htm`, `.md`, `.csv`, `.xls`, `.xlsx`, `.xlsm`, `.rtf`, `.eml`, `.msg` <br> 🖼️ `.png`, `.jpg`, `.jpeg`, `.bmp`, `.gif`, `.tif`, `.tiff` <br> 🎵 `.mp3`, `.wav`, `.m4a`, `.ogg`, `.wma`, `.flac` |
| ⚙️ **Process** | 📝 Extract text from documents <br> 🖼️ Generate descriptions from images <br> 🎧 Transcribe speech from audio |
| 🧠 **Store**   | All processed content is embedded and saved into the vector database for searching. |

### Query → LLM → Output

| | |
| --------------- | ----------------------------------------------------------- |
| ❓ **Ask**      | ⌨️ Type **or** 🎙️ record a question |
| 🧠 **Retrieve** | Relevant chunks are pulled from the vector database |
| 🤖 **Generate** | Sent to an LLM (Local Model, [Kobold](https://github.com/LostRuins/koboldcpp), [LM Studio](https://lmstudio.ai/), or ChatGPT) |
| 💬 **Respond**  | LLM returns an answer based on the context you provided |
| 🔊 **Optional** | Text-to-speech can read the response aloud |

Usage

> [!NOTE]
> Instructions on how to use the program are being consolidated into the `Ask Jeeves` functionality, which can be accessed from the "Ask Jeeves" menu option. Please create an issue if Jeeves is not working.

Request a Feature or Report a Bug

Feel free to report bugs or request enhancements by creating an issue on GitHub, and I will respond promptly.

Contact

I welcome all suggestions - both positive and negative. You can e-mail me directly at "bbc@chintellalaw.com" or I can frequently be seen on the ```KoboldAI``` Discord server (moniker is ```vic49```). I am always happy to answer any quesitons or discuss anything vector database related! (no formal affiliation with ```KoboldAI```). ================================================ FILE: Tokenizer/special_tokens_map.json ================================================ { "bos_token": "", "eos_token": "", "unk_token": "" } ================================================ FILE: Tokenizer/tokenizer.json ================================================ { "version": "1.0", "truncation": null, "padding": null, "added_tokens": [ { "id": 0, "content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": false, "special": true }, { "id": 1, "content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": false, "special": true }, { "id": 2, "content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": false, "special": true } ], "normalizer": { "type": "Sequence", "normalizers": [ { "type": "Prepend", "prepend": "▁" }, { "type": "Replace", "pattern": { "String": " " }, "content": "▁" } ] }, "pre_tokenizer": null, "post_processor": { "type": "TemplateProcessing", "single": [ { "SpecialToken": { "id": "", "type_id": 0 } }, { "Sequence": { "id": "A", "type_id": 0 } } ], "pair": [ { "SpecialToken": { "id": "", "type_id": 0 } }, { "Sequence": { "id": "A", "type_id": 0 } }, { "SpecialToken": { "id": "", "type_id": 1 } }, { "Sequence": { "id": "B", "type_id": 1 } } ], "special_tokens": { "": { "id": "", "ids": [ 1 ], "tokens": [ "" ] } } }, "decoder": { "type": "Sequence", "decoders": [ { "type": "Replace", "pattern": { "String": "▁" }, "content": " " }, { "type": "ByteFallback" }, { "type": "Fuse" }, { "type": "Strip", "content": " ", "start": 1, "stop": 0 } ] }, "model": { "type": "BPE", "dropout": null, "unk_token": "", "continuing_subword_prefix": null, "end_of_word_suffix": null, "fuse_unk": true, "byte_fallback": true, "vocab": { "": 0, "": 1, "": 2, "<0x00>": 3, "<0x01>": 4, "<0x02>": 5, "<0x03>": 6, "<0x04>": 7, "<0x05>": 8, "<0x06>": 9, "<0x07>": 10, "<0x08>": 11, "<0x09>": 12, "<0x0A>": 13, "<0x0B>": 14, "<0x0C>": 15, "<0x0D>": 16, "<0x0E>": 17, "<0x0F>": 18, "<0x10>": 19, "<0x11>": 20, "<0x12>": 21, "<0x13>": 22, "<0x14>": 23, "<0x15>": 24, "<0x16>": 25, "<0x17>": 26, "<0x18>": 27, "<0x19>": 28, "<0x1A>": 29, "<0x1B>": 30, "<0x1C>": 31, "<0x1D>": 32, "<0x1E>": 33, "<0x1F>": 34, "<0x20>": 35, "<0x21>": 36, "<0x22>": 37, "<0x23>": 38, "<0x24>": 39, "<0x25>": 40, "<0x26>": 41, "<0x27>": 42, "<0x28>": 43, "<0x29>": 44, "<0x2A>": 45, "<0x2B>": 46, "<0x2C>": 47, "<0x2D>": 48, "<0x2E>": 49, "<0x2F>": 50, "<0x30>": 51, "<0x31>": 52, "<0x32>": 53, "<0x33>": 54, "<0x34>": 55, "<0x35>": 56, "<0x36>": 57, "<0x37>": 58, "<0x38>": 59, "<0x39>": 60, "<0x3A>": 61, "<0x3B>": 62, "<0x3C>": 63, "<0x3D>": 64, "<0x3E>": 65, "<0x3F>": 66, "<0x40>": 67, "<0x41>": 68, "<0x42>": 69, "<0x43>": 70, "<0x44>": 71, "<0x45>": 72, "<0x46>": 73, "<0x47>": 74, "<0x48>": 75, "<0x49>": 76, "<0x4A>": 77, "<0x4B>": 78, "<0x4C>": 79, "<0x4D>": 80, "<0x4E>": 81, "<0x4F>": 82, "<0x50>": 83, "<0x51>": 84, "<0x52>": 85, "<0x53>": 86, "<0x54>": 87, "<0x55>": 88, "<0x56>": 89, "<0x57>": 90, "<0x58>": 91, "<0x59>": 92, "<0x5A>": 93, "<0x5B>": 94, "<0x5C>": 95, "<0x5D>": 96, "<0x5E>": 97, "<0x5F>": 98, "<0x60>": 99, "<0x61>": 100, "<0x62>": 101, "<0x63>": 102, "<0x64>": 103, 
"<0x65>": 104, "<0x66>": 105, "<0x67>": 106, "<0x68>": 107, "<0x69>": 108, "<0x6A>": 109, "<0x6B>": 110, "<0x6C>": 111, "<0x6D>": 112, "<0x6E>": 113, "<0x6F>": 114, "<0x70>": 115, "<0x71>": 116, "<0x72>": 117, "<0x73>": 118, "<0x74>": 119, "<0x75>": 120, "<0x76>": 121, "<0x77>": 122, "<0x78>": 123, "<0x79>": 124, "<0x7A>": 125, "<0x7B>": 126, "<0x7C>": 127, "<0x7D>": 128, "<0x7E>": 129, "<0x7F>": 130, "<0x80>": 131, "<0x81>": 132, "<0x82>": 133, "<0x83>": 134, "<0x84>": 135, "<0x85>": 136, "<0x86>": 137, "<0x87>": 138, "<0x88>": 139, "<0x89>": 140, "<0x8A>": 141, "<0x8B>": 142, "<0x8C>": 143, "<0x8D>": 144, "<0x8E>": 145, "<0x8F>": 146, "<0x90>": 147, "<0x91>": 148, "<0x92>": 149, "<0x93>": 150, "<0x94>": 151, "<0x95>": 152, "<0x96>": 153, "<0x97>": 154, "<0x98>": 155, "<0x99>": 156, "<0x9A>": 157, "<0x9B>": 158, "<0x9C>": 159, "<0x9D>": 160, "<0x9E>": 161, "<0x9F>": 162, "<0xA0>": 163, "<0xA1>": 164, "<0xA2>": 165, "<0xA3>": 166, "<0xA4>": 167, "<0xA5>": 168, "<0xA6>": 169, "<0xA7>": 170, "<0xA8>": 171, "<0xA9>": 172, "<0xAA>": 173, "<0xAB>": 174, "<0xAC>": 175, "<0xAD>": 176, "<0xAE>": 177, "<0xAF>": 178, "<0xB0>": 179, "<0xB1>": 180, "<0xB2>": 181, "<0xB3>": 182, "<0xB4>": 183, "<0xB5>": 184, "<0xB6>": 185, "<0xB7>": 186, "<0xB8>": 187, "<0xB9>": 188, "<0xBA>": 189, "<0xBB>": 190, "<0xBC>": 191, "<0xBD>": 192, "<0xBE>": 193, "<0xBF>": 194, "<0xC0>": 195, "<0xC1>": 196, "<0xC2>": 197, "<0xC3>": 198, "<0xC4>": 199, "<0xC5>": 200, "<0xC6>": 201, "<0xC7>": 202, "<0xC8>": 203, "<0xC9>": 204, "<0xCA>": 205, "<0xCB>": 206, "<0xCC>": 207, "<0xCD>": 208, "<0xCE>": 209, "<0xCF>": 210, "<0xD0>": 211, "<0xD1>": 212, "<0xD2>": 213, "<0xD3>": 214, "<0xD4>": 215, "<0xD5>": 216, "<0xD6>": 217, "<0xD7>": 218, "<0xD8>": 219, "<0xD9>": 220, "<0xDA>": 221, "<0xDB>": 222, "<0xDC>": 223, "<0xDD>": 224, "<0xDE>": 225, "<0xDF>": 226, "<0xE0>": 227, "<0xE1>": 228, "<0xE2>": 229, "<0xE3>": 230, "<0xE4>": 231, "<0xE5>": 232, "<0xE6>": 233, "<0xE7>": 234, "<0xE8>": 235, "<0xE9>": 236, "<0xEA>": 237, "<0xEB>": 238, "<0xEC>": 239, "<0xED>": 240, "<0xEE>": 241, "<0xEF>": 242, "<0xF0>": 243, "<0xF1>": 244, "<0xF2>": 245, "<0xF3>": 246, "<0xF4>": 247, "<0xF5>": 248, "<0xF6>": 249, "<0xF7>": 250, "<0xF8>": 251, "<0xF9>": 252, "<0xFA>": 253, "<0xFB>": 254, "<0xFC>": 255, "<0xFD>": 256, "<0xFE>": 257, "<0xFF>": 258, "▁▁": 259, "▁▁▁▁": 260, "▁t": 261, "in": 262, "er": 263, "▁a": 264, "he": 265, "on": 266, "re": 267, "▁s": 268, "en": 269, "at": 270, "or": 271, "▁the": 272, "▁▁▁▁▁▁▁▁": 273, "es": 274, "▁w": 275, "an": 276, "▁c": 277, "is": 278, "it": 279, "ou": 280, "▁d": 281, "al": 282, "ar": 283, "▁p": 284, "▁f": 285, "ed": 286, "▁b": 287, "ing": 288, "▁o": 289, "▁m": 290, "le": 291, "nd": 292, "as": 293, "ic": 294, "▁h": 295, "ion": 296, "▁in": 297, "▁to": 298, "et": 299, "om": 300, "el": 301, "▁of": 302, "st": 303, "▁and": 304, "▁l": 305, "▁th": 306, "▁n": 307, "ent": 308, "il": 309, "ct": 310, "ro": 311, "▁re": 312, "id": 313, "am": 314, "▁I": 315, "ad": 316, "▁e": 317, "▁S": 318, "▁g": 319, "▁T": 320, "im": 321, "ot": 322, "ac": 323, "ur": 324, "▁(": 325, "ig": 326, "▁=": 327, "ol": 328, "ut": 329, "▁A": 330, "se": 331, "▁u": 332, "ve": 333, "▁C": 334, "if": 335, "ow": 336, "▁y": 337, "ch": 338, "ay": 339, "▁de": 340, "▁st": 341, "▁|": 342, "ver": 343, ");": 344, "▁\"": 345, "ly": 346, "▁be": 347, "**": 348, "▁is": 349, "od": 350, "▁M": 351, "ation": 352, "ul": 353, "▁for": 354, "▁▁▁▁▁": 355, "▁on": 356, "ag": 357, "ce": 358, "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁": 359, "ter": 360, "ir": 361, "th": 362, "▁v": 363, "qu": 364, "▁B": 365, 
"em": 366, "▁P": 367, "▁you": 368, "▁that": 369, "un": 370, "▁{": 371, "ith": 372, "ri": 373, "est": 374, "ab": 375, "--": 376, "ap": 377, "▁it": 378, "▁con": 379, "ate": 380, "us": 381, "▁H": 382, "um": 383, "▁D": 384, "os": 385, "pe": 386, "▁-": 387, "▁wh": 388, "▁al": 389, "▁as": 390, "and": 391, "ist": 392, "▁L": 393, "▁W": 394, "▁with": 395, "▁an": 396, "ere": 397, "▁*": 398, "▁R": 399, "▁he": 400, "▁F": 401, "oc": 402, "▁was": 403, "ers": 404, "ke": 405, "out": 406, "ht": 407, "▁r": 408, "ess": 409, "op": 410, "res": 411, "ie": 412, "▁E": 413, "▁\\": 414, "▁The": 415, "end": 416, "ld": 417, "▁N": 418, "ort": 419, "▁G": 420, "//": 421, "▁#": 422, "our": 423, "te": 424, "ill": 425, "ain": 426, "▁se": 427, "▁▁▁▁▁▁": 428, "▁$": 429, "▁pro": 430, "ore": 431, "▁com": 432, "ame": 433, "tr": 434, "▁ne": 435, "rom": 436, "ub": 437, "▁at": 438, "▁ex": 439, "ant": 440, "ue": 441, "▁or": 442, "▁}": 443, "art": 444, "ction": 445, "▁k": 446, "pt": 447, "nt": 448, "iv": 449, "de": 450, "▁O": 451, "pl": 452, "urn": 453, "ight": 454, "all": 455, "▁this": 456, "ser": 457, "ave": 458, "▁not": 459, "▁are": 460, "▁j": 461, "▁le": 462, "iz": 463, "▁'": 464, "age": 465, "ment": 466, "▁tr": 467, "ack": 468, "ust": 469, "()": 470, "->": 471, "ity": 472, "ine": 473, "ould": 474, "▁J": 475, "og": 476, "▁from": 477, "▁we": 478, "ell": 479, "▁sh": 480, "▁en": 481, "ure": 482, "port": 483, "▁ch": 484, "ne": 485, "▁by": 486, "per": 487, "ard": 488, "ass": 489, "ge": 490, "ak": 491, "are": 492, "ok": 493, "av": 494, "ive": 495, "ff": 496, "ies": 497, "ath": 498, "turn": 499, "▁U": 500, "int": 501, "----": 502, "▁im": 503, "ost": 504, "ial": 505, "▁have": 506, "ind": 507, "ip": 508, "ans": 509, "xt": 510, "▁do": 511, "cl": 512, "▁if": 513, "con": 514, "ia": 515, "▁his": 516, "ult": 517, "rou": 518, "▁su": 519, "ra": 520, "▁un": 521, "able": 522, "▁<": 523, "▁K": 524, "ome": 525, "▁qu": 526, "get": 527, "▁me": 528, "ast": 529, "ect": 530, "▁##": 531, "to": 532, "▁cl": 533, "▁ab": 534, "ice": 535, "ire": 536, "ber": 537, "one": 538, "ich": 539, "hen": 540, "▁can": 541, "▁Th": 542, "▁la": 543, "▁all": 544, "ime": 545, "ile": 546, "ide": 547, "\",": 548, "▁pl": 549, "▁V": 550, "ru": 551, "orm": 552, "▁had": 553, "ud": 554, "ase": 555, "ord": 556, "),": 557, "▁▁▁▁▁▁▁▁▁▁▁▁": 558, "▁her": 559, "▁In": 560, "ace": 561, "▁but": 562, "ata": 563, "::": 564, "****": 565, "ong": 566, "▁&": 567, "..": 568, "▁▁▁▁▁▁▁▁▁▁▁▁▁": 569, "ite": 570, "ype": 571, "act": 572, "ode": 573, "▁your": 574, "▁out": 575, "▁go": 576, "lic": 577, "ally": 578, "▁so": 579, "ork": 580, "au": 581, "▁up": 582, "▁_": 583, "ll": 584, "==": 585, "▁my": 586, "pp": 587, "cc": 588, "▁//": 589, "▁they": 590, "gh": 591, "▁us": 592, "ib": 593, "ions": 594, "ach": 595, "ens": 596, "▁ar": 597, "ob": 598, "elf": 599, "ook": 600, "ated": 601, "ang": 602, "ign": 603, "▁return": 604, "▁res": 605, "ck": 606, "ous": 607, "ст": 608, ").": 609, "▁п": 610, ".\"": 611, "на": 612, "▁i": 613, "ail": 614, "ep": 615, "▁ad": 616, "ance": 617, "(\"": 618, "▁**": 619, "ther": 620, "ake": 621, "▁will": 622, "▁comp": 623, "▁one": 624, "▁get": 625, "ov": 626, "▁Y": 627, "ary": 628, "ock": 629, "▁she": 630, "che": 631, "ft": 632, "▁new": 633, "▁des": 634, "▁li": 635, "ence": 636, "▁sa": 637, "ress": 638, "▁el": 639, "▁und": 640, "eg": 641, "fer": 642, "ry": 643, "ear": 644, "ose": 645, "very": 646, "',": 647, "▁+": 648, "▁в": 649, "▁He": 650, "ublic": 651, "▁their": 652, "ize": 653, "▁were": 654, "ink": 655, "own": 656, "In": 657, "{\\": 658, "▁has": 659, "▁per": 660, "▁It": 661, "▁St": 
662, "her": 663, "ject": 664, "ра": 665, "ild": 666, "so": 667, "▁sp": 668, "ни": 669, "du": 670, "row": 671, "alue": 672, "set": 673, "form": 674, "com": 675, "▁man": 676, "ont": 677, "ull": 678, "▁cont": 679, "▁more": 680, "ick": 681, "▁would": 682, "▁ev": 683, "▁about": 684, "ition": 685, "▁z": 686, "ound": 687, "ree": 688, "▁Ch": 689, "▁which": 690, "io": 691, "();": 692, "▁who": 693, "err": 694, "ory": 695, "ount": 696, "ations": 697, "▁с": 698, "ring": 699, "": 876, "▁em": 877, "▁$\\": 878, "▁year": 879, "wn": 880, "},": 881, "▁del": 882, "ale": 883, "ty": 884, "fig": 885, "sp": 886, "hed": 887, "round": 888, "ew": 889, "▁di": 890, "▁der": 891, "ри": 892, "red": 893, "this": 894, "let": 895, "RE": 896, "ax": 897, "fr": 898, "essage": 899, "ough": 900, "▁comm": 901, "fo": 902, "uch": 903, "oy": 904, "▁people": 905, "ystem": 906, "▁first": 907, "▁function": 908, "ange": 909, "▁how": 910, "▁et": 911, "ah": 912, "▁look": 913, "то": 914, "und": 915, "▁under": 916, "ка": 917, "▁!": 918, "ray": 919, "ST": 920, "ific": 921, "ли": 922, "read": 923, "▁bet": 924, "ious": 925, "arg": 926, "▁need": 927, "math": 928, "▁на": 929, "ert": 930, "▁op": 931, "▁acc": 932, "Pro": 933, "▁est": 934, "▁Un": 935, "▁ent": 936, "▁rec": 937, "▁use": 938, "ен": 939, "▁par": 940, "az": 941, "▁д": 942, "▁Wh": 943, "self": 944, "▁ke": 945, "та": 946, "▁want": 947, "▁end": 948, "▁don": 949, "ek": 950, "ren": 951, "Name": 952, "▁=>": 953, "▁app": 954, "▁que": 955, "igh": 956, "▁bu": 957, "equ": 958, "vel": 959, "▁act": 960, "cre": 961, "AT": 962, "▁var": 963, "cess": 964, "====": 965, "Ex": 966, "▁add": 967, "▁mod": 968, "ung": 969, "▁where": 970, "ning": 971, "▁fl": 972, "als": 973, "tern": 974, "}}": 975, "▁Al": 976, "▁pos": 977, "ank": 978, "▁ap": 979, "eng": 980, "▁“": 981, "ble": 982, "▁reg": 983, "^{": 984, "▁She": 985, "▁*/": 986, "ude": 987, "add": 988, "▁two": 989, "▁col": 990, "▁sm": 991, "air": 992, "▁may": 993, "fore": 994, "▁You": 995, "rough": 996, "▁che": 997, "▁att": 998, "oth": 999, "ла": 1000, "▁co": 1001, "ates": 1002, "▁rem": 1003, "ood": 1004, "Type": 1005, "led": 1006, "ful": 1007, "▁self": 1008, "of": 1009, "▁Ar": 1010, "que": 1011, "▁every": 1012, "ref": 1013, "The": 1014, "▁And": 1015, "▁rel": 1016, "OR": 1017, "Id": 1018, "▁even": 1019, "EN": 1020, "▁hand": 1021, "ait": 1022, "▁should": 1023, "▁after": 1024, "▁dif": 1025, "ght": 1026, "ife": 1027, "ator": 1028, "ash": 1029, "ribut": 1030, "umber": 1031, "▁see": 1032, "ms": 1033, "▁call": 1034, "yn": 1035, "dd": 1036, "▁es": 1037, "▁make": 1038, "other": 1039, "▁—": 1040, "\");": 1041, "str": 1042, "▁long": 1043, "lement": 1044, "▁wor": 1045, "its": 1046, "▁If": 1047, "alse": 1048, "ль": 1049, "ward": 1050, "▁по": 1051, "val": 1052, "ons": 1053, "▁Z": 1054, "▁now": 1055, "data": 1056, "amp": 1057, "ense": 1058, "▁through": 1059, "▁down": 1060, "att": 1061, "▁static": 1062, "ics": 1063, "##": 1064, "pos": 1065, "▁void": 1066, "aw": 1067, "oun": 1068, "▁way": 1069, "ible": 1070, "vent": 1071, "ower": 1072, "▁think": 1073, "ts": 1074, "*/": 1075, "▁again": 1076, "ating": 1077, "те": 1078, "ner": 1079, "▁most": 1080, "line": 1081, "ym": 1082, "▁sub": 1083, "erson": 1084, "▁requ": 1085, "AL": 1086, "AR": 1087, "abel": 1088, "ond": 1089, "));": 1090, "▁Se": 1091, "▁But": 1092, "alk": 1093, "▁An": 1094, "new": 1095, "▁because": 1096, "ger": 1097, "ular": 1098, "roup": 1099, "ta": 1100, "...": 1101, "▁cons": 1102, "▁right": 1103, "▁fr": 1104, "be": 1105, "ily": 1106, "ки": 1107, "▁ph": 1108, "ead": 1109, "?\"": 1110, "▁gu": 1111, "▁else": 1112, 
"▁som": 1113, "rent": 1114, "co": 1115, "ement": 1116, "▁str": 1117, "ault": 1118, "▁з": 1119, "ло": 1120, "sert": 1121, "var": 1122, "type": 1123, "▁Com": 1124, "ле": 1125, "ins": 1126, "me": 1127, "way": 1128, "ident": 1129, "▁prov": 1130, "▁м": 1131, "▁true": 1132, "▁Pro": 1133, "fl": 1134, "▁sl": 1135, "▁As": 1136, "}\\": 1137, "ID": 1138, "ues": 1139, "▁inst": 1140, "▁name": 1141, "ox": 1142, "▁)": 1143, "li": 1144, "ames": 1145, "Res": 1146, "▁sur": 1147, "param": 1148, "▁start": 1149, "aj": 1150, "SE": 1151, "ask": 1152, "IT": 1153, "String": 1154, "▁ass": 1155, "▁play": 1156, "ting": 1157, "ton": 1158, "▁before": 1159, "▁pol": 1160, "arch": 1161, "▁well": 1162, "Com": 1163, "any": 1164, "olog": 1165, "▁err": 1166, "▁these": 1167, "ars": 1168, "eb": 1169, "▁br": 1170, "▁incl": 1171, "▁hel": 1172, "ern": 1173, "ody": 1174, "во": 1175, "▁ind": 1176, "----------------": 1177, "▁data": 1178, "▁good": 1179, "LE": 1180, "],": 1181, "▁av": 1182, "▁ac": 1183, "ider": 1184, "не": 1185, "▁Q": 1186, "▁min": 1187, "▁much": 1188, "ci": 1189, "els": 1190, "▁cur": 1191, "▁value": 1192, "ery": 1193, "uf": 1194, "▁loc": 1195, "reak": 1196, "ative": 1197, "imes": 1198, "Cl": 1199, "▁,": 1200, "▁ser": 1201, "▁die": 1202, "▁trans": 1203, "▁result": 1204, "ext": 1205, "▁aut": 1206, "land": 1207, "▁&&": 1208, "Ch": 1209, "ten": 1210, "}$": 1211, "▁type": 1212, "cond": 1213, "ices": 1214, "▁very": 1215, "▁own": 1216, "▁fil": 1217, "ities": 1218, "▁produ": 1219, "▁read": 1220, "▁form": 1221, "▁case": 1222, "ather": 1223, "ти": 1224, "да": 1225, "ер": 1226, "Th": 1227, "aut": 1228, "▁spec": 1229, "ij": 1230, "bl": 1231, "ility": 1232, "▁é": 1233, "▁er": 1234, "▁does": 1235, "▁here": 1236, "the": 1237, "ures": 1238, "▁%": 1239, "min": 1240, "▁null": 1241, "rap": 1242, "\")": 1243, "rr": 1244, "List": 1245, "right": 1246, "▁User": 1247, "UL": 1248, "ational": 1249, "▁being": 1250, "AN": 1251, "sk": 1252, "▁car": 1253, "ole": 1254, "▁dist": 1255, "plic": 1256, "ollow": 1257, "▁pres": 1258, "▁such": 1259, "ream": 1260, "ince": 1261, "gan": 1262, "▁For": 1263, "\":": 1264, "son": 1265, "rivate": 1266, "▁years": 1267, "▁serv": 1268, "▁made": 1269, "def": 1270, ";\r": 1271, "▁gl": 1272, "▁bel": 1273, "▁list": 1274, "▁cor": 1275, "▁det": 1276, "ception": 1277, "egin": 1278, "▁б": 1279, "▁char": 1280, "trans": 1281, "▁fam": 1282, "▁!=": 1283, "ouse": 1284, "▁dec": 1285, "ica": 1286, "▁many": 1287, "aking": 1288, "▁à": 1289, "▁sim": 1290, "ages": 1291, "uff": 1292, "ased": 1293, "man": 1294, "▁Sh": 1295, "iet": 1296, "irect": 1297, "▁Re": 1298, "▁differ": 1299, "▁find": 1300, "ethod": 1301, "▁\r": 1302, "ines": 1303, "▁inv": 1304, "▁point": 1305, "▁They": 1306, "▁used": 1307, "ctions": 1308, "▁still": 1309, "ió": 1310, "ined": 1311, "▁while": 1312, "It": 1313, "ember": 1314, "▁say": 1315, "▁help": 1316, "▁cre": 1317, "▁x": 1318, "▁Tr": 1319, "ument": 1320, "▁sk": 1321, "ought": 1322, "ually": 1323, "message": 1324, "▁Con": 1325, "▁mon": 1326, "ared": 1327, "work": 1328, "):": 1329, "ister": 1330, "arn": 1331, "ized": 1332, "Data": 1333, "orn": 1334, "▁head": 1335, "DE": 1336, "▁Le": 1337, "▁person": 1338, "ments": 1339, "ength": 1340, "▁false": 1341, "▁med": 1342, "▁De": 1343, "ache": 1344, "ited": 1345, "▁let": 1346, "▁show": 1347, "▁same": 1348, "uss": 1349, "▁gener": 1350, "▁у": 1351, "cur": 1352, "▁real": 1353, "ced": 1354, "\">": 1355, "struct": 1356, "begin": 1357, "cept": 1358, "▁bo": 1359, "ired": 1360, "▁Fr": 1361, "▁stud": 1362, "dev": 1363, "Ar": 1364, "(\\": 1365, "▁Cl": 1366, "ween": 1367, "▁too": 1368, 
"▁test": 1369, "▁day": 1370, "oh": 1371, "▁follow": 1372, "ature": 1373, "ze": 1374, "ien": 1375, "reg": 1376, "ces": 1377, "uring": 1378, "amb": 1379, "ina": 1380, "cri": 1381, "▁ed": 1382, "SS": 1383, "uck": 1384, "▁/*": 1385, "CT": 1386, "▁There": 1387, "▁take": 1388, "par": 1389, "ule": 1390, "cal": 1391, "for": 1392, "****************": 1393, "source": 1394, "▁those": 1395, "col": 1396, "▁eff": 1397, "mod": 1398, "cont": 1399, "}{": 1400, "▁around": 1401, "press": 1402, "by": 1403, "▁going": 1404, "ponse": 1405, "▁С": 1406, "▁line": 1407, "date": 1408, "code": 1409, "['": 1410, "▁life": 1411, "ason": 1412, "▁using": 1413, "▁val": 1414, "▁du": 1415, "yp": 1416, "▁▁▁▁▁▁▁▁▁▁▁▁▁▁": 1417, "▁On": 1418, "▁found": 1419, "olut": 1420, "']": 1421, "arent": 1422, "▁string": 1423, "▁met": 1424, "▁wr": 1425, "ush": 1426, "string": 1427, "size": 1428, "▁ver": 1429, "▁each": 1430, "value": 1431, "▁last": 1432, "▁got": 1433, "ven": 1434, "back": 1435, "Set": 1436, "ey": 1437, "rol": 1438, "▁cr": 1439, "thing": 1440, "ret": 1441, "és": 1442, "ism": 1443, "▁between": 1444, "Ob": 1445, "ething": 1446, "mp": 1447, "▁lo": 1448, "ats": 1449, "▁New": 1450, "ви": 1451, "ado": 1452, "dex": 1453, "ди": 1454, "▁pass": 1455, "wh": 1456, "▁den": 1457, "Get": 1458, "apt": 1459, "▁ask": 1460, "▁sup": 1461, "Value": 1462, "ны": 1463, "▁try": 1464, "lation": 1465, "day": 1466, "ness": 1467, "ets": 1468, "▁exper": 1469, "Tr": 1470, "▁Mar": 1471, "serv": 1472, "br": 1473, "▁number": 1474, "inal": 1475, "cent": 1476, "/*": 1477, "not": 1478, "ional": 1479, "▁final": 1480, "')": 1481, "▁run": 1482, "over": 1483, "▁never": 1484, "uc": 1485, "▁high": 1486, "yle": 1487, "▁ins": 1488, "▁best": 1489, "ittle": 1490, "ric": 1491, "▁sign": 1492, "▁dem": 1493, "iness": 1494, "gy": 1495, "▁war": 1496, "ished": 1497, "▁giv": 1498, "key": 1499, "▁X": 1500, "($": 1501, "▁child": 1502, "less": 1503, "ways": 1504, "incl": 1505, "rop": 1506, "raw": 1507, "://": 1508, "▁«": 1509, "no": 1510, "indow": 1511, "fe": 1512, "riend": 1513, "▁les": 1514, "▁los": 1515, "file": 1516, "formation": 1517, "ccess": 1518, "▁В": 1519, "na": 1520, "▁il": 1521, "ision": 1522, "ler": 1523, "▁art": 1524, "Cont": 1525, "▁world": 1526, "▁turn": 1527, "▁really": 1528, "▁Ex": 1529, "ма": 1530, "▁П": 1531, "ters": 1532, "arget": 1533, "Err": 1534, "▁happ": 1535, "time": 1536, "▁So": 1537, "div": 1538, "▁didn": 1539, "ada": 1540, "oot": 1541, "})": 1542, "▁sch": 1543, "▁cle": 1544, "▁something": 1545, "().": 1546, "▁cour": 1547, "ever": 1548, "ants": 1549, "▁?": 1550, "To": 1551, "▁`": 1552, "try": 1553, "ux": 1554, "ais": 1555, "ross": 1556, "hip": 1557, "▁rep": 1558, "label": 1559, "▁both": 1560, "*,": 1561, "ott": 1562, "ми": 1563, "ane": 1564, "▁open": 1565, "ww": 1566, "▁come": 1567, "▁ext": 1568, "rem": 1569, "_{\\": 1570, "▁old": 1571, "ched": 1572, "._": 1573, "ME": 1574, "ify": 1575, "gg": 1576, "Col": 1577, "view": 1578, "▁bus": 1579, "▁must": 1580, "▁different": 1581, "log": 1582, "ists": 1583, "roll": 1584, "ai": 1585, "▁за": 1586, "▁system": 1587, "ivers": 1588, "atus": 1589, "ote": 1590, "med": 1591, "].": 1592, "akes": 1593, "RO": 1594, "▁cent": 1595, "gram": 1596, "▁private": 1597, "▁great": 1598, "\";": 1599, "opy": 1600, "▁feel": 1601, "▁How": 1602, "////": 1603, "IC": 1604, "▁dr": 1605, "ains": 1606, "lock": 1607, "En": 1608, "▁Sch": 1609, "▁mat": 1610, "▁home": 1611, "perty": 1612, "test": 1613, "loc": 1614, "▁wom": 1615, "sw": 1616, "arly": 1617, "▁En": 1618, "▁ко": 1619, "den": 1620, "ста": 1621, "▁а": 1622, "eter": 1623, "▁includ": 1624, 
"ULL": 1625, "▁mem": 1626, "▁po": 1627, "▁little": 1628, "▁arg": 1629, "▁},": 1630, "include": 1631, "eta": 1632, "▁place": 1633, "idth": 1634, "ustom": 1635, "▁||": 1636, "▁tem": 1637, "ried": 1638, "▁fact": 1639, "ience": 1640, "▁Pl": 1641, "opt": 1642, "ele": 1643, "go": 1644, "AC": 1645, "inter": 1646, "========": 1647, "(),": 1648, "ots": 1649, "ral": 1650, "ique": 1651, "aving": 1652, "ml": 1653, "▁thought": 1654, "frac": 1655, "▁care": 1656, "());": 1657, "▁put": 1658, "▁might": 1659, "▁Amer": 1660, "▁(!": 1661, "ample": 1662, "alth": 1663, "▁few": 1664, "▁state": 1665, "sub": 1666, "▁Or": 1667, "];": 1668, "▁size": 1669, "▁Sp": 1670, "▁without": 1671, "▁poss": 1672, "eq": 1673, "play": 1674, "▁expect": 1675, "▁second": 1676, "▁String": 1677, "uild": 1678, "▁next": 1679, "++": 1680, "requ": 1681, "▁All": 1682, "▁men": 1683, "▁When": 1684, "iter": 1685, "ament": 1686, "net": 1687, "▁К": 1688, "ron": 1689, "aint": 1690, "▁Is": 1691, "ве": 1692, "pend": 1693, "translation": 1694, "▁го": 1695, "че": 1696, "▁van": 1697, "▁another": 1698, "▁ret": 1699, "▁La": 1700, "Mod": 1701, "ION": 1702, "list": 1703, "▁post": 1704, "da": 1705, "ware": 1706, "▁word": 1707, "Error": 1708, "▁seem": 1709, "▁contin": 1710, "atic": 1711, "▁three": 1712, "Object": 1713, "▁partic": 1714, "$.": 1715, "▁mark": 1716, "▁vis": 1717, "rc": 1718, "▁sw": 1719, "ptions": 1720, "▁break": 1721, "▁things": 1722, "ute": 1723, "ui": 1724, "▁That": 1725, "urs": 1726, "gl": 1727, "ру": 1728, "▁file": 1729, "use": 1730, "igned": 1731, "part": 1732, "Un": 1733, "▁equ": 1734, "(&": 1735, "▁lead": 1736, "rm": 1737, "ained": 1738, "▁Be": 1739, "path": 1740, "▁small": 1741, "ager": 1742, "▁always": 1743, "▁El": 1744, "▁order": 1745, "▁ey": 1746, "▁won": 1747, "ape": 1748, "▁left": 1749, "ava": 1750, "item": 1751, "hor": 1752, "▁away": 1753, "bb": 1754, "fun": 1755, "▁Ind": 1756, "mb": 1757, "▁struct": 1758, "▁process": 1759, "▁support": 1760, ");\r": 1761, "ión": 1762, "LO": 1763, "▁oper": 1764, "UT": 1765, "▁·": 1766, "PE": 1767, "load": 1768, "off": 1769, "▁No": 1770, "ives": 1771, "ican": 1772, "▁ve": 1773, "action": 1774, "';": 1775, "▁vo": 1776, "$,": 1777, "▁Gr": 1778, "pre": 1779, "ny": 1780, "aining": 1781, "ior": 1782, "init": 1783, "lection": 1784, "arm": 1785, "umn": 1786, "ags": 1787, "ци": 1788, "ско": 1789, "version": 1790, "▁To": 1791, "▁ref": 1792, "stand": 1793, "▁At": 1794, "ift": 1795, "▁ein": 1796, "face": 1797, "bo": 1798, "ified": 1799, "ved": 1800, "sum": 1801, "une": 1802, "ital": 1803, "ump": 1804, "comm": 1805, "▁mov": 1806, "elt": 1807, "▁von": 1808, "velop": 1809, "ctor": 1810, "head": 1811, "cle": 1812, "▁build": 1813, "inc": 1814, ".'": 1815, "bs": 1816, "info": 1817, "chn": 1818, "▁week": 1819, "▁book": 1820, "HE": 1821, "bar": 1822, "icense": 1823, "▁What": 1824, "▁quest": 1825, "urch": 1826, "ato": 1827, "left": 1828, "▁mar": 1829, "▁top": 1830, "FF": 1831, "▁friend": 1832, "▁beh": 1833, "▁field": 1834, "▁against": 1835, "ract": 1836, "ization": 1837, "user": 1838, "chen": 1839, "▁keep": 1840, "AD": 1841, "itor": 1842, "▁non": 1843, "ird": 1844, "ope": 1845, "▁rest": 1846, "▁dev": 1847, "▁__": 1848, "▁una": 1849, "▁term": 1850, "IS": 1851, "▁pop": 1852, "rist": 1853, "▁since": 1854, "ves": 1855, "▁hard": 1856, "pi": 1857, "util": 1858, "▁soc": 1859, "ene": 1860, "Exception": 1861, "▁local": 1862, "▁direct": 1863, "▁sure": 1864, "▁bro": 1865, "▁da": 1866, "▁": 2370, "aim": 2371, "▁service": 2372, "▁within": 2373, "angu": 2374, "▁Д": 2375, "uffer": 2376, "AG": 2377, "▁Do": 2378, "▁incre": 2379, 
"▁understand": 2380, "}^": 2381, "▁looked": 2382, "gen": 2383, "ailed": 2384, "▁е": 2385, "ayer": 2386, "▁One": 2387, "▁bas": 2388, "▁job": 2389, "mu": 2390, "but": 2391, "elta": 2392, "▁Christ": 2393, "uration": 2394, "▁record": 2395, "▁Univers": 2396, "ivid": 2397, "valid": 2398, "▁Р": 2399, "▁hold": 2400, "▁table": 2401, "ones": 2402, "link": 2403, "▁Ge": 2404, "▁offer": 2405, "ster": 2406, "Form": 2407, "={": 2408, "▁не": 2409, "stance": 2410, "▁govern": 2411, "▁techn": 2412, "▁prim": 2413, "*.": 2414, "cho": 2415, "max": 2416, "▁fore": 2417, "▁Can": 2418, "▁polit": 2419, "ories": 2420, "▁times": 2421, "▁dans": 2422, "▁air": 2423, "▁anything": 2424, "▁sever": 2425, "acy": 2426, "}_": 2427, "He": 2428, "▁least": 2429, "ips": 2430, "ENT": 2431, "do": 2432, "▁от": 2433, "▁cost": 2434, ".”": 2435, "▁children": 2436, "ability": 2437, "But": 2438, "▁path": 2439, "result": 2440, "acter": 2441, "▁element": 2442, "ee": 2443, "▁wait": 2444, "▁money": 2445, "Map": 2446, "td": 2447, "oin": 2448, "iving": 2449, "icht": 2450, "icy": 2451, "sch": 2452, "ste": 2453, "ду": 2454, "ored": 2455, "oud": 2456, "ille": 2457, "ised": 2458, "plication": 2459, "▁custom": 2460, "▁having": 2461, "ponent": 2462, "▁By": 2463, "ules": 2464, "ued": 2465, "atter": 2466, "And": 2467, "itive": 2468, "Def": 2469, "▁moment": 2470, "aterial": 2471, "Class": 2472, "ograph": 2473, "ike": 2474, "▁large": 2475, "▁####": 2476, "▁either": 2477, "duct": 2478, "▁Then": 2479, "▁Gu": 2480, "olean": 2481, "pert": 2482, "▁Get": 2483, "▁Ab": 2484, "▁short": 2485, "On": 2486, "iment": 2487, "▁project": 2488, "cript": 2489, "▁including": 2490, "ния": 2491, "▁making": 2492, "▁someone": 2493, "▁Fl": 2494, "▁sat": 2495, "▁company": 2496, "ocus": 2497, "pu": 2498, "▁God": 2499, "ification": 2500, "No": 2501, "▁sn": 2502, "ano": 2503, "ga": 2504, "▁au": 2505, "▁cou": 2506, "ás": 2507, "ended": 2508, "ту": 2509, "ober": 2510, "▁nothing": 2511, "▁net": 2512, "▁pot": 2513, "▁typ": 2514, "▁item": 2515, "rew": 2516, "Att": 2517, "▁young": 2518, "}\r": 2519, "nder": 2520, "start": 2521, "▁Sc": 2522, "*)": 2523, "▁enc": 2524, "▁women": 2525, "▁looking": 2526, "▁ро": 2527, "▁health": 2528, "Path": 2529, "▁After": 2530, "▁mult": 2531, "▁{\\": 2532, "▁land": 2533, "orld": 2534, "▁Des": 2535, "▁eng": 2536, "input": 2537, "▁Pol": 2538, "\"\"": 2539, "Code": 2540, "▁supp": 2541, "ainer": 2542, "heck": 2543, "▁mor": 2544, "▁mill": 2545, "▁aw": 2546, "fs": 2547, "▁doing": 2548, "tings": 2549, "ades": 2550, "▁toget": 2551, "▁certain": 2552, "▁together": 2553, "CE": 2554, "ideo": 2555, "▁American": 2556, "ony": 2557, "idd": 2558, "II": 2559, "ged": 2560, "ables": 2561, "▁ident": 2562, "iod": 2563, "▁parent": 2564, "For": 2565, "ambda": 2566, "ando": 2567, "=\\": 2568, "aged": 2569, "ending": 2570, "Int": 2571, "▁possible": 2572, "▁со": 2573, "ivity": 2574, "num": 2575, "rt": 2576, "ajor": 2577, "create": 2578, "ride": 2579, "▁knew": 2580, "bit": 2581, "itional": 2582, "▁lik": 2583, "▁Her": 2584, "ension": 2585, "\".": 2586, "oto": 2587, "▁exist": 2588, "aken": 2589, "▁actually": 2590, "ca": 2591, "▁Г": 2592, "хо": 2593, "inn": 2594, "All": 2595, "buf": 2596, "▁Me": 2597, "▁seen": 2598, "ops": 2599, "▁▁▁▁▁▁▁▁▁": 2600, "Not": 2601, "▁control": 2602, "▁respon": 2603, "};": 2604, "ilt": 2605, "isk": 2606, "▁bad": 2607, "▁often": 2608, "▁past": 2609, "aper": 2610, "▁reason": 2611, "eters": 2612, "▁wanted": 2613, "ura": 2614, "table": 2615, "ormal": 2616, "width": 2617, "га": 2618, "ptr": 2619, "▁dest": 2620, "▁design": 2621, "▁sound": 2622, "▁plan": 2623, "▁base": 
2624, "hand": 2625, "gs": 2626, "▁says": 2627, "function": 2628, "▁tri": 2629, "mt": 2630, "▁invest": 2631, "▁available": 2632, "ayout": 2633, "▁och": 2634, "▁las": 2635, "illed": 2636, "Val": 2637, "▁ф": 2638, "iety": 2639, "mon": 2640, "Hand": 2641, "Fr": 2642, "iam": 2643, "pace": 2644, "▁Ob": 2645, "▁para": 2646, "▁meet": 2647, "▁sum": 2648, "Message": 2649, "ici": 2650, "▁known": 2651, "▁gen": 2652, "amma": 2653, "arr": 2654, "▁tre": 2655, "oke": 2656, "uth": 2657, "~\\": 2658, "▁experience": 2659, "icle": 2660, "▁Il": 2661, "▁sent": 2662, "▁others": 2663, "▁soft": 2664, "IP": 2665, "▁max": 2666, "ball": 2667, "▁market": 2668, "▁pour": 2669, "pression": 2670, "eps": 2671, "▁saw": 2672, "▁across": 2673, "▁Su": 2674, "Over": 2675, "ние": 2676, "ulation": 2677, "▁Reg": 2678, "▁+=": 2679, "body": 2680, ")\\": 2681, "▁print": 2682, "▁при": 2683, "db": 2684, "ources": 2685, "wards": 2686, "▁black": 2687, "со": 2688, "ili": 2689, "▁Ed": 2690, "▁complet": 2691, "▁single": 2692, "▁IN": 2693, "ached": 2694, "bt": 2695, "▁code": 2696, "▁bool": 2697, "▁area": 2698, "▁require": 2699, "▁problem": 2700, "aced": 2701, "Equ": 2702, "▁config": 2703, "vec": 2704, "ney": 2705, "cy": 2706, "Al": 2707, "▁account": 2708, "ymbol": 2709, "▁ste": 2710, "ges": 2711, "Array": 2712, "empl": 2713, "context": 2714, "Des": 2715, "Result": 2716, "ecut": 2717, "▁target": 2718, "▁getting": 2719, "\"/>": 2720, "ogle": 2721, "▁himself": 2722, "▁wasn": 2723, "▁block": 2724, "▁ant": 2725, "▁York": 2726, "▁become": 2727, "iff": 2728, "ports": 2729, "reate": 2730, "='": 2731, "cd": 2732, "location": 2733, "ет": 2734, "▁access": 2735, "gress": 2736, "ros": 2737, "Up": 2738, "▁working": 2739, "▁Am": 2740, "iqu": 2741, "cer": 2742, "▁((": 2743, "▁Per": 2744, "▁func": 2745, "▁girl": 2746, "▁above": 2747, "pen": 2748, "пи": 2749, "ido": 2750, "▁version": 2751, "TY": 2752, "▁;": 2753, "mary": 2754, "abled": 2755, "annel": 2756, "▁example": 2757, "▁context": 2758, "OP": 2759, "▁red": 2760, "▁cir": 2761, "sm": 2762, "Log": 2763, "▁space": 2764, "▁fut": 2765, "▁Gener": 2766, "ills": 2767, "▁dri": 2768, "_.": 2769, "▁felt": 2770, "▁offic": 2771, "▁===": 2772, "ii": 2773, "▁started": 2774, "▁Т": 2775, "▁});": 2776, "js": 2777, "▁front": 2778, "▁almost": 2779, "irm": 2780, "!\"": 2781, "signed": 2782, "▁yet": 2783, "▁trad": 2784, "ients": 2785, "ama": 2786, "▁input": 2787, "lim": 2788, "па": 2789, "▁ка": 2790, "▁camp": 2791, "ibr": 2792, "fect": 2793, "unt": 2794, "▁half": 2795, "▁cover": 2796, "anguage": 2797, "▁ben": 2798, "ha": 2799, "▁diff": 2800, "_\\": 2801, "▁об": 2802, "])": 2803, "odes": 2804, "hel": 2805, "ios": 2806, "▁О": 2807, "▁mot": 2808, "▁social": 2809, "////////": 2810, "▁stre": 2811, "ground": 2812, "ів": 2813, "object": 2814, "ples": 2815, "reed": 2816, "▁een": 2817, "▁based": 2818, "▁range": 2819, "An": 2820, "urg": 2821, "▁learn": 2822, "▁exc": 2823, "▁imp": 2824, "▁means": 2825, "▁wur": 2826, "ends": 2827, "void": 2828, "▁std": 2829, "▁particular": 2830, "ja": 2831, "▁source": 2832, "default": 2833, "py": 2834, "▁als": 2835, "scri": 2836, "status": 2837, "▁story": 2838, "▁begin": 2839, "▁position": 2840, "▁special": 2841, "php": 2842, "▁bar": 2843, "▁pract": 2844, "call": 2845, "▁das": 2846, "▁rad": 2847, "▁close": 2848, "www": 2849, "ере": 2850, "gu": 2851, "▁Er": 2852, "▁dom": 2853, "AM": 2854, "▁bed": 2855, "▁several": 2856, "aul": 2857, "box": 2858, "▁low": 2859, "pack": 2860, "Reg": 2861, "Of": 2862, "atures": 2863, "én": 2864, "eder": 2865, "uilder": 2866, "cast": 2867, "conom": 2868, "raft": 2869, "▁makes": 
2870, "Loc": 2871, "http": 2872, "▁abs": 2873, "resh": 2874, "▁Will": 2875, "break": 2876, "▁options": 2877, "fort": 2878, "▁из": 2879, "▁anal": 2880, "▁env": 2881, "({": 2882, "event": 2883, "▁page": 2884, "ternal": 2885, "▁distribut": 2886, "▁food": 2887, "check": 2888, "CK": 2889, "▁во": 2890, "assert": 2891, "án": 2892, "base": 2893, "▁whole": 2894, "ación": 2895, "OD": 2896, "▁turned": 2897, "igma": 2898, "▁response": 2899, "▁University": 2900, "▁div": 2901, "apter": 2902, "▁results": 2903, "▁represent": 2904, "▁everything": 2905, "▁Cent": 2906, "utes": 2907, "rix": 2908, "▁Some": 2909, "▁behind": 2910, "▁creat": 2911, "place": 2912, "su": 2913, "▁Part": 2914, "umb": 2915, "mathbb": 2916, "ping": 2917, "▁match": 2918, "Out": 2919, "dom": 2920, "▁situ": 2921, "dr": 2922, "ara": 2923, "▁window": 2924, "ns": 2925, "lished": 2926, "▁Ver": 2927, "▁message": 2928, "▁Em": 2929, "▁human": 2930, "perties": 2931, "лу": 2932, "lem": 2933, "ORT": 2934, "▁early": 2935, "▁quick": 2936, "▁та": 2937, "roid": 2938, "▁country": 2939, "▁due": 2940, "▁Die": 2941, "▁trying": 2942, "▁live": 2943, "▁press": 2944, "INT": 2945, "With": 2946, "oved": 2947, "▁specific": 2948, "▁fall": 2949, "uk": 2950, "yl": 2951, "▁general": 2952, "му": 2953, "ну": 2954, "▁names": 2955, "where": 2956, "▁These": 2957, "▁sil": 2958, "ét": 2959, "▁ener": 2960, "▁Now": 2961, "▁address": 2962, "Response": 2963, "▁Mr": 2964, "▁answ": 2965, "▁film": 2966, "▁strong": 2967, "▁bring": 2968, "▁United": 2969, "▁ge": 2970, "▁woman": 2971, "New": 2972, "ett": 2973, ".)": 2974, "ename": 2975, "▁AN": 2976, "▁describ": 2977, "за": 2978, "ising": 2979, "EL": 2980, "ql": 2981, "▁fur": 2982, "ying": 2983, "▁Cal": 2984, "▁Dr": 2985, "ERR": 2986, "▁\\\\": 2987, "angle": 2988, "urope": 2989, "▁city": 2990, "▁index": 2991, "▁action": 2992, "▁However": 2993, "▁fig": 2994, "ias": 2995, "▁question": 2996, "▁Jan": 2997, "▁Med": 2998, "▁Cont": 2999, "amed": 3000, "Call": 3001, "plied": 3002, "tty": 3003, "▁individ": 3004, "page": 3005, "▁comb": 3006, "section": 3007, "▁Comm": 3008, "uel": 3009, "▁het": 3010, "▁Bar": 3011, "agement": 3012, "fin": 3013, "▁major": 3014, "oper": 3015, "api": 3016, "room": 3017, "▁„": 3018, "▁hab": 3019, "зи": 3020, "▁auf": 3021, "current": 3022, "ni": 3023, "▁include": 3024, "▁qui": 3025, "va": 3026, "UE": 3027, "▁idea": 3028, ",'": 3029, "▁required": 3030, "▁heart": 3031, "ibility": 3032, "iction": 3033, "Model": 3034, "write": 3035, "▁content": 3036, "▁wer": 3037, "▁hands": 3038, "zen": 3039, "char": 3040, "}^{": 3041, "▁mass": 3042, "ply": 3043, "▁nat": 3044, "rel": 3045, "▁dat": 3046, "================": 3047, "imal": 3048, "▁probably": 3049, "unch": 3050, "▁mer": 3051, "ilar": 3052, "ires": 3053, "▁watch": 3054, "SI": 3055, "▁cult": 3056, "▁mother": 3057, "▁government": 3058, "ording": 3059, "▁()": 3060, "▁pri": 3061, "▁link": 3062, "group": 3063, "OL": 3064, "▁near": 3065, "▁Ser": 3066, "Ser": 3067, "ito": 3068, "▁values": 3069, "▁java": 3070, "fully": 3071, "Count": 3072, "++)": 3073, "▁vi": 3074, "▁white": 3075, "mat": 3076, "ctx": 3077, "▁conc": 3078, "▁stay": 3079, "ging": 3080, "▁clear": 3081, "▁copy": 3082, "selves": 3083, "▁provide": 3084, "▁words": 3085, "comp": 3086, "args": 3087, "▁pick": 3088, "uly": 3089, "▁vari": 3090, "▁believe": 3091, "▁Co": 3092, "Property": 3093, "Group": 3094, "▁ten": 3095, "ischen": 3096, "eturn": 3097, "ival": 3098, "System": 3099, "CL": 3100, "bed": 3101, "▁total": 3102, "▁ist": 3103, "Input": 3104, "uments": 3105, "Manager": 3106, "ши": 3107, "▁win": 3108, "leep": 3109, "PI": 3110, 
"ного": 3111, "ruction": 3112, "▁inte": 3113, "App": 3114, "avor": 3115, "▁respect": 3116, "ators": 3117, "▁como": 3118, "▁cut": 3119, "FA": 3120, "▁sus": 3121, "▁App": 3122, "rect": 3123, "FI": 3124, "▁began": 3125, "oph": 3126, "▁sort": 3127, "though": 3128, "је": 3129, "icro": 3130, "Trans": 3131, "лі": 3132, "▁Inst": 3133, "request": 3134, "ор": 3135, "▁relations": 3136, "-\\": 3137, "Status": 3138, "жи": 3139, "▁father": 3140, "cs": 3141, "▁sex": 3142, "isch": 3143, "vo": 3144, "}_{": 3145, "aven": 3146, "▁Ne": 3147, "ATE": 3148, "itten": 3149, "▁ess": 3150, "TH": 3151, "ights": 3152, "▁hom": 3153, "▁today": 3154, "▁zu": 3155, "ita": 3156, "▁isn": 3157, "▁opt": 3158, "ogn": 3159, "ér": 3160, "▁whether": 3161, "ixed": 3162, "phi": 3163, "idence": 3164, "ald": 3165, "Client": 3166, "At": 3167, "▁death": 3168, "▁Let": 3169, "ius": 3170, "ги": 3171, "▁ре": 3172, "ben": 3173, ")\r": 3174, "ba": 3175, ">": 3193, "▁Just": 3194, "What": 3195, "atal": 3196, "▁Min": 3197, "▁Cor": 3198, "▁dark": 3199, "rl": 3200, "▁larg": 3201, "ding": 3202, "ón": 3203, "ouch": 3204, "▁um": 3205, "▁elect": 3206, "▁dam": 3207, "▁needs": 3208, "▁matter": 3209, "▁rather": 3210, "from": 3211, "ram": 3212, "▁і": 3213, "▁taken": 3214, "▁deal": 3215, "▁period": 3216, "▁Mon": 3217, "▁Л": 3218, "▁Aug": 3219, "run": 3220, "mm": 3221, "elle": 3222, "▁export": 3223, "Sc": 3224, "vis": 3225, "abor": 3226, "▁author": 3227, "ère": 3228, "▁remember": 3229, "▁redu": 3230, "▁List": 3231, "▁focus": 3232, "▁character": 3233, "Table": 3234, "▁individual": 3235, "▁needed": 3236, "bum": 3237, "▁style": 3238, "inary": 3239, "ersion": 3240, "oute": 3241, "▁Pe": 3242, "▁hon": 3243, "mut": 3244, "see": 3245, "▁became": 3246, "▁dire": 3247, "▁document": 3248, "sec": 3249, "ening": 3250, "▁visit": 3251, "▁fac": 3252, "tx": 3253, "down": 3254, "plit": 3255, "▁phys": 3256, "itting": 3257, "joy": 3258, "▁hig": 3259, "This": 3260, "Ad": 3261, "▁Brit": 3262, "▁employ": 3263, "▁ré": 3264, "▁т": 3265, "lambda": 3266, "▁impro": 3267, "▁Bo": 3268, "iding": 3269, "▁online": 3270, "mem": 3271, "atform": 3272, "▁War": 3273, "▁cas": 3274, "asure": 3275, "▁pur": 3276, "medi": 3277, "Dis": 3278, "▁Germ": 3279, "pc": 3280, "са": 3281, "▁friends": 3282, "▁Mc": 3283, "DI": 3284, "▁plus": 3285, "▁Set": 3286, "iddle": 3287, "itut": 3288, "▁depend": 3289, "rest": 3290, "▁Je": 3291, "▁hor": 3292, "▁entire": 3293, "Query": 3294, "▁refer": 3295, "▁hot": 3296, "▁Aust": 3297, "▁common": 3298, "ці": 3299, "▁pull": 3300, "▁Add": 3301, "▁season": 3302, "▁invol": 3303, "▁World": 3304, "client": 3305, "now": 3306, "true": 3307, "append": 3308, "itted": 3309, "empt": 3310, "){": 3311, "///": 3312, "▁prop": 3313, "imate": 3314, "SC": 3315, "▁hours": 3316, "▁hope": 3317, "andom": 3318, "ід": 3319, "istic": 3320, "▁property": 3321, "sg": 3322, ">(": 3323, "▁write": 3324, "mark": 3325, "find": 3326, "▁personal": 3327, "][": 3328, "rown": 3329, "Ph": 3330, "▁foot": 3331, "▁research": 3332, "ironment": 3333, "▁nom": 3334, "▁instance": 3335, "▁held": 3336, "De": 3337, "▁members": 3338, "▁fire": 3339, "▁history": 3340, "▁map": 3341, "▁discuss": 3342, "▁espec": 3343, "▁taking": 3344, "▁services": 3345, "▁indust": 3346, "igen": 3347, "▁Ass": 3348, "▁expected": 3349, "▁wurde": 3350, "dir": 3351, "▁among": 3352, "▁sugg": 3353, "rec": 3354, "Inter": 3355, "block": 3356, "▁Rep": 3357, "▁pain": 3358, "▁five": 3359, "▁fund": 3360, "rid": 3361, "arrow": 3362, "▁treat": 3363, "▁heard": 3364, "▁determ": 3365, "icult": 3366, "▁sense": 3367, "ese": 3368, "Fun": 3369, "▁months": 3370, "json": 
3371, ",”": 3372, "TI": 3373, "orage": 3374, "▁У": 3375, "▁everyone": 3376, "▁clos": 3377, "iers": 3378, "airs": 3379, "define": 3380, "If": 3381, "osp": 3382, "▁wonder": 3383, "NA": 3384, "query": 3385, "pg": 3386, "ites": 3387, "▁material": 3388, "yd": 3389, "Read": 3390, "html": 3391, "TE": 3392, "Pr": 3393, "^{\\": 3394, "▁gave": 3395, "▁IS": 3396, "▁suggest": 3397, "Override": 3398, "rodu": 3399, "From": 3400, "▁Europe": 3401, "PO": 3402, "▁soon": 3403, "host": 3404, "▁Ber": 3405, "....": 3406, "▁Har": 3407, "▁energy": 3408, "><": 3409, "aves": 3410, "▁easy": 3411, "▁bre": 3412, "frame": 3413, "▁ground": 3414, "with": 3415, "▁inside": 3416, "ief": 3417, "▁mo": 3418, "pm": 3419, "pan": 3420, "igr": 3421, "▁om": 3422, "next": 3423, "omet": 3424, "▁status": 3425, "▁}\r": 3426, "▁music": 3427, "ora": 3428, "iles": 3429, "ki": 3430, "▁esc": 3431, "▁bes": 3432, "▁Dis": 3433, "▁host": 3434, "▁comes": 3435, "used": 3436, "▁future": 3437, "lick": 3438, "aid": 3439, "▁compet": 3440, "▁voice": 3441, "▁load": 3442, "evel": 3443, "▁neg": 3444, "▁command": 3445, "▁für": 3446, "▁pie": 3447, "▁quite": 3448, "▁blo": 3449, "agn": 3450, "ilon": 3451, "▁claim": 3452, "▁teach": 3453, "▁previous": 3454, "▁site": 3455, "color": 3456, "attr": 3457, "▁accept": 3458, "▁exact": 3459, ")}": 3460, "aft": 3461, "roller": 3462, "он": 3463, "oo": 3464, "Date": 3465, "▁ou": 3466, "sy": 3467, "▁pretty": 3468, "▁image": 3469, "BU": 3470, "▁terms": 3471, "▁search": 3472, "▁è": 3473, "▁Val": 3474, "▁‘": 3475, "▁Dav": 3476, "MS": 3477, "src": 3478, "mar": 3479, "incip": 3480, "▁couldn": 3481, "ados": 3482, "▁dro": 3483, "beta": 3484, "imum": 3485, "▁minutes": 3486, "▁grand": 3487, "▁»": 3488, "▁Our": 3489, "Str": 3490, "VER": 3491, "maz": 3492, "▁original": 3493, "ini": 3494, "▁coll": 3495, "loat": 3496, "▁os": 3497, "});": 3498, "summary": 3499, "▁wall": 3500, "Color": 3501, "▁vers": 3502, "▁della": 3503, "▁\"\"\"": 3504, "mathbf": 3505, "zer": 3506, "aur": 3507, "▁track": 3508, "▁associ": 3509, "▁suff": 3510, "▁inde": 3511, "ague": 3512, "▁Apr": 3513, "Le": 3514, "roups": 3515, "board": 3516, "▁attack": 3517, "▁series": 3518, "▁instead": 3519, "ham": 3520, "book": 3521, "▁six": 3522, "▁Rec": 3523, "▁coming": 3524, "urt": 3525, "▁global": 3526, "▁necess": 3527, "lege": 3528, "Pos": 3529, "▁leave": 3530, "▁pod": 3531, "ategory": 3532, "uz": 3533, "▁deep": 3534, "▁km": 3535, "▁outside": 3536, "has": 3537, "options": 3538, "▁Sm": 3539, "Sub": 3540, "rows": 3541, "▁ви": 3542, "▁States": 3543, "▁wrong": 3544, "▁however": 3545, "▁sem": 3546, "▁catch": 3547, "\"),": 3548, "model": 3549, "▁http": 3550, "▁option": 3551, "rie": 3552, "▁ста": 3553, "▁är": 3554, "▁enjoy": 3555, "nu": 3556, "▁pas": 3557, "▁amount": 3558, "▁respons": 3559, "▁Intern": 3560, "▁myself": 3561, "▁opp": 3562, "▁Sim": 3563, "▁sens": 3564, "Ed": 3565, "▁(\\": 3566, "▁students": 3567, "нов": 3568, "▁points": 3569, "arning": 3570, "UP": 3571, "elling": 3572, "▁cannot": 3573, "Be": 3574, "▁length": 3575, "null": 3576, "uint": 3577, "wise": 3578, "▁double": 3579, "ige": 3580, "ista": 3581, "▁estab": 3582, "anch": 3583, "▁ago": 3584, "▁bound": 3585, "▁fa": 3586, "▁clean": 3587, "▁simple": 3588, "mi": 3589, "########": 3590, "ifier": 3591, "▁General": 3592, "▁seemed": 3593, "ena": 3594, "▁age": 3595, "ной": 3596, "endif": 3597, "AA": 3598, "▁caus": 3599, "▁educ": 3600, "▁cell": 3601, "Gener": 3602, "space": 3603, "▁Your": 3604, "▁beaut": 3605, "gt": 3606, "▁limit": 3607, "▁date": 3608, "Util": 3609, "▁National": 3610, "ows": 3611, "pat": 3612, "quad": 3613, "▁ok": 
3614, "▁И": 3615, "arth": 3616, "hat": 3617, "▁community": 3618, "oul": 3619, "▁econom": 3620, "Component": 3621, "bor": 3622, "usion": 3623, "▁below": 3624, "earch": 3625, "ores": 3626, "ban": 3627, "▁August": 3628, "▁further": 3629, "sigma": 3630, "▁ha": 3631, "ji": 3632, "▁comput": 3633, "гра": 3634, "▁None": 3635, "▁ter": 3636, "▁anyone": 3637, "▁task": 3638, "ente": 3639, "position": 3640, "pped": 3641, "▁aus": 3642, "Attribute": 3643, "req": 3644, "addr": 3645, "light": 3646, "ше": 3647, "▁arm": 3648, "cover": 3649, "upport": 3650, "▁Gl": 3651, "▁San": 3652, "▁writing": 3653, "▁lost": 3654, "▁Mark": 3655, "▁gre": 3656, "TYPE": 3657, "▁South": 3658, "▁perfect": 3659, "▁package": 3660, "▁infl": 3661, "haps": 3662, "▁Ang": 3663, "respon": 3664, "ris": 3665, "ptember": 3666, "▁building": 3667, "VAL": 3668, "free": 3669, "▁ce": 3670, "HT": 3671, "▁From": 3672, "ds": 3673, "roy": 3674, "achine": 3675, "nown": 3676, "▁saying": 3677, "▁бы": 3678, "oe": 3679, "Ref": 3680, "▁network": 3681, "parent": 3682, "uge": 3683, "▁similar": 3684, ">\r": 3685, "Builder": 3686, "▁living": 3687, "▁continue": 3688, "anger": 3689, "▁Red": 3690, "▁hair": 3691, "anced": 3692, "ians": 3693, "▁dead": 3694, "▁boolean": 3695, "ication": 3696, "▁де": 3697, "▁client": 3698, "uct": 3699, "▁•": 3700, "SP": 3701, "older": 3702, "пе": 3703, "udio": 3704, "▁deg": 3705, "asing": 3706, "▁step": 3707, "▁pers": 3708, "ção": 3709, "obj": 3710, "oz": 3711, "ula": 3712, "▁round": 3713, "▁upon": 3714, "▁resource": 3715, "▁valid": 3716, "▁II": 3717, "bug": 3718, "std": 3719, "▁ang": 3720, "span": 3721, "pol": 3722, "ialog": 3723, "▁phot": 3724, "?'": 3725, "DB": 3726, "▁Fin": 3727, "VE": 3728, "Em": 3729, "▁cam": 3730, "target": 3731, "pected": 3732, "Hel": 3733, "▁ut": 3734, "▁Test": 3735, "▁town": 3736, "align": 3737, "▁webs": 3738, "inner": 3739, "augh": 3740, "▁except": 3741, "▁initial": 3742, "enty": 3743, "lich": 3744, "▁Aut": 3745, "top": 3746, "▁fail": 3747, "ona": 3748, "▁benef": 3749, "anks": 3750, "ische": 3751, ".*": 3752, "▁signific": 3753, "▁contact": 3754, "Rec": 3755, "ario": 3756, "ottom": 3757, "▁relationship": 3758, "]);": 3759, "▁На": 3760, "Head": 3761, "format": 3762, "▁ét": 3763, "▁More": 3764, "actory": 3765, "portun": 3766, "+\\": 3767, "▁simply": 3768, "▁ep": 3769, "▁Russ": 3770, "ní": 3771, "ua": 3772, "erc": 3773, "▁longer": 3774, "inition": 3775, "ector": 3776, "aption": 3777, "▁profess": 3778, "▁Mus": 3779, "ilities": 3780, "ès": 3781, "▁Act": 3782, "offset": 3783, "▁ill": 3784, "band": 3785, "▁Ag": 3786, "▁По": 3787, "би": 3788, "content": 3789, "icon": 3790, "▁works": 3791, "ynam": 3792, "plement": 3793, "Resource": 3794, "Action": 3795, "▁difficult": 3796, "▁West": 3797, "▁video": 3798, "▁THE": 3799, "▁decl": 3800, "ondon": 3801, "ded": 3802, "}{\\": 3803, "ocr": 3804, "▁City": 3805, "▁я": 3806, "uer": 3807, "cz": 3808, "▁imag": 3809, "cr": 3810, "ete": 3811, "idget": 3812, "▁Mod": 3813, "▁forward": 3814, "▁pict": 3815, "orge": 3816, "▁subject": 3817, "update": 3818, "attle": 3819, "sa": 3820, "▁Ant": 3821, "▁running": 3822, "▁sal": 3823, "conne": 3824, "▁output": 3825, "adata": 3826, "ML": 3827, "Check": 3828, "ledge": 3829, "▁paper": 3830, "params": 3831, "avy": 3832, "▁af": 3833, "▁eine": 3834, "▁jour": 3835, "AY": 3836, "▁itself": 3837, "▁Str": 3838, "style": 3839, "That": 3840, "▁million": 3841, "▁language": 3842, "OS": 3843, "ving": 3844, "▁ма": 3845, "▁то": 3846, ")(": 3847, "▁buy": 3848, "./": 3849, "▁...": 3850, "▁tried": 3851, "▁compl": 3852, "▁activ": 3853, "apped": 3854, "Button": 
3855, "Token": 3856, "▁provided": 3857, "iber": 3858, "▁created": 3859, "curity": 3860, "End": 3861, "ał": 3862, "uster": 3863, "izing": 3864, "omb": 3865, "▁sich": 3866, "▁compon": 3867, "▁See": 3868, "▁uint": 3869, "▁label": 3870, "vol": 3871, "ów": 3872, "ocol": 3873, "▁received": 3874, "▁intern": 3875, "це": 3876, "Run": 3877, "▁road": 3878, "▁Oct": 3879, "▁Comp": 3880, "▁study": 3881, "▁те": 3882, "Act": 3883, "▁tour": 3884, "▁State": 3885, "▁added": 3886, "https": 3887, "stream": 3888, "▁lower": 3889, "▁box": 3890, "▁Sk": 3891, "▁themselves": 3892, "▁cross": 3893, "▁echo": 3894, "▁device": 3895, "pose": 3896, "▁games": 3897, "PL": 3898, "Window": 3899, "ises": 3900, "title": 3901, "Stream": 3902, "zt": 3903, "▁Sw": 3904, "▁role": 3905, "iant": 3906, "ku": 3907, "sequ": 3908, "▁late": 3909, "▁sold": 3910, "ря": 3911, "Comm": 3912, "▁entre": 3913, "▁dog": 3914, "device": 3915, "Par": 3916, "▁likely": 3917, "^{-": 3918, "▁len": 3919, "▁Paul": 3920, "▁tool": 3921, "Off": 3922, "▁famil": 3923, "▁draw": 3924, "apping": 3925, "▁events": 3926, "cret": 3927, "rought": 3928, "Content": 3929, "▁software": 3930, "ria": 3931, "msg": 3932, "gamma": 3933, "▁hear": 3934, "Oper": 3935, "▁yourself": 3936, "▁liter": 3937, "emp": 3938, "▁separ": 3939, "▁З": 3940, "▁title": 3941, "Method": 3942, "mathrm": 3943, "▁slow": 3944, "▁Rom": 3945, "!!": 3946, "▁tax": 3947, "ска": 3948, "emplate": 3949, "oi": 3950, "▁Art": 3951, "false": 3952, "astic": 3953, "сть": 3954, "ocket": 3955, "▁ens": 3956, "TO": 3957, "amente": 3958, "local": 3959, "chie": 3960, "▁pan": 3961, "ний": 3962, "chema": 3963, "▁North": 3964, "зо": 3965, "▁>=": 3966, "Aut": 3967, "▁dig": 3968, "▁seems": 3969, "▁morning": 3970, "sole": 3971, "umer": 3972, "delta": 3973, "ité": 3974, "abase": 3975, "raf": 3976, "▁observ": 3977, "▁Est": 3978, "▁seg": 3979, "▁[]": 3980, "▁Pres": 3981, "iful": 3982, "push": 3983, "▁Off": 3984, "ipe": 3985, "ati": 3986, "▁dim": 3987, "ceed": 3988, "Ent": 3989, "____": 3990, "entry": 3991, "▁fight": 3992, "▁cred": 3993, "▁OR": 3994, "▁Dep": 3995, "${": 3996, "лен": 3997, "Create": 3998, "▁April": 3999, "ministr": 4000, "FL": 4001, "▁Ap": 4002, "▁Here": 4003, "private": 4004, "Instance": 4005, "iem": 4006, "▁office": 4007, "▁third": 4008, "▁update": 4009, "Line": 4010, "tag": 4011, "▁especially": 4012, "▁года": 4013, "▁cu": 4014, "▁kill": 4015, "aught": 4016, "▁swe": 4017, "Options": 4018, "IM": 4019, "CC": 4020, "▁compan": 4021, "just": 4022, "▁While": 4023, "izer": 4024, "▁мо": 4025, "ке": 4026, "▁auto": 4027, "▁band": 4028, "мен": 4029, "iques": 4030, "▁ple": 4031, "NO": 4032, "▁OF": 4033, "▁song": 4034, "▁Acc": 4035, "EXT": 4036, "ensor": 4037, "ining": 4038, "▁lat": 4039, "big": 4040, "▁King": 4041, "och": 4042, "si": 4043, "▁Hist": 4044, "▁quality": 4045, "mode": 4046, "▁opportun": 4047, "▁wouldn": 4048, ":**": 4049, "output": 4050, "▁feet": 4051, "▁mis": 4052, "df": 4053, "aging": 4054, "▁ме": 4055, "▁tro": 4056, "▁defined": 4057, "▁review": 4058, "▁Fil": 4059, ">>": 4060, "▁princip": 4061, "Base": 4062, "dict": 4063, "verage": 4064, "icient": 4065, "IF": 4066, "▁hit": 4067, "Page": 4068, "▁perm": 4069, "cel": 4070, "ít": 4071, "▁express": 4072, "▁indic": 4073, "▁September": 4074, "image": 4075, "▁products": 4076, "▁media": 4077, "change": 4078, "igger": 4079, "▁send": 4080, "last": 4081, "ming": 4082, "pa": 4083, "uary": 4084, "▁speak": 4085, "ный": 4086, "ще": 4087, "ysis": 4088, "lying": 4089, "▁ч": 4090, "like": 4091, "ры": 4092, "ві": 4093, "▁Mich": 4094, "MO": 4095, "▁Jah": 4096, "ensive": 4097, "▁share": 
4098, "▁development": 4099, "CP": 4100, "spec": 4101, "▁fast": 4102, "het": 4103, "HO": 4104, "▁particip": 4105, "Block": 4106, "▁viol": 4107, "▁frame": 4108, "▁qual": 4109, "tre": 4110, "▁Ф": 4111, "▁toward": 4112, "fg": 4113, "Box": 4114, "Column": 4115, "▁milit": 4116, "▁March": 4117, "▁various": 4118, "pass": 4119, "▁Park": 4120, "▁Ben": 4121, "Frame": 4122, "▁normal": 4123, "open": 4124, "px": 4125, "▁phone": 4126, "▁Even": 4127, "▁ma": 4128, "ibrary": 4129, "Start": 4130, "idden": 4131, "rho": 4132, "graph": 4133, "acing": 4134, "'.": 4135, "arter": 4136, "mes": 4137, "inst": 4138, "▁ir": 4139, "active": 4140, "▁fem": 4141, "▁moved": 4142, "▁store": 4143, "▁price": 4144, "\").": 4145, "berg": 4146, "▁nov": 4147, "▁card": 4148, "ellow": 4149, "▁party": 4150, "▁Mor": 4151, "ael": 4152, "▁percent": 4153, "▁training": 4154, "▁ing": 4155, "imer": 4156, "▁Sam": 4157, "Default": 4158, "▁fuck": 4159, "▁complete": 4160, "uid": 4161, "▁details": 4162, "▁led": 4163, "Point": 4164, "▁Count": 4165, "▁regard": 4166, "zo": 4167, "▁Bro": 4168, "▁recogn": 4169, "▁Hol": 4170, "UM": 4171, "element": 4172, "Mode": 4173, "▁exam": 4174, "▁EX": 4175, "Image": 4176, "verse": 4177, "riter": 4178, "soft": 4179, "▁introdu": 4180, "▁surpr": 4181, "Buffer": 4182, "lector": 4183, "aren": 4184, "anged": 4185, "▁Pat": 4186, "▁Pal": 4187, "▁contr": 4188, "Handler": 4189, "▁features": 4190, "iple": 4191, "▁CON": 4192, "Fil": 4193, "▁Port": 4194, "▁thinking": 4195, "doc": 4196, "wer": 4197, "▁worked": 4198, "PC": 4199, "cm": 4200, "dat": 4201, "PRO": 4202, "▁Every": 4203, "▁era": 4204, "▁First": 4205, "gn": 4206, "▁immedi": 4207, "ovember": 4208, "apan": 4209, "▁extra": 4210, "▁section": 4211, "▁June": 4212, "▁via": 4213, "▁gone": 4214, "come": 4215, "▁stri": 4216, "^\\": 4217, "antly": 4218, "▁arch": 4219, "Source": 4220, "▁conv": 4221, "▁London": 4222, "Number": 4223, "▁questions": 4224, "andid": 4225, "▁played": 4226, "env": 4227, "▁School": 4228, "▁natural": 4229, "can": 4230, "▁news": 4231, "DR": 4232, "▁chall": 4233, "▁Soc": 4234, "▁э": 4235, "▁attempt": 4236, "*}": 4237, "Null": 4238, "rote": 4239, "▁bi": 4240, "▁written": 4241, "▁blood": 4242, "▁happened": 4243, "▁cause": 4244, "ashing": 4245, "▁William": 4246, "adem": 4247, "▁brought": 4248, "▁display": 4249, "ima": 4250, "▁finally": 4251, "tab": 4252, "▁returned": 4253, "ных": 4254, "nie": 4255, "▁q": 4256, "▁hers": 4257, "▁Pre": 4258, "▁dou": 4259, "buffer": 4260, "▁effort": 4261, "aine": 4262, "xy": 4263, "▁histor": 4264, "enu": 4265, "▁arriv": 4266, "▁Dem": 4267, "▁favor": 4268, "▁handle": 4269, "SET": 4270, "▁Public": 4271, "rupt": 4272, "▁ur": 4273, "▁force": 4274, "▁és": 4275, "ube": 4276, "Pre": 4277, "рі": 4278, "iny": 4279, "theta": 4280, "isf": 4281, "▁national": 4282, "Equal": 4283, "rench": 4284, "▁wife": 4285, "▁capt": 4286, "▁Inter": 4287, "tau": 4288, "▁sleep": 4289, "../../": 4290, "▁issue": 4291, "▁member": 4292, "▁await": 4293, "▁Dan": 4294, "zi": 4295, "inate": 4296, "▁sym": 4297, "chan": 4298, "▁Jack": 4299, "▁English": 4300, "▁sz": 4301, "ributes": 4302, "▁ign": 4303, "ál": 4304, "▁appear": 4305, "rad": 4306, "idge": 4307, "▁couple": 4308, "▁ship": 4309, "lig": 4310, "web": 4311, "▁usually": 4312, "▁ready": 4313, "▁vill": 4314, "▁Why": 4315, "ebru": 4316, "▁grad": 4317, "ords": 4318, "▁inf": 4319, "▁loss": 4320, "▁od": 4321, "▁Phil": 4322, "server": 4323, "▁Up": 4324, "▁buff": 4325, "▁filename": 4326, "ABLE": 4327, "iting": 4328, "efore": 4329, "()->": 4330, "▁conditions": 4331, "vm": 4332, "eld": 4333, "itz": 4334, "▁Trans": 4335, 
"▁weight": 4336, "▁higher": 4337, "▁rate": 4338, "▁accom": 4339, "vider": 4340, "OM": 4341, "▁ways": 4342, "coming": 4343, "▁lock": 4344, "▁etc": 4345, "▁avec": 4346, "▁takes": 4347, "▁Char": 4348, "▁November": 4349, "method": 4350, "▁Austral": 4351, "▁America": 4352, "long": 4353, "cember": 4354, "▁political": 4355, "flow": 4356, "▁maybe": 4357, "▁amb": 4358, "Layout": 4359, "iled": 4360, "omen": 4361, "ola": 4362, "icip": 4363, "partial": 4364, "True": 4365, "▁floor": 4366, "▁Def": 4367, "▁concern": 4368, "yr": 4369, "▁shows": 4370, "ih": 4371, "▁answer": 4372, "acc": 4373, "▁ball": 4374, "▁Rev": 4375, "▁sun": 4376, "▁quickly": 4377, "▁somet": 4378, "mente": 4379, "▁Mal": 4380, "undred": 4381, "▁issues": 4382, "ecause": 4383, "pes": 4384, "▁player": 4385, "▁parents": 4386, "▁popular": 4387, "▁mode": 4388, "▁mention": 4389, "NE": 4390, "Load": 4391, "▁regular": 4392, "aved": 4393, "?:": 4394, "year": 4395, "func": 4396, "▁performance": 4397, "▁July": 4398, "thern": 4399, "▁website": 4400, "ford": 4401, "PR": 4402, "ela": 4403, "level": 4404, "uit": 4405, "flags": 4406, "▁worth": 4407, "▁correspon": 4408, "▁British": 4409, "sim": 4410, "▁alone": 4411, "▁har": 4412, "▁ones": 4413, "obile": 4414, "▁dru": 4415, "chi": 4416, "▁David": 4417, "▁problems": 4418, "▁column": 4419, "();\r": 4420, "ZE": 4421, "▁relig": 4422, "ological": 4423, "▁region": 4424, "ady": 4425, "IO": 4426, "ander": 4427, "Net": 4428, "▁built": 4429, "▁install": 4430, "▁approach": 4431, "Cur": 4432, "▁fine": 4433, "▁talking": 4434, "▁changes": 4435, "Style": 4436, "▁Mart": 4437, "лю": 4438, "response": 4439, "teger": 4440, "{\r": 4441, "irit": 4442, "▁protected": 4443, "▁rele": 4444, "ership": 4445, "тель": 4446, "unsigned": 4447, "ialize": 4448, "▁https": 4449, "Tag": 4450, "▁$(": 4451, "more": 4452, "ypes": 4453, "▁stream": 4454, "etch": 4455, "▁engine": 4456, "KE": 4457, "cmd": 4458, "script": 4459, "ttp": 4460, "▁avoid": 4461, "▁terr": 4462, "▁rock": 4463, "▁ful": 4464, "Update": 4465, "▁environment": 4466, "▁prec": 4467, "▁са": 4468, "▁cases": 4469, "▁offset": 4470, "▁rais": 4471, "lib": 4472, "ées": 4473, "aa": 4474, "yt": 4475, "▁arr": 4476, "opyright": 4477, "first": 4478, "▁util": 4479, "▁feature": 4480, "posed": 4481, "ffect": 4482, "жа": 4483, "itude": 4484, "ements": 4485, "asc": 4486, "ador": 4487, "lections": 4488, "▁club": 4489, "]{": 4490, "▁*)": 4491, "ство": 4492, "▁imm": 4493, "▁former": 4494, "▁rights": 4495, "▁decided": 4496, "▁rev": 4497, "▁ment": 4498, "ani": 4499, "▁stru": 4500, "▁attention": 4501, "artment": 4502, "▁Ital": 4503, "alle": 4504, "▁bis": 4505, "gener": 4506, "▁integr": 4507, "ello": 4508, "rypt": 4509, "▁achie": 4510, "nes": 4511, "▁stra": 4512, "sb": 4513, "▁types": 4514, "▁RE": 4515, "Init": 4516, "▁comment": 4517, "▁addition": 4518, "▁ID": 4519, "ART": 4520, "FO": 4521, "щи": 4522, "Conne": 4523, "▁squ": 4524, "▁considered": 4525, "idad": 4526, "▁October": 4527, "cial": 4528, "▁Of": 4529, "▁travel": 4530, "▁boy": 4531, "').": 4532, "uy": 4533, "illa": 4534, "istry": 4535, "▁va": 4536, "▁Che": 4537, "ERT": 4538, "ende": 4539, "ungen": 4540, "aby": 4541, "▁Rober": 4542, "▁playing": 4543, "ils": 4544, "▁sam": 4545, "▁execut": 4546, "▁Us": 4547, "▁mut": 4548, "▁bal": 4549, "asse": 4550, "▁kids": 4551, "▁financ": 4552, "gor": 4553, "▁Sec": 4554, "bert": 4555, "▁High": 4556, "▁је": 4557, "▁kept": 4558, "button": 4559, "itory": 4560, "▁Rem": 4561, "▁DE": 4562, "▁reach": 4563, "▁bur": 4564, "Label": 4565, "át": 4566, "ago": 4567, "▁passed": 4568, "▁behav": 4569, "xFF": 4570, "▁Return": 4571, 
"STR": 4572, "▁Les": 4573, "▁ord": 4574, "ala": 4575, "inger": 4576, "▁Since": 4577, "▁experi": 4578, "▁shall": 4579, "▁star": 4580, "non": 4581, "▁gun": 4582, "▁Bel": 4583, "▁obj": 4584, "ares": 4585, "rs": 4586, "▁weeks": 4587, "nen": 4588, "▁Stre": 4589, "oring": 4590, "▁î": 4591, "▁serious": 4592, "times": 4593, "▁House": 4594, "▁roll": 4595, "▁register": 4596, "▁module": 4597, "▁applic": 4598, "IR": 4599, "▁cook": 4600, "aux": 4601, "▁save": 4602, "▁Cr": 4603, ",\r": 4604, "▁states": 4605, "▁empty": 4606, "▁autom": 4607, "figure": 4608, "iance": 4609, "▁happy": 4610, "▁fn": 4611, "▁jud": 4612, "▁hat": 4613, "ACK": 4614, "▁Fe": 4615, "$-": 4616, "ivil": 4617, "oted": 4618, "▁sizeof": 4619, "▁situation": 4620, "▁lives": 4621, "▁feeling": 4622, "▁risk": 4623, "▁January": 4624, "▁Object": 4625, "▁recomm": 4626, "▁вы": 4627, "▁potential": 4628, "eah": 4629, "▁complex": 4630, "printf": 4631, "istance": 4632, "irth": 4633, "lik": 4634, "aste": 4635, "▁whose": 4636, "Arg": 4637, "▁modern": 4638, "iones": 4639, "▁че": 4640, "▁sett": 4641, "▁Mag": 4642, "ae": 4643, "▁condition": 4644, "Length": 4645, "▁fit": 4646, "ounds": 4647, "▁changed": 4648, "▁guy": 4649, "filter": 4650, "atever": 4651, "éd": 4652, "remove": 4653, "▁hop": 4654, "▁Out": 4655, "▁Rich": 4656, "child": 4657, "▁included": 4658, "$\\": 4659, "▁Tom": 4660, "eline": 4661, "▁sometimes": 4662, "▁drink": 4663, "▁quant": 4664, "▁please": 4665, "▁Int": 4666, "rief": 4667, "▁exactly": 4668, "cing": 4669, "▁allowed": 4670, "build": 4671, "▁beautiful": 4672, "▁Well": 4673, "▁looks": 4674, "▁ü": 4675, "▁chance": 4676, "▁wrote": 4677, "▁nor": 4678, "▁failed": 4679, "Met": 4680, "▁prior": 4681, "▁hundred": 4682, "ской": 4683, "oria": 4684, "▁cy": 4685, "▁web": 4686, "▁mess": 4687, "leq": 4688, "dy": 4689, "tex": 4690, "▁anim": 4691, "atur": 4692, "▁structure": 4693, "option": 4694, "▁actual": 4695, "▁Franc": 4696, "enced": 4697, ".": 4884, "▁production": 4885, "iger": 4886, "▁ст": 4887, "show": 4888, "▁population": 4889, "▁park": 4890, "▁Ze": 4891, "▁necessary": 4892, "▁trust": 4893, "▁shown": 4894, "module": 4895, "GE": 4896, "▁lay": 4897, "▁announ": 4898, "▁className": 4899, "▁calcul": 4900, "Function": 4901, "▁Sal": 4902, "OK": 4903, "TP": 4904, "▁entry": 4905, "▁Stud": 4906, "▁items": 4907, "▁security": 4908, "Entry": 4909, "float": 4910, "ls": 4911, "ibly": 4912, "▁contribut": 4913, "▁Check": 4914, "MD": 4915, "▁improve": 4916, "Part": 4917, "▁systems": 4918, "Bl": 4919, "▁policy": 4920, "▁screen": 4921, "▁Any": 4922, "▁opened": 4923, "alloc": 4924, "▁December": 4925, "▁É": 4926, "▁email": 4927, "ader": 4928, "=>": 4929, "▁Hen": 4930, "▁info": 4931, "▁float": 4932, "▁switch": 4933, "ран": 4934, "urance": 4935, "▁assum": 4936, "ustr": 4937, "▁groups": 4938, "▁Read": 4939, "▁wat": 4940, "Sp": 4941, "вер": 4942, "RAN": 4943, "hib": 4944, "ALL": 4945, "▁hus": 4946, "Spec": 4947, "\"))": 4948, "▁French": 4949, "▁Class": 4950, "▁president": 4951, "▁definit": 4952, "▁Nor": 4953, "▁Thom": 4954, "aign": 4955, "Width": 4956, "Do": 4957, "▁{@": 4958, "agon": 4959, "▁Lu": 4960, "▁followed": 4961, "MM": 4962, "asons": 4963, "tmp": 4964, "▁throws": 4965, "ITY": 4966, "ном": 4967, "▁fair": 4968, "▁pen": 4969, "ég": 4970, "▁interface": 4971, "▁saf": 4972, "oon": 4973, "Back": 4974, "▁speed": 4975, "▁extends": 4976, "empty": 4977, "▁пере": 4978, "▁proper": 4979, "▁driv": 4980, "фи": 4981, "▁center": 4982, "header": 4983, "▁})": 4984, "wa": 4985, "▁middle": 4986, "▁choose": 4987, "▁Stad": 4988, "SO": 4989, "Factory": 4990, "Dev": 4991, "icles": 4992, 
"▁application": 4993, "▁models": 4994, "pite": 4995, "cap": 4996, "xi": 4997, "ospital": 4998, "▁dream": 4999, "END": 5000, "▁contract": 5001, "icrosoft": 5002, "▁thous": 5003, "izes": 5004, "▁да": 5005, "▁CO": 5006, "▁direction": 5007, "▁``": 5008, "▁drive": 5009, "Max": 5010, "cia": 5011, "▁continu": 5012, "▁Alex": 5013, "▁gold": 5014, "▁prep": 5015, "▁origin": 5016, "▁rap": 5017, "Op": 5018, "ously": 5019, "▁areas": 5020, "PORT": 5021, "она": 5022, "▁safe": 5023, "▁professional": 5024, "apache": 5025, "▁temper": 5026, "sz": 5027, "▁unit": 5028, "▁cop": 5029, "eqn": 5030, "Listener": 5031, "▁format": 5032, "select": 5033, "▁comfort": 5034, "▁meant": 5035, "iday": 5036, "eme": 5037, "▁active": 5038, "▁note": 5039, "▁Mil": 5040, "only": 5041, "▁<=": 5042, "▁neigh": 5043, "ao": 5044, "▁blue": 5045, "▁TV": 5046, "Child": 5047, "▁reached": 5048, "Address": 5049, "ств": 5050, "▁closed": 5051, "inder": 5052, "olo": 5053, "▁alt": 5054, "▁adm": 5055, "Format": 5056, "UI": 5057, "▁Ham": 5058, "▁frequ": 5059, "▁independ": 5060, "▁easily": 5061, "▁Land": 5062, "▁tor": 5063, "ography": 5064, "infty": 5065, "▁Work": 5066, "iven": 5067, "▁County": 5068, "▁src": 5069, "}$,": 5070, "parse": 5071, "CD": 5072, "▁Cour": 5073, "▁fol": 5074, "Entity": 5075, "pgf": 5076, "▁China": 5077, "▁Sub": 5078, "hood": 5079, "▁fields": 5080, "▁yes": 5081, "rend": 5082, "▁towards": 5083, "▁staff": 5084, "▁Air": 5085, "▁station": 5086, "atives": 5087, "▁impact": 5088, "вы": 5089, "▁directly": 5090, "issions": 5091, "iva": 5092, "|\\": 5093, "Ptr": 5094, "▁Sant": 5095, "Pol": 5096, "▁progress": 5097, "itar": 5098, "▁parts": 5099, "▁plant": 5100, "▁absolut": 5101, "▁guess": 5102, "eqref": 5103, "▁tim": 5104, "▁Lou": 5105, "▁cool": 5106, "alu": 5107, "▁mouth": 5108, "них": 5109, "▁height": 5110, "gest": 5111, "▁Post": 5112, "▁board": 5113, "▁tit": 5114, "▁hour": 5115, "▁server": 5116, "▁players": 5117, "rier": 5118, "Link": 5119, "▁President": 5120, "](": 5121, "▁construct": 5122, "handle": 5123, "}$.": 5124, "rying": 5125, "▁shop": 5126, "iana": 5127, "exp": 5128, "Helper": 5129, "Offset": 5130, "aches": 5131, "▁connection": 5132, "▁difference": 5133, "service": 5134, "▁gas": 5135, "▁priv": 5136, "▁univers": 5137, "▁wish": 5138, "Rem": 5139, "Url": 5140, "geb": 5141, "So": 5142, "ensions": 5143, "Module": 5144, "SIZE": 5145, "▁prem": 5146, "window": 5147, "▁dies": 5148, "del": 5149, "▁row": 5150, "▁average": 5151, "xim": 5152, "▁pu": 5153, "anç": 5154, "Det": 5155, "ker": 5156, "ya": 5157, "▁Det": 5158, "▁på": 5159, "▁named": 5160, "▁decision": 5161, "win": 5162, "▁George": 5163, "arily": 5164, "▁solution": 5165, "▁multiple": 5166, "ategy": 5167, "▁learning": 5168, "▁secret": 5169, "DO": 5170, "▁nice": 5171, "////////////////": 5172, "Su": 5173, "itation": 5174, "▁join": 5175, "▁elements": 5176, "▁emer": 5177, "tilde": 5178, "▁dep": 5179, "▁shot": 5180, "▁platform": 5181, "othing": 5182, "My": 5183, "edia": 5184, "oms": 5185, "aily": 5186, "([": 5187, "▁dress": 5188, "▁official": 5189, "estern": 5190, "▁discover": 5191, "▁mi": 5192, "ные": 5193, "CA": 5194, "oding": 5195, "▁Found": 5196, "▁affect": 5197, "Vis": 5198, "stract": 5199, "iced": 5200, "debug": 5201, "▁related": 5202, "▁spect": 5203, "ushed": 5204, "сько": 5205, "▁bank": 5206, "▁cele": 5207, "AND": 5208, "olf": 5209, "ем": 5210, "▁fill": 5211, "▁gives": 5212, "▁бу": 5213, "aron": 5214, "▁Jes": 5215, "REG": 5216, "▁sudd": 5217, "dated": 5218, "vi": 5219, "▁gi": 5220, "send": 5221, "cpp": 5222, "▁spent": 5223, "ande": 5224, "▁operation": 5225, "process": 5226, 
"▁inform": 5227, "▁Free": 5228, "yond": 5229, "▁perhaps": 5230, "▁surv": 5231, "▁Loc": 5232, "▁concl": 5233, "▁раз": 5234, "▁Over": 5235, "hol": 5236, "raz": 5237, "Write": 5238, "▁giving": 5239, "rd": 5240, "instance": 5241, "▁released": 5242, "▁Ro": 5243, "RA": 5244, "▁practice": 5245, "▁graph": 5246, "▁increase": 5247, "▁figure": 5248, "Filter": 5249, "HECK": 5250, "idx": 5251, "▁glass": 5252, "ski": 5253, "comes": 5254, "▁cat": 5255, "▁cold": 5256, "goto": 5257, "ufact": 5258, "▁Copyright": 5259, "}}\\": 5260, "▁streng": 5261, "▁dir": 5262, "token": 5263, "▁occur": 5264, "arlier": 5265, "▁measure": 5266, "▁sec": 5267, "▁más": 5268, "▁Net": 5269, "▁argument": 5270, "▁sou": 5271, "▁moving": 5272, "▁prefer": 5273, "mask": 5274, "<<": 5275, "▁breath": 5276, "▁physical": 5277, "▁positive": 5278, "▁sor": 5279, "▁depart": 5280, "▁remove": 5281, "▁kit": 5282, "▁meeting": 5283, "▁Data": 5284, "ograf": 5285, "actions": 5286, "▁parameters": 5287, "▁Att": 5288, "esch": 5289, "▁involved": 5290, "ät": 5291, "LL": 5292, "Bar": 5293, "▁си": 5294, "ech": 5295, "GET": 5296, "▁prevent": 5297, "▁beyond": 5298, "▁Other": 5299, "än": 5300, "byte": 5301, "▁sudden": 5302, "olve": 5303, "▁но": 5304, "LOG": 5305, "unit": 5306, "▁truth": 5307, "rat": 5308, "SD": 5309, "▁eat": 5310, "▁Mad": 5311, "▁provides": 5312, "▁session": 5313, "Dele": 5314, "▁convers": 5315, "center": 5316, "▁continued": 5317, "otion": 5318, "cache": 5319, "display": 5320, "▁protect": 5321, "ams": 5322, "▁pow": 5323, "CTION": 5324, "▁Mac": 5325, "mo": 5326, "ха": 5327, "▁distance": 5328, "▁Time": 5329, "gi": 5330, "▁sequ": 5331, "Target": 5332, "сле": 5333, "Server": 5334, "▁wide": 5335, "close": 5336, "▁cru": 5337, "Ext": 5338, "▁select": 5339, "▁pattern": 5340, "\"));": 5341, "Provider": 5342, "URL": 5343, "▁green": 5344, "▁waiting": 5345, "proto": 5346, "▁immediately": 5347, "common": 5348, "azione": 5349, "river": 5350, "▁sen": 5351, "▁!==": 5352, "▁February": 5353, "urb": 5354, "▁Sen": 5355, "dest": 5356, ">": 6122, "command": 6123, "atz": 6124, "▁mal": 6125, "став": 6126, "▁Press": 6127, "▁characters": 6128, "▁zero": 6129, "AGE": 6130, "rapper": 6131, "▁kitchen": 6132, "aming": 6133, "▁restr": 6134, "XX": 6135, "▁College": 6136, "▁Array": 6137, "▁fresh": 6138, "▁shift": 6139, "▁specified": 6140, "plete": 6141, "ITE": 6142, "▁Camp": 6143, "rial": 6144, "cb": 6145, "▁TH": 6146, "IB": 6147, "osen": 6148, "▁ú": 6149, "▁params": 6150, "ignment": 6151, "adding": 6152, "▁degree": 6153, "Local": 6154, "Oh": 6155, "▁zur": 6156, "▁levels": 6157, "CS": 6158, "finished": 6159, "Case": 6160, "riage": 6161, "Vector": 6162, "▁sea": 6163, "antic": 6164, "▁League": 6165, "▁therefore": 6166, "One": 6167, "Return": 6168, "Access": 6169, "vas": 6170, "▁ос": 6171, "▁rat": 6172, "Big": 6173, "▁behavior": 6174, "kr": 6175, "▁undefined": 6176, "▁Es": 6177, "▁appeared": 6178, "eles": 6179, "▁WAR": 6180, "Stat": 6181, "▁Google": 6182, "▁credit": 6183, "▁File": 6184, "anging": 6185, "house": 6186, "romise": 6187, "gent": 6188, "▁habit": 6189, "▁society": 6190, "▁encour": 6191, "▁paint": 6192, "pet": 6193, "▁UK": 6194, "aws": 6195, "onom": 6196, "Gl": 6197, "}_{\\": 6198, "eless": 6199, "emy": 6200, "▁Cong": 6201, "▁developed": 6202, "▁images": 6203, "▁ö": 6204, "▁font": 6205, "clear": 6206, "gin": 6207, "▁Lord": 6208, "▁transport": 6209, "▁::": 6210, "▁cup": 6211, "ulate": 6212, "▁During": 6213, "priv": 6214, "▁extrem": 6215, "▁Di": 6216, "▁doubt": 6217, "Py": 6218, "ifying": 6219, "split": 6220, "ego": 6221, "github": 6222, "▁),": 6223, "ROM": 6224, "▁chair": 
6225, "▁trade": 6226, "▁nicht": 6227, "Top": 6228, "Store": 6229, "▁parte": 6230, "project": 6231, "nia": 6232, "▁від": 6233, "war": 6234, "▁Prof": 6235, "▁caught": 6236, "Thread": 6237, "ства": 6238, "author": 6239, "▁doll": 6240, "▁harm": 6241, "▁Gen": 6242, "tree": 6243, "etime": 6244, "cfg": 6245, "▁guys": 6246, "▁California": 6247, "▁Green": 6248, "▁movement": 6249, "iej": 6250, "▁statement": 6251, "▁seeing": 6252, "▁haven": 6253, "vention": 6254, "SL": 6255, "chedul": 6256, "iert": 6257, "▁primary": 6258, "▁civil": 6259, "rian": 6260, "▁button": 6261, "▁lived": 6262, "Pass": 6263, "sor": 6264, "▁watching": 6265, "▁skills": 6266, "tee": 6267, "Level": 6268, "▁scient": 6269, "hs": 6270, "▁agre": 6271, "cat": 6272, "▁tend": 6273, "▁Mill": 6274, "▁Cap": 6275, "ORD": 6276, "gle": 6277, "▁сво": 6278, "»,": 6279, "▁ahead": 6280, "vest": 6281, "▁Jose": 6282, "ischer": 6283, "și": 6284, "▁leaving": 6285, "▁для": 6286, "▁south": 6287, "▁consum": 6288, "Range": 6289, "▁activities": 6290, "Sec": 6291, "▁sales": 6292, "▁fix": 6293, "▁jed": 6294, "rum": 6295, "vector": 6296, "▁spot": 6297, "▁manufact": 6298, "кт": 6299, "orrow": 6300, "sign": 6301, "▁college": 6302, "▁driver": 6303, "▁definitely": 6304, "▁spend": 6305, "mission": 6306, "зу": 6307, "atively": 6308, "bi": 6309, "Callback": 6310, "▁particularly": 6311, "▁hell": 6312, "▁pool": 6313, "PRE": 6314, "▁clearly": 6315, "PT": 6316, "othes": 6317, "▁Id": 6318, "Location": 6319, "▁Run": 6320, "▁fixed": 6321, "▁Hand": 6322, "bal": 6323, "double": 6324, "Can": 6325, "Omega": 6326, "▁challeng": 6327, "▁standing": 6328, "iten": 6329, "▁mechan": 6330, "▁durch": 6331, "▁dell": 6332, "▁raised": 6333, "▁weak": 6334, "▁Du": 6335, "grad": 6336, "▁scene": 6337, "poss": 6338, "▁ton": 6339, "▁earth": 6340, "ulations": 6341, "▁strength": 6342, "aked": 6343, "▁remain": 6344, "▁Bi": 6345, "▁customer": 6346, "range": 6347, "▁interested": 6348, "ONE": 6349, "▁coff": 6350, "require": 6351, "▁Only": 6352, "▁Web": 6353, "▁farm": 6354, "▁activity": 6355, "▁rout": 6356, "bling": 6357, "SY": 6358, "▁Richard": 6359, "▁Ref": 6360, "▁кон": 6361, "▁jun": 6362, "born": 6363, "ijn": 6364, "Configuration": 6365, "uman": 6366, "EE": 6367, "▁married": 6368, "▁За": 6369, "▁fat": 6370, "▁kid": 6371, "▁Tur": 6372, "▁offered": 6373, "nic": 6374, "▁Big": 6375, "Gamma": 6376, "▁Health": 6377, "▁TR": 6378, "▁się": 6379, "▁construction": 6380, "▁Church": 6381, "▁Bet": 6382, "bus": 6383, "▁earn": 6384, "rict": 6385, "▁пра": 6386, "▁brain": 6387, "▁fra": 6388, "▁Op": 6389, "FIG": 6390, "ema": 6391, "▁European": 6392, "▁Saint": 6393, "ARE": 6394, "uri": 6395, "▁River": 6396, "{}": 6397, "▁sitting": 6398, "▁understanding": 6399, "▁plans": 6400, "ropri": 6401, "▁older": 6402, "▁pressure": 6403, "Impl": 6404, "▁peace": 6405, "Connection": 6406, "▁fi": 6407, "rich": 6408, "▁shut": 6409, "apers": 6410, "Port": 6411, "▁Look": 6412, "rim": 6413, "auth": 6414, "auto": 6415, "▁highly": 6416, "▁unless": 6417, "▁Wal": 6418, "▁ren": 6419, "ws": 6420, "▁core": 6421, "(-": 6422, "▁clim": 6423, "ruit": 6424, "▁callback": 6425, "hest": 6426, "▁Charles": 6427, "▁Long": 6428, "}=": 6429, "ър": 6430, "▁shared": 6431, "ulated": 6432, "gorithm": 6433, "▁Home": 6434, "▁village": 6435, "ees": 6436, "sv": 6437, "▁restaur": 6438, "rey": 6439, "▁Cast": 6440, "▁Person": 6441, "кий": 6442, "▁organiz": 6443, "▁Rad": 6444, "ponents": 6445, "▁werden": 6446, "▁bow": 6447, "sen": 6448, "ami": 6449, "Interface": 6450, "▁basis": 6451, "▁Company": 6452, "ernel": 6453, "itu": 6454, "Hash": 6455, "▁aan": 6456, "▁х": 6457, 
"▁smile": 6458, "xml": 6459, "▁scen": 6460, "amm": 6461, "tool": 6462, "aria": 6463, "▁accur": 6464, "settings": 6465, "▁Jesus": 6466, "acement": 6467, "power": 6468, "(!": 6469, "▁calls": 6470, "▁basic": 6471, "▁settings": 6472, "ript": 6473, "pool": 6474, "ctors": 6475, "▁Foundation": 6476, "▁weap": 6477, "KEY": 6478, "foot": 6479, "▁radio": 6480, "▁helped": 6481, "mann": 6482, "▁jump": 6483, "▁tick": 6484, "▁growing": 6485, "aten": 6486, "real": 6487, "▁increasing": 6488, "Device": 6489, "varepsilon": 6490, "▁sets": 6491, "▁advant": 6492, "Open": 6493, "▁reasons": 6494, "▁supposed": 6495, "oes": 6496, "ede": 6497, "teen": 6498, "ifdef": 6499, "▁delete": 6500, "▁&=": 6501, "▁Bill": 6502, "▁aim": 6503, "▁Ok": 6504, "▁Av": 6505, "reci": 6506, "acks": 6507, "iste": 6508, "Properties": 6509, "▁tmp": 6510, "▁dei": 6511, "PER": 6512, "DC": 6513, "sta": 6514, "нии": 6515, "▁limited": 6516, "▁greater": 6517, "description": 6518, "ori": 6519, "aints": 6520, "▁hy": 6521, "▁Mel": 6522, "▁CH": 6523, "cons": 6524, "▁surround": 6525, "▁Who": 6526, "arc": 6527, "▁telev": 6528, "itution": 6529, "▁equal": 6530, "кі": 6531, "▁Israel": 6532, "äh": 6533, "▁Caption": 6534, "▁exerc": 6535, "empor": 6536, "▁++": 6537, "▁lib": 6538, "make": 6539, "▁MA": 6540, "copy": 6541, "friend": 6542, "▁кото": 6543, "▁damage": 6544, "▁\\,": 6545, "oded": 6546, "▁none": 6547, "▁evalu": 6548, "ston": 6549, ">,": 6550, "FOR": 6551, "▁norm": 6552, "appe": 6553, "Session": 6554, "▁adult": 6555, "▁hospital": 6556, "▁recommend": 6557, "property": 6558, "stein": 6559, "final": 6560, "▁nu": 6561, "second": 6562, "▁aspect": 6563, "\")]": 6564, "жен": 6565, "amento": 6566, "▁rac": 6567, "save": 6568, "▁football": 6569, "Ab": 6570, "ungs": 6571, "abil": 6572, "▁Arch": 6573, "system": 6574, "hist": 6575, "▁luck": 6576, "render": 6577, "▁sein": 6578, "ioni": 6579, "▁rot": 6580, "▁corner": 6581, "▁appropri": 6582, "▁Software": 6583, "▁tele": 6584, "Delete": 6585, "▁According": 6586, "▁prison": 6587, "▁lic": 6588, "▁ми": 6589, "term": 6590, "sets": 6591, "▁vel": 6592, "▁rank": 6593, "▁existing": 6594, "▁Vir": 6595, "▁trip": 6596, "▁му": 6597, "avax": 6598, "▁ris": 6599, "▁define": 6600, "▁heat": 6601, "car": 6602, "▁convert": 6603, "email": 6604, "▁Under": 6605, "▁Ш": 6606, "▁Grand": 6607, "▁exists": 6608, "sys": 6609, "eff": 6610, "▁Top": 6611, "▁č": 6612, "▁tempor": 6613, "▁arguments": 6614, "▁supported": 6615, "ensed": 6616, "▁Francis": 6617, "▁coord": 6618, "▁achieve": 6619, "▁Name": 6620, "▁Jahr": 6621, "▁Gi": 6622, "she": 6623, "▁Dev": 6624, "▁alla": 6625, "▁WIT": 6626, "agment": 6627, "custom": 6628, "alls": 6629, "&&": 6630, "WE": 6631, "▁holding": 6632, "prototype": 6633, "▁fing": 6634, "▁bag": 6635, "▁Party": 6636, "stack": 6637, "▁economic": 6638, "▁Gal": 6639, "idents": 6640, "▁Jun": 6641, "▁showed": 6642, "osh": 6643, "▁Bay": 6644, "mail": 6645, "▁SO": 6646, "▁\"<": 6647, "graphics": 6648, "▁fu": 6649, "click": 6650, "▁battle": 6651, "{{": 6652, "▁Event": 6653, "rior": 6654, "chaft": 6655, "▁favorite": 6656, "usive": 6657, "support": 6658, "bm": 6659, "Kind": 6660, "▁safety": 6661, "▁Ent": 6662, "cup": 6663, "▁Australia": 6664, "▁destroy": 6665, "▁organization": 6666, "iden": 6667, "################": 6668, "dec": 6669, "▁za": 6670, "▁seven": 6671, "arely": 6672, "▁flag": 6673, "Dir": 6674, "▁Carl": 6675, "▁doctor": 6676, "▁variety": 6677, "▁Lin": 6678, "▁tom": 6679, "^{(": 6680, "Bo": 6681, "antes": 6682, "▁mine": 6683, "▁Mit": 6684, "▁describe": 6685, "Args": 6686, "LS": 6687, "API": 6688, "▁Luc": 6689, "phone": 6690, 
"▁science": 6691, "▁Oper": 6692, "Next": 6693, "▁investig": 6694, "▁demonstr": 6695, "▁Govern": 6696, "▁objects": 6697, "▁Louis": 6698, "▁Returns": 6699, "▁han": 6700, "nam": 6701, "▁comme": 6702, "▁presence": 6703, "▁pel": 6704, "▁detect": 6705, ")=": 6706, "▁Chinese": 6707, "▁rich": 6708, "▁classes": 6709, "▁expand": 6710, "▁Dom": 6711, "▁Dec": 6712, "sn": 6713, "peed": 6714, "▁Jim": 6715, "should": 6716, "▁Smith": 6717, "▁pages": 6718, "▁Jean": 6719, "rics": 6720, "▁Sund": 6721, "ads": 6722, "▁Their": 6723, "unicip": 6724, "ву": 6725, "▁download": 6726, "▁stress": 6727, "▁Pet": 6728, "menu": 6729, "reme": 6730, "▁compared": 6731, "Ste": 6732, "IND": 6733, "container": 6734, "▁Indian": 6735, "oren": 6736, "▁ses": 6737, "▁Whe": 6738, "▁roku": 6739, "▁established": 6740, "▁generally": 6741, "▁fle": 6742, "__(": 6743, "=\"+": 6744, "Var": 6745, "▁Make": 6746, "▁removed": 6747, "zz": 6748, "ün": 6749, "▁mix": 6750, "erk": 6751, "iation": 6752, "outer": 6753, "SK": 6754, "▁becomes": 6755, "▁Hall": 6756, "scious": 6757, "▁watched": 6758, "▁gather": 6759, "▁Result": 6760, "proof": 6761, "pay": 6762, "▁produced": 6763, "▁|=": 6764, "▁border": 6765, "▁din": 6766, "▁script": 6767, "▁actions": 6768, "▁mas": 6769, "ща": 6770, "ooth": 6771, "▁Techn": 6772, "Json": 6773, "▁filled": 6774, "ден": 6775, "undle": 6776, "сту": 6777, "Tool": 6778, "▁king": 6779, "▁ven": 6780, "stra": 6781, "▁predict": 6782, "▁lui": 6783, "▁WARRAN": 6784, "▁Fun": 6785, "Script": 6786, "▁powerful": 6787, "▁lose": 6788, "atically": 6789, "▁daily": 6790, "▁ring": 6791, "▁arrived": 6792, "Stack": 6793, "scope": 6794, "▁Back": 6795, "elij": 6796, "▁ze": 6797, "keys": 6798, "{\"": 6799, "VID": 6800, "▁license": 6801, "what": 6802, "▁proced": 6803, "rant": 6804, "estival": 6805, "agram": 6806, "▁LO": 6807, "▁Henry": 6808, "▁flags": 6809, "Down": 6810, "scription": 6811, "▁families": 6812, "isse": 6813, "bour": 6814, "▁Bur": 6815, "—\"": 6816, "▁brief": 6817, "▁creating": 6818, "▁clients": 6819, "rangle": 6820, "▁amazing": 6821, "▁sind": 6822, "▁covered": 6823, "Well": 6824, "сте": 6825, "тор": 6826, "▁Bas": 6827, "total": 6828, "▁Init": 6829, "▁sand": 6830, "Unit": 6831, "▁murder": 6832, "▁bright": 6833, "▁trav": 6834, "icans": 6835, "▁attribute": 6836, "fc": 6837, "▁placed": 6838, "EST": 6839, "Vari": 6840, "▁cos": 6841, "▁attract": 6842, "anel": 6843, "}).": 6844, "bytes": 6845, "▁parse": 6846, "▁belong": 6847, "BN": 6848, "▁Sol": 6849, "Po": 6850, "`,": 6851, "▁calling": 6852, "▁?>": 6853, "▁iter": 6854, "▁url": 6855, "▁evening": 6856, "reek": 6857, "▁honest": 6858, "▁director": 6859, "RC": 6860, "▁solid": 6861, "▁phil": 6862, "iene": 6863, "FAULT": 6864, "cope": 6865, "▁History": 6866, "▁Team": 6867, "reedom": 6868, "▁ru": 6869, "UB": 6870, "▁worse": 6871, "imo": 6872, "Mat": 6873, "▁Mex": 6874, "actor": 6875, "▁vor": 6876, "ться": 6877, "▁experiment": 6878, "▁Play": 6879, "▁Another": 6880, "▁happens": 6881, "uan": 6882, "▁patients": 6883, "▁rend": 6884, "▁Mo": 6885, "▁Tex": 6886, "▁wed": 6887, "tn": 6888, "insert": 6889, "▁па": 6890, "▁anti": 6891, "Match": 6892, "ampionship": 6893, "▁forces": 6894, "▁Hot": 6895, "▁phase": 6896, "▁template": 6897, "stop": 6898, "icated": 6899, "▁managed": 6900, "wait": 6901, "▁*(": 6902, "GB": 6903, "▁appoint": 6904, "ła": 6905, "▁stick": 6906, "▁FOR": 6907, "▁Vis": 6908, "tor": 6909, "▁př": 6910, "quest": 6911, "uses": 6912, "\");\r": 6913, "▁suddenly": 6914, "éc": 6915, "ND": 6916, "urop": 6917, "ред": 6918, "▁insurance": 6919, "access": 6920, "unfinished": 6921, "▁tamb": 6922, "▁sac": 6923, 
"▁Court": 6924, "▁missing": 6925, "▁Where": 6926, "▁Sum": 6927, "}^{\\": 6928, "▁sua": 6929, "_,": 6930, "▁thick": 6931, "▁Trump": 6932, "▁operations": 6933, "FS": 6934, "▁deux": 6935, "dz": 6936, "Template": 6937, "▁\"/": 6938, "▁odd": 6939, "▁reality": 6940, "▁teams": 6941, "▁cer": 6942, "oma": 6943, "▁și": 6944, "▁cloud": 6945, "▁Department": 6946, "Ne": 6947, "▁requires": 6948, "items": 6949, "▁III": 6950, "rightarrow": 6951, ")->": 6952, "▁writer": 6953, "replace": 6954, "▁thr": 6955, "jen": 6956, "▁ot": 6957, "▁occup": 6958, "▁eventually": 6959, "▁Math": 6960, "▁conserv": 6961, "amer": 6962, "▁Fort": 6963, "▁dry": 6964, "▁sexual": 6965, "▁costs": 6966, "▁forms": 6967, "▁Vict": 6968, "PAR": 6969, "framework": 6970, "▁ди": 6971, "Operation": 6972, "зна": 6973, "which": 6974, "▁tight": 6975, "Invalid": 6976, "▁partner": 6977, "▁пред": 6978, "▁thank": 6979, "▁guard": 6980, "hem": 6981, "Body": 6982, "▁emot": 6983, "IX": 6984, "fast": 6985, "що": 6986, "ño": 6987, "night": 6988, "▁Sci": 6989, "ника": 6990, "▁TO": 6991, "▁individuals": 6992, "сси": 6993, "}),": 6994, "False": 6995, "(\"%": 6996, "▁optim": 6997, "▁-->": 6998, "▁factor": 6999, "▁smaller": 7000, "▁contain": 7001, "spect": 7002, "Engine": 7003, "▁announced": 7004, "▁Democr": 7005, "▁rob": 7006, "▁flat": 7007, "osoph": 7008, "Search": 7009, "ahl": 7010, "▁Exception": 7011, "▁Ol": 7012, "equals": 7013, "▁unter": 7014, "shape": 7015, "NS": 7016, "Obj": 7017, "▁species": 7018, "weight": 7019, "you": 7020, "▁este": 7021, "▁View": 7022, "▁mission": 7023, "▁journal": 7024, "Values": 7025, "▁einem": 7026, "ismo": 7027, "▁projects": 7028, "▁Das": 7029, "rible": 7030, "▁serve": 7031, "▁opening": 7032, "▁hur": 7033, "▁programs": 7034, "▁USA": 7035, "iliar": 7036, "idos": 7037, "Br": 7038, "estamp": 7039, "▁tools": 7040, "anner": 7041, "RT": 7042, "▁Start": 7043, "▁bath": 7044, "▁coffee": 7045, "orter": 7046, "internal": 7047, "files": 7048, "INVAL": 7049, "ako": 7050, "dt": 7051, "▁Second": 7052, "▁alloc": 7053, "▁ended": 7054, "acional": 7055, "▁manager": 7056, "▁Sun": 7057, "agg": 7058, "▁leader": 7059, "olved": 7060, "▁что": 7061, "▁traditional": 7062, "shot": 7063, "rup": 7064, "CF": 7065, "▁Each": 7066, "wr": 7067, "▁Som": 7068, "▁materials": 7069, "▁msg": 7070, "▁syn": 7071, "▁produce": 7072, "▁storage": 7073, "subsection": 7074, "▁Sie": 7075, "▁IP": 7076, "CESS": 7077, "▁wa": 7078, "Record": 7079, "▁marketing": 7080, "plet": 7081, "Dialog": 7082, "▁mentioned": 7083, "▁Na": 7084, "▁Union": 7085, "▁API": 7086, "▁negative": 7087, "txt": 7088, "▁easier": 7089, "legal": 7090, "Dep": 7091, "▁novel": 7092, "eur": 7093, "ació": 7094, "▁Bud": 7095, "▁carry": 7096, "schaft": 7097, "▁broken": 7098, "▁trees": 7099, ">();": 7100, "▁emb": 7101, "ieder": 7102, "▁route": 7103, "ikel": 7104, "▁listen": 7105, "ashion": 7106, "▁Mrs": 7107, "▁equipment": 7108, "agger": 7109, "▁Thus": 7110, "▁matrix": 7111, "alla": 7112, "▁Tour": 7113, "▁conversation": 7114, "Mon": 7115, "ournal": 7116, "▁minute": 7117, "Am": 7118, "Api": 7119, "▁forget": 7120, "Me": 7121, "levant": 7122, "temp": 7123, "▁telling": 7124, "move": 7125, "▁independent": 7126, "toString": 7127, "edit": 7128, "▁Jac": 7129, "azz": 7130, "react": 7131, "▁cin": 7132, "▁Prov": 7133, "isted": 7134, "▁hash": 7135, "onna": 7136, "iki": 7137, "▁generated": 7138, "Render": 7139, "▁psych": 7140, "nav": 7141, "▁entr": 7142, "пра": 7143, "rx": 7144, "ATH": 7145, "▁assume": 7146, "Tree": 7147, "sembly": 7148, "▁Matt": 7149, "caption": 7150, "▁solutions": 7151, "▁faith": 7152, "▁digital": 7153, 
"▁excell": 7154, "▁Version": 7155, "Debug": 7156, "▁жи": 7157, "▁carried": 7158, "reset": 7159, "▁slowly": 7160, "ancing": 7161, "▁owner": 7162, "▁Ter": 7163, "▁Did": 7164, "▁gest": 7165, "▁été": 7166, "▁proof": 7167, "Font": 7168, "▁nob": 7169, "Co": 7170, "▁GNU": 7171, "▁liber": 7172, "itness": 7173, "▁hij": 7174, "▁vert": 7175, "ша": 7176, "FLAG": 7177, "MENT": 7178, "▁Son": 7179, "Mult": 7180, "▁district": 7181, "connect": 7182, "jection": 7183, "lymp": 7184, "▁realized": 7185, "mos": 7186, "ye": 7187, "▁render": 7188, "rio": 7189, "▁interpret": 7190, "▁slightly": 7191, "fix": 7192, "▁studies": 7193, "▁rid": 7194, "atre": 7195, "▁benefits": 7196, "▁Face": 7197, "ivery": 7198, "рия": 7199, "document": 7200, "▁asking": 7201, "Last": 7202, "arante": 7203, "▁Martin": 7204, "▁Ell": 7205, "▁vector": 7206, "▁forced": 7207, "оло": 7208, "PH": 7209, "WR": 7210, "▁Kl": 7211, "▁sky": 7212, "▁strategy": 7213, "ocked": 7214, "▁neck": 7215, "ści": 7216, "OUT": 7217, ")),": 7218, "Custom": 7219, "▁wie": 7220, "▁sweet": 7221, "▁temp": 7222, "▁foreign": 7223, "▁hall": 7224, "astr": 7225, "Ass": 7226, "MODE": 7227, "▁maximum": 7228, "annels": 7229, "▁tip": 7230, "▁seconds": 7231, "▁stack": 7232, "iga": 7233, "▁raise": 7234, "enable": 7235, "oir": 7236, "▁soul": 7237, "Ke": 7238, ")$.": 7239, "▁Tim": 7240, "ALSE": 7241, "iser": 7242, "contin": 7243, "bel": 7244, "▁mad": 7245, "lichen": 7246, "abe": 7247, "safe": 7248, "▁concent": 7249, "bound": 7250, "▁Requ": 7251, "switch": 7252, "▁stone": 7253, "▁transl": 7254, "▁vac": 7255, "andon": 7256, "▁Fore": 7257, "▁sounds": 7258, "▁Pop": 7259, "▁HT": 7260, "lia": 7261, "enter": 7262, "▁helps": 7263, "edy": 7264, "ствен": 7265, "anted": 7266, "▁Its": 7267, "▁Step": 7268, "Icon": 7269, "▁EXPECT": 7270, "ialized": 7271, "Post": 7272, "aze": 7273, "▁Carol": 7274, "▁req": 7275, "▁critical": 7276, "DS": 7277, "▁seat": 7278, "aped": 7279, "▁upper": 7280, "▁Sy": 7281, "▁explain": 7282, "▁'./": 7283, "utils": 7284, "possible": 7285, "▁dont": 7286, "Host": 7287, "▁approxim": 7288, "Async": 7289, "▁grab": 7290, "▁sources": 7291, "▁Mos": 7292, "▁Germany": 7293, "▁rub": 7294, "CHAN": 7295, "▁rain": 7296, "▁truly": 7297, "▁joined": 7298, "▁": 8276, "agnost": 8277, "▁proposed": 8278, "▁Game": 8279, "▁efforts": 8280, "вя": 8281, "tc": 8282, "ск": 8283, "▁intent": 8284, "▁Bre": 8285, "isc": 8286, "▁protest": 8287, "▁holds": 8288, "ometry": 8289, "▁Have": 8290, "▁detail": 8291, "▁WITHOUT": 8292, "yer": 8293, "▁Kon": 8294, "▁noticed": 8295, "▁requirements": 8296, "DEBUG": 8297, "kins": 8298, "▁Span": 8299, "▁cars": 8300, "meta": 8301, "▁kil": 8302, "▁Bron": 8303, "▁experienced": 8304, "▁remind": 8305, "ourse": 8306, "▁Western": 8307, "tered": 8308, "▁devices": 8309, "▁pictures": 8310, "▁tut": 8311, "\"`": 8312, "▁impossible": 8313, "▁rail": 8314, "▁feels": 8315, "icas": 8316, "illing": 8317, "▁accident": 8318, "▁'@": 8319, "________": 8320, "▁notes": 8321, "oman": 8322, "Parser": 8323, "▁discovered": 8324, "▁Roman": 8325, "▁budget": 8326, "▁guide": 8327, "king": 8328, "▁incred": 8329, "olar": 8330, "enden": 8331, "Desc": 8332, "▁wave": 8333, "бли": 8334, "igt": 8335, "▁restrict": 8336, "▁Ret": 8337, "▁mac": 8338, "ур": 8339, "BS": 8340, "ís": 8341, "▁generation": 8342, "dem": 8343, "alo": 8344, "бра": 8345, "▁ordered": 8346, "drop": 8347, "▁pp": 8348, "▁Review": 8349, "▁literally": 8350, "▁Sir": 8351, "▁Yeah": 8352, "▁density": 8353, "riz": 8354, "inde": 8355, "▁gain": 8356, "▁panel": 8357, "jet": 8358, "▁Times": 8359, "▁nella": 8360, "▁previously": 8361, "points": 8362, "Send": 
8363, "▁Brown": 8364, "each": 8365, "▁trigger": 8366, "ometimes": 8367, "icos": 8368, "GR": 8369, "Panel": 8370, "ogen": 8371, "▁cm": 8372, "ructions": 8373, "▁kiss": 8374, "▁solo": 8375, "▁famous": 8376, "ran": 8377, "про": 8378, "▁thro": 8379, "Graph": 8380, "imit": 8381, "▁Value": 8382, "▁starts": 8383, "ipeline": 8384, "hd": 8385, "TC": 8386, "▁discussion": 8387, "▁truck": 8388, "aka": 8389, "Only": 8390, "▁Equ": 8391, "▁kö": 8392, "▁Bes": 8393, "▁critic": 8394, "▁propos": 8395, "▁batt": 8396, "▁Section": 8397, "Show": 8398, "gp": 8399, "STATE": 8400, "POST": 8401, "▁Nord": 8402, "▁innov": 8403, "▁crim": 8404, "axis": 8405, "▁Turn": 8406, "conn": 8407, "Runtime": 8408, "▁remaining": 8409, "oston": 8410, "▁Э": 8411, "▁windows": 8412, "▁Royal": 8413, "▁vide": 8414, "PP": 8415, "chron": 8416, "▁san": 8417, "▁rise": 8418, "▁delle": 8419, "▁Dur": 8420, "▁rapid": 8421, "cert": 8422, "LA": 8423, "edge": 8424, "▁\\]": 8425, "▁entered": 8426, "▁laws": 8427, "▁photo": 8428, "▁applications": 8429, "▁Berlin": 8430, "▁arrest": 8431, "▁federal": 8432, "▁Russia": 8433, "▁usual": 8434, "▁raw": 8435, "▁più": 8436, "être": 8437, "JSON": 8438, "SION": 8439, "xture": 8440, "istent": 8441, "▁Power": 8442, "Bit": 8443, "▁capacity": 8444, "▁cards": 8445, "UID": 8446, "iments": 8447, "▁dar": 8448, "▁Chicago": 8449, "▁comfortable": 8450, "tip": 8451, "bas": 8452, "▁mu": 8453, "▁enemy": 8454, "yan": 8455, "▁фи": 8456, "▁updated": 8457, "ango": 8458, "Ev": 8459, "Effect": 8460, "osing": 8461, "rence": 8462, "▁Congress": 8463, "▁defe": 8464, "▁ip": 8465, "▁tout": 8466, "▁freedom": 8467, "▁ao": 8468, "▁Therefore": 8469, "Edit": 8470, "▁Virgin": 8471, "REE": 8472, "argo": 8473, "▁Dam": 8474, "▁traffic": 8475, "ños": 8476, "▁alle": 8477, "▁depth": 8478, "Now": 8479, "▁sides": 8480, "▁годи": 8481, "Descriptor": 8482, "▁artikel": 8483, "▁narrow": 8484, "___": 8485, "kw": 8486, "uto": 8487, "▁Facebook": 8488, "tegr": 8489, "boolean": 8490, "nik": 8491, "bd": 8492, "Track": 8493, "▁gran": 8494, "reshold": 8495, "вет": 8496, "wrap": 8497, "▁noise": 8498, "igu": 8499, "▁Bon": 8500, "▁wy": 8501, "linux": 8502, "cks": 8503, "▁fans": 8504, "▁mach": 8505, "▁prices": 8506, "év": 8507, "outs": 8508, "standing": 8509, "▁categ": 8510, ";\\": 8511, "▁decre": 8512, "▁Saturday": 8513, "▁menu": 8514, "▁Nov": 8515, "▁Yet": 8516, "▁так": 8517, "liche": 8518, "▁Academ": 8519, "▁communication": 8520, "using": 8521, "▁Society": 8522, "▁nuc": 8523, "pective": 8524, "orial": 8525, "▁afraid": 8526, "▁animal": 8527, "▁turning": 8528, "dst": 8529, "mathfrak": 8530, "lers": 8531, "▁lots": 8532, "▁á": 8533, "▁Tra": 8534, "np": 8535, "▁rose": 8536, "▁GL": 8537, "▁helping": 8538, "▁winter": 8539, "▁ком": 8540, "Mock": 8541, "▁investment": 8542, "Use": 8543, "▁Canad": 8544, "нд": 8545, "Copy": 8546, "▁fly": 8547, "SER": 8548, "▁Far": 8549, "▁Ros": 8550, "amil": 8551, "▁fighting": 8552, "▁religious": 8553, "super": 8554, "screen": 8555, "▁furn": 8556, "▁surprised": 8557, "▁replied": 8558, "Activity": 8559, "▁Down": 8560, "▁insert": 8561, "▁Olymp": 8562, "▁pointed": 8563, "▁Card": 8564, "driver": 8565, "▁Da": 8566, "!--": 8567, "roud": 8568, "undo": 8569, "▁messages": 8570, "▁Point": 8571, "VM": 8572, "▁plane": 8573, "xc": 8574, "▁television": 8575, "ён": 8576, "▁thousands": 8577, "▁cris": 8578, "▁delay": 8579, "▁Next": 8580, "▁nombre": 8581, "▁tu": 8582, "▁skip": 8583, "road": 8584, "istration": 8585, "▁tur": 8586, "▁Develop": 8587, "▁Па": 8588, "▁дру": 8589, "▁wonderful": 8590, ">&": 8591, "▁Liber": 8592, "▁scope": 8593, "▁manage": 8594, "▁dass": 
8595, "▁recall": 8596, "PM": 8597, "▁relevant": 8598, "▁Earth": 8599, "▁как": 8600, "▁apr": 8601, "▁ASS": 8602, "ién": 8603, "▁SH": 8604, "oom": 8605, "itet": 8606, "none": 8607, "asi": 8608, "▁motor": 8609, "▁Show": 8610, "nb": 8611, "▁factors": 8612, "▁forest": 8613, "▁вре": 8614, "thm": 8615, "▁municip": 8616, "▁turns": 8617, "▁Division": 8618, "EC": 8619, "▁disappe": 8620, "structor": 8621, "▁somewhere": 8622, "▁African": 8623, "▁Institute": 8624, "Grid": 8625, "▁teacher": 8626, "uries": 8627, "▁respectively": 8628, "▁SD": 8629, "▁alive": 8630, "▁pou": 8631, "▁Water": 8632, "фе": 8633, "▁changing": 8634, "▁afternoon": 8635, "▁orders": 8636, "Ret": 8637, "Pointer": 8638, "▁sav": 8639, "erg": 8640, "oked": 8641, "essions": 8642, "▁Fire": 8643, "aret": 8644, "imm": 8645, "▁desire": 8646, "▁що": 8647, "▁Design": 8648, "uture": 8649, "▁Office": 8650, "▁cmd": 8651, "▁eating": 8652, "Network": 8653, "▁rough": 8654, "operator": 8655, "IGN": 8656, "▁sports": 8657, "▁weren": 8658, "▁noted": 8659, "▁twice": 8660, "III": 8661, "▁anx": 8662, "▁elim": 8663, "▁ав": 8664, "▁io": 8665, "▁speech": 8666, "▁condu": 8667, "elles": 8668, "idade": 8669, "▁advance": 8670, "RI": 8671, "oca": 8672, "/\\": 8673, "apshot": 8674, "▁tail": 8675, "models": 8676, "ogy": 8677, "▁Jeff": 8678, "iration": 8679, "▁Kore": 8680, "▁leads": 8681, "bat": 8682, "Adapter": 8683, "category": 8684, "angular": 8685, "▁saved": 8686, "▁uniform": 8687, "▁né": 8688, "▁businesses": 8689, "Hist": 8690, "▁ар": 8691, "domain": 8692, "▁Si": 8693, "raise": 8694, "▁warn": 8695, "hetic": 8696, "▁Gro": 8697, ")).": 8698, "}>": 8699, "зе": 8700, "▁Amazon": 8701, "▁Organ": 8702, "▁Lake": 8703, "▁agreement": 8704, "xa": 8705, "▁perman": 8706, "▁containing": 8707, "▁strange": 8708, "сті": 8709, "▁stupid": 8710, "▁speaking": 8711, "▁Internet": 8712, "prefix": 8713, "esc": 8714, "Assert": 8715, "prote": 8716, "▁manner": 8717, "▁Sz": 8718, "unte": 8719, "iot": 8720, "Profile": 8721, "oven": 8722, "▁formed": 8723, "▁lit": 8724, "▁economy": 8725, "▁cz": 8726, "wid": 8727, "REQ": 8728, "▁chosen": 8729, "▁Produ": 8730, "oster": 8731, "stances": 8732, "awa": 8733, "▁Ren": 8734, "▁confirm": 8735, "▁Бо": 8736, "▁billion": 8737, "▁déc": 8738, "ých": 8739, "▁illustr": 8740, "TIES": 8741, "▁Pub": 8742, "▁ban": 8743, "aded": 8744, "ahn": 8745, "▁Cath": 8746, "nonumber": 8747, "▁worst": 8748, "▁Ме": 8749, "▁suggested": 8750, "stats": 8751, "▁cant": 8752, "▁align": 8753, "kappa": 8754, "▁hen": 8755, "▁initi": 8756, "'])": 8757, "BI": 8758, "▁garden": 8759, "▁secure": 8760, "▁\\[": 8761, "handler": 8762, "elli": 8763, "ldots": 8764, "secut": 8765, "▁extended": 8766, "}-": 8767, "anie": 8768, "▁Find": 8769, "▁Museum": 8770, "▁Conne": 8771, "yy": 8772, "▁passion": 8773, "akers": 8774, "ahr": 8775, "ologies": 8776, "▁equation": 8777, "▁occasion": 8778, "Let": 8779, "']['": 8780, "Print": 8781, "anes": 8782, "iente": 8783, "▁Today": 8784, "LECT": 8785, "▁Af": 8786, ",,": 8787, "▁Та": 8788, "▁```": 8789, "even": 8790, "sin": 8791, "urer": 8792, "▁°": 8793, "otimes": 8794, "▁IO": 8795, "▁poet": 8796, "()));": 8797, "▁−": 8798, "▁adopt": 8799, "phere": 8800, "#[": 8801, "▁centre": 8802, "oves": 8803, "▁ans": 8804, "dp": 8805, "▁Kir": 8806, "▁applicable": 8807, "fp": 8808, "▁visual": 8809, "▁okay": 8810, "oro": 8811, "▁opportunities": 8812, "Repository": 8813, "▁ll": 8814, "▁Rod": 8815, "▁shel": 8816, "▁launch": 8817, "▁conven": 8818, "▁Spe": 8819, "Amer": 8820, "▁cette": 8821, "Cond": 8822, "dep": 8823, "Own": 8824, "▁hook": 8825, "▁dict": 8826, "▁Those": 8827, "▁fellow": 
8828, "▁philosoph": 8829, "vin": 8830, "ferences": 8831, "hav": 8832, "▁adding": 8833, "iverse": 8834, "game": 8835, "▁Blue": 8836, "▁clin": 8837, "note": 8838, "▁Ram": 8839, "мер": 8840, "covery": 8841, "ña": 8842, "▁би": 8843, "▁fashion": 8844, "▁broke": 8845, "▁'\\": 8846, "▁reader": 8847, "ное": 8848, "ности": 8849, "▁payment": 8850, "▁Lic": 8851, "▁lips": 8852, "▁academ": 8853, "▁Mot": 8854, "ells": 8855, "CHECK": 8856, "▁ру": 8857, "▁MS": 8858, "Editor": 8859, "▁zone": 8860, "iture": 8861, "▁IT": 8862, "runtime": 8863, "▁proceed": 8864, "лов": 8865, "▁Maria": 8866, "olver": 8867, "▁Thanks": 8868, "▁shouldn": 8869, "▁Joh": 8870, "▁Model": 8871, "▁Sov": 8872, "!'": 8873, "Di": 8874, "▁cancer": 8875, "Ident": 8876, "▁exchange": 8877, "iller": 8878, "inf": 8879, "LEN": 8880, "(){": 8881, "aga": 8882, "\"],": 8883, "uh": 8884, "▁Ken": 8885, "▁photos": 8886, "▁tiny": 8887, "▁gent": 8888, "ül": 8889, "▁Take": 8890, "idel": 8891, "outing": 8892, "Internal": 8893, "▁cells": 8894, "ним": 8895, "hard": 8896, "▁Town": 8897, "obe": 8898, "plex": 8899, "тер": 8900, "tons": 8901, "▁concentr": 8902, "mock": 8903, "vc": 8904, "áz": 8905, "▁Championship": 8906, "▁бе": 8907, "??": 8908, "éri": 8909, "aly": 8910, "▁Ц": 8911, "ierte": 8912, "▁totally": 8913, "▁Auf": 8914, "▁ourselves": 8915, "▁Self": 8916, "Forms": 8917, "ighter": 8918, "▁island": 8919, "fmt": 8920, "▁rc": 8921, "▁tells": 8922, "BB": 8923, "dit": 8924, "▁variables": 8925, "▁intended": 8926, "izont": 8927, "▁plays": 8928, "dam": 8929, "seq": 8930, "▁Sup": 8931, "▁cultural": 8932, "▁scream": 8933, "__,": 8934, "cipl": 8935, "Timeout": 8936, "▁ж": 8937, "orte": 8938, "▁replaced": 8939, "EM": 8940, "▁abandon": 8941, "▁Special": 8942, "ellen": 8943, "▁Bru": 8944, "irmed": 8945, "Te": 8946, "olt": 8947, "ju": 8948, "Argument": 8949, "▁neut": 8950, "scape": 8951, "▁Ray": 8952, "▁Polit": 8953, "▁crowd": 8954, "▁Windows": 8955, "iego": 8956, "▁escape": 8957, "▁Apache": 8958, "sync": 8959, "eben": 8960, "ifies": 8961, "ether": 8962, "Meta": 8963, "▁biggest": 8964, "Game": 8965, "▁transaction": 8966, "Env": 8967, "▁Мо": 8968, "▁plenty": 8969, "▁mel": 8970, "пре": 8971, "▁motiv": 8972, "▁ор": 8973, "organ": 8974, "▁mock": 8975, "▁$_": 8976, "ене": 8977, "▁Number": 8978, "cknow": 8979, "▁Update": 8980, "zero": 8981, "▁surprise": 8982, "cean": 8983, "pdf": 8984, "Global": 8985, "▁attend": 8986, "▁fond": 8987, "▁understood": 8988, "Nav": 8989, "▁Mic": 8990, "=$": 8991, "oking": 8992, "▁Stadium": 8993, "Close": 8994, "▁competition": 8995, "▁soldiers": 8996, "▁OP": 8997, "agne": 8998, "▁Anton": 8999, "Main": 9000, "ák": 9001, "▁#[": 9002, "▁Commit": 9003, "pyx": 9004, "▁east": 9005, "▁Order": 9006, "Float": 9007, "▁accepted": 9008, "▁monitor": 9009, "▁pad": 9010, "onic": 9011, "▁pushed": 9012, "▁replace": 9013, "CRE": 9014, "▁ride": 9015, "found": 9016, "=%": 9017, "вой": 9018, "▁matches": 9019, "▁Lie": 9020, "▁experiences": 9021, "Pool": 9022, "ups": 9023, "AV": 9024, "▁existence": 9025, "▁thin": 9026, "▁magn": 9027, "COMP": 9028, "home": 9029, "▁ni": 9030, "▁wurden": 9031, "лав": 9032, "▁teeth": 9033, "▁Stan": 9034, "appro": 9035, "anny": 9036, "ifts": 9037, "▁unknown": 9038, "▁homes": 9039, "▁entity": 9040, "cie": 9041, "ление": 9042, "iar": 9043, "▁compliance": 9044, "▁focused": 9045, "uzz": 9046, "=\\\"": 9047, "components": 9048, "Attr": 9049, "allery": 9050, "▁identify": 9051, "Ok": 9052, "pie": 9053, "▁Still": 9054, "▁offering": 9055, "▁busy": 9056, "ctl": 9057, "itors": 9058, "▁concerned": 9059, "▁brown": 9060, "clk": 9061, "Selected": 9062, 
"▁Block": 9063, "▁egy": 9064, "icing": 9065, "▁URL": 9066, "▁topic": 9067, "▁Product": 9068, "▁чи": 9069, "▁trial": 9070, "▁weekend": 9071, "lu": 9072, "▁IV": 9073, "▁Egy": 9074, "xC": 9075, "▁nove": 9076, "▁lett": 9077, "enne": 9078, "()).": 9079, ".**": 9080, "▁promise": 9081, "election": 9082, "Auth": 9083, "rv": 9084, "ril": 9085, "▁conduct": 9086, "▁maintain": 9087, "▁boat": 9088, "▁opposite": 9089, "spin": 9090, "webpack": 9091, "anta": 9092, "▁orient": 9093, "▁suc": 9094, "▁exercise": 9095, "▁efficient": 9096, "▁tradition": 9097, "▁zw": 9098, "▁Sud": 9099, "going": 9100, "▁Pier": 9101, "inv": 9102, "ipes": 9103, "ensuremath": 9104, "▁conver": 9105, "creen": 9106, "▁terror": 9107, "▁Dou": 9108, "▁invalid": 9109, "ceived": 9110, "▁Arab": 9111, "▁wire": 9112, "application": 9113, "shift": 9114, "Generic": 9115, "▁Plan": 9116, "▁Wall": 9117, "▁directory": 9118, "▁egg": 9119, "▁wealth": 9120, "random": 9121, "attribute": 9122, "▁hide": 9123, "Serial": 9124, "cam": 9125, "▁ital": 9126, "▁Line": 9127, "▁CHECK": 9128, "ployment": 9129, "▁massive": 9130, "▁extract": 9131, "chain": 9132, "Rest": 9133, "▁Las": 9134, "▁bear": 9135, "▁links": 9136, "▁newsp": 9137, "▁FC": 9138, "Card": 9139, "aks": 9140, "▁visible": 9141, "▁Marc": 9142, "▁Boston": 9143, "▁reserved": 9144, "▁roof": 9145, "licenses": 9146, "dc": 9147, "▁Information": 9148, "▁witness": 9149, "Sk": 9150, "*),": 9151, "Scope": 9152, "'];": 9153, "▁Mir": 9154, "uding": 9155, "▁trend": 9156, "rep": 9157, "▁musical": 9158, "▁neither": 9159, "▁Creat": 9160, "▁positions": 9161, "LC": 9162, "ridge": 9163, "▁officers": 9164, "▁violence": 9165, "▁Tem": 9166, "▁Sus": 9167, "▁Way": 9168, "After": 9169, "acket": 9170, "▁Sou": 9171, "acer": 9172, "||": 9173, "▁remark": 9174, "water": 9175, "ně": 9176, "▁Са": 9177, "▁sed": 9178, "Each": 9179, "▁photograph": 9180, "▁letters": 9181, "▁invent": 9182, "▁Mas": 9183, "▁songs": 9184, "ól": 9185, "kind": 9186, "▁Non": 9187, "▁dust": 9188, "**:": 9189, "nabla": 9190, ".\",": 9191, "Lock": 9192, "▁До": 9193, "▁cluster": 9194, "loss": 9195, "▁ASSERT": 9196, "fall": 9197, "▁reject": 9198, "▁Spring": 9199, "▁wedding": 9200, "▁grav": 9201, "ression": 9202, "limit": 9203, "RES": 9204, "]}": 9205, "▁listed": 9206, "▁Tele": 9207, "hline": 9208, "▁chief": 9209, "MEM": 9210, "дар": 9211, "▁expensive": 9212, "trace": 9213, "▁Rog": 9214, "▁Coll": 9215, "▁Author": 9216, "▁Board": 9217, "▁Capt": 9218, "TEXT": 9219, "▁recon": 9220, "esta": 9221, "▁properly": 9222, "▁&\\": 9223, "leton": 9224, "iker": 9225, "Gu": 9226, "▁Kom": 9227, "oco": 9228, "▁anymore": 9229, "▁taste": 9230, "▁Santa": 9231, "gex": 9232, "▁Secret": 9233, "▁talent": 9234, "▁moments": 9235, "▁Ba": 9236, "▁extr": 9237, "▁Commission": 9238, "▁modify": 9239, "▁Figure": 9240, "▁domin": 9241, "▁plot": 9242, "enger": 9243, "utch": 9244, "▁cities": 9245, "▁nut": 9246, "profile": 9247, "▁Stat": 9248, "▁nodes": 9249, "▁ns": 9250, "essages": 9251, "impl": 9252, "icker": 9253, "▁examples": 9254, "abeth": 9255, "▁stated": 9256, "fire": 9257, "bul": 9258, "▁dangerous": 9259, "▁Pay": 9260, "▁Gre": 9261, "▁Monday": 9262, "esome": 9263, "igan": 9264, "rund": 9265, "prise": 9266, "fail": 9267, "▁Never": 9268, "Av": 9269, "▁linear": 9270, "▁ul": 9271, "WAR": 9272, "рен": 9273, "▁AT": 9274, "▁dop": 9275, "▁nou": 9276, "Dest": 9277, "▁claims": 9278, "enda": 9279, "▁crazy": 9280, "gel": 9281, "oggle": 9282, "▁representation": 9283, "inen": 9284, "▁alternative": 9285, "DM": 9286, "ABILITY": 9287, "faces": 9288, "▁doors": 9289, "ativ": 9290, "Look": 9291, "▁JSON": 9292, 
"▁appearance": 9293, "бря": 9294, "SQL": 9295, "▁silence": 9296, "udo": 9297, "▁Director": 9298, "Statement": 9299, "selected": 9300, "high": 9301, "prime": 9302, "▁ignore": 9303, "▁colors": 9304, "ushing": 9305, "▁virt": 9306, "manager": 9307, "▁remote": 9308, "ło": 9309, "small": 9310, "▁crime": 9311, "rb": 9312, "▁creation": 9313, "▁flight": 9314, "▁Sign": 9315, "ILE": 9316, "▁DO": 9317, "comment": 9318, "▁Cost": 9319, ".__": 9320, "▁Cop": 9321, "▁vom": 9322, "▁Science": 9323, "ления": 9324, "oop": 9325, "interface": 9326, "▁WARRANTIES": 9327, "▁Page": 9328, "******": 9329, "ском": 9330, "TRUE": 9331, "▁repeated": 9332, "▁его": 9333, "шо": 9334, "▁roz": 9335, "Pe": 9336, "▁ISBN": 9337, "irts": 9338, "poses": 9339, "})$": 9340, "▁І": 9341, "children": 9342, "bles": 9343, "ECT": 9344, "▁iz": 9345, "▁builder": 9346, "▁Media": 9347, "iat": 9348, "▁contrast": 9349, "”,": 9350, "▁Link": 9351, "▁Education": 9352, "▁joint": 9353, "▁external": 9354, "▁роз": 9355, "▁bits": 9356, "FORM": 9357, "erman": 9358, "wp": 9359, "▁Mike": 9360, "▁Master": 9361, "▁senior": 9362, "▁Nav": 9363, "▁recorded": 9364, "eling": 9365, "esh": 9366, "fx": 9367, "кан": 9368, "▁tall": 9369, "▁Johnson": 9370, "▁sono": 9371, "▁anche": 9372, "icken": 9373, "loop": 9374, "iciency": 9375, "emporary": 9376, "▁Does": 9377, "▁relation": 9378, "мы": 9379, "was": 9380, "low": 9381, "ichte": 9382, "▁Jones": 9383, "▁bedroom": 9384, "DIS": 9385, "▁magnet": 9386, "▁Engine": 9387, "▁feelings": 9388, "GC": 9389, "▁torn": 9390, "▁relationships": 9391, "▁Ре": 9392, "▁proud": 9393, "▁twe": 9394, "oval": 9395, "▁waste": 9396, "▁reduced": 9397, "ilton": 9398, "BP": 9399, "▁forgot": 9400, "▁bodies": 9401, "▁Haw": 9402, "lag": 9403, "▁www": 9404, "door": 9405, "▁sufficient": 9406, "▁dollars": 9407, "Len": 9408, "▁talked": 9409, "▁bond": 9410, "▁Bor": 9411, "}}{": 9412, "rod": 9413, "Password": 9414, "quare": 9415, "▁lights": 9416, "eren": 9417, "▁thirty": 9418, "NC": 9419, "▁TODO": 9420, "▁respond": 9421, "ких": 9422, "direct": 9423, "ação": 9424, "▁heav": 9425, "Media": 9426, "exit": 9427, "License": 9428, "`.": 9429, "▁mixed": 9430, "▁desk": 9431, "▁teaching": 9432, "▁maj": 9433, "▁nerv": 9434, "inations": 9435, "typeof": 9436, "▁coast": 9437, "▁же": 9438, "▁beside": 9439, "ummy": 9440, "Doc": 9441, "▁schedule": 9442, "▁recover": 9443, "▁Further": 9444, "▁steel": 9445, "boot": 9446, "▁Perhaps": 9447, "▁съ": 9448, "▁Os": 9449, "rick": 9450, "▁Ви": 9451, "Support": 9452, "▁(_": 9453, "nil": 9454, "pis": 9455, "xpected": 9456, "▁processing": 9457, "Build": 9458, "arian": 9459, "▁icon": 9460, "▁CA": 9461, "wick": 9462, "=(": 9463, "▁algorithm": 9464, "▁Young": 9465, "▁Management": 9466, "▁ancient": 9467, "ность": 9468, "oti": 9469, "▁combination": 9470, "world": 9471, "nn": 9472, "▁dram": 9473, "enabled": 9474, "Ac": 9475, "CCESS": 9476, "aration": 9477, "▁blocks": 9478, "▁Angeles": 9479, "▁Qual": 9480, "▁succeed": 9481, "network": 9482, "▁oblig": 9483, "springframework": 9484, "▁Tre": 9485, "okes": 9486, "mun": 9487, "▁Network": 9488, "Del": 9489, "▁estate": 9490, "▁liqu": 9491, "▁pob": 9492, "▁dad": 9493, "▁distinct": 9494, "▁Tit": 9495, "▁Lear": 9496, "ferred": 9497, "android": 9498, "▁subsequ": 9499, "▁Florida": 9500, "subset": 9501, "▁whisper": 9502, "Vol": 9503, "ulous": 9504, "▁crew": 9505, "▁lug": 9506, "pid": 9507, "ocity": 9508, "skb": 9509, "▁tea": 9510, "ун": 9511, "▁honor": 9512, "▁Ins": 9513, "▁gew": 9514, "Details": 9515, "eneath": 9516, "atar": 9517, "▁_{": 9518, "amen": 9519, "▁setup": 9520, "Transaction": 9521, "▁blank": 9522, 
"Failed": 9523, "job": 9524, "▁pret": 9525, "ße": 9526, "loor": 9527, "ří": 9528, "ncia": 9529, "▁anywhere": 9530, "▁Light": 9531, "▁Ak": 9532, "BD": 9533, "▁excited": 9534, "agers": 9535, "▁warning": 9536, "▁processes": 9537, "hu": 9538, "▁youth": 9539, "▁dogs": 9540, "▁oct": 9541, "▁nine": 9542, "Writer": 9543, "grid": 9544, "▁importance": 9545, "estic": 9546, "▁carefully": 9547, "master": 9548, "▁decisions": 9549, "▁pin": 9550, "▁crack": 9551, "TEST": 9552, "▁Local": 9553, "▁Right": 9554, "▁vast": 9555, "▁faster": 9556, "▁institut": 9557, "▁annual": 9558, "LAN": 9559, "▁episode": 9560, "▁XV": 9561, "▁delivery": 9562, "tl": 9563, "FP": 9564, "circ": 9565, "▁typically": 9566, "igo": 9567, "▁intel": 9568, "nat": 9569, "xb": 9570, "стро": 9571, ")-": 9572, "▁Bal": 9573, "▁Jos": 9574, "▁gonna": 9575, "▁Rest": 9576, "jor": 9577, "onia": 9578, "orship": 9579, "overy": 9580, "LINE": 9581, "]:": 9582, "Queue": 9583, "▁compare": 9584, "▁apartment": 9585, "▁rul": 9586, "Dr": 9587, "gency": 9588, "▁obviously": 9589, "zie": 9590, "ycl": 9591, "fortunately": 9592, "▁stepped": 9593, "▁Seg": 9594, "▁Which": 9595, "▁PC": 9596, "▁ast": 9597, "endor": 9598, "▁permission": 9599, "COL": 9600, "▁TEST": 9601, "Pay": 9602, "ères": 9603, "▁studied": 9604, "▁accompl": 9605, "role": 9606, "Where": 9607, "protobuf": 9608, "metadata": 9609, "Job": 9610, "▁Four": 9611, "plements": 9612, "disable": 9613, "▁loud": 9614, "▁happening": 9615, "▁Using": 9616, "rog": 9617, "▁depends": 9618, "ím": 9619, "'\\": 9620, "▁taught": 9621, "shared": 9622, "▁attributes": 9623, "▁Action": 9624, "▁dess": 9625, "▁houses": 9626, "▁reset": 9627, "▁bien": 9628, "▁explicit": 9629, "LOW": 9630, "->_": 9631, "▁PM": 9632, "Category": 9633, "oice": 9634, "into": 9635, "▁mail": 9636, "▁authority": 9637, "▁unable": 9638, "filename": 9639, "ék": 9640, "лей": 9641, "▁sector": 9642, "appoint": 9643, "▁hang": 9644, "▁cel": 9645, "related": 9646, "itate": 9647, "▁'<": 9648, "amber": 9649, "▁cheap": 9650, "▁enabled": 9651, "▁division": 9652, "Any": 9653, "▁hier": 9654, "▁Head": 9655, "ntax": 9656, "uda": 9657, "▁limitations": 9658, "▁studio": 9659, "media": 9660, "▁circle": 9661, "нова": 9662, "▁laug": 9663, "acts": 9664, "▁Во": 9665, "ód": 9666, "pled": 9667, "LOC": 9668, "Expr": 9669, ">:": 9670, "▁prés": 9671, "▁laughed": 9672, "▁Three": 9673, "лы": 9674, "▁ends": 9675, "▁fundament": 9676, "▁inher": 9677, "▁liv": 9678, "bid": 9679, "▁responsibility": 9680, "▁checked": 9681, "▁Pac": 9682, "▁fault": 9683, "▁yellow": 9684, "▁salt": 9685, "▁Francisco": 9686, "▁^": 9687, "▁ON": 9688, "▁beauty": 9689, "yg": 9690, "▁Aff": 9691, "▁Eq": 9692, "▁magic": 9693, "▁handler": 9694, "xE": 9695, "▁numerous": 9696, "▁hole": 9697, "▁rooms": 9698, "cción": 9699, "▁Arm": 9700, "person": 9701, "▁buildings": 9702, "▁plate": 9703, "bled": 9704, "errors": 9705, "▁Again": 9706, "▁Default": 9707, "▁Hard": 9708, "tó": 9709, "hus": 9710, "▁dimension": 9711, "iale": 9712, "▁Mult": 9713, "▁Government": 9714, "Func": 9715, "▁blow": 9716, "▁rect": 9717, "erra": 9718, "connection": 9719, "▁passing": 9720, "ßen": 9721, "phas": 9722, "ensional": 9723, "record": 9724, "cohol": 9725, "▁Harry": 9726, "izontal": 9727, "▁finger": 9728, "▁younger": 9729, "▁SC": 9730, "operation": 9731, "BY": 9732, "heim": 9733, "▁Bad": 9734, "▁storm": 9735, "▁Nat": 9736, "▁buying": 9737, "▁Sometimes": 9738, "▁Ста": 9739, "essed": 9740, "▁damn": 9741, "▁meg": 9742, "umes": 9743, "ünd": 9744, "тра": 9745, "▁silver": 9746, "wd": 9747, "hidden": 9748, "ardo": 9749, "▁communities": 9750, "▁diet": 9751, 
"otted": 9752, "▁bat": 9753, "ancer": 9754, "▁fmt": 9755, "▁Pen": 9756, "▁til": 9757, "Enum": 9758, "PATH": 9759, "▁matters": 9760, "timeout": 9761, "------------": 9762, "kan": 9763, "▁Corpor": 9764, "=\"../../": 9765, "▁Ale": 9766, "hentication": 9767, "▁complic": 9768, "▁Security": 9769, "OFF": 9770, "Rad": 9771, "apse": 9772, "▁dance": 9773, "▁permissions": 9774, "▁warrant": 9775, "▁lad": 9776, "▁isol": 9777, "dl": 9778, "▁Au": 9779, "yes": 9780, "▁tv": 9781, "▁provider": 9782, "▁terrible": 9783, "▁department": 9784, "eral": 9785, "▁implementation": 9786, "SR": 9787, "▁hearing": 9788, "▁Kn": 9789, "FR": 9790, "tv": 9791, "▁diss": 9792, "FUN": 9793, "▁durante": 9794, "osis": 9795, "▁tasks": 9796, "▁Blo": 9797, "вод": 9798, "▁branch": 9799, "▁politics": 9800, "▁Elle": 9801, "▁leadership": 9802, "expr": 9803, "▁techniques": 9804, "prec": 9805, "Sigma": 9806, "imately": 9807, "tk": 9808, "achment": 9809, "▁Enter": 9810, "▁creative": 9811, "▁зна": 9812, "appy": 9813, "unched": 9814, "▁'',": 9815, "onder": 9816, "{-": 9817, "NUM": 9818, "▁narr": 9819, "Memory": 9820, "▁winning": 9821, "▁Follow": 9822, "*/\r": 9823, "vision": 9824, "resents": 9825, "zione": 9826, "▁latter": 9827, "▁requests": 9828, "▁margin": 9829, "▁{\"": 9830, "video": 9831, "cn": 9832, "▁Image": 9833, "Tim": 9834, "CONFIG": 9835, "▁allowing": 9836, "▁combined": 9837, "PUT": 9838, "▁instanceof": 9839, "igin": 9840, "▁pero": 9841, "▁''": 9842, "▁confidence": 9843, "▁equivalent": 9844, "pad": 9845, "effect": 9846, "RX": 9847, "▁lang": 9848, "strong": 9849, "▁bridge": 9850, "aya": 9851, "▁treated": 9852, "▁forth": 9853, "SW": 9854, "▁accounts": 9855, "▁PO": 9856, "▁listening": 9857, "Route": 9858, "()))": 9859, "cpy": 9860, "▁reform": 9861, "▁gate": 9862, "▁Walk": 9863, "▁somehow": 9864, "tf": 9865, "▁layout": 9866, "umin": 9867, "▁considering": 9868, "▁premi": 9869, "▁Mom": 9870, "athan": 9871, "Gen": 9872, "▁planet": 9873, "amples": 9874, "▁MO": 9875, "shop": 9876, "▁premier": 9877, "▁simpl": 9878, "▁segu": 9879, "LY": 9880, "Sum": 9881, "▁tables": 9882, "ska": 9883, "▁ž": 9884, "pd": 9885, "▁sous": 9886, "▁conference": 9887, "▁Dat": 9888, "Scroll": 9889, "▁standards": 9890, "▁гру": 9891, "esse": 9892, "▁citizens": 9893, "▁occurred": 9894, "▁democr": 9895, "▁elev": 9896, "▁Sem": 9897, "ensus": 9898, "headers": 9899, "▁Chris": 9900, "imento": 9901, "kom": 9902, "Cor": 9903, "MIN": 9904, "usher": 9905, "Database": 9906, "▁formal": 9907, "igne": 9908, "▁organizations": 9909, "▁Ire": 9910, "Xml": 9911, "из": 9912, "▁pray": 9913, "▁bomb": 9914, "▁mand": 9915, "erts": 9916, "▁clock": 9917, "▁buck": 9918, "вали": 9919, "ensch": 9920, "▁volt": 9921, "▁films": 9922, "▁plants": 9923, "inode": 9924, "Boolean": 9925, "▁restaurant": 9926, "ían": 9927, "▁debut": 9928, "pages": 9929, "▁wordt": 9930, "▁Ба": 9931, "▁greatest": 9932, "(\"/": 9933, "▁copyright": 9934, "▁rit": 9935, "sizeof": 9936, "Trace": 9937, "uent": 9938, "тур": 9939, "▁ko": 9940, ":\\": 9941, "▁bigger": 9942, "▁perfectly": 9943, "tenance": 9944, "MASK": 9945, "ré": 9946, "▁ett": 9947, "▁nose": 9948, "▁craft": 9949, "iteral": 9950, "▁discussed": 9951, "▁Jewish": 9952, "Cap": 9953, "▁Unless": 9954, "▁Jackson": 9955, "Attributes": 9956, "▁lunch": 9957, "öl": 9958, "atr": 9959, "▁paying": 9960, "Parse": 9961, "()\r": 9962, "lad": 9963, "▁rare": 9964, "▁[];": 9965, "stone": 9966, "▁unc": 9967, "▁defense": 9968, "}+": 9969, "▁Global": 9970, "▁Soviet": 9971, "▁Australian": 9972, "▁gli": 9973, "variant": 9974, "▁Ron": 9975, "▁loan": 9976, "Step": 9977, "member": 9978, "Sch": 
9979, "▁Committee": 9980, "▁spending": 9981, "▁Tri": 9982, "▁Journal": 9983, "▁sugar": 9984, "elly": 9985, "HTML": 9986, "▁advent": 9987, "wing": 9988, "▁Whether": 9989, "oration": 9990, "▁NE": 9991, "iveness": 9992, "▁hav": 9993, "▁conscious": 9994, "een": 9995, "Symbol": 9996, "▁ку": 9997, "Logger": 9998, "▁Little": 9999, "widet": 10000, "ocation": 10001, "pin": 10002, "▁symmet": 10003, "▁AD": 10004, "▁posts": 10005, "shal": 10006, "▁Conf": 10007, "▁chose": 10008, "mal": 10009, "ulo": 10010, "▁Method": 10011, "▁missed": 10012, "Remove": 10013, "Auto": 10014, "VALUE": 10015, "thlet": 10016, "▁Force": 10017, "pf": 10018, "▁Я": 10019, "late": 10020, "▁pul": 10021, "Pop": 10022, "▁advanced": 10023, "aires": 10024, "ressed": 10025, "AME": 10026, "bell": 10027, "aching": 10028, "ić": 10029, "echo": 10030, "HS": 10031, "▁funny": 10032, "рии": 10033, "▁eer": 10034, "▁veget": 10035, "▁fourth": 10036, "cf": 10037, "transform": 10038, "▁grown": 10039, "▁McC": 10040, "site": 10041, "▁beneath": 10042, "▁shell": 10043, "xd": 10044, "Play": 10045, "short": 10046, "Role": 10047, "▁religion": 10048, "inator": 10049, "}<": 10123, "asp": 10124, "ajo": 10125, "exports": 10126, "▁Node": 10127, "▁jako": 10128, "▁ya": 10129, "▁successfully": 10130, "▁friendly": 10131, "buff": 10132, "DEFAULT": 10133, "▁pregn": 10134, "Required": 10135, "▁binary": 10136, "isting": 10137, "▁stared": 10138, "▁circumstances": 10139, "▁хо": 10140, "rei": 10141, "▁Го": 10142, "Transform": 10143, "cnt": 10144, "▁Ext": 10145, "report": 10146, "VERSION": 10147, "▁analy": 10148, "▁Marg": 10149, "▁alleg": 10150, "builder": 10151, "ToString": 10152, "Layer": 10153, "íst": 10154, "Prop": 10155, "▁Emp": 10156, "}]": 10157, "▁selling": 10158, "▁queue": 10159, "▁seriously": 10160, "▁Lead": 10161, "textit": 10162, "testing": 10163, "▁Пре": 10164, "security": 10165, "iał": 10166, "ún": 10167, "chip": 10168, "▁candidate": 10169, "▁minister": 10170, "eria": 10171, "▁Het": 10172, "дин": 10173, "▁Britain": 10174, "▁barely": 10175, "▁sty": 10176, "▁Spanish": 10177, "▁Ven": 10178, "timer": 10179, "ків": 10180, "▁documents": 10181, "('.": 10182, "▁debug": 10183, "▁contro": 10184, "стоя": 10185, "▁joy": 10186, "Sn": 10187, "Inv": 10188, "▁protocol": 10189, "▁faces": 10190, "▁Despite": 10191, "sed": 10192, "Conf": 10193, "ARG": 10194, "▁evolution": 10195, "▁tod": 10196, "▁Promise": 10197, "▁posted": 10198, "Perm": 10199, "bet": 10200, "Ang": 10201, "Just": 10202, "▁rum": 10203, "layer": 10204, "▁behavi": 10205, "ipping": 10206, "▁dynam": 10207, "▁scheme": 10208, "▁proto": 10209, ")/": 10210, "Collections": 10211, "riev": 10212, "▁Click": 10213, "▁uns": 10214, "widetilde": 10215, "▁remembered": 10216, "гі": 10217, "inates": 10218, "▁incorpor": 10219, "▁Description": 10220, "▁prepare": 10221, "▁Final": 10222, "uation": 10223, "▁Queen": 10224, ">;": 10225, "▁automatically": 10226, "▁sharp": 10227, "▁meat": 10228, "ateur": 10229, "astern": 10230, "▁stuck": 10231, "ASSERT": 10232, "▁planned": 10233, "dots": 10234, "ookie": 10235, "▁Histor": 10236, "▁reviews": 10237, "IMP": 10238, "▁answered": 10239, "Total": 10240, "▁sau": 10241, "▁Mexico": 10242, "continue": 10243, "▁Apple": 10244, "likely": 10245, "зва": 10246, "users": 10247, "▁identified": 10248, "▁Lev": 10249, "▁mol": 10250, "▁Islam": 10251, "▁committed": 10252, "writ": 10253, "бер": 10254, "rift": 10255, "▁interrupt": 10256, "▁readonly": 10257, "schema": 10258, "Sm": 10259, "Double": 10260, "aza": 10261, "▁Hal": 10262, "Move": 10263, "▁Series": 10264, "inline": 10265, "▁которы": 10266, "soc": 10267, 
"▁tent": 10268, "▁amer": 10269, "aki": 10270, "▁lady": 10271, "▁tired": 10272, "ifi": 10273, "▁même": 10274, "ouver": 10275, "▁aside": 10276, "Did": 10277, "',\r": 10278, "▁bringing": 10279, "Drawing": 10280, "aro": 10281, "▁Rh": 10282, "▁Naz": 10283, "esso": 10284, "▁reaction": 10285, "mitted": 10286, "▁absolute": 10287, "haust": 10288, "(()": 10289, "▁Task": 10290, "ERS": 10291, "▁^{": 10292, "VD": 10293, "▁tone": 10294, "dist": 10295, "vs": 10296, "▁wheel": 10297, "▁administration": 10298, "▁interests": 10299, "▁pointer": 10300, "▁encounter": 10301, "aver": 10302, "▁nord": 10303, "ket": 10304, "▁beach": 10305, "▁enjoyed": 10306, "contains": 10307, "▁append": 10308, "Wait": 10309, "▁squad": 10310, "zel": 10311, "▁medium": 10312, "▁sending": 10313, "▁Lady": 10314, "ções": 10315, "▁destination": 10316, "nych": 10317, "▁conflict": 10318, "▁Ly": 10319, "▁vul": 10320, "▁basically": 10321, "reated": 10322, "black": 10323, "ugins": 10324, "▁calm": 10325, "érie": 10326, "har": 10327, "лан": 10328, "▁Се": 10329, "watch": 10330, "▁Put": 10331, "▁dump": 10332, "acher": 10333, "scroll": 10334, "▁claimed": 10335, "▁Control": 10336, "▁blind": 10337, "enti": 10338, "▁Keep": 10339, "▁Development": 10340, "images": 10341, "▁tough": 10342, "gebra": 10343, "▁sept": 10344, "hew": 10345, "▁skill": 10346, "▁Tay": 10347, "▁któ": 10348, "owner": 10349, "pare": 10350, "▁fee": 10351, "▁continues": 10352, "▁kan": 10353, "bes": 10354, "▁cha": 10355, "ovo": 10356, "▁Night": 10357, "icture": 10358, "shire": 10359, "▁essay": 10360, "▁suppose": 10361, "etic": 10362, "Art": 10363, "acon": 10364, "lla": 10365, "words": 10366, "▁comparison": 10367, "▁BE": 10368, "▁challenges": 10369, "▁ol": 10370, "citep": 10371, "▁Foot": 10372, "▁Such": 10373, "▁papers": 10374, "activ": 10375, "quer": 10376, "тя": 10377, "▁То": 10378, "ський": 10379, "thur": 10380, "done": 10381, "▁shock": 10382, "▁dedicated": 10383, "▁correspond": 10384, "Second": 10385, "▁bull": 10386, "life": 10387, "indent": 10388, "▁figures": 10389, "▁Andrew": 10390, "isp": 10391, "▁favour": 10392, "зда": 10393, "▁Elect": 10394, "Full": 10395, "▁nearby": 10396, "▁Register": 10397, "Scale": 10398, "ications": 10399, "ин": 10400, "▁AM": 10401, "pair": 10402, "▁perspective": 10403, "▁nos": 10404, "apa": 10405, "ostał": 10406, "▁Pers": 10407, "icer": 10408, "▁plastic": 10409, "дов": 10410, "ciples": 10411, "zą": 10412, "clos": 10413, "▁уча": 10414, "▁Á": 10415, "plugin": 10416, "▁angle": 10417, "▁commission": 10418, "▁funds": 10419, "▁indu": 10420, "▁drawn": 10421, "ám": 10422, "▁developing": 10423, "▁segment": 10424, "isme": 10425, "scr": 10426, "▁lies": 10427, "▁IL": 10428, "▁api": 10429, "Extension": 10430, "▁scal": 10431, "install": 10432, "▁Week": 10433, "▁gentle": 10434, "▁Canadian": 10435, "▁dialog": 10436, "▁articles": 10437, "Theme": 10438, "SM": 10439, "▁Bul": 10440, "▁leur": 10441, "▁stom": 10442, "Plugin": 10443, "▁после": 10444, "▁stead": 10445, "▁ś": 10446, "ipher": 10447, "▁prze": 10448, "▁draft": 10449, "bottom": 10450, "▁{};": 10451, "▁stayed": 10452, "feature": 10453, "▁vot": 10454, "▁fabric": 10455, "ça": 10456, "('#": 10457, "rea": 10458, "▁reput": 10459, "▁Cir": 10460, "▁AL": 10461, "▁assertEquals": 10462, "results": 10463, "▁Cross": 10464, "ursday": 10465, "▁audio": 10466, "▁gap": 10467, "▁streets": 10468, "▁scientific": 10469, "platform": 10470, "▁auss": 10471, "▁Cro": 10472, "▁partial": 10473, "unc": 10474, "▁choices": 10475, "▁или": 10476, "pred": 10477, "▁heads": 10478, "terday": 10479, "▁Nick": 10480, "▁weird": 10481, "asant": 10482, 
"▁represented": 10483, "▁пи": 10484, "DP": 10485, "orders": 10486, "clock": 10487, "▁Ho": 10488, "arters": 10489, "Cmd": 10490, "oga": 10491, "Keys": 10492, "Report": 10493, "▁Vill": 10494, "▁Mu": 10495, "▁owned": 10496, "SUCCESS": 10497, "▁typeof": 10498, "hdr": 10499, "uable": 10500, "▁neighborhood": 10501, "▁AP": 10502, "▁resulting": 10503, "▁shadow": 10504, "STRING": 10505, "▁videos": 10506, "лення": 10507, "expect": 10508, "▁Valley": 10509, "▁goto": 10510, "▁Sher": 10511, "frastr": 10512, "▁operating": 10513, "▁это": 10514, "▁Licensed": 10515, "Variable": 10516, "▁PR": 10517, "▁Hans": 10518, "clone": 10519, "▁Gesch": 10520, "▁Band": 10521, "........": 10522, "uing": 10523, "▁hundreds": 10524, "▁ок": 10525, "▁emotional": 10526, "▁Indust": 10527, ")+": 10528, "▁Egypt": 10529, "▁franç": 10530, "▁š": 10531, "▁fasc": 10532, "onto": 10533, "▁Adam": 10534, "▁laid": 10535, "▁rig": 10536, "▁detailed": 10537, "▁implements": 10538, "▁university": 10539, "▁Hy": 10540, "▁grid": 10541, "▁regions": 10542, "Stop": 10543, "▁slot": 10544, "▁angry": 10545, "▁-=": 10546, "▁waited": 10547, "Vert": 10548, "\":\"": 10549, "▁elem": 10550, "▁rég": 10551, "owed": 10552, "Member": 10553, "▁ratio": 10554, "isen": 10555, "▁Lem": 10556, "gery": 10557, "▁cream": 10558, "▁était": 10559, "▁geb": 10560, "unique": 10561, "▁Deb": 10562, "▁factory": 10563, "że": 10564, "dialog": 10565, "▁Config": 10566, "Sync": 10567, "angers": 10568, "▁governing": 10569, "▁Hun": 10570, "Space": 10571, "▁jest": 10572, "icious": 10573, "▁emphas": 10574, "umps": 10575, "▁Esp": 10576, "▁sul": 10577, "▁historical": 10578, "ija": 10579, "▁lying": 10580, "▁Steve": 10581, "▁measures": 10582, "osto": 10583, "?”": 10584, "▁pocket": 10585, "▁Sat": 10586, "▁pitch": 10587, "▁natur": 10588, "▁humans": 10589, "▁Simon": 10590, "adores": 10591, "(\"\\": 10592, "inking": 10593, "▁expos": 10594, "material": 10595, "▁apparently": 10596, "▁Camb": 10597, "▁Box": 10598, "▁spaces": 10599, "exists": 10600, "▁acting": 10601, "ORY": 10602, "зова": 10603, "Good": 10604, "ienne": 10605, "▁Williams": 10606, "▁fruit": 10607, "iera": 10608, "▁Lim": 10609, "▁trait": 10610, "▁artists": 10611, "▁absor": 10612, "rait": 10613, "LOAD": 10614, "▁movies": 10615, "▁dynamic": 10616, "asts": 10617, "▁Integer": 10618, "▁smoke": 10619, "пі": 10620, "angel": 10621, ">(\"": 10622, "▁instrument": 10623, "▁fuel": 10624, "ної": 10625, "atalogue": 10626, "▁serial": 10627, "Files": 10628, "▁bathroom": 10629, "ilo": 10630, "esto": 10631, "▁pm": 10632, "entials": 10633, "▁Online": 10634, "white": 10635, "▁tips": 10636, "▁capable": 10637, "Fig": 10638, "TV": 10639, "▁он": 10640, "ké": 10641, "bitr": 10642, "Mapping": 10643, "▁tak": 10644, "ющи": 10645, "вля": 10646, ")\",": 10647, "▁Karl": 10648, "▁Human": 10649, "▁Pot": 10650, "▁represents": 10651, "▁consistent": 10652, "_(": 10653, "wen": 10654, "▁Rose": 10655, "law": 10656, "▁FROM": 10657, "▁begins": 10658, "▁edit": 10659, "▁mountain": 10660, "▁chapter": 10661, "▁wondered": 10662, "▁industrial": 10663, "▁Major": 10664, "▁ges": 10665, "▁directed": 10666, "eros": 10667, "▁Wild": 10668, "liament": 10669, "Book": 10670, "username": 10671, "hot": 10672, "▁nam": 10673, "▁league": 10674, "bra": 10675, "кон": 10676, "▁Tal": 10677, "▁Ва": 10678, "▁exports": 10679, "(@": 10680, "▁sharing": 10681, "▁Tro": 10682, "ść": 10683, "uesday": 10684, "ylv": 10685, "▁guitar": 10686, "elen": 10687, "Selection": 10688, "▁confident": 10689, "rypto": 10690, "▁hors": 10691, "editor": 10692, "▁shoulders": 10693, "getName": 10694, "encing": 10695, "SELECT": 10696, 
"вши": 10697, "▁kinds": 10698, "▁Wel": 10699, "▁purposes": 10700, "Matrix": 10701, "invalid": 10702, "▁owners": 10703, "▁Records": 10704, "▁Process": 10705, "▁chat": 10706, "▁Dor": 10707, "▁bin": 10708, "redit": 10709, "oire": 10710, "▁Total": 10711, "▁Family": 10712, "ARY": 10713, "▁bread": 10714, "▁compre": 10715, "▁shoes": 10716, "▁raz": 10717, "▁trace": 10718, "nej": 10719, "orted": 10720, "hn": 10721, "▁procedure": 10722, "properties": 10723, "plier": 10724, "▁hero": 10725, "panel": 10726, "▁marked": 10727, "▁worried": 10728, "\\|": 10729, "pts": 10730, "▁Support": 10731, "▁serving": 10732, "Fail": 10733, "▁disappoint": 10734, "▁Scot": 10735, "▁pleasure": 10736, "▁judge": 10737, "zeich": 10738, "▁forever": 10739, "▁Zeit": 10740, "uous": 10741, "inent": 10742, "▁dw": 10743, "▁waren": 10744, "▁flash": 10745, "▁troops": 10746, "▁drugs": 10747, "▁diam": 10748, ".~": 10749, "imp": 10750, "inned": 10751, "▁EV": 10752, "Struct": 10753, "▁justice": 10754, "▁officials": 10755, "ffff": 10756, "▁Common": 10757, "▁Cat": 10758, "▁tomorrow": 10759, "▁él": 10760, "Texture": 10761, "qpoint": 10762, "▁Fried": 10763, "▁Term": 10764, "pgfqpoint": 10765, "▁nem": 10766, "norm": 10767, "▁hardly": 10768, "oda": 10769, "zeta": 10770, "emic": 10771, "▁полу": 10772, "▁loaded": 10773, "kes": 10774, "ció": 10775, "▁fool": 10776, "▁trick": 10777, "▁dst": 10778, "Find": 10779, "▁все": 10780, "}},": 10781, "▁framework": 10782, "▁merely": 10783, "▁union": 10784, "▁Edward": 10785, "rif": 10786, "Flag": 10787, "▁crisis": 10788, "▁finite": 10789, "▁lol": 10790, "▁Kim": 10791, "ната": 10792, "since": 10793, "▁compat": 10794, "▁pert": 10795, "ibilities": 10796, "▁también": 10797, "ibli": 10798, "▁teen": 10799, "▁sympt": 10800, "oral": 10801, "ders": 10802, "otte": 10803, "при": 10804, "▁Jane": 10805, "▁originally": 10806, "▁throat": 10807, "mag": 10808, "sup": 10809, "uni": 10810, "$$": 10811, "▁Library": 10812, "▁attacks": 10813, "ingen": 10814, "('/": 10815, "▁hes": 10816, "coin": 10817, "ounce": 10818, "▁Academy": 10819, "MODULE": 10820, "isms": 10821, "▁Adv": 10822, "▁Bol": 10823, "▁incident": 10824, ")^{": 10825, "▁bij": 10826, "▁Rome": 10827, "▁Italy": 10828, "events": 10829, "▁Fern": 10830, "▁ber": 10831, "▁silent": 10832, "▁pier": 10833, "▁YO": 10834, "▁plain": 10835, "Bas": 10836, "▁pill": 10837, "rase": 10838, "▁carrying": 10839, "▁resp": 10840, "ную": 10841, "▁typical": 10842, "Wrapper": 10843, "▁gau": 10844, "▁chemical": 10845, "▁hal": 10846, "throw": 10847, "Cluster": 10848, "▁Gab": 10849, "▁Girl": 10850, "quir": 10851, "▁Arg": 10852, "▁relief": 10853, "▁Ве": 10854, "dm": 10855, "▁frustr": 10856, "\\%": 10857, "▁stores": 10858, "▁bottle": 10859, "▁Lew": 10860, "two": 10861, "stad": 10862, "▁cheek": 10863, "▁concerns": 10864, "▁helpful": 10865, "▁coverage": 10866, "isi": 10867, "ADD": 10868, "async": 10869, "▁approximately": 10870, "iffer": 10871, "hook": 10872, "▁enum": 10873, "ová": 10874, "▁evil": 10875, "▁constantly": 10876, "apply": 10877, "▁siè": 10878, "▁practices": 10879, "▁teachers": 10880, "▁Sn": 10881, "▁Awards": 10882, "▁substant": 10883, "▁$.": 10884, "dk": 10885, "▁mob": 10886, "▁ingred": 10887, "vere": 10888, "Multi": 10889, "пер": 10890, "stal": 10891, "yard": 10892, "required": 10893, "vement": 10894, "▁intelligence": 10895, "▁thinks": 10896, "▁personally": 10897, "▁trained": 10898, "orney": 10899, ")\\": 11266, "anal": 11267, "Section": 11268, "plus": 11269, "üt": 11270, "▁embed": 11271, "▁strings": 11272, "Before": 11273, "proc": 11274, "▁спо": 11275, "trl": 11276, "vr": 11277, 
"Background": 11278, "logger": 11279, "agraph": 11280, "iest": 11281, "▁goods": 11282, "batch": 11283, "▁optional": 11284, "▁Taylor": 11285, "▁recognize": 11286, "walk": 11287, "▁Hit": 11288, "▁Elizabeth": 11289, "}:": 11290, "▁careful": 11291, "краї": 11292, "▁locations": 11293, "▁structures": 11294, "▁disk": 11295, "▁ships": 11296, "▁suo": 11297, "▁sowie": 11298, "▁Ess": 11299, "▁Hash": 11300, "▁reasonable": 11301, "▁Moreover": 11302, "▁formula": 11303, "▁Centre": 11304, "▁residents": 11305, "RS": 11306, "Ids": 11307, "▁Know": 11308, "▁trib": 11309, "▁rés": 11310, "▁stable": 11311, "▁Would": 11312, "▁breaking": 11313, "▁meal": 11314, "▁phen": 11315, "▁fel": 11316, "▁Fred": 11317, "Author": 11318, "▁capture": 11319, "opts": 11320, "▁everywhere": 11321, "▁sque": 11322, "▁moder": 11323, "setup": 11324, "▁Supp": 11325, "▁whenever": 11326, "{(": 11327, "wart": 11328, "▁toe": 11329, "Prefix": 11330, "hou": 11331, "gage": 11332, ">\"": 11333, "▁frag": 11334, "▁Theorem": 11335, "memory": 11336, "▁contents": 11337, "docs": 11338, "}'": 11339, "▁Irish": 11340, "Then": 11341, "aats": 11342, "Save": 11343, "▁agency": 11344, "▁име": 11345, "дова": 11346, "▁Function": 11347, "NN": 11348, "destroy": 11349, "▁Message": 11350, "▁cancel": 11351, "▁superior": 11352, "▁ec": 11353, "▁literature": 11354, "▁PART": 11355, "Il": 11356, "▁Cab": 11357, "engine": 11358, "▁basket": 11359, "worth": 11360, "▁Sel": 11361, "fetch": 11362, "▁Stadt": 11363, "▁Ки": 11364, "▁conj": 11365, "▁seiner": 11366, "▁confirmed": 11367, "▁Argent": 11368, "amar": 11369, "pgfpath": 11370, "▁struggle": 11371, "Pattern": 11372, "▁Middle": 11373, "itan": 11374, "▁moon": 11375, "orough": 11376, "▁Catholic": 11377, "▁struck": 11378, "]->": 11379, "▁weapon": 11380, "▁subst": 11381, "▁instructions": 11382, "▁occas": 11383, "protected": 11384, "▁Less": 11385, "▁batch": 11386, "▁contra": 11387, "▁deck": 11388, "▁ignored": 11389, "▁refused": 11390, "trigger": 11391, "▁criminal": 11392, "GA": 11393, "olly": 11394, "▁Bell": 11395, "▁Ю": 11396, "forward": 11397, "▁prefix": 11398, "▁immediate": 11399, "▁assigned": 11400, "▁elected": 11401, "▁tonight": 11402, "▁Dies": 11403, "▁Beach": 11404, "▁preced": 11405, "ował": 11406, "▁galax": 11407, "▁logic": 11408, "enza": 11409, "▁Captain": 11410, "▁Hay": 11411, "▁facts": 11412, "▁ни": 11413, "té": 11414, "▁sb": 11415, "oped": 11416, "▁combat": 11417, "▁explore": 11418, "▁(-": 11419, "Loader": 11420, "▁Wilson": 11421, "▁locked": 11422, ":)": 12970, "▁quel": 12971, "▁Га": 12972, "Ty": 12973, "▁temps": 12974, "▁ghost": 12975, "Material": 12976, "ERCHANT": 12977, "pointer": 12978, "жда": 12979, "aha": 12980, "ulf": 12981, "▁supplement": 12982, "▁dismiss": 12983, "▁closing": 12984, "▁vulner": 12985, "▁après": 12986, "▁overwhel": 12987, "ское": 12988, "▁disag": 12989, "acia": 12990, "oured": 12991, "ruption": 12992, "▁PS": 12993, "Endpoint": 12994, "Real": 12995, "▁Tag": 12996, "▁stairs": 12997, "lyn": 12998, "▁eleg": 12999, "▁veter": 13000, "factory": 13001, "anne": 13002, "▁Bat": 13003, "▁franc": 13004, "lung": 13005, "▁\"'": 13006, ".',": 13007, "▁Country": 13008, "^{[": 13009, "▁yours": 13010, "ailability": 13011, "Clear": 13012, "ätt": 13013, "пис": 13014, "▁joke": 13015, "▁annoy": 13016, "▁rag": 13017, "vari": 13018, "лекс": 13019, "▁Psy": 13020, "ilty": 13021, "mount": 13022, "▁cual": 13023, "▁solar": 13024, "}^{(": 13025, "Short": 13026, "▁taxes": 13027, "Append": 13028, "Win": 13029, "estyle": 13030, "▁facil": 13031, "вро": 13032, "▁sought": 13033, "▁bare": 13034, "▁react": 13035, "jar": 13036, "MAC": 
13037, "lov": 13038, "warn": 13039, "▁crucial": 13040, "▁museum": 13041, "ниц": 13042, "▁Kent": 13043, "Maybe": 13044, "▁bike": 13045, "▁Address": 13046, "XML": 13047, "▁admitted": 13048, "▁$(\\": 13049, "▁spell": 13050, "▁vic": 13051, "gre": 13052, "▁proc": 13053, "theless": 13054, "▁Nom": 13055, "▁Rail": 13056, "▁acceler": 13057, "▁convin": 13058, "▁Property": 13059, "▁DA": 13060, "▁clip": 13061, "▁plugin": 13062, "Limit": 13063, "views": 13064, "bru": 13065, "▁pra": 13066, "▁ak": 13067, "▁ej": 13068, "▁opts": 13069, "▁slip": 13070, "▁gang": 13071, "asted": 13072, "uals": 13073, "▁dying": 13074, "Coll": 13075, "ammen": 13076, "▁Policy": 13077, "ERCHANTABILITY": 13078, "▁Collection": 13079, "▁vec": 13080, "▁Dick": 13081, "stud": 13082, "▁layers": 13083, "▁tied": 13084, "}\\\\": 13085, "▁alors": 13086, "▁jou": 13087, "▁chicken": 13088, "▁permanent": 13089, "▁Everything": 13090, "▁Low": 13091, "▁Cook": 13092, "▁peak": 13093, "▁PARTICULAR": 13094, "▁dear": 13095, "ič": 13096, "▁introduce": 13097, "▁causing": 13098, "писа": 13099, "Bound": 13100, "hund": 13101, "multi": 13102, "▁pare": 13103, "annt": 13104, "▁breat": 13105, "▁commitment": 13106, "▁increasingly": 13107, "кой": 13108, "▁Friend": 13109, "▁statistics": 13110, "▁Manager": 13111, "plicate": 13112, "Cloud": 13113, "aci": 13114, "▁Conference": 13115, "Span": 13116, "▁CEO": 13117, "▁Wait": 13118, "▁Ober": 13119, "ifting": 13120, "imiento": 13121, "getElement": 13122, "▁gle": 13123, "лия": 13124, "▁wieder": 13125, "▁instruction": 13126, "gly": 13127, "▁blame": 13128, "▁listade": 13129, "▁aapt": 13130, "▁Lewis": 13131, "Fragment": 13132, "▁gear": 13133, "mill": 13134, "prod": 13135, "▁burning": 13136, "ється": 13137, "▁mé": 13138, "ène": 13139, "▁complicated": 13140, "bh": 13141, "▁Justice": 13142, "▁tested": 13143, "▁staring": 13144, "▁survive": 13145, "▁cous": 13146, "▁rib": 13147, "aml": 13148, "▁Trust": 13149, "▁cad": 13150, "▁Terr": 13151, "▁mapping": 13152, "▁twelve": 13153, "▁grant": 13154, "▁thorough": 13155, "▁Ü": 13156, "▁folks": 13157, "▁Content": 13158, "▁childhood": 13159, "cker": 13160, "сно": 13161, "RECT": 13162, "▁finale": 13163, "▁shower": 13164, "éric": 13165, "▁spat": 13166, "odge": 13167, "рь": 13168, "▁pes": 13169, "eda": 13170, "Db": 13171, "▁Antonio": 13172, "▁engaged": 13173, "▁vess": 13174, "vals": 13175, "▁electronic": 13176, "lemma": 13177, "▁Wy": 13178, "mad": 13179, "merge": 13180, "apon": 13181, "▁privile": 13182, "▁novembre": 13183, "▁Sports": 13184, "will": 13185, "▁controls": 13186, "▁categories": 13187, "▁Georgia": 13188, "ipedia": 13189, "▁AV": 13190, "atori": 13191, "▁___": 13192, "▁À": 13193, "▁Ryan": 13194, "▁Charlie": 13195, "▁исто": 13196, "▁emotion": 13197, "▁cooking": 13198, "▁attempts": 13199, "▁FITNESS": 13200, "äter": 13201, "Enable": 13202, "DT": 13203, "▁Change": 13204, "AspNet": 13205, "▁га": 13206, "▁ordinary": 13207, "▁SQL": 13208, "plane": 13209, "%.": 13210, "▁Summer": 13211, "▁avait": 13212, "upp": 13213, "▁illness": 13214, "UINT": 13215, ">{": 13216, "▁zwischen": 13217, "▁hardware": 13218, "▁sounded": 13219, "equiv": 13220, "▁piano": 13221, "uset": 13222, "kn": 13223, "TRY": 13224, "▁bab": 13225, "нен": 13226, "▁reliable": 13227, "▁Bronnen": 13228, "▁Store": 13229, "Az": 13230, "▁»,": 13231, "Static": 13232, "dw": 13233, "green": 13234, "▁'';": 13235, "lij": 13236, "eva": 13237, "ній": 13238, "▁Syd": 13239, "inois": 13240, "convert": 13241, "▁declare": 13242, "bres": 13243, "INK": 13244, "itled": 13245, "▁accord": 13246, "▁mars": 13247, "Sequence": 13248, "zip": 13249, "▁Brazil": 
13250, "▁meetings": 13251, "▁accuracy": 13252, "▁Machine": 13253, "▁autor": 13254, "▁ainsi": 13255, "Simple": 13256, "Resources": 13257, "каза": 13258, "▁MP": 13259, "they": 13260, "▁Bang": 13261, "▁eing": 13262, "ateful": 13263, "▁Something": 13264, "▁upset": 13265, "History": 13266, "dimensional": 13267, "▁explanation": 13268, "▁civ": 13269, "▁conce": 13270, "▁köz": 13271, "▁promised": 13272, "жду": 13273, "wed": 13274, "Fore": 13275, "Amount": 13276, "abb": 13277, "▁clothing": 13278, "лись": 13279, "oen": 13280, "▁Print": 13281, "▁sizes": 13282, "▁banks": 13283, "ribed": 13284, "▁'../": 13285, "FIX": 13286, "▁Hug": 13287, "▁zn": 13288, "▁INT": 13289, "▁instances": 13290, "▁alongside": 13291, "Namespace": 13292, "▁renew": 13293, "▁asc": 13294, "▁waves": 13295, "▁pom": 13296, "Duration": 13297, "days": 13298, "$(": 13299, "▁grabbed": 13300, "▁surgery": 13301, "▁restore": 13302, "Normal": 13303, "▁Leb": 13304, "▁analyt": 13305, "Literal": 13306, "HA": 13307, "▁shares": 13308, "illet": 13309, "ols": 13310, "▁Dog": 13311, "orno": 13312, "▁manip": 13313, "jav": 13314, "▁essentially": 13315, "▁casual": 13316, "opl": 13317, "▁р": 13318, "▁SU": 13319, "▁engineering": 13320, "▁Prime": 13321, "▁SW": 13322, "▁reaching": 13323, "▁вла": 13324, "▁Росси": 13325, "▁Kre": 13326, "erry": 13327, "▁oppon": 13328, "program": 13329, "emper": 13330, "isEmpty": 13331, "▁Unit": 13332, "INTER": 13333, "ethe": 13334, "zd": 13335, "CUR": 13336, "▁vm": 13337, "conv": 13338, "ropol": 13339, "▁Coast": 13340, "▁Select": 13341, "▁была": 13342, "▁Ve": 13343, "owy": 13344, "▁myth": 13345, "ceptions": 13346, "classes": 13347, "▁worden": 13348, "▁assault": 13349, "▁dual": 13350, "ORK": 13351, "▁inches": 13352, "▁FA": 13353, "▁Station": 13354, "▁personality": 13355, "▁scar": 13356, "▁regime": 13357, "▁noten": 13358, "▁rural": 13359, "iza": 13360, "Audio": 13361, "▁disput": 13362, "▁aver": 13363, "▁obst": 13364, "▁Region": 13365, "utf": 13366, "▁Cass": 13367, "hspace": 13368, "▁shipping": 13369, "iko": 13370, "icked": 13371, "numer": 13372, "дна": 13373, "riel": 13374, "disabled": 13375, "opol": 13376, "looking": 13377, "▁classical": 13378, "▁constructed": 13379, "▁referenties": 13380, "]+": 13381, "▁captured": 13382, "▁minimal": 13383, "▁sock": 13384, "father": 13385, "isión": 13386, "▁equally": 13387, "▁reduction": 13388, "Ant": 13389, "aison": 13390, "▁argue": 13391, "circle": 13392, "▁toler": 13393, "}\",": 13394, "▁primarily": 13395, "usal": 13396, "▁algebra": 13397, "▁gathered": 13398, "▁Remember": 13399, "_);": 13400, "UTE": 13401, "▁Kit": 13402, "Sy": 13403, "HEAD": 13404, "▁recipe": 13405, "▁scenario": 13406, "▁Following": 13407, "VAR": 13408, "▁yard": 13409, "▁stad": 13410, "*(": 13411, "▁validate": 13412, "DEX": 13413, "▁committee": 13414, "▁temporary": 13415, "▁consequences": 13416, "▁également": 13417, "ктив": 13418, "▁ra": 13419, "▁displ": 13420, "▁apps": 13421, "▁Teil": 13422, "▁».": 13423, "▁adopted": 13424, "tensor": 13425, "▁femin": 13426, "▁мар": 13427, "логи": 13428, "tech": 13429, "▁Rot": 13430, "▁knees": 13431, "phys": 13432, "owej": 13433, "▁Oxford": 13434, "анд": 13435, "hell": 13436, "ografia": 13437, "▁exposed": 13438, "ktop": 13439, "oby": 13440, "lower": 13441, "▁Senate": 13442, "▁sword": 13443, "Flow": 13444, "▁Unfortunately": 13445, "▁boxes": 13446, "▁cuando": 13447, "▁pilot": 13448, "▁Album": 13449, "Bal": 13450, "Sort": 13451, "FIELD": 13452, "▁desert": 13453, "COMM": 13454, "rons": 13455, "adows": 13456, "▁loyal": 13457, "▁asset": 13458, "▁mud": 13459, "фа": 13460, "▁secondary": 13461, "▁Ар": 
13462, "▁cul": 13463, "▁Asian": 13464, "▁staying": 13465, "▁dataset": 13466, "▁USE": 13467, "▁loves": 13468, "▁velocity": 13469, "áv": 13470, "▁purchased": 13471, "SOC": 13472, "▁competitive": 13473, "▁Football": 13474, "iska": 13475, "▁knock": 13476, "stairs": 13477, "azy": 13478, "▁vend": 13479, "▁arts": 13480, "▁Bras": 13481, "uela": 13482, "кто": 13483, "trim": 13484, "▁dirty": 13485, "▁websites": 13486, "▁Indep": 13487, "▁стра": 13488, "sr": 13489, "▁ticket": 13490, "atile": 13491, "▁implemented": 13492, "▁время": 13493, "▁bowl": 13494, "DATE": 13495, "▁alter": 13496, "▁Space": 13497, "▁accompan": 13498, "ordon": 13499, "▁doctors": 13500, "istas": 13501, "Cast": 13502, "дом": 13503, "CTL": 13504, "urers": 13505, "▁ingredients": 13506, "▁calculated": 13507, "▁leather": 13508, "▁sensitive": 13509, "▁suspic": 13510, "stan": 13511, "▁anni": 13512, "await": 13513, "▁Franç": 13514, "▁abort": 13515, "▁Spirit": 13516, "▁Walter": 13517, "unkt": 13518, "▁vertical": 13519, "ORS": 13520, "best": 13521, "▁Client": 13522, "itated": 13523, "▁ва": 13524, "▁Č": 13525, "▁ville": 13526, "▁diplom": 13527, "orne": 13528, "▁bars": 13529, "Uri": 13530, "APTER": 13531, "pons": 13532, "utz": 13533, "Proto": 13534, "▁stir": 13535, "▁це": 13536, "▁primer": 13537, "igible": 13538, "extra": 13539, "▁Books": 13540, "▁Bos": 13541, "▁Et": 13542, "▁Welt": 13543, "▁Korea": 13544, "рито": 13545, "▁vibr": 13546, "Self": 13547, "linear": 13548, "об": 13549, "▁Lang": 13550, "▁deeper": 13551, "▁termin": 13552, "enschaft": 13553, "▁році": 13554, "ammed": 13555, "visible": 13556, "▁IOException": 13557, "▁Wind": 13558, "usqu": 13559, "▁Stop": 13560, "▁орга": 13561, "INVALID": 13562, "▁cub": 13563, "▁jew": 13564, "▁captain": 13565, "зі": 13566, "chunk": 13567, "apture": 13568, "ashboard": 13569, "▁divided": 13570, "▁extensive": 13571, "▁suffer": 13572, "▁heading": 13573, "created": 13574, "▁quietly": 13575, "▁ny": 13576, "▁пол": 13577, "\"+": 13578, "ikan": 13579, "▁designs": 13580, "zu": 13581, "}+\\": 13582, "Operator": 13583, "▁Lemma": 13584, "▁нау": 13585, "acji": 13586, "лове": 13587, "Servlet": 13588, "▁Kevin": 13589, "stage": 13590, "bn": 13591, "textwidth": 13592, "failed": 13593, "▁Staff": 13594, "▁enem": 13595, "unde": 13596, "ень": 13597, "Packet": 13598, "▁Als": 13599, "kar": 13600, "]['": 13601, "ked": 13602, "Pers": 13603, ">::": 13604, "▁arc": 13605, "▁synt": 13606, "SPE": 13607, "▁Да": 13608, "▁Mi": 13609, "▁Moh": 13610, "▁Death": 13611, "browser": 13612, "▁Dave": 13613, "▁succ": 13614, "toggle": 13615, "▁tack": 13616, "Comment": 13617, "eron": 13618, "▁awareness": 13619, "▁hug": 13620, "▁contemporary": 13621, "ulating": 13622, "▁Title": 13623, "▁THIS": 13624, "havior": 13625, "rank": 13626, "▁dozen": 13627, "▁cheese": 13628, "coln": 13629, "▁radius": 13630, "▁dimensions": 13631, "roduction": 13632, "▁adds": 13633, "▁household": 13634, "▁Davis": 13635, "pkg": 13636, "{$": 13637, "▁casino": 13638, "▁Pierre": 13639, "▁objective": 13640, "train": 13641, "▁Michigan": 13642, "payload": 13643, "▁rug": 13644, "▁severe": 13645, "mean": 13646, "▁toss": 13647, "▁embarrass": 13648, "▁Very": 13649, "▁appeal": 13650, "▁Comput": 13651, "▁forgotten": 13652, "▁kernel": 13653, "▁carbon": 13654, "fw": 13655, "▁Су": 13656, "▁Empire": 13657, "▁quote": 13658, "etz": 13659, "▁mini": 13660, "▁pipe": 13661, "▁nous": 13662, "▁Move": 13663, "▁ду": 13664, "▁nervous": 13665, "▁Мар": 13666, "*\r": 13667, "▁Bush": 13668, "▁peer": 13669, "▁Writ": 13670, "▁satisfied": 13671, "▁pulling": 13672, "▁Pur": 13673, "▁Miller": 13674, "▁FL": 13675, 
"amaz": 13676, "▁mile": 13677, "▁Need": 13678, "▁supplies": 13679, "▁año": 13680, "▁pace": 13681, "▁Victoria": 13682, "▁ought": 13683, "▁Player": 13684, "agnostic": 13685, "▁viv": 13686, "▁Patrick": 13687, "▁Š": 13688, "▁Story": 13689, "aca": 13690, "▁mountains": 13691, "CLASS": 13692, "▁fragment": 13693, "▁settlement": 13694, "▁Furthermore": 13695, "▁drivers": 13696, "▁Ju": 13697, "▁были": 13698, "Rows": 13699, "▁impression": 13700, "▁infer": 13701, "▁Expl": 13702, "olute": 13703, "ovan": 13704, "arance": 13705, "CAP": 13706, "▁enforce": 13707, "▁Burn": 13708, "Reset": 13709, "mother": 13710, "▁Battle": 13711, "padding": 13712, "iate": 13713, "▁cried": 13714, "AK": 13715, "uns": 13716, "▁siècle": 13717, "▁Contin": 13718, "bank": 13719, "junit": 13720, "objects": 13721, "Rot": 13722, "issa": 13723, "▁begun": 13724, "*-": 13725, "▁visiting": 13726, "жде": 13727, "▁targets": 13728, "▁Latin": 13729, "ут": 13730, "▁Esc": 13731, "*;": 13732, "ång": 13733, "▁({": 13734, "▁diagram": 13735, "Models": 13736, "▁partnership": 13737, "▁från": 13738, "ulty": 13739, "Pod": 13740, "CALL": 13741, "modal": 13742, "sig": 13743, "itzer": 13744, "itel": 13745, "▁convinced": 13746, "abl": 13747, "стве": 13748, "▁cot": 13749, "▁repeat": 13750, "▁lists": 13751, "sound": 13752, "▁royal": 13753, "▁grace": 13754, "▁oraz": 13755, "Notification": 13756, "prite": 13757, "▁arrival": 13758, "ancell": 13759, "hentic": 13760, "decode": 13761, "▁fantastic": 13762, "progress": 13763, "proxy": 13764, "ző": 13765, "kel": 13766, "▁convenient": 13767, "aque": 13768, "riet": 13769, "▁Digital": 13770, "iors": 13771, "▁Budd": 13772, "andra": 13773, "addy": 13774, "▁overs": 13775, "▁consumers": 13776, "pn": 13777, "mouse": 13778, "▁BC": 13779, "deg": 13780, "perm": 13781, "ités": 13782, "▁испо": 13783, "heast": 13784, "hour": 13785, "PARAM": 13786, "conscious": 13787, "▁wing": 13788, "▁atmosphere": 13789, "▁gig": 13790, "▁contre": 13791, "▁drama": 13792, "ят": 13793, "▁Front": 13794, "▁philosophy": 13795, "▁Hart": 13796, "▁nurs": 13797, "uras": 13798, "▁Tru": 13799, "▁sud": 13800, "▁performing": 13801, "пы": 13802, "▁confused": 13803, "▁checks": 13804, "amt": 13805, "Make": 13806, "▁RO": 13807, "▁df": 13808, "izations": 13809, "▁degli": 13810, "▁architecture": 13811, "Renderer": 13812, "▁Ла": 13813, "▁ptr": 13814, "▁dieser": 13815, "submit": 13816, "▁topics": 13817, "▁principles": 13818, "vars": 13819, "sock": 13820, "▁tongue": 13821, "▁percentage": 13822, "▁SS": 13823, "▁dol": 13824, "▁rice": 13825, "ío": 13826, "▁Eastern": 13827, "▁recognition": 13828, "▁Ern": 13829, "▁Ut": 13830, "▁caut": 13831, "▁Cloud": 13832, "▁conversion": 13833, "▁Ohio": 13834, "▁ME": 13835, "▁surely": 13836, "▁gard": 13837, "puis": 13838, "▁urg": 13839, "imi": 13840, "▁absence": 13841, "▁winner": 13842, "Language": 13843, "▁HTTP": 13844, "wt": 13845, "▁translation": 13846, "сс": 13847, "▁Kind": 13848, "Two": 13849, "▁Revolution": 13850, "Insert": 13851, "Every": 13852, "orient": 13853, "▁тра": 13854, "▁emotions": 13855, "details": 13856, "▁flu": 13857, "▁operate": 13858, "Ag": 13859, "unning": 13860, "▁partie": 13861, "tri": 13862, "▁golden": 13863, "▁Би": 13864, "▁foundation": 13865, "isten": 13866, "▁Carlos": 13867, "Children": 13868, "▁neighb": 13869, "▁Cart": 13870, "Begin": 13871, "гда": 13872, "▁scheduled": 13873, "'>": 13874, "▁observations": 13875, "▁producer": 13876, "athers": 13877, "ному": 13878, "▁expectations": 13879, "oso": 13880, "zh": 13881, "mutable": 13882, "▁writes": 13883, "▁pushing": 13884, "▁seats": 13885, "▁breast": 13886, "aping": 
13887, "▁Simple": 13888, "▁socket": 13889, "▁slave": 13890, "iley": 13891, "▁assistant": 13892, "▁trim": 13893, "▁landscape": 13894, "▁association": 13895, "quant": 13896, "▁Palest": 13897, "▁sweat": 13898, "engers": 13899, "?_": 13900, "ép": 13901, ">.": 13902, "▁curious": 13903, "▁Component": 13904, "▁replacement": 13905, "раль": 13906, "▁Track": 13907, "▁Remove": 13908, "▁Size": 13909, "peror": 13910, "▁calculate": 13911, "▁sessions": 13912, "▁typed": 13913, "▁submit": 13914, "!!!": 13915, "▁partition": 13916, "eding": 13917, "-----": 13918, "azioni": 13919, "ließ": 13920, "onal": 13921, "▁shru": 13922, "▁REG": 13923, "▁Fac": 13924, "configuration": 13925, "▁было": 13926, "▁Among": 13927, "__);": 13928, "▁Server": 13929, "▁LOG": 13930, "▁cand": 13931, "']);": 13932, "gov": 13933, "▁Six": 13934, "undefined": 13935, "▁ty": 13936, "asa": 13937, "▁particles": 13938, "▁фор": 13939, "``": 13940, "Tube": 13941, "eland": 13942, "fold": 13943, "ogo": 13944, "▁approaches": 13945, "onda": 13946, "agr": 13947, ",$": 13948, "▁{{": 13949, "▁Modern": 13950, "▁Winter": 13951, "available": 13952, "▁Lud": 13953, "▁casa": 13954, "▁Could": 13955, "▁fifteen": 13956, "▁potentially": 13957, "^^": 13958, "▁seit": 13959, "Animation": 13960, "кого": 13961, "Zone": 13962, "elif": 13963, "▁acknowled": 13964, "▁ownership": 13965, "▁describes": 13966, "▁reverse": 13967, "▁contest": 13968, "▁scored": 13969, "▁opposed": 13970, "flex": 13971, "kre": 13972, "▁merge": 13973, "▁covering": 13974, "▁honestly": 13975, "▁Mess": 13976, "▁rarely": 13977, "▁incredible": 13978, "itage": 13979, "▁victims": 13980, "ными": 13981, "wl": 13982, "izza": 13983, "dn": 13984, "onde": 13985, "▁przy": 13986, "▁HTML": 13987, "▁payload": 13988, "Bus": 13989, "usb": 13990, "Fn": 13991, "▁displayed": 13992, "▁ocean": 13993, "▁Avenue": 13994, "acion": 13995, "ghan": 13996, "metric": 13997, "ieties": 13998, "▁attractive": 13999, "▁fö": 14000, "Creat": 14001, "verter": 14002, "▁Alice": 14003, "пол": 14004, "▁fraction": 14005, "▁behaviour": 14006, "▁Jersey": 14007, "▁revenue": 14008, "▁tres": 14009, "ILD": 14010, "▁Ét": 14011, "▁sync": 14012, "wich": 14013, "▁ancest": 14014, "ът": 14015, "omo": 14016, "▁Ide": 14017, "▁gained": 14018, "▁momentum": 14019, "▁Ko": 14020, "ieu": 14021, "ielt": 14022, "▁bonus": 14023, "▁texture": 14024, "Modal": 14025, "NEXT": 14026, "▁године": 14027, "▁languages": 14028, "vt": 14029, "▁representing": 14030, "▁Dream": 14031, "curr": 14032, "qual": 14033, "▁js": 14034, "burn": 14035, "▁contributions": 14036, "▁ric": 14037, "}-\\": 14038, "={{": 14039, "cart": 14040, "FB": 14041, "jud": 14042, "esp": 14043, "▁electron": 14044, "▁ell": 14045, "▁Runtime": 14046, "achel": 14047, "\\_": 14048, "week": 14049, "packet": 14050, "▁Secretary": 14051, "▁Jahrhund": 14052, "▁threshold": 14053, "bage": 14054, "▁concer": 14055, "▁bone": 14056, "▁Hollywood": 14057, "Cursor": 14058, "▁awarded": 14059, "▁summary": 14060, "aggio": 14061, "▁stell": 14062, "▁flesh": 14063, "Pair": 14064, "▁Age": 14065, "ington": 14066, "▁'.": 14067, "aser": 14068, "кова": 14069, "▁quart": 14070, "ryption": 14071, "Alloc": 14072, "ften": 14073, "Operand": 14074, "▁indicated": 14075, "($_": 14076, "getString": 14077, "▁listener": 14078, "spir": 14079, ")_": 14080, "vens": 14081, "▁foods": 14082, "anza": 14083, "teil": 14084, "DESC": 14085, "▁notion": 14086, "▁employment": 14087, "▁swing": 14088, "nbsp": 14089, "▁pounds": 14090, "tools": 14091, "▁participate": 14092, "▁Tax": 14093, "▁скла": 14094, "apol": 14095, "▁fost": 14096, "compat": 14097, "▁publication": 
14098, "▁rapidly": 14099, "▁Wis": 14100, "EventListener": 14101, "▁première": 14102, "uso": 14103, "extend": 14104, "▁MERCHANTABILITY": 14105, "UTF": 14106, "▁experiments": 14107, "single": 14108, "zk": 14109, "▁naj": 14110, "}}}": 14111, "Lin": 14112, "▁interact": 14113, "▁cms": 14114, "▁Roger": 14115, "▁Ру": 14116, ">'": 14117, "commit": 14118, "лось": 14119, "▁outcome": 14120, "▁hits": 14121, "▁им": 14122, "▁spark": 14123, "console": 14124, "▁verw": 14125, "▁като": 14126, "agnostics": 14127, "▁soci": 14128, "▁dining": 14129, "▁tech": 14130, "št": 14131, "folio": 14132, "ultane": 14133, "ктор": 14134, "▁Brand": 14135, "Join": 14136, "▁ию": 14137, "▁pros": 14138, "▁posit": 14139, "Public": 14140, "AspNetCore": 14141, "▁Shop": 14142, "▁coinc": 14143, "нием": 14144, "▁references": 14145, "about": 14146, "namespace": 14147, "DL": 14148, "▁IR": 14149, "▁cada": 14150, "▁Jordan": 14151, "▁gep": 14152, "▁bron": 14153, "andidate": 14154, "EXPECT": 14155, "amo": 14156, "▁Deutsch": 14157, "auc": 14158, "▁райо": 14159, "▁Labor": 14160, "▁surrounded": 14161, "тро": 14162, "▁nome": 14163, "▁underlying": 14164, "▁educational": 14165, "RIGHT": 14166, "COUNT": 14167, "inch": 14168, "Typ": 14169, "umph": 14170, "four": 14171, "Controls": 14172, "▁cp": 14173, "cost": 14174, "▁mechanism": 14175, "eness": 14176, "équ": 14177, "▁acquired": 14178, "▁falls": 14179, "▁Hou": 14180, "▁LE": 14181, "forEach": 14182, "▁vertex": 14183, "▁IF": 14184, "curs": 14185, "'=>": 14186, "тери": 14187, "▁SA": 14188, "riers": 14189, "▁uw": 14190, "▁marks": 14191, "▁energ": 14192, "hof": 14193, "ylvania": 14194, "▁Allen": 14195, "umpy": 14196, "ого": 14197, "ству": 14198, "voice": 14199, "▁engage": 14200, "▁mant": 14201, "orse": 14202, "===": 14203, "▁improvement": 14204, "Opt": 14205, "▁arrested": 14206, "тия": 14207, "▁сле": 14208, "itched": 14209, "socket": 14210, "▁cycl": 14211, "▁SM": 14212, "▁Sex": 14213, "▁neutral": 14214, "вав": 14215, "▁Jess": 14216, "▁dip": 14217, "▁opposition": 14218, "▁borrow": 14219, "спе": 14220, "▁avant": 14221, "кола": 14222, "▁ta": 14223, "Anim": 14224, "▁Gall": 14225, "rgb": 14226, "▁guilty": 14227, "▁buried": 14228, "▁gy": 14229, "Initial": 14230, "▁accomp": 14231, "▁breathing": 14232, "berry": 14233, "GRO": 14234, "▁subsequent": 14235, "roupe": 14236, "ulpt": 14237, "tb": 14238, "▁ä": 14239, "Pi": 14240, "argv": 14241, "▁Must": 14242, ":'": 14243, "svg": 14244, "oup": 14245, "▁precisely": 14246, "▁Ta": 14247, "rena": 14248, "▁folder": 14249, "▁Channel": 14250, "▁revol": 14251, "Miss": 14252, "лом": 14253, "reddit": 14254, "adelph": 14255, "▁discrim": 14256, "▁ave": 14257, "pleted": 14258, "▁gently": 14259, "FFFF": 14260, "ropy": 14261, "▁dial": 14262, "NotFound": 14263, "▁\"[": 14264, "Home": 14265, "onte": 14266, "▁relie": 14267, "▁Context": 14268, "▁stats": 14269, "▁Energy": 14270, "ounced": 14271, "▁grave": 14272, "▁recip": 14273, "лин": 14274, "blog": 14275, "▁naam": 14276, "▁wo": 14277, "▁directions": 14278, "▁Lincoln": 14279, "!)": 14280, "unci": 14281, "neq": 14282, "Tags": 14283, "▁tum": 14284, "▁saving": 14285, "aille": 14286, "itemize": 14287, "▁Famil": 14288, "msm": 14289, "news": 14290, "FFER": 14291, "▁Dead": 14292, "▁territory": 14293, "▁Kat": 14294, "ocker": 14295, "integer": 14296, "▁sne": 14297, "▁fails": 14298, "▁français": 14299, "▁introduction": 14300, "▁Grant": 14301, "ycle": 14302, "'].": 14303, "▁vier": 14304, "native": 14305, "▁Kle": 14306, "quote": 14307, "Users": 14308, "▁advis": 14309, "▁gym": 14310, "▁protein": 14311, "ال": 14312, "▁Mai": 14313, "▁providers": 14314, 
"▁soil": 14315, "gui": 14316, "▁Nation": 14317, "reation": 14318, "▁Tab": 14319, "ensis": 14320, "inas": 14321, "▁Scotland": 14322, "▁dispatch": 14323, "union": 14324, "▁bere": 14325, "▁Pow": 14326, "▁Hig": 14327, "▁studying": 14328, "REF": 14329, "SSL": 14330, "▁fright": 14331, "▁SORT": 14332, "▁compr": 14333, "▁Madrid": 14334, "rowned": 14335, "opes": 14336, "pdev": 14337, "▁wash": 14338, "▁'../../": 14339, "}}_": 14340, "▁accum": 14341, "rolling": 14342, "▁NC": 14343, "▁fiction": 14344, "ipt": 14345, "connected": 14346, "limits": 14347, "▁lap": 14348, "▁whereas": 14349, "prom": 14350, "▁appointment": 14351, "Program": 14352, "▁Пер": 14353, "nah": 14354, "Validation": 14355, "icons": 14356, "äll": 14357, "▁radical": 14358, "▁exclusive": 14359, "emony": 14360, "▁challenging": 14361, "▁ms": 14362, "▁Private": 14363, "▁vida": 14364, "▁други": 14365, "▁campus": 14366, "forms": 14367, "дно": 14368, "plaat": 14369, "bst": 14370, "ATED": 14371, "▁Abstract": 14372, "▁intense": 14373, "▁Ltd": 14374, "▁controvers": 14375, "óg": 14376, "▁să": 14377, "▁landing": 14378, "!=": 14379, "▁scenes": 14380, "▁Chap": 14381, "▁spoken": 14382, "cred": 14383, "▁pride": 14384, "quet": 14385, "▁meter": 14386, "▁deutsch": 14387, "uum": 14388, "▁bless": 14389, "▁Hann": 14390, "▁inputs": 14391, "▁Row": 14392, "▁withdraw": 14393, "Pal": 14394, "acles": 14395, "assets": 14396, "▁vl": 14397, "веде": 14398, "▁Got": 14399, "▁airport": 14400, "wind": 14401, "▁Columbia": 14402, "▁chocolate": 14403, "▁hö": 14404, "▁alarm": 14405, "FTWARE": 14406, "▁Jay": 14407, "▁sake": 14408, "▁registration": 14409, "vid": 14410, "▁lake": 14411, "▁username": 14412, "▁hack": 14413, "indexOf": 14414, "cx": 14415, "▁festival": 14416, "▁clubs": 14417, "cases": 14418, "CTRL": 14419, "];\r": 14420, "▁Aud": 14421, "▁primera": 14422, "ват": 14423, "▁brilliant": 14424, "uther": 14425, "▁difficulty": 14426, "itals": 14427, "▁scores": 14428, "▁polít": 14429, "database": 14430, "aska": 14431, "▁######": 14432, "▁acid": 14433, "aton": 14434, "atomic": 14435, "freq": 14436, "▁WARRANTY": 14437, "▁reporting": 14438, ".),": 14439, "▁nights": 14440, "▁programme": 14441, ")}{": 14442, "xic": 14443, "▁spo": 14444, "lined": 14445, "quarters": 14446, "eree": 14447, "mers": 14448, "▁serves": 14449, "cow": 14450, "лько": 14451, "enso": 14452, "▁environ": 14453, "Like": 14454, "anche": 14455, "▁crash": 14456, "▁Kap": 14457, "noindent": 14458, "Conn": 14459, "▁авто": 14460, "▁infrastructure": 14461, "IME": 14462, "▁Room": 14463, "need": 14464, "orer": 14465, "▁Dest": 14466, "▁Domin": 14467, "atherine": 14468, "▁Sydney": 14469, "▁gauge": 14470, "▁jet": 14471, "bably": 14472, "▁commonly": 14473, "▁stations": 14474, "iah": 14475, "nl": 14476, "жу": 14477, "eten": 14478, "_)": 14479, "iac": 14480, "amos": 14481, "nement": 14482, "kon": 14483, "Interval": 14484, "▁cabin": 14485, "▁eg": 14486, "▁shots": 14487, "▁Area": 14488, "smith": 14489, "parameter": 14490, "'}": 14491, "▁hem": 14492, "▁singing": 14493, "▁accessible": 14494, "▁Prin": 14495, "optional": 14496, "ancial": 14497, "ships": 14498, "▁canvas": 14499, "spe": 14500, "▁addresses": 14501, "▁xml": 14502, "▁'\"": 14503, "▁kar": 14504, "öff": 14505, "▁ages": 14506, "ёр": 14507, "zing": 14508, "▁över": 14509, "▁Clean": 14510, "▁Silver": 14511, "▁осо": 14512, "health": 14513, "Ali": 14514, "▁ts": 14515, "atern": 14516, "▁choosing": 14517, "▁burned": 14518, "brid": 14519, "rooms": 14520, "ött": 14521, "KERN": 14522, "▁dish": 14523, "Sa": 14524, "Detail": 14525, "▁Hind": 14526, "▁Dans": 14527, "ię": 14528, "▁Jahren": 
14529, "extension": 14530, "allas": 14531, "▁Billy": 14532, "usammen": 14533, "itud": 14534, "geon": 14535, "Temp": 14536, "Leg": 14537, "ittel": 14538, "addle": 14539, "▁muscle": 14540, "▁scared": 14541, "sson": 14542, "▁denote": 14543, "ieurs": 14544, "▁orange": 14545, "▁hub": 14546, "▁reb": 14547, "edi": 14548, "▁voices": 14549, "Folder": 14550, "▁suspend": 14551, "▁Heart": 14552, "▁scrap": 14553, "▁aggreg": 14554, "▁Guide": 14555, "transaction": 14556, "▁riding": 14557, "▁vá": 14558, "▁breed": 14559, "▁concert": 14560, "approx": 14561, "▁chances": 14562, "Tok": 14563, "Eq": 14564, "parts": 14565, "▁scholar": 14566, "offs": 14567, "flush": 14568, "!”": 14569, "▁login": 14570, "▁soort": 14571, "▁Mand": 14572, "▁functional": 14573, "▁Bou": 14574, "▁subjects": 14575, "mys": 14576, "▁extraord": 14577, "▁Building": 14578, "ikt": 14579, "Bad": 14580, "iami": 14581, "Driver": 14582, "ête": 14583, "▁kv": 14584, "▁timer": 14585, "itionally": 14586, "▁athlet": 14587, "▁\");": 14588, "wy": 14589, "CFG": 14590, "▁heaven": 14591, "ов": 14592, "▁experimental": 14593, "▁bounds": 14594, "ICK": 14595, "▁excit": 14596, "▁quit": 14597, "▁universal": 14598, "дь": 14599, "▁SP": 14600, "▁stub": 14601, "▁kle": 14602, "▁Bart": 14603, "▁\"@": 14604, "pel": 14605, "▁(!(": 14606, "▁selector": 14607, "EB": 14608, "▁coc": 14609, "eted": 14610, "ють": 14611, "▁possess": 14612, "▁Rick": 14613, "▁unusual": 14614, "termin": 14615, "▁bags": 14616, "▁loading": 14617, "▁tf": 14618, "▁)\r": 14619, "provider": 14620, "pletion": 14621, "▁cursor": 14622, "▁paused": 14623, "им": 14624, "▁counsel": 14625, "]<": 14626, "zech": 14627, "▁tie": 14628, "▁Moon": 14629, "▁armed": 14630, "▁observe": 14631, "▁permet": 14632, "▁Job": 14633, "för": 14634, "argument": 14635, "▁eggs": 14636, "ást": 14637, "▁incredibly": 14638, "werken": 14639, "izard": 14640, "▁painted": 14641, "▁Vietnam": 14642, "▁violent": 14643, "Est": 14644, "ierra": 14645, "reader": 14646, "weise": 14647, "▁Josh": 14648, "▁Him": 14649, "ashes": 14650, "origin": 14651, "▁spir": 14652, "▁Tree": 14653, "▁niet": 14654, "WIN": 14655, "margin": 14656, "▁involves": 14657, "▁organis": 14658, "▁Nacional": 14659, "bara": 14660, "▁depuis": 14661, "pio": 14662, "features": 14663, "stru": 14664, "▁Disney": 14665, "▁restaurants": 14666, "Mill": 14667, "))\r": 14668, "сла": 14669, "remote": 14670, "▁Third": 14671, "▁baseball": 14672, "▁algun": 14673, "]$": 14674, "▁employed": 14675, "pot": 14676, "▁UnityEngine": 14677, "▁integration": 14678, "▁risks": 14679, "▁stro": 14680, "▁agosto": 14681, "including": 14682, "▁Mind": 14683, "▁stroke": 14684, "▁deals": 14685, "ajax": 14686, "ёт": 14687, "▁\\|": 14688, "tar": 14689, "adelphia": 14690, "▁sab": 14691, "pur": 14692, "▁screw": 14693, "▁inev": 14694, "▁\\;": 14695, "▁Donald": 14696, "öd": 14697, "cca": 14698, "esis": 14699, "▁separated": 14700, "DBG": 14701, "agent": 14702, "▁packed": 14703, "ння": 14704, "intern": 14705, "▁Monte": 14706, "▁province": 14707, "▁expanded": 14708, "▁approached": 14709, "▁Ep": 14710, "CLK": 14711, "▁ore": 14712, "Batch": 14713, "▁impressive": 14714, "RM": 14715, "▁Location": 14716, "▁shame": 14717, "wrapper": 14718, "unwrap": 14719, "peer": 14720, "Bits": 14721, "▁SN": 14722, "scar": 14723, "Come": 14724, "▁council": 14725, "▁shouted": 14726, "making": 14727, "▁Maur": 14728, "▁wis": 14729, "LETE": 14730, "▁fs": 14731, "▁dz": 14732, "unque": 14733, "uego": 14734, "Random": 14735, "Html": 14736, "zem": 14737, "▁Dutch": 14738, "▁Golden": 14739, "▁Tar": 14740, "▁Herm": 14741, "▁stretch": 14742, "vard": 14743, 
"▁tries": 14744, "WI": 14745, "▁disappeared": 14746, "▁crusher": 14747, "▁Kan": 14748, "Mag": 14749, "ør": 14750, "▁Cambridge": 14751, "▁dopo": 14752, "atura": 14753, "heart": 14754, "▁Spiel": 14755, "/**\r": 14756, "Direction": 14757, "atting": 14758, "wig": 14759, "▁codes": 14760, "▁powder": 14761, "alert": 14762, "sembl": 14763, "▁ye": 14764, "Star": 14765, "▁roots": 14766, "▁Holl": 14767, "Rele": 14768, "▁constitu": 14769, "nc": 14770, "“.": 14771, "reference": 14772, "ificial": 14773, "closure": 14774, "▁figured": 14775, "▁assumption": 14776, "getElementById": 14777, "▁AG": 14778, "oses": 14779, "▁_\"": 14780, "epper": 14781, "obre": 14782, "enumerate": 14783, "ографи": 14784, "▁lessons": 14785, "▁qualified": 14786, "Person": 14787, "anse": 14788, "▁Mort": 14789, "sylvania": 14790, "▁cré": 14791, "Binding": 14792, "іс": 14793, "▁Vari": 14794, "▁reminded": 14795, "▁membership": 14796, "iper": 14797, "zte": 14798, "▁cref": 14799, "▁PA": 14800, "plaatst": 14801, "▁Environment": 14802, "boy": 14803, "▁phrase": 14804, "rivial": 14805, "rag": 14806, "води": 14807, "▁pse": 14808, "▁tournament": 14809, ")},": 14810, "▁Sound": 14811, "▁Vel": 14812, "▁Berg": 14813, "elson": 14814, "▁refuge": 14815, "▁elsewhere": 14816, "quality": 14817, "▁abandoned": 14818, "▁Flo": 14819, "ibil": 14820, "UAL": 14821, "▁Platz": 14822, "▁delta": 14823, "▁Buy": 14824, "rière": 14825, "▁flour": 14826, "▁laughing": 14827, "▁Looking": 14828, "Agent": 14829, "▁wx": 14830, "▁Wales": 14831, "Ctx": 14832, "▁cake": 14833, "▁crate": 14834, "▁кла": 14835, "anga": 14836, "Zero": 14837, "▁amounts": 14838, "Tra": 14839, "ometric": 14840, "▁constraints": 14841, "▁temple": 14842, "▁installation": 14843, "stroke": 14844, "▁Neder": 14845, "ți": 14846, "▁Ibid": 14847, "▁obs": 14848, "entries": 14849, "▁jusqu": 14850, "ORM": 14851, "▁Sky": 14852, "ikes": 14853, "nak": 14854, "▁modes": 14855, "▁Hitler": 14856, "▁belt": 14857, "▁pointing": 14858, "▁Ban": 14859, "ignore": 14860, "▁persu": 14861, "▁Besides": 14862, "ynom": 14863, "▁legis": 14864, "▁CPU": 14865, "anded": 14866, "uis": 14867, "bsite": 14868, "▁Euro": 14869, "▁utter": 14870, "eclipse": 14871, "▁irre": 14872, "▁Document": 14873, "▁Meanwhile": 14874, "▁familie": 14875, "verify": 14876, "▁Jason": 14877, "▁Ort": 14878, "▁ciudad": 14879, "▁technologies": 14880, "▁части": 14881, "nica": 14882, "cancel": 14883, "Virtual": 14884, "▁evident": 14885, "aman": 14886, "▁Supreme": 14887, "atoes": 14888, "▁steady": 14889, "▁monthly": 14890, "▁SOFTWARE": 14891, "Die": 14892, "▁applying": 14893, "Dig": 14894, "vier": 14895, "▁горо": 14896, "▁WH": 14897, "▁minds": 14898, "▁kam": 14899, "▁expertise": 14900, "▁notification": 14901, ".-": 14902, "▁deliber": 14903, "▁HE": 14904, "▁resist": 14905, "outes": 14906, "▁Howard": 14907, "special": 14908, "▁presentation": 14909, "▁YouTube": 14910, "mir": 14911, "▁rust": 14912, "▁nations": 14913, "▁Gets": 14914, "▁responses": 14915, "arded": 14916, "immer": 14917, "▁reveal": 14918, "▁Meg": 14919, "▁todos": 14920, "▁ade": 14921, "ategories": 14922, "▁payments": 14923, "ôt": 14924, "Enumer": 14925, "▁platforms": 14926, "▁lifetime": 14927, "Complete": 14928, "Quest": 14929, "enders": 14930, "▁cum": 14931, "pler": 14932, "▁appl": 14933, "ährend": 14934, "зь": 14935, "enez": 14936, "overty": 14937, "ynchron": 14938, "▁argued": 14939, "▁Kath": 14940, "▁synchron": 14941, "▁Builder": 14942, "Border": 14943, "Plan": 14944, "rieb": 14945, "nm": 14946, "FORMAT": 14947, "usk": 14948, "▁jumped": 14949, "charg": 14950, "▁contribute": 14951, "Mesh": 14952, "Univers": 
14953, "rell": 14954, "▁polar": 14955, "▁trois": 14956, "icio": 14957, "Groups": 14958, "▁(%": 14959, "Loop": 14960, "▁gaz": 14961, "dbg": 14962, "LAY": 14963, "John": 14964, "blocks": 14965, "▁lung": 14966, "▁kön": 14967, "through": 14968, "▁fifth": 14969, "lisher": 14970, "▁involving": 14971, "▁Deep": 14972, "▁области": 14973, "▁sull": 14974, "Export": 14975, "▁Kate": 14976, "period": 14977, "charge": 14978, "GT": 14979, "\">\r": 14980, "тин": 14981, "▁Ott": 14982, "▁interactions": 14983, "▁Toronto": 14984, "TRACE": 14985, "▁difer": 14986, "▁liberal": 14987, "▁particle": 14988, "▁surve": 14989, "alous": 14990, "reason": 14991, "▁depression": 14992, "ал": 14993, "▁flower": 14994, "▁waar": 14995, "▁hade": 14996, "▁centuries": 14997, "uty": 14998, "party": 14999, "▁approval": 15000, "generate": 15001, "▁Barn": 15002, "▁marg": 15003, "▁monde": 15004, "▁ook": 15005, "▁Clark": 15006, "▁theoret": 15007, "viously": 15008, "?)": 15009, "▁Rud": 15010, "stmt": 15011, "inction": 15012, "▁tun": 15013, "▁roads": 15014, "▁rotation": 15015, "ppen": 15016, "sensor": 15017, "▁Kol": 15018, "idelines": 15019, "▁є": 15020, "▁composed": 15021, "▁virus": 15022, "'$": 15023, "SN": 15024, "▁Von": 15025, "mont": 15026, "lar": 15027, "▁opinions": 15028, "uction": 15029, "rupal": 15030, "underline": 15031, "▁horror": 15032, "Must": 15033, "otto": 15034, "Should": 15035, "▁statist": 15036, "▁gem": 15037, "▁secre": 15038, "▁strip": 15039, "▁dirt": 15040, "amazon": 15041, "▁Round": 15042, "▁discovery": 15043, "▁GO": 15044, "▁substantial": 15045, "ibt": 15046, "▁demands": 15047, "▁everyday": 15048, "▁besch": 15049, "▁Bridge": 15050, "▁HD": 15051, "▁Dol": 15052, "▁très": 15053, "anni": 15054, "roit": 15055, "());\r": 15056, "far": 15057, "timestamp": 15058, "▁bulk": 15059, "Black": 15060, "▁gan": 15061, "setting": 15062, "retval": 15063, "ване": 15064, "nung": 15065, "▁talks": 15066, "▁scientists": 15067, "▁vig": 15068, "▁quantity": 15069, "▁Gard": 15070, "▁movements": 15071, "ähr": 15072, "lings": 15073, "▁Те": 15074, "team": 15075, "rito": 15076, "▁assembly": 15077, "ilst": 15078, "▁happiness": 15079, "▁leaf": 15080, "▁assessment": 15081, "Coord": 15082, "irs": 15083, "sam": 15084, "▁attorney": 15085, "▁geme": 15086, "IDE": 15087, "▁Vere": 15088, "▁Anthony": 15089, "amiento": 15090, "▁Ast": 15091, "▁circul": 15092, "▁Frances": 15093, "▁pent": 15094, "▁mate": 15095, "▁Transport": 15096, "owo": 15097, "чу": 15098, "istes": 15099, "TRAN": 15100, "IMPORT": 15101, "▁Break": 15102, "▁sons": 15103, "▁investors": 15104, "▁Philipp": 15105, "THOD": 15106, "▁panic": 15107, "▁:)": 15108, "▁detection": 15109, "▁simultane": 15110, "nte": 15111, "▁listened": 15112, "кре": 15113, "▁Brig": 15114, "Optional": 15115, "▁abund": 15116, "▁criteria": 15117, "▁chip": 15118, "▁окру": 15119, "▁Constant": 15120, "▁mining": 15121, "тал": 15122, "mates": 15123, "▁worship": 15124, "router": 15125, "CN": 15126, "▁Match": 15127, "▁Cole": 15128, "▁downt": 15129, "▁holes": 15130, "▁grateful": 15131, "RESULT": 15132, "▁Europa": 15133, "▁consent": 15134, "lä": 15135, "opter": 15136, "▁colleagues": 15137, "orous": 15138, "▁enemies": 15139, "hang": 15140, "actual": 15141, "Objects": 15142, "▁як": 15143, "▁fluid": 15144, "fixed": 15145, "▁Graph": 15146, "▁scratch": 15147, "cers": 15148, "ribu": 15149, "▁validation": 15150, "▁completion": 15151, "▁Begin": 15152, "endpoint": 15153, "rient": 15154, "CM": 15155, "▁Site": 15156, "▁explains": 15157, "tres": 15158, "▁anybody": 15159, "foreach": 15160, "lon": 15161, "Chain": 15162, "▁Buff": 15163, "ocal": 15164, 
"▁Morgan": 15165, "▁sang": 15166, "▁passes": 15167, "@@": 15168, "ijd": 15169, "Word": 15170, "▁Hung": 15171, "▁Fer": 15172, "▁vý": 15173, "bast": 15174, "▁entertainment": 15175, "hin": 15176, "▁grat": 15177, "▁Member": 15178, "▁Minn": 15179, "▁printed": 15180, "▁Franklin": 15181, "▁Imp": 15182, "Machine": 15183, "columns": 15184, "▁deleted": 15185, "▁manufacturing": 15186, "▁rely": 15187, "▁conse": 15188, "▁fishing": 15189, "blo": 15190, "-$": 15191, "▁.\"": 15192, "▁clinical": 15193, "▁Studies": 15194, "▁Бу": 15195, "definition": 15196, "▁evaluation": 15197, "▁attacked": 15198, "▁frozen": 15199, "zent": 15200, "▁últ": 15201, "▁rational": 15202, "othe": 15203, "Cancel": 15204, "history": 15205, "setText": 15206, "▁alc": 15207, "▁hydro": 15208, "▁Theatre": 15209, "▁Material": 15210, "IOException": 15211, "******/": 15212, "spl": 15213, "NODE": 15214, "attrs": 15215, "▁mie": 15216, "▁offices": 15217, "ró": 15218, "▁jam": 15219, "▁Ident": 15220, "vé": 15221, "Setting": 15222, "▁Several": 15223, "▁decay": 15224, "Android": 15225, "▁Save": 15226, "unted": 15227, "▁Mountain": 15228, "usc": 15229, "▁marzo": 15230, "▁asleep": 15231, "▁soldier": 15232, "▁Double": 15233, "PK": 15234, "▁contrad": 15235, "▁wins": 15236, "ceiver": 15237, "▁seasons": 15238, "▁Chall": 15239, "▁healthcare": 15240, "ład": 15241, "от": 15242, "▁Five": 15243, "▁Hell": 15244, "▁worldwide": 15245, "▁',": 15246, "ян": 15247, "made": 15248, "▁responded": 15249, "▁ay": 15250, "▁procedures": 15251, "тера": 15252, "▁cleared": 15253, "\"].": 15254, "▁Target": 15255, "▁Side": 15256, "omin": 15257, "▁deploy": 15258, "▁Tell": 15259, "▁ongoing": 15260, "floor": 15261, "▁bones": 15262, "▁Delete": 15263, "▁shrugged": 15264, "Our": 15265, "Der": 15266, "▁initialize": 15267, "▁Ted": 15268, "MAGE": 15269, "▁hire": 15270, "▁tracking": 15271, "▁ash": 15272, "▁ceiling": 15273, "ках": 15274, "etti": 15275, "▁courage": 15276, "enschapp": 15277, "ются": 15278, "More": 15279, "▁folg": 15280, "▁Grace": 15281, "▁Kelly": 15282, "▁reven": 15283, "▁Ali": 15284, "▁disp": 15285, "▁defeat": 15286, "▁creature": 15287, "▁Kennedy": 15288, "▁Diego": 15289, "EMP": 15290, "▁steam": 15291, "endance": 15292, "rig": 15293, "▁ignor": 15294, "emen": 15295, "▁Gru": 15296, "▁proposal": 15297, "▁weiter": 15298, "▁лі": 15299, "ibles": 15300, "▁consideration": 15301, "▁believes": 15302, "▁Soph": 15303, "“,": 15304, "▁Matthew": 15305, "▁circuit": 15306, "▁singer": 15307, "▁Square": 15308, "ço": 15309, "Edge": 15310, "▁astr": 15311, "▁representative": 15312, "▁comprehensive": 15313, "liga": 15314, "▁mere": 15315, "tbl": 15316, "▁continuing": 15317, "ographer": 15318, "LED": 15319, "▁/***/": 15320, "▁sear": 15321, "▁enormous": 15322, "izi": 15323, "Dit": 15324, "there": 15325, "ін": 15326, "сите": 15327, "▁guerra": 15328, "▁endpoint": 15329, "▁lesson": 15330, "zon": 15331, "variable": 15332, "ис": 15333, "▁researchers": 15334, "▁attempted": 15335, "▁enf": 15336, "тура": 15337, "▁defin": 15338, "вест": 15339, "▁awful": 15340, "▁lowest": 15341, "rules": 15342, "▁unlike": 15343, "interval": 15344, "▁producing": 15345, "▁Kam": 15346, "▁IMP": 15347, "General": 15348, "▁faire": 15349, "▁maxim": 15350, "assemb": 15351, "acent": 15352, "?>": 15353, "plica": 15354, "▁ram": 15355, "mate": 15356, "цу": 15357, "mn": 15358, "▁Hi": 15359, "▁stages": 15360, "▁Editor": 15361, "▁tang": 15362, "RD": 15363, "▁ich": 15364, "▁dependent": 15365, "lifer": 15366, "ascript": 15367, "▁exposure": 15368, "рез": 15369, "▁mart": 15370, "▁Barcel": 15371, "xspace": 15372, "SESSION": 15373, "▁prest": 
15374, "URCE": 15375, "-.": 15376, "▁село": 15377, "have": 15378, "▁observation": 15379, "▁commands": 15380, "▁eager": 15381, "▁outdoor": 15382, "▁DEBUG": 15383, "▁hr": 15384, "AX": 15385, "▁puzz": 15386, "blank": 15387, "бур": 15388, "▁kennis": 15389, "▁regarded": 15390, "▁}),": 15391, "volume": 15392, "▁произ": 15393, "▁Training": 15394, "añ": 15395, "▁fois": 15396, "▁три": 15397, "вня": 15398, "▁optimal": 15399, "▁subscription": 15400, "bridge": 15401, "imental": 15402, "▁Think": 15403, "▁\";": 15404, "▁legisl": 15405, "▁Hop": 15406, "▁branches": 15407, "▁Veg": 15408, "▁sprint": 15409, "▁flux": 15410, "▁Freder": 15411, "sis": 15412, "notify": 15413, "▁Фран": 15414, "som": 15415, "nym": 15416, "▁Ré": 15417, "lett": 15418, "ingham": 15419, "▁Farm": 15420, "DOM": 15421, "▁shield": 15422, "Here": 15423, "▁Treat": 15424, "▁Luke": 15425, "▁unsafe": 15426, "anton": 15427, "▁Imper": 15428, "▁telephone": 15429, "▁unlock": 15430, "Owner": 15431, "collection": 15432, "▁snd": 15433, "▁suiv": 15434, "▁entering": 15435, "шен": 15436, "▁Label": 15437, "selector": 15438, "▁GET": 15439, "▁quando": 15440, "▁fed": 15441, "jQuery": 15442, "Origin": 15443, "▁Alan": 15444, "mathscr": 15445, "▁pregnant": 15446, "Expect": 15447, "resources": 15448, "▁ersten": 15449, "alia": 15450, "▁retired": 15451, "ût": 15452, "Cred": 15453, "▁méd": 15454, "▁erh": 15455, "Framework": 15456, "Slot": 15457, "duration": 15458, "sal": 15459, "▁composition": 15460, "article": 15461, "gpu": 15462, "▁permitted": 15463, "▁Font": 15464, "▁Much": 15465, "▁pending": 15466, "▁agencies": 15467, "Columns": 15468, "▁klik": 15469, "▁rating": 15470, "mind": 15471, "▁Pennsylvania": 15472, "Java": 15473, "abstract": 15474, "▁dumb": 15475, "▁VI": 15476, "usa": 15477, "Remote": 15478, "▁YOU": 15479, "▁Creek": 15480, "мати": 15481, "Bottom": 15482, "▁rolling": 15483, "▁bundle": 15484, "▁golf": 15485, "gpio": 15486, "▁Chair": 15487, "▁cls": 15488, "$}": 15489, "▁Parliament": 15490, "führ": 15491, "Many": 15492, "▁Sep": 15493, "▁badly": 15494, "igi": 15495, "▁Gemeinde": 15496, "Ill": 15497, "▁Ан": 15498, "uart": 15499, "itempty": 15500, "▁Niger": 15501, "▁immigr": 15502, "Super": 15503, "vá": 15504, "istribute": 15505, "Helpers": 15506, "▁waters": 15507, "▁joining": 15508, "omitempty": 15509, "▁Otherwise": 15510, "▁Host": 15511, "▁redd": 15512, "▁dy": 15513, "▁converted": 15514, "▁prayer": 15515, "▁Украї": 15516, "▁elections": 15517, "reb": 15518, "erie": 15519, "▁свя": 15520, "Abs": 15521, "iembre": 15522, "holders": 15523, "▁Rol": 15524, "utschen": 15525, "▁Gh": 15526, "tery": 15527, "анг": 15528, "▁narrative": 15529, "minus": 15530, "▁Iron": 15531, "=\"#": 15532, "▁wand": 15533, "▁wished": 15534, "icode": 15535, "orr": 15536, "[[": 15537, "▁detected": 15538, "▁municipal": 15539, "▁Pour": 15540, "▁Serv": 15541, "citet": 15542, "▁grey": 15543, "▁Rap": 15544, "▁voy": 15545, "▁lleg": 15546, "▁currency": 15547, "▁Script": 15548, "strument": 15549, "▁expecting": 15550, "▁tickets": 15551, "▁bucket": 15552, "egr": 15553, "▁jacket": 15554, "drv": 15555, "▁loans": 15556, "▁kann": 15557, "▁integral": 15558, "▁characteristics": 15559, "(\".": 15560, "▁manual": 15561, "▁dynamics": 15562, ":*": 15563, "sha": 15564, "reens": 15565, "onical": 15566, "▁toile": 15567, "aña": 15568, "▁distant": 15569, "▁handled": 15570, "Bool": 15571, "▁penal": 15572, "▁Things": 15573, "▁prominent": 15574, "▁exped": 15575, "▁Help": 15576, "▁asp": 15577, "lap": 15578, "▁Auth": 15579, "Basic": 15580, "achuset": 15581, "▁Bild": 15582, "▁entitled": 15583, "▁jag": 15584, "▁rejected": 
15585, "▁memor": 15586, "orts": 15587, "▁applies": 15588, "▁Language": 15589, "specific": 15590, "achusetts": 15591, "HAND": 15592, "▁Route": 15593, "market": 15594, "▁Ky": 15595, "▁pose": 15596, "ACHE": 15597, "poll": 15598, "▁rocks": 15599, "bone": 15600, "▁DIS": 15601, "Watch": 15602, "▁smiling": 15603, "рио": 15604, "Month": 15605, "▁efter": 15606, "construct": 15607, "▁bands": 15608, "▁collaboration": 15609, "ними": 15610, "glas": 15611, "▁vy": 15612, "▁engagement": 15613, "__)": 15614, "▁wings": 15615, "ким": 15616, "netje": 15617, "ativa": 15618, "▁Duke": 15619, "лее": 15620, "▁Within": 15621, "▁dove": 15622, "▁cb": 15623, "yers": 15624, "pow": 15625, "[(": 15626, "▁evaluate": 15627, "Points": 15628, "▁рі": 15629, "odigd": 15630, "onomy": 15631, "▁Illinois": 15632, "▁Typ": 15633, "▁coordinates": 15634, "pisode": 15635, "ucked": 15636, "▁flav": 15637, "▁brands": 15638, "▁calendar": 15639, "Lib": 15640, "▁uitgen": 15641, "▁tale": 15642, "▁briefly": 15643, "▁mic": 15644, "RESS": 15645, "▁später": 15646, "▁integrated": 15647, "▁cookies": 15648, "▁uitgenodigd": 15649, "▁Priv": 15650, "▁phenomen": 15651, "▁voegen": 15652, "Supp": 15653, "▁refers": 15654, "пад": 15655, "▁Clinton": 15656, "▁assignment": 15657, "inals": 15658, "▁asym": 15659, "cycle": 15660, "▁Anderson": 15661, "▁binding": 15662, "rique": 15663, "hind": 15664, "▁behalf": 15665, "▁Fle": 15666, "▁breaks": 15667, "▁soap": 15668, "вар": 15669, "▁vä": 15670, "▁crying": 15671, "▁→": 15672, "▁msm": 15673, "▁boots": 15674, "owing": 15675, "▁bell": 15676, "suite": 15677, "▁Bundes": 15678, "Year": 15679, "ndef": 15680, "Other": 15681, "▁google": 15682, "ENCE": 15683, "WER": 15684, "Les": 15685, "Shared": 15686, "▁ED": 15687, "IFT": 15688, "▁floating": 15689, "ým": 15690, "{},": 15691, "Binary": 15692, "▁roce": 15693, "raj": 15694, "▁bewerken": 15695, "BF": 15696, "▁Hur": 15697, "cen": 15698, "▁ere": 15699, "▁camb": 15700, "▁Pakistan": 15701, "▁greatly": 15702, "▁logging": 15703, "/.": 15704, "Tensor": 15705, "▁opens": 15706, "▁Rio": 15707, "▁klikken": 15708, "▁sculpt": 15709, "apore": 15710, "wx": 15711, "▁Nich": 15712, "nan": 15713, "▁injured": 15714, "compare": 15715, "tha": 15716, "Sample": 15717, "Shell": 15718, "▁commander": 15719, "▁receiver": 15720, "▁hopes": 15721, "▁byl": 15722, "▁proxy": 15723, "▁gall": 15724, "getId": 15725, "▁Bab": 15726, "feld": 15727, "▁\"_": 15728, "▁Hab": 15729, "simple": 15730, "▁executed": 15731, "▁ate": 15732, "▁animation": 15733, "▁inhab": 15734, "▁боль": 15735, "▁router": 15736, "▁glob": 15737, "Geplaatst": 15738, "▁beginnetje": 15739, "▁Kur": 15740, "▁Ха": 15741, "aligned": 15742, "▁certificate": 15743, "▁Å": 15744, ".).": 15745, "▁soll": 15746, "▁Import": 15747, "реди": 15748, "▁pandemic": 15749, "▁nic": 15750, "vä": 15751, "▁Gree": 15752, "▁Say": 15753, "▁ді": 15754, "▁Num": 15755, "▁roughly": 15756, "▁después": 15757, "▁​": 15758, "▁specify": 15759, "Mapper": 15760, "licht": 15761, "▁thumb": 15762, "wie": 15763, "▁unlikely": 15764, "▁Edd": 15765, "Hey": 15766, "▁Opt": 15767, "BLOCK": 15768, "вор": 15769, "▁×": 15770, "▁ba": 15771, "▁periods": 15772, "▁titles": 15773, "Med": 15774, "▁fon": 15775, "▁bast": 15776, "▁Forest": 15777, "▁№": 15778, "onds": 15779, "▁fal": 15780, "▁gesch": 15781, "direction": 15782, "IFY": 15783, "▁LA": 15784, "▁(((": 15785, "GTH": 15786, "itudes": 15787, "▁destruction": 15788, "▁Ja": 15789, "▁stake": 15790, "ifferent": 15791, "▁identical": 15792, "▁fog": 15793, "▁Reb": 15794, "ские": 15795, "ступ": 15796, "jax": 15797, "▁Mars": 15798, "▁historic": 15799, "▁Vo": 15800, 
"▁entrepre": 15801, "▁tension": 15802, "▁WHERE": 15803, "▁Philadelphia": 15804, "Counter": 15805, "▁frames": 15806, "▁muy": 15807, "ej": 15808, "öt": 15809, "eu": 15810, "▁челове": 15811, "PROC": 15812, "▁resolved": 15813, "▁tape": 15814, "цион": 15815, "▁singular": 15816, "▁personnel": 15817, "▁Mun": 15818, "▁Occ": 15819, "▁scalar": 15820, "dess": 15821, "▁cable": 15822, "being": 15823, "▁Jenn": 15824, "▁erst": 15825, "Actions": 15826, "Environment": 15827, "via": 15828, "▁struggling": 15829, "▁DVD": 15830, "whe": 15831, "▁throwing": 15832, "Bounds": 15833, "▁MD": 15834, "▁\"../": 15835, "▁satisfy": 15836, "▁Colorado": 15837, "▁Active": 15838, "Tasks": 15839, "<>();": 15840, "▁slipped": 15841, "▁poison": 15842, "zb": 15843, "Dispatch": 15844, "warning": 15845, "▁ultimate": 15846, "picture": 15847, "expression": 15848, "▁Talk": 15849, "▁flick": 15850, "▁raising": 15851, "▁transactions": 15852, "▁glance": 15853, "▁gri": 15854, "▁през": 15855, "selection": 15856, "ња": 15857, "endl": 15858, "▁Abb": 15859, "▁bold": 15860, "▁maintained": 15861, "Exists": 15862, "▁encouraged": 15863, "Qual": 15864, "▁essere": 15865, "▁hired": 15866, "letter": 15867, "itches": 15868, "others": 15869, "▁woj": 15870, "▁injuries": 15871, "▁dil": 15872, "execut": 15873, "▁Steel": 15874, "▁Garden": 15875, "зя": 15876, "\\,\\": 15877, "▁Angel": 15878, "prim": 15879, ">:]<": 15880, "gb": 15881, "peat": 15882, "inte": 15883, "▁apolog": 15884, "▁regulations": 15885, "Src": 15886, "kh": 15887, "Upload": 15888, "mapping": 15889, "▁presents": 15890, "▁poetry": 15891, "▁stops": 15892, "▁Tol": 15893, "▁tower": 15894, "▁OUT": 15895, "Thank": 15896, "▁organic": 15897, "▁drei": 15898, "▁pound": 15899, "century": 15900, "▁modules": 15901, "▁дере": 15902, "▁worn": 15903, "▁parad": 15904, "▁Cos": 15905, "fic": 15906, "▁без": 15907, "▁Jimmy": 15908, "▁lands": 15909, "▁minist": 15910, "vspace": 15911, "▁lighting": 15912, "▁naked": 15913, "▁designer": 15914, "▁Stream": 15915, "TMP": 15916, "Center": 15917, "resentation": 15918, "ONT": 15919, "▁ers": 15920, "▁measurement": 15921, "▁muscles": 15922, "▁Ign": 15923, "▁COM": 15924, "▁fru": 15925, "▁genre": 15926, "▁alpha": 15927, "▁retirement": 15928, "▁Gon": 15929, "ől": 15930, "contents": 15931, "▁healing": 15932, "▁sido": 15933, "incipal": 15934, "Permission": 15935, "рак": 15936, "▁Gordon": 15937, "▁Rank": 15938, "▁Autom": 15939, "Constructor": 15940, "wiki": 15941, "▁concerning": 15942, "rizona": 15943, "▁variant": 15944, "▁arranged": 15945, "▁Spr": 15946, "BPACK": 15947, "Timestamp": 15948, "restore": 15949, "aware": 15950, "▁Observ": 15951, "▁SV": 15952, "ipp": 15953, "▁Executive": 15954, "▁colleg": 15955, "▁explicitly": 15956, "written": 15957, "▁Kön": 15958, "irus": 15959, "▁Hold": 15960, "▁Pract": 15961, "Character": 15962, "▁redistribute": 15963, "uerto": 15964, "▁Student": 15965, "▁elder": 15966, "▁Dop": 15967, "vp": 15968, "▁Hub": 15969, "▁grounds": 15970, "▁Ry": 15971, "▁signals": 15972, "▁gifts": 15973, "▁strengthen": 15974, "▁Lyn": 15975, "commun": 15976, "▁най": 15977, "▁finance": 15978, "noc": 15979, "helm": 15980, "▁cuts": 15981, "▁adventure": 15982, "▁Ric": 15983, "▁intellectual": 15984, "▁Output": 15985, "▁awk": 15986, "▁concentration": 15987, "▁guidance": 15988, "Buff": 15989, "▁filling": 15990, "▁regul": 15991, "▁delicious": 15992, "([]": 15993, "ших": 15994, "▁tons": 15995, "activity": 15996, "GP": 15997, "LOB": 15998, "stadt": 15999, "tal": 16000, "▁img": 16001, "▁rush": 16002, "attice": 16003, "▁pok": 16004, "steps": 16005, "▁lid": 16006, "▁DNA": 16007, "Browser": 
16008, "▁ladies": 16009, "▁années": 16010, "▁rescue": 16011, "avity": 16012, "rock": 16013, "▁glasses": 16014, "▁Bey": 16015, ")}$": 16016, "detail": 16017, "▁dés": 16018, "tax": 16019, "▁favourite": 16020, "▁precision": 16021, "▁conoc": 16022, "Ms": 16023, "▁Native": 16024, "▁Pil": 16025, "InputStream": 16026, "orp": 16027, "▁Pap": 16028, "▁picking": 16029, "iph": 16030, "Loading": 16031, "▁priest": 16032, "Hook": 16033, "▁pist": 16034, "▁Une": 16035, "%,": 16036, "▁bil": 16037, "▁conservative": 16038, "eval": 16039, "iking": 16040, "'},": 16041, "▁sauce": 16042, "▁Due": 16043, "assen": 16044, "▁occasionally": 16045, "▁Дж": 16046, "unknown": 16047, "DED": 16048, "▁drum": 16049, "▁dub": 16050, "ATURE": 16051, "usage": 16052, "getType": 16053, "reply": 16054, "▁strategic": 16055, "▁kap": 16056, "design": 16057, "datetime": 16058, "▁Prim": 16059, "Master": 16060, "▁Corps": 16061, "▁considerable": 16062, "▁Tu": 16063, "▁ла": 16064, "▁tous": 16065, "▁clar": 16066, "▁poem": 16067, "album": 16068, "]*": 16069, "loaded": 16070, "▁traveling": 16071, "вые": 16072, "▁Ferr": 16073, "▁pharm": 16074, "abi": 16075, "▁}\\": 16076, "collect": 16077, "▁Bour": 16078, "OC": 16079, "▁measurements": 16080, "▁Professional": 16081, "▁sensor": 16082, "utsche": 16083, "▁demanded": 16084, "▁accompanied": 16085, "▁prend": 16086, "▁encoding": 16087, "▁Geschichte": 16088, "▁mig": 16089, "▁Gib": 16090, "▁Reich": 16091, "▁myster": 16092, "▁Mock": 16093, "▁physically": 16094, "▁Bau": 16095, "▁Single": 16096, "▁managing": 16097, "▁Kil": 16098, "▁Temple": 16099, "▁lev": 16100, "▁lí": 16101, "CPU": 16102, "▁Premier": 16103, "▁Give": 16104, "iri": 16105, "NV": 16106, "▁AI": 16107, "▁fp": 16108, "лександ": 16109, "▁tant": 16110, "▁fot": 16111, "Nullable": 16112, "▁guards": 16113, "Once": 16114, "▁chamber": 16115, "film": 16116, "▁bias": 16117, "▁Tai": 16118, "insic": 16119, "▁ml": 16120, "▁Ka": 16121, "вал": 16122, "▁SER": 16123, "▁Someone": 16124, "}}_{": 16125, "Fixed": 16126, "▁bent": 16127, "▁prohib": 16128, "▁bid": 16129, "▁fewer": 16130, "кры": 16131, "▁lugar": 16132, "▁deserve": 16133, "ssl": 16134, "▁cfg": 16135, "reck": 16136, "▁stability": 16137, "resize": 16138, "▁assertThat": 16139, "Trigger": 16140, "▁станов": 16141, "plugins": 16142, "▁lets": 16143, "хід": 16144, "▁Laura": 16145, "нер": 16146, "▁brut": 16147, "▁FI": 16148, "isons": 16149, "▁dyn": 16150, "icher": 16151, "rayed": 16152, "▁frequent": 16153, "▁jedoch": 16154, "▁Marine": 16155, "strings": 16156, "▁Util": 16157, "▁bos": 16158, "Mus": 16159, "▁Portugal": 16160, "Strategy": 16161, "▁посе": 16162, "▁slice": 16163, "▁insight": 16164, "▁widget": 16165, "▁général": 16166, "messages": 16167, "▁Hu": 16168, "▁requirement": 16169, "Side": 16170, "emplates": 16171, "▁ceremony": 16172, "▁physics": 16173, "▁graduate": 16174, "para": 16175, "▁preserv": 16176, "▁shops": 16177, "zek": 16178, "▁ub": 16179, "prepare": 16180, "▁Oil": 16181, "▁fib": 16182, "▁runtime": 16183, "▁hogy": 16184, "Warning": 16185, "▁Convert": 16186, "bourne": 16187, "▁emerged": 16188, "▁Ди": 16189, "ighth": 16190, "guard": 16191, "kal": 16192, "validation": 16193, "ência": 16194, "▁drinks": 16195, "theorem": 16196, "HR": 16197, "iev": 16198, "ployee": 16199, "Usage": 16200, "▁спе": 16201, "dispatch": 16202, "▁instantly": 16203, "obi": 16204, "▁justify": 16205, "▁Nev": 16206, "▁явля": 16207, "agra": 16208, "▁transmission": 16209, "fly": 16210, ";';": 17021, "▁cousin": 17022, "createElement": 17023, "Could": 17024, "▁capac": 17025, "▁pause": 17026, "ArrayList": 17027, "kte": 17028, "ordered": 
17029, "▁shaking": 17030, "labels": 17031, "▁reducing": 17032, "вых": 17033, "USED": 17034, "▁voting": 17035, "▁Ministry": 17036, "▁Mig": 17037, "▁Chen": 17038, "▁accompany": 17039, "ulle": 17040, "▁ga": 17041, "▁equipped": 17042, "▁nun": 17043, "Bet": 17044, "▁licensed": 17045, "ARCH": 17046, "FN": 17047, "▁engines": 17048, "▁ster": 17049, "▁locale": 17050, "▁въ": 17051, "links": 17052, "▁Capital": 17053, "▁alien": 17054, "Wr": 17055, "ръ": 17056, "Cart": 17057, "▁Marketing": 17058, "▁RT": 17059, "FileName": 17060, "▁ti": 17061, "iji": 17062, "▁versus": 17063, "live": 17064, "Sym": 17065, "kor": 17066, "▁emission": 17067, "umm": 17068, "ycz": 17069, "▁climbed": 17070, "▁plusieurs": 17071, "кри": 17072, "yar": 17073, "osten": 17074, "▁usb": 17075, "▁crossing": 17076, "▁polynom": 17077, "▁removal": 17078, "▁Adams": 17079, "▁ihre": 17080, "anden": 17081, "▁Benj": 17082, "▁Phill": 17083, "▁wounded": 17084, "▁Castle": 17085, "bild": 17086, "Annotation": 17087, "Processor": 17088, "▁tin": 17089, "folg": 17090, "▁Students": 17091, "▁Mexican": 17092, "▁administrative": 17093, "ILED": 17094, "▁conqu": 17095, "▁cheer": 17096, "▁Ces": 17097, "Because": 17098, "▁Juni": 17099, "▁encontr": 17100, "avi": 17101, "VI": 17102, "aku": 17103, "▁Ton": 17104, "▁smoking": 17105, "▁bay": 17106, "works": 17107, "ат": 17108, "attered": 17109, "▁Boolean": 17110, "▁Balt": 17111, "defer": 17112, "pathy": 17113, "Ah": 17114, "▁akt": 17115, "▁governor": 17116, "Pad": 17117, "▁sisters": 17118, "Lat": 17119, "▁revel": 17120, "▁SY": 17121, "itos": 17122, "▁filters": 17123, "Chunk": 17124, "consum": 17125, "▁removing": 17126, "▁Herr": 17127, "▁generator": 17128, "▁Cra": 17129, "▁farmers": 17130, "▁Members": 17131, "▁overcome": 17132, "▁Cin": 17133, "igkeit": 17134, "criptions": 17135, "Tests": 17136, "▁клу": 17137, "▁shake": 17138, "▁yy": 17139, "placement": 17140, "▁awards": 17141, "▁episodes": 17142, "▁Blood": 17143, "▁bullet": 17144, "▁viene": 17145, "▁Financial": 17146, "Future": 17147, "▁rou": 17148, "▁biologie": 17149, "▁useState": 17150, "iani": 17151, "piece": 17152, "▁speaker": 17153, "▁refr": 17154, "ARK": 17155, "▁MIT": 17156, "▁Tan": 17157, "▁Based": 17158, "▁cultiv": 17159, "▁hungry": 17160, "▁Ay": 17161, "▁Hey": 17162, "▁excitement": 17163, "ibraries": 17164, "Hit": 17165, "▁Ende": 17166, "NG": 17167, "FIL": 17168, ".\")": 17169, "Family": 17170, "inery": 17171, "necess": 17172, "velope": 17173, "▁Bot": 17174, "porter": 17175, "▁climb": 17176, "▁Eli": 17177, "urent": 17178, "▁mistakes": 17179, "ában": 17180, "marks": 17181, "pkt": 17182, "Library": 17183, "sted": 17184, "ublice": 17185, "▁Administration": 17186, "▁shapes": 17187, "публи": 17188, "God": 17189, "innen": 17190, "коло": 17191, "<<<<": 17192, "ibe": 17193, "ês": 17194, "▁США": 17195, "▁Foreign": 17196, "▁Margaret": 17197, "▁gene": 17198, "▁disturb": 17199, "▁тер": 17200, "▁onClick": 17201, "▁Engineering": 17202, "▁stopping": 17203, "▁restrictions": 17204, ",*": 17205, "BUF": 17206, "▁shadows": 17207, "hci": 17208, "▁Christians": 17209, "▁fence": 17210, "▁luxury": 17211, "akh": 17212, "coord": 17213, "▁investigate": 17214, "▁conventional": 17215, "\"—": 17216, "▁visits": 17217, "isé": 17218, "▁Sac": 17219, "className": 17220, "▁Psych": 17221, "▁reflected": 17222, "▁пло": 17223, "▁Vice": 17224, "ław": 17225, "________________": 17226, "▁Wolf": 17227, "rente": 17228, "▁Champion": 17229, "▁simulation": 17230, "esota": 17231, "▁Soon": 17232, "▁Cel": 17233, "▁theories": 17234, "▁STR": 17235, "▁collective": 17236, "▁coordinate": 17237, "querySelector": 
17238, "emed": 17239, "Break": 17240, "▁gef": 17241, "▁electricity": 17242, "▁gathering": 17243, "aters": 17244, "exper": 17245, "▁Roma": 17246, "▁Cooper": 17247, "SYMBOL": 17248, "vd": 17249, "iversary": 17250, "aines": 17251, "▁Grad": 17252, "▁independence": 17253, "woh": 17254, "▁consequence": 17255, "▁conversations": 17256, "▁Rou": 17257, "▁andere": 17258, "▁Systems": 17259, "гар": 17260, "▁moist": 17261, "flu": 17262, "ція": 17263, "ниш": 17264, "▁rode": 17265, "▁perd": 17266, "▁szer": 17267, "▁flood": 17268, "▁intim": 17269, "stderr": 17270, "▁reflection": 17271, "Scan": 17272, "▁disaster": 17273, "akespe": 17274, "▁Invalid": 17275, "▁humor": 17276, "▁Friedrich": 17277, "▁suggestions": 17278, "uvud": 17279, "Delay": 17280, "brief": 17281, "▁ис": 17282, "glied": 17283, "fas": 17284, "▁Smart": 17285, "▁medi": 17286, "sdk": 17287, "▁seus": 17288, "▁Arizona": 17289, "▁innocent": 17290, "Warn": 17291, "acious": 17292, "▁Moscow": 17293, "▁caps": 17294, "Delegate": 17295, "▁dramatic": 17296, "books": 17297, "▁shore": 17298, "uki": 17299, "▁Russell": 17300, "▁correlation": 17301, "Help": 17302, "▁pubblic": 17303, "zym": 17304, "comb": 17305, "EY": 17306, "LENGTH": 17307, "▁Mün": 17308, "▁_.": 17309, "▁ferm": 17310, "▁Ian": 17311, "▁Studio": 17312, "▁affairs": 17313, "los": 17314, "Rules": 17315, "running": 17316, "▁Posted": 17317, "Pixel": 17318, "▁dancing": 17319, "▁agreements": 17320, "▁Pic": 17321, "ancia": 17322, "▁má": 17323, "ationToken": 17324, "descriptor": 17325, "▁Carter": 17326, "Release": 17327, "************": 17328, "▁outstanding": 17329, "changes": 17330, "ARRAY": 17331, "▁Barbara": 17332, "▁nurse": 17333, "(\r": 17334, "▁Douglas": 17335, "▁nucle": 17336, "ouri": 17337, "▁Style": 17338, "avo": 17339, "▁painful": 17340, "▁slic": 17341, "▁seinem": 17342, "SUPPORT": 17343, "ogene": 17344, "▁satell": 17345, "tagon": 17346, "▁collapse": 17347, "velle": 17348, "MON": 17349, "aughters": 17350, "▁threatened": 17351, "▁Illegal": 17352, "▁desperate": 17353, "strict": 17354, "rus": 17355, "ститу": 17356, "\\\":": 17357, "▁conflic": 17358, "download": 17359, "atos": 17360, "▁Position": 17361, ".*;": 17362, "▁theater": 17363, "▁pleasant": 17364, "▁Cette": 17365, "▁Singapore": 17366, "heet": 17367, "▁pir": 17368, "▁acquis": 17369, "▁назва": 17370, "теля": 17371, "▁recru": 17372, "жения": 17373, "ёл": 17374, "версите": 17375, "▁respective": 17376, "▁tunnel": 17377, "▁Dean": 17378, "Du": 17379, "▁uncle": 17380, "▁offensive": 17381, "colo": 17382, "▁Unlike": 17383, "series": 17384, "▁Arn": 17385, "minute": 17386, "▁descriptor": 17387, "▁stones": 17388, "ICATION": 17389, "▁Pad": 17390, "▁iPhone": 17391, "ei": 17392, "▁fantasy": 17393, "▁Korean": 17394, "\"}": 17395, "▁orth": 17396, "halten": 17397, "deep": 17398, "▁Kay": 17399, "requency": 17400, "▁duties": 17401, "awt": 17402, "▁nearest": 17403, "▁disorder": 17404, "стру": 17405, "▁Chile": 17406, "▁seq": 17407, "▁transportation": 17408, "OO": 17409, "▁Dez": 17410, "iju": 17411, "▁Results": 17412, "jed": 17413, "ivel": 17414, "HOST": 17415, "▁€": 17416, "▁Î": 17417, "▁chin": 17418, "▁matt": 17419, "▁voted": 17420, "▁gehör": 17421, "▁▁▁▁▁▁▁▁▁▁▁": 17422, "▁sue": 17423, "▁legacy": 17424, "вся": 17425, "SOURCE": 17426, "WORK": 17427, "itis": 17428, "▁$|": 17429, "▁обо": 17430, "▁nr": 17431, "▁Tamb": 17432, "▁snap": 17433, "▁impressed": 17434, "▁deposit": 17435, "▁divid": 17436, "Segment": 17437, "▁кар": 17438, "▁Gas": 17439, "▁crimes": 17440, "▁insult": 17441, "▁Hum": 17442, "▁bounded": 17443, "▁kicked": 17444, "▁Му": 17445, "▁|\\": 17446, "added": 
17447, "Produ": 17448, "▁./": 17449, "▁awkward": 17450, "▁Кра": 17451, "▁ї": 17452, "▁CONTR": 17453, "▁beim": 17454, "▁placeholder": 17455, "spi": 17456, "▁Bei": 17457, "▁Pf": 17458, "ientes": 17459, "disk": 17460, "blk": 17461, "neo": 17462, "itarian": 17463, "▁cogn": 17464, "▁sout": 17465, "▁trash": 17466, "▁Rab": 17467, "▁decline": 17468, "tat": 17469, "▁combine": 17470, "▁Tot": 17471, "▁drops": 17472, "Times": 17473, "cheduler": 17474, "▁governments": 17475, "Tex": 17476, "▁Used": 17477, "зан": 17478, "▁pd": 17479, "мет": 17480, "▁&=&": 17481, "▁Nag": 17482, "▁дол": 17483, "▁Always": 17484, "rtc": 17485, "ске": 17486, "▁performances": 17487, "rupted": 17488, "▁два": 17489, "▁managers": 17490, "▁Pitt": 17491, "▁mystery": 17492, "▁settle": 17493, "ulse": 17494, "cross": 17495, "question": 17496, "asha": 17497, "seed": 17498, "urable": 17499, "Final": 17500, "++++": 17501, "inputs": 17502, "▁backup": 17503, "▁Learning": 17504, "▁*,": 17505, "logo": 17506, "▁seinen": 17507, "▁vulnerable": 17508, "directory": 17509, "ië": 17510, "▁friendship": 17511, "tu": 17512, "▁Vec": 17513, "rifice": 17514, "▁бра": 17515, "▁involve": 17516, "TON": 17517, "▁corrid": 17518, "separ": 17519, "Destroy": 17520, "▁jul": 17521, "▁inequality": 17522, "▁ain": 17523, "hex": 17524, "▁wider": 17525, "тели": 17526, "▁jack": 17527, "▁quot": 17528, "▁Glen": 17529, "initely": 17530, "ihood": 17531, "▁waist": 17532, "▁Manchester": 17533, "regular": 17534, "▁(&": 17535, "▁masses": 17536, "▁DEFAULT": 17537, "▁chairs": 17538, "▁Fast": 17539, "▁citt": 17540, "_{{\\": 17541, "oa": 17542, "▁$\\{": 17543, "▁seeds": 17544, "▁Ald": 17545, "▁Batt": 17546, "fab": 17547, "▁democracy": 17548, "DTO": 17549, "▁Hij": 17550, "PTR": 17551, "Na": 17552, "▁Harvard": 17553, "sid": 17554, "Pred": 17555, "fers": 17556, "▁spare": 17557, "AMP": 17558, "▁groupe": 17559, "▁sender": 17560, "▁Christopher": 17561, "▁prisoners": 17562, "▁Ker": 17563, "▁Crist": 17564, "▁ALL": 17565, "rice": 17566, "▁antes": 17567, "natural": 17568, "▁Susan": 17569, "▁Juli": 17570, "▁diab": 17571, "ixon": 17572, "icator": 17573, "▁flexible": 17574, "▁reserve": 17575, "Contains": 17576, "▁Hil": 17577, "▁Isa": 17578, "▁towns": 17579, "GS": 17580, "▁Trad": 17581, "▁Lock": 17582, "▁Grund": 17583, "▁criticism": 17584, "ню": 17585, "▁că": 17586, "▁politician": 17587, "stable": 17588, "Accept": 17589, "Summary": 17590, "▁também": 17591, "}^{-": 17592, "▁IM": 17593, "idal": 17594, "мор": 17595, "Blue": 17596, "GROUP": 17597, "▁terminal": 17598, "▁complexity": 17599, "▁locally": 17600, "DOWN": 17601, "▁Near": 17602, "Depth": 17603, "▁pole": 17604, "▁equality": 17605, "Site": 17606, "▁isinstance": 17607, "Speed": 17608, "ippi": 17609, ",&": 17610, "▁Enc": 17611, "щен": 17612, "▁mater": 17613, "▁slaves": 17614, "ACTION": 17615, "usalem": 17616, "▁haz": 17617, "▁Beat": 17618, "▁wrest": 17619, "▁llam": 17620, "Ins": 17621, "мина": 17622, "▁був": 17623, "▁Frame": 17624, "ushes": 17625, "▁virtually": 17626, "▁Perm": 17627, "▁weights": 17628, "▁llvm": 17629, "▁cave": 17630, "states": 17631, "DMA": 17632, "ellt": 17633, "ifact": 17634, "vendor": 17635, "▁Emma": 17636, "Locale": 17637, "▁SET": 17638, "▁geometry": 17639, "Styles": 17640, "▁Referee": 17641, "▁weit": 17642, "fica": 17643, "▁ads": 17644, "gray": 17645, "▁Burg": 17646, "iona": 17647, "dagger": 17648, "▁Januar": 17649, "дей": 17650, "isterschaft": 17651, "ppo": 17652, "oids": 17653, "▁départ": 17654, "Shader": 17655, "▁constraint": 17656, "Secret": 17657, "▁Peters": 17658, "▁eyeb": 17659, "▁mesh": 17660, "▁cookie": 17661, 
"▁Pick": 17662, "▁nick": 17663, "bye": 17664, "▁savings": 17665, "Try": 17666, "python": 17667, "▁patri": 17668, "▁multip": 17669, "▁kinda": 17670, "▁'_": 17671, "▁Franz": 17672, "▁cloth": 17673, "зульта": 17674, "▁fleet": 17675, "▁humanity": 17676, "resa": 17677, "blob": 17678, "▁TX": 17679, "▁Buch": 17680, "▁Lond": 17681, "▁valley": 17682, "▁murm": 17683, "▁Trade": 17684, "linewidth": 17685, "▁especial": 17686, "upper": 17687, "▁hosp": 17688, "▁tanto": 17689, "▁oldest": 17690, "▁Roose": 17691, "▁hitting": 17692, "dog": 17693, "ovi": 17694, "},\r": 17695, "▁compatible": 17696, "▁Website": 17697, "poch": 17698, "▁Bag": 17699, "▁accomplish": 17700, "Christ": 17701, "asset": 17702, "▁Until": 17703, "▁geld": 17704, "Listen": 17705, "SB": 17706, "Setup": 17707, "icia": 17708, "▁lum": 17709, "▁janvier": 17710, "PAGE": 17711, "▁Nu": 17712, "/\"": 17713, "▁divorce": 17714, "Execute": 17715, "Depend": 17716, "▁Scottish": 17717, "▁Ts": 17718, "ruppe": 17719, "▁refuse": 17720, "▁Oktober": 17721, "ijk": 17722, "▁Amy": 17723, "▁dimin": 17724, "▁gross": 17725, "▁trat": 17726, "isible": 17727, "mixer": 17728, "▁autres": 17729, "▁neat": 17730, "▁otros": 17731, "Void": 17732, "▁schol": 17733, "▁Walker": 17734, "▁tube": 17735, "ologists": 17736, "▁груп": 17737, "▁haben": 17738, "uber": 17739, "ACTIVE": 17740, "▁Attendance": 17741, "▁оп": 17742, "▁blade": 17743, "oplus": 17744, "▁Original": 17745, "▁manufacturer": 17746, "asz": 17747, "âte": 17748, "rer": 17749, "▁Json": 17750, "▁succeeded": 17751, "uffle": 17752, "▁backed": 17753, "esian": 17754, "tick": 17755, "External": 17756, "▁XIX": 17757, "▁hearts": 17758, "▁После": 17759, "olu": 17760, "▁лет": 17761, "VICE": 17762, "ário": 17763, "▁fraud": 17764, "edu": 17765, "Primary": 17766, "▁gaming": 17767, "▁plt": 17768, "igator": 17769, "IES": 17770, "Compiler": 17771, "▁monument": 17772, "agem": 17773, "▁Rain": 17774, "▁moins": 17775, "oku": 17776, "osex": 17777, "▁Kansas": 17778, "▁gepublice": 17779, "▁Joy": 17780, "Scene": 17781, "▁kingdom": 17782, "rices": 17783, "▁juin": 17784, "▁uncomfortable": 17785, "▁Money": 17786, "obb": 17787, "expl": 17788, "strcmp": 17789, "▁dread": 17790, "rition": 17791, "▁Chi": 17792, "▁demonstrated": 17793, "▁vertices": 17794, "чо": 17795, "▁Culture": 17796, "FX": 17797, "Dictionary": 17798, "▁Dru": 17799, "trm": 17800, "▁examine": 17801, "▁therap": 17802, "ième": 17803, "мини": 17804, "▁produces": 17805, "▁photographs": 17806, "▁threads": 17807, "▁MI": 17808, "▁extraordinary": 17809, "ским": 17810, "▁gepubliceerd": 17811, "▁Poland": 17812, "▁guaranteed": 17813, "RG": 17814, "osc": 17815, "али": 17816, "▁тех": 17817, "errno": 17818, "science": 17819, "iffs": 17820, "▁Tam": 17821, "▁Beth": 17822, "▁Travel": 17823, "▁translate": 17824, "ché": 17825, "▁ling": 17826, "▁belongs": 17827, "▁electrical": 17828, "ensk": 17829, "▁Compet": 17830, "cg": 17831, "VC": 17832, "topic": 17833, "▁presum": 17834, "вета": 17835, "▁approximation": 17836, "▁grim": 17837, "▁Из": 17838, "_{(": 17839, "вин": 17840, "ution": 17841, "owych": 17842, "åg": 17843, "sterreich": 17844, "▁characteristic": 17845, "oming": 17846, "▁/*!": 17847, "▁prize": 17848, "▁Minnesota": 17849, "ted": 17850, "цы": 17851, "▁Om": 17852, "▁indices": 17853, "▁stem": 17854, "regon": 17855, "ниче": 17856, "▁Salv": 17857, "ése": 17858, "▁aged": 17859, "▁Past": 17860, "▁internation": 17861, "▁Vic": 17862, "▁resume": 17863, "akespeare": 17864, "▁estado": 17865, "▁abilities": 17866, "▁brow": 17867, "▁NFL": 17868, "▁trends": 17869, "▁Austin": 17870, "▁LIMIT": 17871, "▁Kor": 17872, 
"▁folk": 17873, "▁ward": 17874, "▁nest": 17875, "▁Junior": 17876, "▁maintaining": 17877, "Pub": 17878, "OBJECT": 17879, "▁bloody": 17880, "▁sj": 17881, "▁dtype": 17882, "Pane": 17883, "▁bacter": 17884, "▁gradually": 17885, "mr": 17886, "Team": 17887, "▁indicating": 17888, "▁decrease": 17889, "tek": 17890, "▁Represent": 17891, "▁developers": 17892, "Guid": 17893, "▁Diet": 17894, "▁retr": 17895, "Navigation": 17896, "esi": 17897, "▁lazy": 17898, "Standard": 17899, "Er": 17900, "AW": 17901, "▁États": 17902, "▁assured": 17903, "San": 17904, "▁Andre": 17905, "’,": 17906, "fang": 17907, "ération": 17908, "▁industries": 17909, "▁incon": 17910, "Emit": 17911, "▁где": 17912, "▁retriev": 17913, "eni": 17914, "▁Turkey": 17915, "izers": 17916, "Angle": 17917, "▁oc": 17918, "▁palm": 17919, "▁stan": 17920, "льно": 17921, "▁CSS": 17922, "▁frances": 17923, "▁grin": 17924, "▁tiempo": 17925, "▁Prix": 17926, "]).": 17927, "▁deput": 17928, "▁Pin": 17929, "▁sixt": 17930, "▁predicted": 17931, "azure": 17932, "▁Motor": 17933, "▁ihm": 17934, "▁manus": 17935, "apos": 17936, "▁instruments": 17937, "▁counts": 17938, "▁aimed": 17939, "profit": 17940, "▁dok": 17941, "обра": 17942, "▁estud": 17943, "iesz": 17944, "▁piss": 17945, "▁inaug": 17946, "▁voters": 17947, "▁packages": 17948, "▁cute": 17949, "▁fitness": 17950, "▁leurs": 17951, "▁sorted": 17952, "phant": 17953, "OPT": 17954, "▁zip": 17955, "season": 17956, "emi": 17957, "encoding": 17958, "won": 17959, "elect": 17960, "▁tooth": 17961, "▁upcoming": 17962, "▁Graham": 17963, "nut": 17964, "▁Ark": 17965, "ält": 17966, "▁precious": 17967, "agle": 17968, "née": 17969, "ница": 17970, "aris": 17971, "▁pile": 17972, "cole": 17973, "▁WITH": 17974, "routing": 17975, "▁***": 17976, "Appearance": 17977, "llvm": 17978, "▁Oliver": 17979, "▁PL": 17980, "ifndef": 17981, "etzt": 17982, "skiego": 17983, "▁pon": 17984, "ARGET": 17985, "kö": 17986, "alled": 17987, "▁=\\": 17988, "sure": 17989, "matches": 17990, "▁temperatures": 17991, "SEL": 17992, "▁clone": 17993, "▁eller": 17994, "erna": 17995, "▁поло": 17996, "Management": 17997, "company": 17998, "▁lun": 17999, "▁streaming": 18000, "▁Ni": 18001, "▁sí": 18002, "Contact": 18003, "▁Credit": 18004, "▁Oak": 18005, "▁представ": 18006, "radius": 18007, "cli": 18008, "IENT": 18009, "▁Lucy": 18010, "▁calculation": 18011, "▁pixel": 18012, "▁mul": 18013, "▁outcomes": 18014, "▁centers": 18015, "▁residence": 18016, "Constraint": 18017, "▁preserve": 18018, "peon": 18019, "uffix": 18020, "▁Roberts": 18021, "▁promot": 18022, "?!": 18023, "balance": 18024, "▁courts": 18025, "▁disg": 18026, "PRINT": 18027, "▁их": 18028, "elfare": 18029, "▁retreat": 18030, "▁Ав": 18031, "Cost": 18032, "also": 18033, "▁Für": 18034, "▁März": 18035, "DIO": 18036, "▁bez": 18037, "AUTH": 18038, "Den": 18039, "▁atom": 18040, "▁roman": 18041, "▁Pel": 18042, "▁Roosevelt": 18043, "▁Plant": 18044, "Contents": 18045, "▁Between": 18046, "▁coupling": 18047, "structure": 18048, "▁Marshall": 18049, "▁Career": 18050, "▁railway": 18051, "▁Bureau": 18052, "▁possibilities": 18053, "▁kor": 18054, "){\r": 18055, "mero": 18056, "mov": 18057, "англ": 18058, "AIN": 18059, "mund": 18060, "lette": 18061, "▁summar": 18062, "▁describing": 18063, "▁NAS": 18064, "▁Emb": 18065, "Instruction": 18066, "liest": 18067, "▁Sig": 18068, "Bill": 18069, "▁verd": 18070, "plant": 18071, "▁galaxies": 18072, "\"])": 18073, "▁PyObject": 18074, "▁Gy": 18075, "▁mě": 18076, "▁organisation": 18077, "Her": 18078, "Sep": 18079, "ocom": 18080, "▁Same": 18081, "▁bite": 18082, "▁Seattle": 18083, "зыва": 18084, 
"Observer": 18085, "’.": 18086, "▁morph": 18087, "urches": 18088, "alph": 18089, "reement": 18090, "consin": 18091, "^-": 18092, "▁dann": 18093, "translate": 18094, "вих": 18095, "React": 18096, "▁cats": 18097, "▁brew": 18098, "▁ds": 18099, "▁circles": 18100, "▁drift": 18101, "agma": 18102, "▁Valent": 18103, "PIN": 18104, "ARM": 18105, "▁surviv": 18106, "alin": 18107, "Pref": 18108, "friendly": 18109, "▁uncertainty": 18110, "▁fd": 18111, "▁engineer": 18112, "Ben": 18113, "icular": 18114, "orest": 18115, "▁horizontal": 18116, "UTC": 18117, "textrm": 18118, "Live": 18119, "Score": 18120, "▁Germans": 18121, "distance": 18122, "uti": 18123, "▁équ": 18124, "▁numerical": 18125, "▁reass": 18126, "Activ": 18127, "▁cod": 18128, "bullet": 18129, "ensing": 18130, "▁Gem": 18131, "▁navigation": 18132, "addClass": 18133, "▁simultaneously": 18134, "вий": 18135, "▁його": 18136, "▁Hö": 18137, "▁harsh": 18138, "precated": 18139, "ССР": 18140, "▁Equip": 18141, "adget": 18142, "▁TYPE": 18143, "▁mg": 18144, "IGH": 18145, "▁vin": 18146, "▁findings": 18147, "ivan": 18148, "▁possession": 18149, "▁того": 18150, "▁parsed": 18151, "riors": 18152, "zeichnet": 18153, "ников": 18154, "Worker": 18155, "▁enables": 18156, "▁($\\": 18157, "▁Copy": 18158, "▁orientation": 18159, "стре": 18160, "▁Indians": 18161, "▁Gary": 18162, "▁Insurance": 18163, "isan": 18164, "Chat": 18165, "▁comun": 18166, "▁coron": 18167, "ография": 18168, "updated": 18169, "▁Ин": 18170, "These": 18171, "SEC": 18172, "▁boyfriend": 18173, "Diagnostics": 18174, "Hint": 18175, "mul": 18176, "▁inode": 18177, "xA": 18178, "eft": 18179, "OPTION": 18180, "unct": 18181, "annon": 18182, "ENS": 18183, "strip": 18184, "▁enthusi": 18185, "▁Whit": 18186, "▁Фи": 18187, "aude": 18188, "▁disagree": 18189, "▁snapped": 18190, "Phys": 18191, "▁Syn": 18192, "▁sour": 18193, "▁Lux": 18194, "ugar": 18195, "tile": 18196, "▁infection": 18197, "▁Feb": 18198, "▁Chem": 18199, "dataset": 18200, "chts": 18201, "Dynamic": 18202, "▁сред": 18203, "▁queen": 18204, "worker": 18205, "swap": 18206, "▁timestamp": 18207, "▁Integr": 18208, "▁interviews": 18209, "such": 18210, "▁laughter": 18211, "prof": 18212, "▁Bird": 18213, "(|": 18214, "ân": 18215, "▁gra": 18216, "&=": 18217, "zens": 18218, "getMessage": 18219, "▁Ost": 18220, "▁gab": 18221, "▁mortgage": 18222, "multicol": 18223, "LEVEL": 18224, "partition": 18225, "seen": 18226, "▁declar": 18227, "AU": 18228, "▁ox": 18229, "▁ligger": 18230, "▁Carm": 18231, "geme": 18232, "▁Vegas": 18233, "▁Eug": 18234, "orus": 18235, "▁brick": 18236, "▁así": 18237, "▁Magazine": 18238, "HasColumnType": 18239, "VR": 18240, "licher": 18241, "▁Future": 18242, "▁Jug": 18243, "attan": 18244, "constructor": 18245, "VP": 18246, "▁тур": 18247, "чина": 18248, "Comparator": 18249, "▁authentic": 18250, "▁monster": 18251, "▁transformed": 18252, "▁firms": 18253, "FW": 18254, "▁catalog": 18255, "boards": 18256, "▁diseases": 18257, "▁Benjamin": 18258, "▁horizon": 18259, "▁Available": 18260, "Mvc": 18261, "Stud": 18262, "▁lord": 18263, "general": 18264, "пар": 18265, "▁cabinet": 18266, "▁Basic": 18267, "TestCase": 18268, "ansk": 18269, "▁Snow": 18270, "ierten": 18271, "▁vocal": 18272, "Padding": 18273, "halt": 18274, "▁Alexand": 18275, "▁Colomb": 18276, "ivamente": 18277, "▁artificial": 18278, "▁Atlanta": 18279, "▁mentre": 18280, "▁estaba": 18281, "jekt": 18282, "▁slept": 18283, "▁endless": 18284, "éro": 18285, "attery": 18286, "uur": 18287, "▁weakness": 18288, "▁attempting": 18289, "BYTE": 18290, "▁founder": 18291, "▁salv": 18292, "▁Medicine": 18293, "tid": 18294, 
"▁Schwe": 18295, "raction": 18296, "▁¿": 18297, "crate": 18298, "SERVER": 18299, "▁compound": 18300, "▁conve": 18301, "▁caf": 18302, "▁handful": 18303, "onne": 18304, "ública": 18305, "▁defensive": 18306, "Alignment": 18307, "▁préc": 18308, "▁significance": 18309, "élé": 18310, "arta": 18311, "Dam": 18312, "▁perpet": 18313, "▁caller": 18314, "icients": 18315, "cep": 18316, "▁Multi": 18317, "▁stolen": 18318, "▁focusing": 18319, "embed": 18320, "▁bree": 18321, "▁AB": 18322, "▁occasions": 18323, "sea": 18324, "Prov": 18325, "чение": 18326, "▁Category": 18327, "▁sq": 18328, "▁Фе": 18329, "VA": 18330, "Diff": 18331, "Tri": 18332, "issement": 18333, "▁actress": 18334, "▁Пе": 18335, "▁jej": 18336, "▁twisted": 18337, "▁Nicol": 18338, "▁junior": 18339, "Sound": 18340, "▁Brasil": 18341, "▁juice": 18342, "▁>>>": 18343, "▁Alb": 18344, "▁softly": 18345, "▁McK": 18346, "▁Gren": 18347, "▁italiano": 18348, "▁creatures": 18349, "▁residential": 18350, "▁Instagram": 18351, "ucks": 18352, "▁killer": 18353, "▁Johnny": 18354, "▁enterprise": 18355, "Dto": 18356, "chestra": 18357, "▁Tel": 18358, "▁Activ": 18359, "factor": 18360, "oust": 18361, "▁vacuum": 18362, "рал": 18363, "')->": 18364, "▁Left": 18365, "▁defect": 18366, "▁ninete": 18367, "fare": 18368, "▁regret": 18369, "▁shar": 18370, "ctrine": 18371, "mesh": 18372, "city": 18373, "icit": 18374, "▁Fem": 18375, "limited": 18376, "oka": 18377, "!\\!\\": 18378, "Donald": 18379, "зно": 18380, "▁provision": 18381, "▁discussions": 18382, "Drag": 18383, "▁Incl": 18384, "Exit": 18385, "▁Abd": 18386, "story": 18387, "ieve": 18388, "▁był": 18389, "olving": 18390, "wohner": 18391, "▁guidelines": 18392, "▁straw": 18393, "üss": 18394, "▁було": 18395, "▁burden": 18396, "▁spatial": 18397, "▁stretched": 18398, "▁Inf": 18399, "▁typedef": 18400, "▁robot": 18401, "▁Doc": 18402, "pliers": 18403, "wal": 18404, "camp": 18405, "▁diffé": 18406, "▁McG": 18407, "▁tel": 18408, "arette": 18409, "▁subsequently": 18410, "▁honey": 18411, "FUNC": 18412, "▁establishment": 18413, "tesy": 18414, "▁który": 18415, "▁сель": 18416, "▁FO": 18417, "▁Islands": 18418, "▁mp": 18419, "Scalar": 18420, "▁Yan": 18421, "cken": 18422, "▁variation": 18423, "ią": 18424, "optim": 18425, "azor": 18426, "tuple": 18427, "▁gravity": 18428, "▁conclude": 18429, "▁collections": 18430, "ész": 18431, "▁Liver": 18432, "▁ethnic": 18433, "compile": 18434, "▁parl": 18435, "Surface": 18436, "{'": 18437, "▁paragraph": 18438, "posite": 18439, "ítulo": 18440, "oba": 18441, "binary": 18442, "rob": 18443, "▁Pedro": 18444, "▁fis": 18445, "▁Grande": 18446, "odox": 18447, "▁posting": 18448, "": 26345, "olent": 26346, "▁этого": 26347, "▁Generic": 26348, "▁*/,": 26349, "▁combinations": 26350, "▁rejo": 26351, "спубли": 26352, "capacity": 26353, "▁traces": 26354, "▁opacity": 26355, "▁Official": 26356, "icion": 26357, "▁emotionally": 26358, "▁Joel": 26359, "ському": 26360, "▁legendary": 26361, "▁pam": 26362, "▁También": 26363, ".<": 26364, "iba": 26365, "midt": 26366, "бом": 26367, "▁ensuite": 26368, "Authorization": 26369, "Pag": 26370, "▁helmet": 26371, "▁territo": 26372, "secondary": 26373, "▁segunda": 26374, "▁Wire": 26375, "recated": 26376, "▁invoked": 26377, "▁ValueError": 26378, "▁фо": 26379, "ALIGN": 26380, "CURRENT": 26381, "\\+\\_\\": 26382, "▁compilation": 26383, "ær": 26384, "▁Palmar": 26385, "▁influences": 26386, "/:": 26387, "Mix": 26388, "NOP": 26389, "econom": 26390, "▁tucked": 26391, "▁});\r": 26392, "ANK": 26393, "reject": 26394, "▁pension": 26395, "▁generates": 26396, "чё": 26397, "▁incap": 26398, "▁clicked": 26399, 
"▁fus": 26400, "ourses": 26401, "▁Easter": 26402, "%;": 26403, "zin": 26404, "▁obligations": 26405, "▁Tips": 26406, "};\r": 26407, ".\"_": 26408, "▁BSD": 26409, "ática": 26410, "▁expose": 26411, "Pars": 26412, "▁Amanda": 26413, "куп": 26414, "▁guessed": 26415, "dsi": 26416, "▁Leip": 26417, "Broad": 26418, "▁Hughes": 26419, "ié": 26420, "▁Wahl": 26421, "▁formerly": 26422, "Relative": 26423, "▁Yu": 26424, "▁Mountains": 26425, "▁Enum": 26426, "▁strang": 26427, "_-": 26428, "recht": 26429, "viv": 26430, "pause": 26431, "▁Londres": 26432, "▁elbow": 26433, "▁Hawaii": 26434, "▁Casino": 26435, "Threshold": 26436, "Units": 26437, "Include": 26438, "ито": 26439, "asury": 26440, "▁steht": 26441, "▁damned": 26442, "▁packets": 26443, "▁Werk": 26444, "▁elevator": 26445, "iedad": 26446, "govern": 26447, "▁CONTRACT": 26448, "mals": 26449, "▁remem": 26450, "▁entonces": 26451, "▁vas": 26452, "▁sympathy": 26453, "▁befindet": 26454, "incing": 26455, "DataSet": 26456, "▁additionally": 26457, "▁musician": 26458, "шего": 26459, "▁listop": 26460, ">\")": 26461, "Printf": 26462, "▁Felix": 26463, "▁carved": 26464, "▁nicely": 26465, "гом": 26466, "chap": 26467, "▁Nieder": 26468, "▁Lav": 26469, "▁modifications": 26470, "moment": 26471, "▁balcon": 26472, "▁dependency": 26473, "CKET": 26474, "▁vanished": 26475, "▁fighters": 26476, "▁zunächst": 26477, "ioctl": 26478, "▁defens": 26479, "▁Nem": 26480, "Utility": 26481, "▁curv": 26482, "▁DAMAGES": 26483, "▁Rogers": 26484, "▁gratitude": 26485, "▁Denmark": 26486, "рая": 26487, "grpc": 26488, "▁juni": 26489, "▁октября": 26490, "▁immense": 26491, "▁prevented": 26492, "▁foam": 26493, "▁Extra": 26494, "aimed": 26495, "▁Criteria": 26496, "▁Simply": 26497, "boxes": 26498, "▁Legend": 26499, "▁Players": 26500, "▁Mercedes": 26501, "▁Branch": 26502, "TERN": 26503, "omena": 26504, "▁incorporate": 26505, "conde": 26506, "▁Estado": 26507, "▁wasted": 26508, "▁complaining": 26509, "▁warriors": 26510, "oter": 26511, "▁этом": 26512, "▁conten": 26513, "▁machinery": 26514, "▁technological": 26515, "▁TD": 26516, "▁gras": 26517, "▁minimize": 26518, "▁Door": 26519, "▁bzw": 26520, "▁prac": 26521, "TREE": 26522, "▁Wing": 26523, "▁Transaction": 26524, "▁MVT": 26525, "▁Klein": 26526, "commons": 26527, "▁}{": 26528, "▁Heritage": 26529, "▁fade": 26530, "рок": 26531, "setValue": 26532, "▁Wallace": 26533, "MX": 26534, "▁ACT": 26535, "▁footage": 26536, "▁entstand": 26537, "arga": 26538, "▁nails": 26539, "▁capitalism": 26540, "▁Garc": 26541, "▁suspension": 26542, "ilis": 26543, "▁Mov": 26544, "uffled": 26545, "Arc": 26546, "▁Beautiful": 26547, "WAY": 26548, "Parallel": 26549, "XXXX": 26550, "diag": 26551, "▁DT": 26552, "mq": 26553, "TextView": 26554, "MLE": 26555, "ennen": 26556, "▁infected": 26557, "▁therapist": 26558, "INGS": 26559, "▁cidade": 26560, "ън": 26561, "▁pdf": 26562, "▁bump": 26563, "CTX": 26564, "▁INCLUDING": 26565, "▁Gef": 26566, "ENTIAL": 26567, "▁handy": 26568, "▁temporal": 26569, "AtA": 26570, "ISH": 26571, "▁Pattern": 26572, "▁lan": 26573, "ependant": 26574, "▁shining": 26575, "idy": 26576, "▁NT": 26577, "▁Fran": 26578, "▁nurses": 26579, "▁betray": 26580, "▁sensible": 26581, "▁апреля": 26582, "▁'[": 26583, "▁thirteen": 26584, ")}_{": 26585, "▁Noah": 26586, "INSERT": 26587, "istically": 26588, "▁Appendix": 26589, "▁recher": 26590, "Receiver": 26591, "▁dernier": 26592, "лла": 26593, "лиза": 26594, "▁Partido": 26595, "▁maximal": 26596, "snap": 26597, "▁часть": 26598, "STOP": 26599, "▁ultra": 26600, "▁développ": 26601, "▁tegen": 26602, "▁Чи": 26603, "LIB": 26604, "▁baseline": 26605, 
"reload": 26606, "▁Arbitro": 26607, "▁kall": 26608, "capture": 26609, "Arm": 26610, "quin": 26611, "impse": 26612, "zas": 26613, "▁Cand": 26614, "▁brains": 26615, "▁hostile": 26616, "▁marble": 26617, "oons": 26618, "▁Loss": 26619, "MetaData": 26620, "▁República": 26621, "▁andra": 26622, "oden": 26623, "▁documented": 26624, "▁Moses": 26625, "odd": 26626, "▁wax": 26627, "usch": 26628, "▁diagnosed": 26629, "inkle": 26630, "▁Xbox": 26631, "▁seventy": 26632, "cias": 26633, "▁noviembre": 26634, "Compute": 26635, "});\r": 26636, "▁Philippe": 26637, "▁För": 26638, "Leave": 26639, "▁sage": 26640, "▁unpre": 26641, "▁Fortunately": 26642, "▁apost": 26643, "entities": 26644, "▁ellos": 26645, "authorized": 26646, "GBT": 26647, "▁insist": 26648, "▁inspire": 26649, "Mass": 26650, "▁rôle": 26651, "fee": 26652, "ipart": 26653, "цер": 26654, "unate": 26655, "▁CNN": 26656, ":}": 26657, "▁unhappy": 26658, "▁imported": 26659, "HIGH": 26660, "rings": 26661, "▁Instance": 26662, "Bay": 26663, "agles": 26664, "mee": 26665, "bery": 26666, "▁Stories": 26667, "▁Chase": 26668, "▁carriage": 26669, "▁misunder": 26670, "▁imagin": 26671, "pw": 26672, "▁Meter": 26673, "▁crowds": 26674, "▁Fame": 26675, "skill": 26676, "▁comed": 26677, "▁ranch": 26678, "▁lacking": 26679, "▁submar": 26680, "iante": 26681, "▁lanz": 26682, "▁служ": 26683, "-----------": 26684, "▁obten": 26685, "▁downstairs": 26686, "YN": 26687, "rotation": 26688, "▁Jesse": 26689, "$(\"#": 26690, "▁puls": 26691, "irling": 26692, "▁Schaus": 26693, "▁deployed": 26694, "▁{}\",": 26695, "▁Marvel": 26696, "ENUM": 26697, "▁Mathemat": 26698, "▁nn": 26699, "compet": 26700, "ków": 26701, "bil": 26702, "Which": 26703, "isine": 26704, "▁rude": 26705, "▁niveau": 26706, "▁área": 26707, "▁près": 26708, "atis": 26709, "▁[...]": 26710, "fur": 26711, "omm": 26712, "packed": 26713, "мене": 26714, "scriptstyle": 26715, "▁Ath": 26716, "▁desp": 26717, "eltemperaturen": 26718, "▁talents": 26719, "ocy": 26720, "▁raises": 26721, "LIMIT": 26722, "▁editorial": 26723, "▁Animal": 26724, "drive": 26725, "▁работа": 26726, "bss": 26727, "▁Sev": 26728, "epoch": 26729, "▁RC": 26730, "UNUSED": 26731, "▁mandatory": 26732, "(?:": 26733, "▁Bin": 26734, "▁synthetic": 26735, "▁gown": 26736, "▁Dob": 26737, "kap": 26738, "▁harmon": 26739, "▁liberty": 26740, "▁Rice": 26741, "▁prayers": 26742, "▁mise": 26743, "▁confusing": 26744, "▁leap": 26745, "▁arrives": 26746, "kamp": 26747, "▁thats": 26748, "ACC": 26749, "▁Parameters": 26750, "▁одно": 26751, "▁Bio": 26752, "density": 26753, "▁glimpse": 26754, "FORE": 26755, "▁Listen": 26756, "Prev": 26757, "}\\,\\": 26758, "куль": 26759, "▁SEC": 26760, "▁explored": 26761, "▁meantime": 26762, "AIL": 26763, "▁WP": 26764, "▁raison": 26765, "▁existe": 26766, "▁lesser": 26767, "▁Validate": 26768, "▁caution": 26769, "usta": 26770, "heading": 26771, "EFF": 26772, ".'\"": 26773, "▁Gilbert": 26774, "▁limitation": 26775, "▁retour": 26776, "▁Commonwealth": 26777, "▁gewann": 26778, "▁miserable": 26779, "▁networking": 26780, "▁ottobre": 26781, "▁Dise": 26782, "edges": 26783, "▁sede": 26784, "вича": 26785, "uniform": 26786, "▁деятель": 26787, "iros": 26788, "▁desen": 26789, "▁parc": 26790, "▁Rico": 26791, "Ns": 26792, "guid": 26793, "orio": 26794, "avelength": 26795, "▁Gle": 26796, "inceton": 26797, "Amaz": 26798, "Construct": 26799, "▁mx": 26800, "▁Vern": 26801, "▁Generation": 26802, "Jack": 26803, "romag": 26804, "▁viagra": 26805, "▁Peg": 26806, "▁Updated": 26807, "▁overlap": 26808, "EventArgs": 26809, "кро": 26810, "▁*«": 26811, "▁questioned": 26812, "South": 26813, "notice": 
26814, "▁permanently": 26815, "lst": 26816, "ficie": 26817, "▁quella": 26818, "▁colleges": 26819, "▁disappointment": 26820, "▁Luft": 26821, "imgur": 26822, "▁transitions": 26823, "▁seller": 26824, "▁июня": 26825, "▁Og": 26826, "▁ADD": 26827, "▁Pays": 26828, "COMMAND": 26829, "grades": 26830, "▁febbra": 26831, "▁Cyr": 26832, "▁febbraio": 26833, "eti": 26834, "▁arom": 26835, "▁Claude": 26836, "▁UEFA": 26837, "▁живе": 26838, "▁Victorian": 26839, "keeping": 26840, "ên": 26841, "▁FIXME": 26842, "itime": 26843, "chestr": 26844, "▁Samsung": 26845, "▁doctrine": 26846, "▁pear": 26847, "▁Mediterranean": 26848, "▁Ya": 26849, "▁vault": 26850, "▁Historic": 26851, "▁sedan": 26852, "▁heated": 26853, "▁política": 26854, "Proof": 26855, ":{": 26856, "fem": 26857, "▁Frankfurt": 26858, "pectives": 26859, "MG": 26860, "▁Eye": 26861, "dai": 26862, "▁reserves": 26863, "NER": 26864, "▁tobacco": 26865, "▁fragments": 26866, "icc": 26867, "▁booth": 26868, "▁cruise": 26869, "▁Testament": 26870, "cola": 26871, "▁Leop": 26872, "▁noon": 26873, "▁terrified": 26874, "vb": 26875, "intel": 26876, "alie": 26877, "▁verification": 26878, "yster": 26879, "ADER": 26880, "chied": 26881, "▁datasets": 26882, "▁зі": 26883, "▁miem": 26884, "ulates": 26885, "▁uuid": 26886, "▁Pictures": 26887, "▁Brend": 26888, "Billboard": 26889, "▁stern": 26890, "▁denom": 26891, "▁accidents": 26892, "сня": 26893, "▁packing": 26894, "ција": 26895, "iblical": 26896, "▁Так": 26897, "▁whisk": 26898, "▁luego": 26899, "▁rectangle": 26900, "▁hooks": 26901, "▁neglect": 26902, "▁sober": 26903, "proposition": 26904, "Multiple": 26905, ":\",": 26906, "▁bapt": 26907, "Parts": 26908, "▁Selection": 26909, "▁Alpha": 26910, "weights": 26911, "hall": 26912, "соб": 26913, "▁lur": 26914, "▁época": 26915, "▁rested": 26916, "ambigu": 26917, "▁tastes": 26918, "amazonaws": 26919, "▁confess": 26920, "▁diciembre": 26921, "implement": 26922, "▁absorption": 26923, "Hal": 26924, "LEAN": 26925, "▁Zach": 26926, "▁freeze": 26927, "LBL": 26928, "STM": 26929, "▁calc": 26930, "={()": 26931, "=*/": 26932, "▁bt": 26933, "Reb": 26934, "▁Wien": 26935, "anska": 26936, "▁surn": 26937, "iative": 26938, "▁invån": 26939, "CY": 26940, "▁là": 26941, "amba": 26942, "leen": 26943, "wahl": 26944, "▁functioning": 26945, "ția": 26946, "getContext": 26947, "gart": 26948, "▁обе": 26949, "Pen": 26950, "vik": 26951, "Slider": 26952, "▁Accept": 26953, "Gap": 26954, "▁Jorge": 26955, "SIG": 26956, "▁вос": 26957, "▁голо": 26958, "▁periodo": 26959, "шта": 26960, "▁patches": 26961, "кої": 26962, "äre": 26963, "engono": 26964, "lista": 26965, "horn": 26966, "▁Complex": 26967, "Sent": 26968, "trfs": 26969, "▁convex": 26970, "Generation": 26971, "▁місце": 26972, "compress": 26973, "▁Sax": 26974, "▁uid": 26975, "▁Lebens": 26976, "Completion": 26977, "\\|_{": 26978, "insky": 26979, "▁schon": 26980, "▁masters": 26981, "independ": 26982, "neys": 26983, "▁lied": 26984, "▁aspir": 26985, "чні": 26986, "▁breakdown": 26987, "▁Harm": 26988, "▁designing": 26989, "hf": 26990, "▁Angela": 26991, "▁confer": 26992, "▁partido": 26993, "▁interference": 26994, "mao": 26995, "▁absorbed": 26996, "▁Vall": 26997, "ErrorCode": 26998, "▁Publishing": 26999, "vano": 27000, "BITS": 27001, "▁deer": 27002, "▁Campaign": 27003, "▁graz": 27004, "CHANGE": 27005, "▁feder": 27006, "iffe": 27007, "handed": 27008, "cq": 27009, "umbing": 27010, "▁unre": 27011, "▁siendo": 27012, "▁simpler": 27013, "why": 27014, "arettes": 27015, "anst": 27016, "▁hass": 27017, "▁Enterprise": 27018, "▁mois": 27019, "▁Fo": 27020, "▁участ": 27021, "ffen": 27022, 
"▁MODULE": 27023, "▁activated": 27024, "▁internacional": 27025, "▁Mittel": 27026, "degree": 27027, "▁откры": 27028, "▁&(": 27029, "getProperty": 27030, "isz": 27031, "cedure": 27032, "▁enters": 27033, "▁Sally": 27034, "▁Train": 27035, "▁logged": 27036, "▁Rav": 27037, "▁Avoid": 27038, "▁Kaiser": 27039, "▁expend": 27040, "aphor": 27041, "▁brass": 27042, "▁melod": 27043, "▁attitudes": 27044, "*\"": 27045, "Wall": 27046, "▁owe": 27047, "▁bamb": 27048, "shader": 27049, "cester": 27050, "▁PP": 27051, "▁migrations": 27052, "entric": 27053, "▁Setup": 27054, "▁Artist": 27055, "hre": 27056, "▁polite": 27057, "ahan": 27058, "▁luglio": 27059, "▁predecess": 27060, "▁SIG": 27061, "тів": 27062, "▁RF": 27063, "▁Dry": 27064, "▁maker": 27065, "шим": 27066, "▁Sounds": 27067, "▁implementing": 27068, "▁ah": 27069, "▁gev": 27070, "▁duplicate": 27071, "▁Logan": 27072, "▁Grade": 27073, "DUCT": 27074, "íses": 27075, "ért": 27076, "▁nonsense": 27077, "backup": 27078, "Attachment": 27079, "▁ecc": 27080, "▁Squadron": 27081, "learn": 27082, "deprecated": 27083, "▁Aub": 27084, "▁Gol": 27085, "▁overl": 27086, "SERVICE": 27087, "▁beautifully": 27088, "REL": 27089, "▁Gian": 27090, "▁Papa": 27091, "respond": 27092, "▁Caribbean": 27093, "rn": 27094, "▁худож": 27095, "Cfg": 27096, "rai": 27097, "▁sniff": 27098, "tto": 27099, "ологи": 27100, "▁rb": 27101, "▁incidents": 27102, "▁duck": 27103, "▁PROVIDED": 27104, "Sources": 27105, "▁Chelsea": 27106, "▁tek": 27107, "▁налази": 27108, "▁pilots": 27109, "тки": 27110, "▁traded": 27111, "▁Beijing": 27112, "▁Gregory": 27113, "scalar": 27114, "▁inclined": 27115, "▁Kamp": 27116, "▁Marian": 27117, "▁fierce": 27118, "▁theft": 27119, "ющих": 27120, "▁Into": 27121, "constraint": 27122, "parentNode": 27123, "idental": 27124, "▁gouvernement": 27125, "▁SND": 27126, "▁Ruby": 27127, "▁monaster": 27128, "Records": 27129, "▁Kab": 27130, "▁Universe": 27131, "▁approximate": 27132, "Water": 27133, "▁Physical": 27134, "appers": 27135, "oubtedly": 27136, "ложен": 27137, "▁towel": 27138, "▁siblings": 27139, "eph": 27140, "icios": 27141, "рами": 27142, "▁outrage": 27143, "▁també": 27144, "SRC": 27145, "телем": 27146, "Vi": 27147, ".');": 27148, "LM": 27149, "▁mitt": 27150, "▁weed": 27151, "▁crops": 27152, "iman": 27153, "Claim": 27154, "insula": 27155, "▁(“": 27156, "▁Changes": 27157, "▁invånare": 27158, "again": 27159, "▁cnt": 27160, "▁Gaz": 27161, "▁austral": 27162, "overlay": 27163, "▁Mechan": 27164, "▁slammed": 27165, "▁trailing": 27166, "▁Biography": 27167, "▁appealing": 27168, "IVER": 27169, "▁Ave": 27170, "▁Plot": 27171, "voj": 27172, "▁sung": 27173, "▁unos": 27174, "Effects": 27175, "vv": 27176, "cook": 27177, "Buttons": 27178, "▁transm": 27179, "ierto": 27180, "CONTEXT": 27181, "▁dignity": 27182, "aired": 27183, "javax": 27184, "▁Alberto": 27185, "▁Recently": 27186, "▁facial": 27187, "mathop": 27188, "ało": 27189, "вид": 27190, "cott": 27191, "Variables": 27192, "▁Ran": 27193, "▁bunk": 27194, "amiliar": 27195, "CAST": 27196, "▁frü": 27197, "VED": 27198, "▁NOTICE": 27199, "▁turno": 27200, "validator": 27201, "▁Portuguese": 27202, "▁questioning": 27203, "}})": 27204, "▁lear": 27205, "Xamarin": 27206, "▁disadv": 27207, "encoded": 27208, "▁Kot": 27209, "rated": 27210, "▁Theory": 27211, "cius": 27212, "▁Darwin": 27213, "ђе": 27214, "▁décl": 27215, "▁область": 27216, "рович": 27217, "▁mobility": 27218, "VF": 27219, "▁хи": 27220, "until": 27221, "▁barriers": 27222, "gif": 27223, "▁Roh": 27224, "▁aging": 27225, "▁Widget": 27226, "olk": 27227, "▁farms": 27228, "Checker": 27229, "Introduction": 27230, 
"смо": 27231, "▁Russians": 27232, "naments": 27233, "▁Insert": 27234, "▁Whenever": 27235, "erset": 27236, "itori": 27237, "▁Dort": 27238, "▁costume": 27239, "▁mathematical": 27240, "▁Bast": 27241, "▁nominated": 27242, "▁restoration": 27243, "posal": 27244, "▁unfortunate": 27245, "Ps": 27246, "LIN": 27247, "▁intact": 27248, "▁provoc": 27249, "▁située": 27250, "▁ноября": 27251, "ermo": 27252, "▁fisher": 27253, "гля": 27254, "▁conting": 27255, "▁Doug": 27256, "\"?": 27257, "▁Eva": 27258, "▁tops": 27259, "▁Remote": 27260, "▁artwork": 27261, "▁artillery": 27262, "quick": 27263, "▁Arabia": 27264, "▁SDValue": 27265, "▁Dakota": 27266, "iated": 27267, "▁Optim": 27268, "buttons": 27269, "▁cottage": 27270, "▁wherein": 27271, "▁tutorial": 27272, "▁Scre": 27273, "▁sweep": 27274, "▁Coffee": 27275, "})}": 27276, "▁музы": 27277, "hostname": 27278, "▁Temp": 27279, "▁Fut": 27280, "respect": 27281, "ocz": 27282, "▁predomin": 27283, "Indicator": 27284, "encial": 27285, "UMENT": 27286, "▁SHALL": 27287, "▁commanded": 27288, "▁withdrawal": 27289, "iour": 27290, "REGION": 27291, "sprintf": 27292, "▁вме": 27293, "▁Payment": 27294, "▁Anim": 27295, "publish": 27296, "▁seeks": 27297, "ouw": 27298, "▁GM": 27299, "rugu": 27300, "ustain": 27301, "▁))": 27302, "▁consulting": 27303, "▁Dialog": 27304, "▁Lars": 27305, "▁critique": 27306, "▁circulation": 27307, "▁landsc": 27308, "managed": 27309, "▁Craft": 27310, "▁herman": 27311, "afi": 27312, "amy": 27313, "▁discour": 27314, "<>(": 27315, "▁Steph": 27316, "▁tolerance": 27317, "typename": 27318, "ventions": 27319, "ział": 27320, "стов": 27321, "▁sticking": 27322, "ASC": 27323, "ISO": 27324, "▁Spencer": 27325, "▁Didn": 27326, "gomery": 27327, "imiter": 27328, "dru": 27329, "Clause": 27330, "▁slides": 27331, "###": 27332, "▁Sugar": 27333, "HY": 27334, "▁эти": 27335, "▁Edwards": 27336, "▁cents": 27337, "oya": 27338, "serts": 27339, "▁Hass": 27340, "▁ingen": 27341, "стри": 27342, "▁saddle": 27343, "solid": 27344, "▁champions": 27345, "-)": 27346, "▁Slov": 27347, "▁shiny": 27348, "▁*)&": 27349, "▁Define": 27350, "če": 27351, "▁scrut": 27352, "onden": 27353, "'\",": 27354, "uffs": 27355, "▁olymp": 27356, "idential": 27357, "wand": 27358, "▁annually": 27359, "▁Arkansas": 27360, "▁saint": 27361, "▁gleich": 27362, "▁perfection": 27363, ")>": 27364, "▁shorts": 27365, "▁justified": 27366, "peated": 27367, "packages": 27368, "driven": 27369, "▁Liberty": 27370, "▁stripped": 27371, "шение": 27372, "▁fünf": 27373, "▁ecosystem": 27374, "ixa": 27375, "▁Fresh": 27376, "vart": 27377, "▁treats": 27378, "▁stance": 27379, "чёт": 27380, "▁pity": 27381, "adém": 27382, "▁окон": 27383, "▁Chand": 27384, "rab": 27385, "вший": 27386, "inski": 27387, "▁continually": 27388, "▁Daddy": 27389, "▁nightmare": 27390, "icional": 27391, "▁efect": 27392, "ueblo": 27393, "▁lanç": 27394, "▁Collections": 27395, "due": 27396, "ampton": 27397, "▁memcpy": 27398, "▁**(": 27399, "issent": 27400, "▁Insp": 27401, "▁Glasgow": 27402, "▁furono": 27403, "▁kindness": 27404, "Bi": 27405, "▁competed": 27406, "▁oak": 27407, "Large": 27408, "▁disgu": 27409, "▁kings": 27410, "тами": 27411, "▁stuffed": 27412, "▁hilar": 27413, "published": 27414, "▁stressed": 27415, "▁Peak": 27416, "▁loader": 27417, "Keyboard": 27418, "▁reconstruction": 27419, "▁vod": 27420, "▁dun": 27421, "▁understands": 27422, "tenant": 27423, "▁chaque": 27424, "▁prejud": 27425, "utat": 27426, "▁uso": 27427, "▁Heavy": 27428, "▁cuatro": 27429, "▁sidewalk": 27430, "▁Bug": 27431, "▁månaden": 27432, "geo": 27433, "▁united": 27434, "▁Files": 27435, "▁Аль": 27436, 
"▁rugby": 27437, "▁financing": 27438, "▁comply": 27439, "&#": 27440, "▁rushing": 27441, "▁fen": 27442, "mong": 27443, "▁spé": 27444, "▁presenting": 27445, "INCLUDING": 27446, "ěl": 27447, "zeichnung": 27448, "Backup": 27449, "▁petit": 27450, "▁allerg": 27451, "нут": 27452, "▁worrying": 27453, "▁mamm": 27454, "▁operand": 27455, ":%.*]]": 27456, "▁realise": 27457, "Commands": 27458, "▁Bew": 27459, "▁assumes": 27460, "▁Covid": 27461, "▁quand": 27462, "tyard": 27463, "▁Mono": 27464, "linked": 27465, "MARK": 27466, "Esp": 27467, "▁blessing": 27468, "▁eyebrows": 27469, "▁NV": 27470, "▁стру": 27471, "▁modeling": 27472, "▁greeted": 27473, "Workspace": 27474, "▁pedest": 27475, "▁неза": 27476, "lemagne": 27477, "Statistics": 27478, "▁aument": 27479, "▁speeds": 27480, "▁syndrome": 27481, "CONNECT": 27482, "zahl": 27483, "verso": 27484, "ército": 27485, "▁astronom": 27486, "▁aprile": 27487, "žen": 27488, "веро": 27489, "draft": 27490, "▁gioc": 27491, "▁comport": 27492, "▁variance": 27493, "▁realizing": 27494, "EDIT": 27495, "олові": 27496, "▁estar": 27497, "▁sost": 27498, "NORMAL": 27499, "▁ó": 27500, "▁Andr": 27501, "ATTRIB": 27502, "▁rede": 27503, "▁toes": 27504, "▁advances": 27505, "▁Against": 27506, "TOM": 27507, "rss": 27508, "MMMM": 27509, "▁newest": 27510, "▁VER": 27511, "▁phrases": 27512, "anter": 27513, "Launch": 27514, "▁chr": 27515, "▁manufactured": 27516, "$),": 27517, "rollment": 27518, "eston": 27519, "▁peint": 27520, "”)": 27521, "endet": 27522, "▁Hair": 27523, "ivalent": 27524, "▁upright": 27525, "gren": 27526, "anked": 27527, "wright": 27528, "▁mast": 27529, "▁onChange": 27530, "▁debris": 27531, "▁grap": 27532, "etry": 27533, "▁(__": 27534, "▁Commerce": 27535, "BOX": 27536, "Tax": 27537, "▁отри": 27538, "▁prevention": 27539, "▁Feel": 27540, "▁exotic": 27541, "▁Bark": 27542, "▁Steam": 27543, "fon": 27544, "olin": 27545, "▁eliminated": 27546, "▁bc": 27547, "▁Cycl": 27548, "▁$(\"#": 27549, "▁Parl": 27550, "manuel": 27551, "ospher": 27552, "WF": 27553, "Analy": 27554, "▁navig": 27555, "▁renown": 27556, "Rx": 27557, "▁Walt": 27558, "uffed": 27559, "▁foster": 27560, "$:": 27561, "shore": 27562, "Connector": 27563, "фика": 27564, "▁realization": 27565, "Li": 27566, "ctxt": 27567, "ahoo": 27568, "▁miracle": 27569, "▁ET": 27570, "▁GPS": 27571, "▁Observable": 27572, "▁hf": 27573, "▁magnificent": 27574, "него": 27575, "BIN": 27576, "▁Dorf": 27577, "ieck": 27578, "vee": 27579, "▁Craw": 27580, "/#": 27581, "▁pci": 27582, "ippet": 27583, "▁Hillary": 27584, "▁gir": 27585, "▁rand": 27586, "▁laying": 27587, "▁Different": 27588, "boys": 27589, "virt": 27590, "▁encryption": 27591, "ász": 27592, "пор": 27593, "▁smelled": 27594, "▁suscept": 27595, "cluded": 27596, "▁Carn": 27597, "igten": 27598, "▁Chuck": 27599, "▁Provinc": 27600, "▁perí": 27601, "▁Marshal": 27602, "мож": 27603, "gfx": 27604, "oshi": 27605, "▁WHE": 27606, "▁relaxation": 27607, ",.": 27608, "were": 27609, "▁varieties": 27610, "▁Won": 27611, "▁gaps": 27612, "▁stole": 27613, "igua": 27614, "ющие": 27615, "▁Hampshire": 27616, "phrase": 27617, "▁película": 27618, "Processing": 27619, "▁initialization": 27620, "oustic": 27621, "▁Josef": 27622, "icating": 27623, "▁goodness": 27624, "TES": 27625, "▁cope": 27626, "▁ignorance": 27627, "▁Brist": 27628, "▁paras": 27629, "▁accidentally": 27630, "▁tand": 27631, "ittest": 27632, "▁ули": 27633, "▁shipped": 27634, "▁ост": 27635, "elseif": 27636, "▁usize": 27637, "horizontal": 27638, "▁Carr": 27639, "▁precip": 27640, "roz": 27641, "pathetic": 27642, "rived": 27643, "rok": 27644, "▁digging": 27645, "мом": 
27646, "▁Mull": 27647, "▁XIII": 27648, "▁peas": 27649, "▁foul": 27650, "▁travels": 27651, "▁Ng": 27652, "▁составе": 27653, "Mont": 27654, "arde": 27655, "▁Stefan": 27656, "^^^^": 27657, "▁Kiss": 27658, "▁Ek": 27659, "▁oktober": 27660, "▁memorable": 27661, "')).": 27662, "▁Vision": 27663, "▁Nina": 27664, "▁Solar": 27665, "▁highlighted": 27666, "▁memo": 27667, "meisterschaft": 27668, "sidebar": 27669, "SEE": 27670, "▁Nevada": 27671, "Da": 27672, "▁drawer": 27673, "astically": 27674, "elde": 27675, "scribed": 27676, "▁priests": 27677, "▁hommes": 27678, "▁instructor": 27679, "клад": 27680, "▁spett": 27681, "\\-": 27682, "▁мира": 27683, "▁Looks": 27684, "▁sleeve": 27685, "▁strongest": 27686, "▁tête": 27687, "▁Nicole": 27688, "imper": 27689, "нача": 27690, "ipper": 27691, "▁inwon": 27692, "ilers": 27693, "▁Deputy": 27694, "oge": 27695, "▁depressed": 27696, "▁arte": 27697, "▁combining": 27698, "LAST": 27699, "inted": 27700, "▁Average": 27701, "▁pollution": 27702, "▁Phillips": 27703, "▁WM": 27704, "}}}\\": 27705, "Added": 27706, "▁peripher": 27707, "Creation": 27708, "▁italien": 27709, "▁Choice": 27710, "▁EXPRESS": 27711, "▁Struct": 27712, "ysz": 27713, "Resize": 27714, "ARGS": 27715, "▁repo": 27716, "▁чтобы": 27717, "▁pref": 27718, "▁earthqu": 27719, "▁Мекси": 27720, "▁Finale": 27721, "▁hecho": 27722, "requests": 27723, "Cut": 27724, "▁deserved": 27725, "гово": 27726, "▁Recent": 27727, "▁дивизи": 27728, "▁supportive": 27729, "прави": 27730, "▁irrelevant": 27731, "'\r": 27732, "▁ctrl": 27733, "▁Deal": 27734, "izada": 27735, "uo": 27736, "▁nort": 27737, "geometry": 27738, "▁Individual": 27739, "ereg": 27740, "▁приня": 27741, "cref": 27742, "══": 27743, "▁comerc": 27744, "=_": 27745, "bund": 27746, "тах": 27747, "ilen": 27748, "чита": 27749, "▁corporation": 27750, "esz": 27751, "▁==>": 27752, "ablish": 27753, "Apr": 27754, "▁ripped": 27755, "Vars": 27756, "stret": 27757, "▁Francesco": 27758, "NaN": 27759, "▁anytime": 27760, "▁automated": 27761, "ostream": 27762, "▁drawings": 27763, "▁enhancement": 27764, "okrat": 27765, "▁Issue": 27766, "вра": 27767, "Currency": 27768, "▁wyn": 27769, "izarre": 27770, "ético": 27771, "multiple": 27772, "▁Rate": 27773, "▁Ich": 27774, "▁Auss": 27775, "▁Former": 27776, "Curve": 27777, "▁marvel": 27778, "attro": 27779, "▁сп": 27780, "BOOL": 27781, "сия": 27782, "gold": 27783, "▁Nintendo": 27784, "▁Salvador": 27785, "▁Solution": 27786, "ADC": 27787, "бора": 27788, "▁Bennett": 27789, "▁FR": 27790, "▁pueden": 27791, "patient": 27792, "▁PG": 27793, "▁Jin": 27794, "▁crashed": 27795, "▁denen": 27796, "▁Sample": 27797, "▁Quebec": 27798, "itories": 27799, "▁blinked": 27800, "▁lion": 27801, "▁voce": 27802, "▁Impact": 27803, "▁Mau": 27804, "▁Nie": 27805, "▁lob": 27806, "▁две": 27807, "orneys": 27808, "▁coastal": 27809, "▁sensors": 27810, "▁XII": 27811, "▁illusion": 27812, "oji": 27813, "▁INC": 27814, "▁Duncan": 27815, "yk": 27816, "▁affecting": 27817, "pul": 27818, "▁Napoleon": 27819, "▁акаде": 27820, "▁compt": 27821, "▁profitable": 27822, "loe": 27823, "▁deuxième": 27824, "▁WC": 27825, "▁viable": 27826, "▁Drug": 27827, "TextBox": 27828, "▁luminos": 27829, "auté": 27830, "yc": 27831, "ště": 27832, "▁affiliates": 27833, "ilda": 27834, "conduct": 27835, "▁ebenfalls": 27836, "▁AMD": 27837, "▁Monitor": 27838, "▁Companies": 27839, "▁corrected": 27840, "äck": 27841, "SYSTEM": 27842, "otherapy": 27843, "▁перед": 27844, "▁blues": 27845, "atisf": 27846, "although": 27847, "rost": 27848, "SCAN": 27849, "▁RAM": 27850, "ціональ": 27851, "▁vendors": 27852, "▁customs": 27853, "▁activate": 
27854, "▁blogs": 27855, "▁brace": 27856, "▁strat": 27857, "anje": 27858, "щё": 27859, "▁tide": 27860, "▁Brigade": 27861, "getOperand": 27862, "▁aliment": 27863, "▁achievements": 27864, "▁suspicion": 27865, "▁touchdown": 27866, "broad": 27867, "iore": 27868, "Comparison": 27869, "▁mum": 27870, "English": 27871, "▁Picture": 27872, "▁Mouse": 27873, "amd": 27874, "▁[`": 27875, "▁denomin": 27876, "▁Aleks": 27877, "▁prevents": 27878, "ób": 27879, "fed": 27880, "▁Pray": 27881, "▁shine": 27882, "▁clutch": 27883, "mux": 27884, "Appro": 27885, "▁notably": 27886, "chio": 27887, "nage": 27888, "HAS": 27889, "▁')": 27890, "▁Miche": 27891, "tg": 27892, "::~": 27893, "▁amely": 27894, "▁rodz": 27895, "zs": 27896, "trait": 27897, "▁klass": 27898, "fö": 27899, "▁destac": 27900, "▁Clara": 27901, "frequency": 27902, "▁Git": 27903, "▁поль": 27904, "▁frequencies": 27905, "▁febrero": 27906, "▁stumbled": 27907, "кою": 27908, "▁Names": 27909, "▁Flight": 27910, "▁prey": 27911, "▁medio": 27912, "▁VAR": 27913, "▁Float": 27914, "▁Ernest": 27915, "▁Marcatori": 27916, "oport": 27917, "▁cancellation": 27918, "▁Bryan": 27919, "————": 27920, "Luc": 27921, "▁libre": 27922, "▁título": 27923, "*>": 27924, "▁Sandy": 27925, "▁Marina": 27926, "Been": 27927, "▁wal": 27928, "▁Kultur": 27929, "▁explode": 27930, "▁limiting": 27931, "▁presumably": 27932, "▁pb": 27933, "▁Merc": 27934, "▁реки": 27935, "learning": 27936, "Catalog": 27937, "▁Census": 27938, "lte": 27939, "▁NET": 27940, "raising": 27941, "ське": 27942, "staff": 27943, "▁Quinn": 27944, "▁memorial": 27945, "пня": 27946, "▁cuenta": 27947, "▁XI": 27948, "lbl": 27949, "▁varies": 27950, "▁fluctuations": 27951, "▁долж": 27952, "▁особи": 27953, "▁warehouse": 27954, "However": 27955, "▁corrections": 27956, "dhd": 27957, "▁fals": 27958, "▁controversy": 27959, "▁curse": 27960, "▁télé": 27961, "řed": 27962, "▁AU": 27963, "▁тор": 27964, "▁crít": 27965, "idan": 27966, "iliary": 27967, "▁Panel": 27968, "cule": 27969, "▁Poor": 27970, "▁BA": 27971, "▁ignorant": 27972, "èmes": 27973, "▁aesthetic": 27974, "Linked": 27975, "getInt": 27976, "Unicode": 27977, "[@": 27978, "▁Zent": 27979, "Manifest": 27980, "▁vars": 27981, "PB": 27982, "▁ву": 27983, "▁Describe": 27984, "▁Anything": 27985, "oirs": 27986, "▁socks": 27987, "▁imped": 27988, "▁neue": 27989, "▁dispers": 27990, "Collect": 27991, "filer": 27992, "▁Frau": 27993, "▁Hockey": 27994, "▁teens": 27995, "▁Roberto": 27996, "lauf": 27997, "вать": 27998, "▁ско": 27999, "isArray": 28000, "▁teenager": 28001, "Built": 28002, "▁loudly": 28003, "Capacity": 28004, "▁adventures": 28005, "▁Molly": 28006, "recogn": 28007, "bars": 28008, "▁Lor": 28009, "▁può": 28010, "▁mong": 28011, "inement": 28012, "Assignment": 28013, "▁diz": 28014, "lessness": 28015, "▁Halloween": 28016, "▁bitmap": 28017, "Rom": 28018, "нар": 28019, "▁rebel": 28020, "▁radial": 28021, "measure": 28022, "nit": 28023, "▁Assume": 28024, "▁assignments": 28025, "▁Isn": 28026, "▁altre": 28027, "ßer": 28028, "наль": 28029, "▁flies": 28030, "▁droit": 28031, "▁thickness": 28032, "▁enjo": 28033, "▁dwell": 28034, "▁homosexual": 28035, "▁eval": 28036, "$_{": 28037, "asia": 28038, "▁philos": 28039, "getCurrent": 28040, "▁veterans": 28041, "▁Berkeley": 28042, "▁wildlife": 28043, "Cop": 28044, "vern": 28045, "▁Ú": 28046, "tos": 28047, "▁Led": 28048, "▁keywords": 28049, "▁medications": 28050, "neum": 28051, "▁jamais": 28052, "▁Buc": 28053, "▁PD": 28054, "▁Statement": 28055, "▁PI": 28056, "▁Jackie": 28057, "▁ordin": 28058, "▁kör": 28059, "enze": 28060, "▁utilized": 28061, "áct": 28062, "azed": 28063, 
"▁severely": 28064, "▁även": 28065, "▁libro": 28066, "▁Eu": 28067, "äst": 28068, "PART": 28069, "▁Butler": 28070, "▁puzzle": 28071, "Fall": 28072, "Country": 28073, "pfn": 28074, "▁україн": 28075, "▁Orchestra": 28076, "▁alto": 28077, "▁ancora": 28078, "▁decomposition": 28079, "▁م": 28080, "▁appetite": 28081, "adu": 28082, "▁THAT": 28083, "▁comenz": 28084, "mina": 28085, "▁initiated": 28086, "▁Tat": 28087, "▁sometime": 28088, "rek": 28089, "bread": 28090, "▁Statistics": 28091, "▁Cob": 28092, "Follow": 28093, "▁geometric": 28094, "шла": 28095, "▁proceedings": 28096, "Dlg": 28097, "seven": 28098, "▁[-": 28099, "▁Buffalo": 28100, "▁blacks": 28101, "▁sov": 28102, "▁custody": 28103, "▁ras": 28104, "▁tattoo": 28105, "öffentlicht": 28106, "Blo": 28107, "Austral": 28108, "▁recuper": 28109, "лев": 28110, "▁bem": 28111, "▁thou": 28112, "oriented": 28113, "vir": 28114, "▁colony": 28115, "▁Stanford": 28116, "Absolute": 28117, "adrat": 28118, "▁Situ": 28119, "▁souvent": 28120, "EXEC": 28121, "▁mű": 28122, "▁apartments": 28123, "▁случа": 28124, "▁ano": 28125, "WINDO": 28126, "acci": 28127, "▁Lau": 28128, "court": 28129, "▁manifold": 28130, "▁coalition": 28131, "▁XIV": 28132, "Attrib": 28133, "ascade": 28134, "▁wheat": 28135, "▁strengths": 28136, "FREE": 28137, "EMPTY": 28138, "▁hey": 28139, "ascular": 28140, "▁plasma": 28141, "▁bob": 28142, "Separator": 28143, "=\"${": 28144, "▁Zag": 28145, "▁projet": 28146, "▁smoothly": 28147, "SEQU": 28148, "analy": 28149, "attachment": 28150, "▁ES": 28151, "▁popped": 28152, "ős": 28153, "tom": 28154, "▁són": 28155, "▁rott": 28156, "Utilities": 28157, "hadoop": 28158, "▁sotto": 28159, "autor": 28160, "▁Georges": 28161, "▁který": 28162, "▁gruppo": 28163, "▁когда": 28164, "▁меда": 28165, "▁instrumental": 28166, "▁Writer": 28167, "▁setTimeout": 28168, "ikk": 28169, "▁Dopo": 28170, "]);\r": 28171, "▁practicing": 28172, "▁Ronald": 28173, "▁уби": 28174, "▁agrees": 28175, "▁denoted": 28176, "ismiss": 28177, "▁interviewed": 28178, "templates": 28179, "ři": 28180, "administr": 28181, "▁Butter": 28182, "▁XVII": 28183, "▁positioned": 28184, "▁Fourth": 28185, "▁overwhelmed": 28186, "▁Regular": 28187, "▁reprezent": 28188, "кономи": 28189, "▁expects": 28190, "Indices": 28191, "▁marijuana": 28192, "▁zaj": 28193, "▁Bren": 28194, "▁begg": 28195, "▁nahm": 28196, "▁interrog": 28197, "тие": 28198, "▁Bun": 28199, "▁серед": 28200, "▁shelves": 28201, "▁которых": 28202, "▁Frauen": 28203, "▁Sergeant": 28204, "▁успе": 28205, "matched": 28206, "▁donne": 28207, "▁touches": 28208, "abort": 28209, "▁vale": 28210, "▁institutional": 28211, "▁Mons": 28212, "▁ambitious": 28213, "▁nonetheless": 28214, "jd": 28215, "пей": 28216, "▁backpack": 28217, "dao": 28218, "вия": 28219, "▁surroundings": 28220, "|_{": 28221, "▁gegründ": 28222, "disp": 28223, "▁moisture": 28224, "▁wyd": 28225, "▁traders": 28226, "▁Erst": 28227, "▁Galaxy": 28228, "▁воло": 28229, "▁Peru": 28230, "▁priorities": 28231, "▁pronounced": 28232, "▁CBS": 28233, "▁Palm": 28234, "▁expans": 28235, "▁energet": 28236, "▁Condition": 28237, "▁Sver": 28238, "nested": 28239, "▁февраля": 28240, "hero": 28241, "▁коло": 28242, "▁Films": 28243, "Bon": 28244, "éal": 28245, "ployed": 28246, "trained": 28247, "▁első": 28248, "▁lust": 28249, "atinum": 28250, "oyle": 28251, "▁Jet": 28252, "ждения": 28253, "▁surveys": 28254, "bee": 28255, "workers": 28256, "records": 28257, "calendar": 28258, "bbing": 28259, "regation": 28260, "dashboard": 28261, "King": 28262, "▁vista": 28263, "▁depicted": 28264, "▁occurring": 28265, "▁офи": 28266, "▁sandwich": 28267, "rcu": 
28268, "kern": 28269, "▁minut": 28270, "▁смер": 28271, "▁td": 28272, "solete": 28273, "Complex": 28274, "▁tunn": 28275, "▁scarc": 28276, "stead": 28277, "▁Fail": 28278, "▁Rs": 28279, "▁trails": 28280, "kem": 28281, "▁Romans": 28282, "ativity": 28283, "Previous": 28284, "▁depress": 28285, "▁resigned": 28286, "getDefault": 28287, "▁Tibet": 28288, "▁Franco": 28289, "\")));": 28290, "▁injection": 28291, "removed": 28292, "▁praised": 28293, "▁Asc": 28294, "erase": 28295, "▁commissioned": 28296, "MAIL": 28297, "▁Boh": 28298, "Poly": 28299, "▁cinq": 28300, "▁Above": 28301, "▁Joshua": 28302, "ZERO": 28303, "▁summit": 28304, "▁Urs": 28305, "▁curl": 28306, "▁visa": 28307, "▁resur": 28308, "={'": 28309, "feat": 28310, "▁absorb": 28311, "▁planets": 28312, "▁princess": 28313, "▁Jahrhunderts": 28314, "xp": 28315, "▁NBC": 28316, "▁коми": 28317, "▁FUN": 28318, "▁neuen": 28319, "▁déjà": 28320, "▁Oz": 28321, "bben": 28322, "VIDEO": 28323, "▁ejempl": 28324, "▁considers": 28325, "atri": 28326, "▁arrog": 28327, "ioso": 28328, "▁hace": 28329, "▁contacted": 28330, "▁unple": 28331, "▁sponsored": 28332, "▁trainer": 28333, "sbi": 28334, "▁занима": 28335, "Criterion": 28336, "ното": 28337, "scheme": 28338, "ennial": 28339, "perform": 28340, "▁fixing": 28341, "▁постро": 28342, "arb": 28343, "EXIT": 28344, "▁café": 28345, "ituted": 28346, "riages": 28347, "Tur": 28348, "▁haber": 28349, "elasticsearch": 28350, "▁ал": 28351, "rh": 28352, "▁voll": 28353, "CLU": 28354, "Mil": 28355, "▁membres": 28356, "▁remarked": 28357, "вана": 28358, "=\"_": 28359, "Less": 28360, "(\"\");": 28361, "▁Yale": 28362, "berries": 28363, "▁releasing": 28364, "▁imports": 28365, "idea": 28366, "▁(+": 28367, "▁arqu": 28368, "ificación": 28369, "▁пара": 28370, "▁Rangers": 28371, "Mic": 28372, "▁nederbörd": 28373, "▁imaginary": 28374, "▁specialists": 28375, "▁hoof": 28376, "Modules": 28377, "▁sadly": 28378, "ceil": 28379, "TabIndex": 28380, "ationale": 28381, "▁Partner": 28382, "tbody": 28383, "▁leverage": 28384, "DN": 28385, "▁Prec": 28386, "▁Sé": 28387, "▁Mam": 28388, "▁afin": 28389, "isValid": 28390, "Pse": 28391, "▁сторо": 28392, "▁chopped": 28393, "▁Minor": 28394, "▁dabei": 28395, "David": 28396, "ussia": 28397, "▁деревня": 28398, "▁Identity": 28399, "▁LGBT": 28400, "ције": 28401, "▁Orts": 28402, "▁parti": 28403, "▁Bachelor": 28404, "uga": 28405, "▁OPT": 28406, "▁Seth": 28407, "▁LIABLE": 28408, "▁inaugur": 28409, "▁Shanghai": 28410, "▁relaxing": 28411, "циона": 28412, "\"%": 28413, "▁obey": 28414, "▁Airlines": 28415, "Links": 28416, "▁Celt": 28417, "▁Admin": 28418, "agation": 28419, "▁worries": 28420, "INTE": 28421, "arith": 28422, "Fatalf": 28423, "]])": 28424, "colm": 28425, "▁archae": 28426, "▁brushed": 28427, "▁tät": 28428, "▁structured": 28429, "тии": 28430, "▁homem": 28431, "[:,": 28432, "▁navy": 28433, "getKey": 28434, "powered": 28435, "▁sucked": 28436, "▁zomb": 28437, "issant": 28438, "▁Might": 28439, "▁Pull": 28440, "rir": 28441, "▁пі": 28442, "▁seas": 28443, "▁Wrest": 28444, "▁tense": 28445, "▁atm": 28446, "▁havet": 28447, "▁pierws": 28448, "▁tragic": 28449, "▁Diff": 28450, "▁confidential": 28451, "successful": 28452, "ęż": 28453, "▁Chain": 28454, "▁Kenya": 28455, "Choice": 28456, "ocur": 28457, "aniu": 28458, "▁consultant": 28459, "▁Advis": 28460, "Lif": 28461, "▁Lors": 28462, "avorite": 28463, "▁utilizing": 28464, "▁vintage": 28465, "Matcher": 28466, "▁membre": 28467, "▁Expect": 28468, "▁tracing": 28469, "nog": 28470, "▁dej": 28471, "▁уче": 28472, "▁loops": 28473, "▁onclick": 28474, "▁GPU": 28475, "▁Albums": 28476, "▁Archives": 
28477, "вата": 28478, "▁stove": 28479, "шли": 28480, "ancies": 28481, "▁gemeente": 28482, "mob": 28483, "PDF": 28484, "eso": 28485, "▁vég": 28486, "Resolve": 28487, "▁teaches": 28488, "ложе": 28489, "▁ство": 28490, "▁Одна": 28491, "▁fid": 28492, "Something": 28493, "▁nebo": 28494, "▁Valentine": 28495, "rowning": 28496, "▁але": 28497, "awi": 28498, "ishi": 28499, "▁SPI": 28500, "▁spel": 28501, "▁біль": 28502, "▁participant": 28503, "▁Ned": 28504, "▁Gast": 28505, "▁blond": 28506, "▁saves": 28507, "colored": 28508, "▁ACTION": 28509, "▁Politiker": 28510, "}$)": 28511, "▁Dum": 28512, "dentry": 28513, "Student": 28514, "▁~=": 28515, "loads": 28516, "▁Foster": 28517, "一个": 28518, "▁PK": 28519, "▁SB": 28520, "▁Hern": 28521, "▁Exhib": 28522, "Listeners": 28523, "Sun": 28524, "plac": 28525, "▁Bever": 28526, "▁incluy": 28527, "▁dc": 28528, "argc": 28529, "▁ged": 28530, "спа": 28531, "▁Formula": 28532, "▁сем": 28533, "▁empt": 28534, "unregister": 28535, "▁Queensland": 28536, "ández": 28537, "otive": 28538, "▁alley": 28539, "▁Democrat": 28540, "▁travail": 28541, "▁$,": 28542, "RP": 28543, "рое": 28544, "personal": 28545, "▁période": 28546, "HOME": 28547, "omes": 28548, "▁recognised": 28549, "heng": 28550, "▁Jung": 28551, "▁Roland": 28552, "▁convicted": 28553, "Locked": 28554, "▁mari": 28555, "▁Luxem": 28556, "referto": 28557, "Deleted": 28558, "intent": 28559, "▁Staats": 28560, "▁області": 28561, "ит": 28562, "▁саве": 28563, "▁Protocol": 28564, "ając": 28565, "chk": 28566, "TypeInfo": 28567, "▁pkt": 28568, "▁scandal": 28569, "▁individually": 28570, "FMT": 28571, "▁nj": 28572, "abile": 28573, "▁Rivers": 28574, "PROPERTY": 28575, "VB": 28576, "wort": 28577, "▁splitting": 28578, "achten": 28579, "▁ARISING": 28580, "▁sip": 28581, "▁fres": 28582, "▁groom": 28583, "Hol": 28584, "▁canon": 28585, "▁abruptly": 28586, "▁afterward": 28587, "▁Running": 28588, "▁ji": 28589, "▁%,": 28590, "▁Palestinian": 28591, "RW": 28592, "pgfscope": 28593, "▁countryside": 28594, "▁fortunate": 28595, "▁cél": 28596, "▁Pointer": 28597, "ensors": 28598, "rating": 28599, "▁buffers": 28600, "▁remot": 28601, "▁PropTypes": 28602, "▁Nah": 28603, "altern": 28604, "▁easiest": 28605, "▁invas": 28606, "▁clk": 28607, "copyright": 28608, "▁blanc": 28609, "SAMP": 28610, "▁Cohen": 28611, "▁Shell": 28612, "▁destroying": 28613, "▁Zel": 28614, "dater": 28615, "čen": 28616, "▁filing": 28617, "▁integrate": 28618, "xit": 28619, "▁RET": 28620, "lene": 28621, "calls": 28622, "▁slaughter": 28623, "initialized": 28624, "unches": 28625, "▁Trace": 28626, "efficient": 28627, "▁Woods": 28628, "▁longitud": 28629, "GN": 28630, "▁Kont": 28631, "▁chunks": 28632, "ách": 28633, "▁unemployment": 28634, "acom": 28635, "▁slowed": 28636, "▁outlined": 28637, "xffff": 28638, "▁ikke": 28639, "▁workspace": 28640, "Mc": 28641, "▁kicking": 28642, "▁embedding": 28643, "chnitt": 28644, "erten": 28645, "▁Interior": 28646, "▁Songs": 28647, "mmc": 28648, "▁analyzed": 28649, "▁Coupe": 28650, "▁favorites": 28651, "▁tt": 28652, "▁той": 28653, "Routing": 28654, "▁Silva": 28655, "▁anderem": 28656, "▁honom": 28657, "▁использова": 28658, ".\"]": 28659, "▁Wu": 28660, "legt": 28661, "▁spoon": 28662, "▁jap": 28663, "▁Extension": 28664, "erne": 28665, "▁vagy": 28666, "▁села": 28667, "▁функ": 28668, "▁analytics": 28669, "▁sug": 28670, "▁Async": 28671, "▁peaks": 28672, "▁Gym": 28673, "▁lawsuit": 28674, "<>": 28675, "ialis": 28676, "etric": 28677, "faced": 28678, "▁disrupt": 28679, "▁få": 28680, "Inputs": 28681, "`);": 28682, "▁Mend": 28683, "gon": 28684, "▁\",\"": 28685, "▁nerves": 28686, 
"▁doubts": 28687, "sap": 28688, "▁sow": 28689, ",\\,\\": 28690, "▁BS": 28691, "▁Glad": 28692, "▁aster": 28693, "œuvre": 28694, "▁Bangl": 28695, "▁iPad": 28696, "useppe": 28697, "▁conducting": 28698, "▁({\\": 28699, "▁Harbor": 28700, "psz": 28701, "▁FIFA": 28702, "_**": 28703, "emor": 28704, "▁": 28705, "e": 28706, "t": 28707, "a": 28708, "o": 28709, "i": 28710, "n": 28711, "r": 28712, "s": 28713, "l": 28714, "d": 28715, "h": 28716, "c": 28717, "u": 28718, "m": 28719, "p": 28720, "g": 28721, "f": 28722, ".": 28723, "y": 28724, ",": 28725, "b": 28726, "w": 28727, "v": 28728, "k": 28729, "_": 28730, ")": 28731, "(": 28732, "-": 28733, "0": 28734, "S": 28735, "*": 28736, "I": 28737, "T": 28738, "\"": 28739, "1": 28740, "A": 28741, "'": 28742, "C": 28743, "x": 28744, ";": 28745, "=": 28746, ":": 28747, "/": 28748, "E": 28749, "2": 28750, "{": 28751, "}": 28752, "P": 28753, "R": 28754, "M": 28755, "\\": 28756, "D": 28757, "L": 28758, "N": 28759, "B": 28760, "о": 28761, "O": 28762, "а": 28763, "z": 28764, "F": 28765, "|": 28766, ">": 28767, "j": 28768, "H": 28769, "3": 28770, "#": 28771, "и": 28772, "е": 28773, "9": 28774, "q": 28775, "$": 28776, "G": 28777, "н": 28778, "U": 28779, "W": 28780, "4": 28781, "5": 28782, "8": 28783, "6": 28784, "р": 28785, "т": 28786, "7": 28787, "с": 28788, "<": 28789, "V": 28790, "в": 28791, "[": 28792, "]": 28793, "л": 28794, "к": 28795, "K": 28796, "é": 28797, "J": 28798, "д": 28799, "&": 28800, "\r": 28801, "Y": 28802, "м": 28803, "?": 28804, "у": 28805, "+": 28806, "п": 28807, "!": 28808, "’": 28809, "г": 28810, "я": 28811, "з": 28812, "і": 28813, "X": 28814, "^": 28815, "–": 28816, "б": 28817, "@": 28818, "й": 28819, "á": 28820, "—": 28821, "ь": 28822, "%": 28823, "Q": 28824, "ó": 28825, "ч": 28826, "í": 28827, "Z": 28828, "ы": 28829, "ä": 28830, "х": 28831, "`": 28832, "ц": 28833, "ö": 28834, "“": 28835, "ж": 28836, "ü": 28837, "”": 28838, "à": 28839, "è": 28840, "ш": 28841, "ю": 28842, "ł": 28843, "С": 28844, "~": 28845, "ф": 28846, "П": 28847, "»": 28848, "В": 28849, "«": 28850, "å": 28851, "К": 28852, "щ": 28853, "·": 28854, "ј": 28855, "М": 28856, "ç": 28857, "А": 28858, "Н": 28859, "Р": 28860, "Б": 28861, "č": 28862, "ú": 28863, "ę": 28864, "ã": 28865, "ą": 28866, "ă": 28867, "Д": 28868, "ї": 28869, "ъ": 28870, "ě": 28871, "Г": 28872, "š": 28873, "О": 28874, "Т": 28875, "ê": 28876, "ñ": 28877, "…": 28878, "ž": 28879, "ß": 28880, "ё": 28881, "ż": 28882, "ř": 28883, "ś": 28884, "Л": 28885, "ő": 28886, "„": 28887, "э": 28888, "ý": 28889, "У": 28890, "â": 28891, "И": 28892, "є": 28893, "‘": 28894, "î": 28895, "З": 28896, "Ф": 28897, "ò": 28898, "•": 28899, "ć": 28900, "É": 28901, "°": 28902, "ș": 28903, "Х": 28904, "ț": 28905, "ô": 28906, "Е": 28907, "ń": 28908, "Ч": 28909, "Ш": 28910, "ø": 28911, "ù": 28912, "ů": 28913, "的": 28914, "ا": 28915, "æ": 28916, "њ": 28917, "љ": 28918, "ë": 28919, "ï": 28920, "Э": 28921, "£": 28922, "−": 28923, ",": 28924, "õ": 28925, "ћ": 28926, "­": 28927, "Ц": 28928, "І": 28929, "ā": 28930, "ű": 28931, "†": 28932, "ل": 28933, "ō": 28934, "​": 28935, "º": 28936, "Я": 28937, "′": 28938, "Á": 28939, "Ö": 28940, "²": 28941, "Ж": 28942, "ì": 28943, "。": 28944, "数": 28945, "×": 28946, "ر": 28947, "α": 28948, "́": 28949, "Ю": 28950, "û": 28951, "œ": 28952, "ı": 28953, "م": 28954, "ن": 28955, "ª": 28956, "ź": 28957, "ο": 28958, "″": 28959, "€": 28960, "Ü": 28961, "و": 28962, "用": 28963, "À": 28964, "Č": 28965, "Š": 28966, "ت": 28967, "د": 28968, "一": 28969, "¿": 28970, "是": 28971, "ي": 28972, "ђ": 28973, "®": 28974, "ی": 28975, "ν": 
28976, "đ": 28977, "τ": 28978, "─": 28979, "ι": 28980, "ε": 28981, "→": 28982, "ب": 28983, "Å": 28984, "ū": 28985, "№": 28986, "ş": 28987, "不": 28988, "џ": 28989, "ー": 28990, "中": 28991, "Î": 28992, "の": 28993, ":": 28994, "个": 28995, "Й": 28996, "ρ": 28997, "有": 28998, "Ä": 28999, " ": 29000, "ī": 29001, "©": 29002, "为": 29003, "ه": 29004, "י": 29005, "ו": 29006, "时": 29007, "س": 29008, "Ś": 29009, "在": 29010, "件": 29011, "取": 29012, "ς": 29013, "™": 29014, "이": 29015, "σ": 29016, "μ": 29017, "定": 29018, "文": 29019, "据": 29020, "置": 29021, "Ž": 29022, "±": 29023, "表": 29024, "成": 29025, "ň": 29026, "λ": 29027, "¡": 29028, "È": 29029, "π": 29030, "字": 29031, "│": 29032, "Ј": 29033, "回": 29034, "Є": 29035, "到": 29036, "行": 29037, "§": 29038, "½": 29039, "ع": 29040, "、": 29041, "Ł": 29042, "다": 29043, "ン": 29044, "κ": 29045, "名": 29046, "ה": 29047, "入": 29048, "η": 29049, "大": 29050, "对": 29051, "可": 29052, "Â": 29053, "上": 29054, "█": 29055, "新": 29056, "ف": 29057, "加": 29058, "要": 29059, "Ż": 29060, "下": 29061, "分": 29062, "值": 29063, "ת": 29064, "出": 29065, "类": 29066, "请": 29067, "’": 29068, "息": 29069, "Ú": 29070, "υ": 29071, "获": 29072, "示": 29073, "以": 29074, "ר": 29075, "接": 29076, "ל": 29077, "を": 29078, "存": 29079, "信": 29080, "设": 29081, "方": 29082, "ش": 29083, "能": 29084, "点": 29085, "人": 29086, "前": 29087, "ğ": 29088, "作": 29089, "═": 29090, "↘": 29091, "ð": 29092, "理": 29093, "■": 29094, "法": 29095, "️": 29096, "ˈ": 29097, "果": 29098, "发": 29099, "ح": 29100, "γ": 29101, "ɵ": 29102, "า": 29103, "َ": 29104, "了": 29105, "户": 29106, "Í": 29107, "ə": 29108, "ス": 29109, "查": 29110, "し": 29111, "מ": 29112, "单": 29113, "ť": 29114, "ق": 29115, "る": 29116, "间": 29117, "如": 29118, "本": 29119, "后": 29120, "ί": 29121, "式": 29122, "ト": 29123, "Щ": 29124, "Ó": 29125, "す": 29126, "א": 29127, "生": 29128, "动": 29129, "ک": 29130, "和": 29131, "い": 29132, "€": 29133, "ა": 29134, "가": 29135, "하": 29136, "�": 29137, "小": 29138, "返": 29139, "否": 29140, "ة": 29141, "日": 29142, "로": 29143, "标": 29144, "码": 29145, "地": 29146, "位": 29147, "에": 29148, " ": 29149, "列": 29150, "수": 29151, "β": 29152, "除": 29153, "使": 29154, "ש": 29155, "ج": 29156, "イ": 29157, "δ": 29158, "自": 29159, "于": 29160, "지": 29161, "当": 29162, "所": 29163, "기": 29164, "ი": 29165, "ב": 29166, "ร": 29167, "★": 29168, "子": 29169, "号": 29170, "ك": 29171, "参": 29172, "型": 29173, "に": 29174, "는": 29175, "这": 29176, "开": 29177, "น": 29178, "会": 29179, "器": 29180, "面": 29181, "ル": 29182, "图": 29183, "度": 29184, ")": 29185, "(": 29186, "의": 29187, "内": 29188, "을": 29189, "最": 29190, "”": 29191, "化": 29192, "建": 29193, "니": 29194, "量": 29195, "😂": 29196, "始": 29197, "ē": 29198, "خ": 29199, "를": 29200, "ά": 29201, "过": 29202, "³": 29203, "´": 29204, "组": 29205, "功": 29206, "‎": 29207, "Ÿ": 29208, "区": 29209, "ز": 29210, "ґ": 29211, "ό": 29212, "ッ": 29213, "ω": 29214, "Ç": 29215, "选": 29216, "通": 29217, "结": 29218, "录": 29219, "改": 29220, "ク": 29221, "目": 29222, "指": 29223, "务": 29224, "๐": 29225, "输": 29226, "た": 29227, "อ": 29228, "关": 29229, "で": 29230, "调": 29231, "ा": 29232, "정": 29233, "合": 29234, "已": 29235, "시": 29236, "部": 29237, "页": 29238, "━": 29239, "ː": 29240, "ま": 29241, "我": 29242, "求": 29243, "市": 29244, "次": 29245, "נ": 29246, "实": 29247, "将": 29248, "重": 29249, "更": 29250, "制": 29251, "符": 29252, "配": 29253, "象": 29254, "θ": 29255, "ก": 29256, "て": 29257, "进": 29258, "需": 29259, "Đ": 29260, "性": 29261, "认": 29262, "来": 29263, "题": 29264, "程": 29265, "模": 29266, "!": 29267, "失": 29268, "口": 29269, "な": 29270, "έ": 29271, "": 
29272, "空": 29273, "‍": 29274, "期": 29275, "者": 29276, "は": 29277, "Ђ": 29278, "提": 29279, "ή": 29280, "ラ": 29281, "한": 29282, "态": 29283, "复": 29284, "ง": 29285, "ე": 29286, "Ø": 29287, "리": 29288, "修": 29289, "‚": 29290, "得": 29291, "多": 29292, "格": 29293, "자": 29294, "ע": 29295, "่": 29296, "函": 29297, "应": 29298, "↗": 29299, "्": 29300, "เ": 29301, "正": 29302, "注": 29303, "스": 29304, "서": 29305, "リ": 29306, "φ": 29307, "ص": 29308, "が": 29309, "则": 29310, "消": 29311, "节": 29312, "序": 29313, "代": 29314, "사": 29315, "と": 29316, "ד": 29317, "้": 29318, "र": 29319, "此": 29320, "保": 29321, "ア": 29322, "ư": 29323, "인": 29324, "ė": 29325, "处": 29326, "删": 29327, "ɛ": 29328, "容": 29329, "ط": 29330, "“": 29331, "之": 29332, "包": 29333, "状": 29334, "ド": 29335, "İ": 29336, "体": 29337, "同": 29338, "事": 29339, "🙂": 29340, "タ": 29341, "χ": 29342, "ʿ": 29343, "Ș": 29344, "主": 29345, "品": 29346, "ק": 29347, "询": 29348, "创": 29349, "该": 29350, " ": 29351, "元": 29352, "第": 29353, "天": 29354, "或": 29355, "年": 29356, "转": 29357, "ח": 29358, "传": 29359, "ţ": 29360, "路": 29361, "例": 29362, "机": 29363, "Ã": 29364, "ď": 29365, "高": 29366, "相": 29367, "โ": 29368, "片": 29369, "―": 29370, "操": 29371, "ա": 29372, "ม": 29373, "全": 29374, "无": 29375, "月": 29376, "称": 29377, "ั": 29378, "就": 29379, "™": 29380, "明": 29381, "计": 29382, "你": 29383, "败": 29384, "密": 29385, "解": 29386, "れ": 29387, "أ": 29388, "变": 29389, "段": 29390, "条": 29391, "默": 29392, "●": 29393, "ล": 29394, "色": 29395, "断": 29396, "商": 29397, "ם": 29398, "か": 29399, "里": 29400, "系": 29401, "编": 29402, "错": 29403, "트": 29404, "只": 29405, "县": 29406, "ს": 29407, "常": 29408, "初": 29409, "ɔ": 29410, "Α": 29411, "フ": 29412, "►": 29413, "等": 29414, "일": 29415, "・": 29416, "Ō": 29417, "情": 29418, "现": 29419, "Ř": 29420, "ِ": 29421, "さ": 29422, "ạ": 29423, "용": 29424, "证": 29425, "해": 29426, "手": 29427, "支": 29428, "입": 29429, "服": 29430, "்": 29431, "道": 29432, "어": 29433, "送": 29434, "载": 29435, "限": 29436, "线": 29437, "属": 29438, "—": 29439, "他": 29440, "放": 29441, "记": 29442, "公": 29443, "没": 29444, "添": 29445, "显": 29446, "บ": 29447, "ย": 29448, "რ": 29449, "其": 29450, "集": 29451, "金": 29452, "国": 29453, "任": 29454, "ە": 29455, "话": 29456, "并": 29457, "被": 29458, "ύ": 29459, "都": 29460, "گ": 29461, "意": 29462, "כ": 29463, "经": 29464, "성": 29465, "看": 29466, "פ": 29467, "址": 29468, "ס": 29469, "드": 29470, "交": 29471, "¼": 29472, "Џ": 29473, "完": 29474, "Δ": 29475, "义": 29476, "보": 29477, "向": 29478, "换": 29479, "山": 29480, "算": 29481, "二": 29482, "پ": 29483, "⁄": 29484, "判": 29485, "级": 29486, "工": 29487, "ด": 29488, "⠀": 29489, "家": 29490, "レ": 29491, "三": 29492, "原": 29493, "】": 29494, "长": 29495, "া": 29496, "管": 29497, "ѝ": 29498, "क": 29499, "学": 29500, "ロ": 29501, "验": 29502, "写": 29503, "Œ": 29504, "从": 29505, "【": 29506, "收": 29507, "ả": 29508, "未": 29509, "登": 29510, "고": 29511, "源": 29512, "每": 29513, "µ": 29514, "误": 29515, "り": 29516, "요": 29517, "按": 29518, "ว": 29519, "权": 29520, "根": 29521, "プ": 29522, "串": 29523, "ส": 29524, "›": 29525, "제": 29526, "シ": 29527, "Ş": 29528, "确": 29529, "好": 29530, "统": 29531, "效": 29532, "网": 29533, "\u0001": 29534, "物": 29535, "아": 29536, "也": 29537, "은": 29538, "ệ": 29539, "न": 29540, "项": 29541, "资": 29542, "こ": 29543, "引": 29544, "ジ": 29545, "ค": 29546, "版": 29547, "ท": 29548, "平": 29549, "们": 29550, "与": 29551, "き": 29552, "移": 29553, "ि": 29554, "素": 29555, "执": 29556, "주": 29557, "‐": 29558, "Ґ": 29559, "ี": 29560, "板": 29561, "问": 29562, "Ε": 29563, "安": 29564, "면": 29565, "소": 29566, "ต": 29567, 
"ิ": 29568, "持": 29569, "습": 29570, "Σ": 29571, "ら": 29572, "コ": 29573, "心": 29574, "Π": 29575, "打": 29576, "」": 29577, "상": 29578, "「": 29579, "检": 29580, "库": 29581, "÷": 29582, "으": 29583, "测": 29584, "ん": 29585, "े": 29586, "ُ": 29587, "力": 29588, "直": 29589, "由": 29590, "ى": 29591, "试": 29592, "必": 29593, "端": 29594, "ʻ": 29595, "先": 29596, "↑": 29597, "命": 29598, "도": 29599, "전": 29600, "ห": 29601, "员": 29602, "ɪ": 29603, "있": 29604, "比": 29605, "ṣ": 29606, "時": 29607, "择": 29608, "ذ": 29609, "テ": 29610, "‌": 29611, "构": 29612, "备": 29613, "그": 29614, "链": 29615, "说": 29616, "ლ": 29617, "ן": 29618, "签": 29619, "う": 29620, "غ": 29621, "ế": 29622, "ض": 29623, "ḥ": 29624, "启": 29625, "력": 29626, "ო": 29627, "付": 29628, "მ": 29629, "索": 29630, "特": 29631, "ג": 29632, "西": 29633, "대": 29634, "├": 29635, "–": 29636, "Ž": 29637, "外": 29638, "צ": 29639, "头": 29640, "连": 29641, "流": 29642, "◄": 29643, "デ": 29644, "カ": 29645, "র": 29646, "오": 29647, "找": 29648, "清": 29649, "🤣": 29650, "去": 29651, "₹": 29652, "경": 29653, "グ": 29654, "ْ": 29655, "¢": 29656, "因": 29657, "": 29658, "Κ": 29659, "增": 29660, "知": 29661, "¶": 29662, "像": 29663, "♥": 29664, "터": 29665, "く": 29666, "ậ": 29667, "メ": 29668, "Æ": 29669, "省": 29670, "स": 29671, "म": 29672, "❤": 29673, "あ": 29674, "样": 29675, "起": 29676, "台": 29677, "读": 29678, "角": 29679, "南": 29680, "整": 29681, "订": 29682, "\f": 29683, "ט": 29684, "マ": 29685, "্": 29686, "우": 29687, "ն": 29688, "您": 29689, "ئ": 29690, "基": 29691, "水": 29692, "생": 29693, "‑": 29694, "나": 29695, "画": 29696, "描": 29697, "击": 29698, "っ": 29699, "라": 29700, "ნ": 29701, "ր": 29702, "业": 29703, "ბ": 29704, "别": 29705, "♦": 29706, "ィ": 29707, "त": 29708, "给": 29709, "문": 29710, "形": 29711, "控": 29712, "然": 29713, "동": 29714, "Њ": 29715, "⁠": 29716, "东": 29717, "ป": 29718, "州": 29719, "排": 29720, "세": 29721, "装": 29722, "할": 29723, "Ć": 29724, "∞": 29725, "海": 29726, "城": 29727, "键": 29728, "径": 29729, "호": 29730, "화": 29731, "្": 29732, "料": 29733, "ơ": 29734, "ी": 29735, "ウ": 29736, "具": 29737, "ブ": 29738, "块": 29739, "再": 29740, "ố": 29741, "电": 29742, ";": 29743, "위": 29744, "两": 29745, "而": 29746, "장": 29747, "آ": 29748, "Ț": 29749, "バ": 29750, "还": 29751, "令": 29752, "キ": 29753, "ّ": 29754, "값": 29755, "번": 29756, "만": 29757, "总": 29758, "ल": 29759, "▲": 29760, "异": 29761, "光": 29762, "客": 29763, "非": 29764, "ị": 29765, "": 29766, "þ": 29767, "設": 29768, "述": 29769, "합": 29770, "?": 29771, "✔": 29772, "导": 29773, "ṇ": 29774, "부": 29775, "˙": 29776, "Τ": 29777, "も": 29778, "구": 29779, "镇": 29780, "작": 29781, "░": 29782, "步": 29783, "ộ": 29784, "活": 29785, "พ": 29786, "←": 29787, "ǎ": 29788, "จ": 29789, "束": 29790, "ـ": 29791, "‘": 29792, "那": 29793, "प": 29794, "エ": 29795, "志": 29796, "么": 29797, "运": 29798, "北": 29799, "超": 29800, "་": 29801, "布": 29802, "ώ": 29803, "͡": 29804, "少": 29805, "파": 29806, "ʃ": 29807, "ム": 29808, "•": 29809, "卡": 29810, "ন": 29811, "Μ": 29812, "ɑ": 29813, "😉": 29814, "辑": 29815, "원": 29816, "美": 29817, "产": 29818, "利": 29819, "모": 29820, "联": 29821, "界": 29822, "체": 29823, "种": 29824, "王": 29825, "ľ": 29826, "여": 29827, "메": 29828, "域": 29829, "ვ": 29830, "立": 29831, "록": 29832, "게": 29833, "إ": 29834, "ṭ": 29835, "神": 29836, "ո": 29837, "音": 29838, "☆": 29839, "Ñ": 29840, "조": 29841, "動": 29842, "缓": 29843, "과": 29844, "报": 29845, "ʼ": 29846, "ា": 29847, "되": 29848, "ե": 29849, "视": 29850, "ช": 29851, "详": 29852, "แ": 29853, "¦": 29854, "把": 29855, "க": 29856, "ি": 29857, "출": 29858, "비": 29859, "边": 29860, "框": 29861, "व": 29862, "サ": 29863, 
"Ι": 29864, "Ο": 29865, "オ": 29866, "¾": 29867, "历": 29868, "ŏ": 29869, "门": 29870, "ข": 29871, "含": 29872, "¬": 29873, "周": 29874, "填": 29875, "待": 29876, "ะ": 29877, "დ": 29878, "Ї": 29879, "额": 29880, "음": 29881, "四": 29882, "だ": 29883, "회": 29884, "止": 29885, "率": 29886, "环": 29887, "パ": 29888, "래": 29889, "闭": 29890, "̀": 29891, "语": 29892, "개": 29893, "身": 29894, "藏": 29895, "य": 29896, "된": 29897, "即": 29898, "拉": 29899, "선": 29900, "변": 29901, "≥": 29902, "ุ": 29903, "些": 29904, "🤷": 29905, "せ": 29906, "左": 29907, "ợ": 29908, "右": 29909, "ể": 29910, "내": 29911, "ּ": 29912, "ז": 29913, "ে": 29914, "告": 29915, "ấ": 29916, "白": 29917, "账": 29918, "费": 29919, "江": 29920, "み": 29921, "‹": 29922, "์": 29923, "‡": 29924, "造": 29925, "但": 29926, "十": 29927, "它": 29928, "ं": 29929, "ŋ": 29930, "ў": 29931, "セ": 29932, "女": 29933, "⣿": 29934, "ի": 29935, "京": 29936, "触": 29937, "함": 29938, "들": 29939, "Ā": 29940, "˜": 29941, "石": 29942, "よ": 29943, "田": 29944, "易": 29945, "规": 29946, "展": 29947, "¯": 29948, "做": 29949, "星": 29950, "უ": 29951, "✓": 29952, "თ": 29953, "供": 29954, "명": 29955, "ξ": 29956, "己": 29957, "且": 29958, "插": 29959, "景": 29960, "切": 29961, "ไ": 29962, "없": 29963, "ョ": 29964, "及": 29965, "Ν": 29966, "미": 29967, "ث": 29968, "데": 29969, "价": 29970, "乡": 29971, "ह": 29972, "チ": 29973, "真": 29974, "太": 29975, "ู": 29976, "ダ": 29977, "局": 29978, "♂": 29979, "退": 29980, "ு": 29981, "ক": 29982, "ி": 29983, "何": 29984, "😭": 29985, "¥": 29986, "": 29987, "≈": 29988, "司": 29989, "层": 29990, "실": 29991, "站": 29992, "首": 29993, "款": 29994, "រ": 29995, "間": 29996, "ָ": 29997, "저": 29998, "监": 29999, "ァ": 30000, "册": 30001, "案": 30002, "ो": 30003, "反": 30004, "听": 30005, "族": 30006, "析": 30007, "ื": 30008, "秒": 30009, "공": 30010, "œ": 30011, "🚀": 30012, "거": 30013, "재": 30014, "‚": 30015, "場": 30016, "广": 30017, "播": 30018, "║": 30019, "⋅": 30020, "技": 30021, "贴": 30022, "想": 30023, "ʁ": 30024, "ớ": 30025, "ャ": 30026, "중": 30027, "》": 30028, "速": 30029, "频": 30030, "队": 30031, "ำ": 30032, "け": 30033, "ु": 30034, "≤": 30035, "↓": 30036, "须": 30037, "菜": 30038, "̃": 30039, "剪": 30040, "버": 30041, "ェ": 30042, "Λ": 30043, "细": 30044, "選": 30045, "द": 30046, "¹": 30047, "许": 30048, "ầ": 30049, "世": 30050, "ュ": 30051, "ء": 30052, "‡": 30053, "候": 30054, "共": 30055, "크": 30056, "ธ": 30057, "설": 30058, "快": 30059, "友": 30060, "ְ": 30061, "车": 30062, "推": 30063, "花": 30064, "言": 30065, "چ": 30066, "至": 30067, "開": 30068, "校": 30069, "個": 30070, "村": 30071, "つ": 30072, "▌": 30073, "ப": 30074, "결": 30075, "ņ": 30076, "优": 30077, "ន": 30078, "达": 30079, "核": 30080, "ナ": 30081, "场": 30082, "影": 30083, "🏻": 30084, "钮": 30085, "ظ": 30086, "Þ": 30087, "▼": 30088, "お": 30089, "份": 30090, "微": 30091, "ờ": 30092, "识": 30093, "행": 30094, "《": 30095, "ใ": 30096, "ọ": 30097, "预": 30098, "ব": 30099, "த": 30100, "": 30101, "ų": 30102, "마": 30103, "않": 30104, "ɡ": 30105, "계": 30106, "연": 30107, "五": 30108, "Ź": 30109, "め": 30110, "很": 30111, "간": 30112, "無": 30113, "ប": 30114, "社": 30115, "Ê": 30116, "书": 30117, "顶": 30118, "ტ": 30119, "才": 30120, "云": 30121, "└": 30122, "ζ": 30123, "،": 30124, "搜": 30125, "신": 30126, "유": 30127, "‏": 30128, "✅": 30129, "⭐": 30130, "照": 30131, "短": 30132, "川": 30133, "後": 30134, "范": 30135, "民": 30136, "治": 30137, "章": 30138, "ề": 30139, "바": 30140, "ә": 30141, "⚭": 30142, "河": 30143, "论": 30144, "え": 30145, "Ω": 30146, "√": 30147, "Ă": 30148, "Γ": 30149, "坐": 30150, "적": 30151, "停": 30152, "추": 30153, "受": 30154, "♀": 30155, "ʾ": 30156, "树": 30157, "林": 30158, "치": 30159, 
"fi": 30160, "▒": 30161, "张": 30162, "着": 30163, "访": 30164, "考": 30165, "教": 30166, "ग": 30167, "准": 30168, "印": 30169, "精": 30170, "窗": 30171, "宝": 30172, "ち": 30173, "围": 30174, "ַ": 30175, "致": 30176, "モ": 30177, "때": 30178, "随": 30179, "储": 30180, "况": 30181, "邮": 30182, "武": 30183, "⛔": 30184, "维": 30185, "ү": 30186, "跳": 30187, "ब": 30188, "投": 30189, "ủ": 30190, "표": 30191, "반": 30192, "英": 30193, "ʰ": 30194, "👍": 30195, "ज": 30196, "带": 30197, "為": 30198, "续": 30199, "ɨ": 30200, "처": 30201, "₂": 30202, "클": 30203, "群": 30204, "현": 30205, "风": 30206, "购": 30207, "ក": 30208, "老": 30209, "留": 30210, "球": 30211, "프": 30212, "▄": 30213, "史": 30214, "Љ": 30215, "⟩": 30216, "분": 30217, "გ": 30218, "店": 30219, "审": 30220, "료": 30221, "목": 30222, "略": 30223, "관": 30224, "ִ": 30225, "科": 30226, "货": 30227, "ம": 30228, "络": 30229, "阳": 30230, "Ḥ": 30231, "資": 30232, "若": 30233, "স": 30234, "ہ": 30235, "宽": 30236, "见": 30237, "ズ": 30238, "游": 30239, "방": 30240, "ồ": 30241, "ɾ": 30242, "열": 30243, "러": 30244, "ך": 30245, "\u001b": 30246, "်": 30247, "余": 30248, "响": 30249, "缩": 30250, "ட": 30251, "评": 30252, "允": 30253, "离": 30254, "🤔": 30255, "Ё": 30256, "ʊ": 30257, "黑": 30258, "马": 30259, "⟨": 30260, "値": 30261, "箱": 30262, "야": 30263, "ម": 30264, "Ő": 30265, "感": 30266, "ツ": 30267, "ụ": 30268, "ポ": 30269, "확": 30270, "声": 30271, "战": 30272, "ѕ": 30273, "変": 30274, "와": 30275, "父": 30276, "ベ": 30277, "助": 30278, "업": 30279, "ʲ": 30280, "ÿ": 30281, "充": 30282, "强": 30283, "博": 30284, "ミ": 30285, "销": 30286, "당": 30287, "記": 30288, "什": 30289, "匹": 30290, "ւ": 30291, "そ": 30292, "코": 30293, "ল": 30294, "ŭ": 30295, "午": 30296, "ニ": 30297, "\u0012": 30298, "ʒ": 30299, "შ": 30300, "某": 30301, "ォ": 30302, "足": 30303, "타": 30304, "Ð": 30305, "ხ": 30306, "름": 30307, "木": 30308, "楼": 30309, "최": 30310, "红": 30311, "¨": 30312, "古": 30313, "\u0006": 30314, "단": 30315, "今": 30316, "ʔ": 30317, "ट": 30318, "ম": 30319, "斯": 30320, "語": 30321, "Ÿ": 30322, "🙄": 30323, "牌": 30324, "안": 30325, "ស": 30326, "颜": 30327, "~": 30328, "克": 30329, "深": 30330, "금": 30331, "會": 30332, "尔": 30333, "释": 30334, "批": 30335, "산": 30336, "野": 30337, "防": 30338, "Η": 30339, "ө": 30340, "ψ": 30341, "ボ": 30342, "š": 30343, "各": 30344, "진": 30345, "追": 30346, "句": 30347, "警": 30348, "Φ": 30349, "ѣ": 30350, "ḍ": 30351, "词": 30352, "男": 30353, "글": 30354, "식": 30355, "隐": 30356, "복": 30357, "盘": 30358, "Ì": 30359, "申": 30360, "议": 30361, "ザ": 30362, "近": 30363, "능": 30364, "য": 30365, "東": 30366, "這": 30367, "ர": 30368, "距": 30369, "院": 30370, "德": 30371, "ǐ": 30372, "针": 30373, "▀": 30374, "↔": 30375, "房": 30376, "青": 30377, "政": 30378, "😅": 30379, "递": 30380, "প": 30381, "波": 30382, "ソ": 30383, "绑": 30384, "ビ": 30385, "ễ": 30386, "포": 30387, "\u0010": 30388, "ử": 30389, "등": 30390, "환": 30391, "士": 30392, "ত": 30393, "Θ": 30394, "초": 30395, "境": 30396, "差": 30397, "采": 30398, "디": 30399, "ĩ": 30400, "升": 30401, "背": 30402, "배": 30403, "龙": 30404, "街": 30405, "್": 30406, "ṛ": 30407, "ু": 30408, "弹": 30409, "魔": 30410, "객": 30411, "‰": 30412, "⌁": 30413, "ἐ": 30414, "禁": 30415, "ผ": 30416, "қ": 30417, "島": 30418, "ா": 30419, "♭": 30420, "百": 30421, "ứ": 30422, "ネ": 30423, "专": 30424, "來": 30425, "刷": 30426, "필": 30427, "յ": 30428, "ắ": 30429, "华": 30430, "Β": 30431, "श": 30432, "¸": 30433, "屏": 30434, "死": 30435, "遍": 30436, "검": 30437, "Χ": 30438, "것": 30439, "八": 30440, "览": 30441, "택": 30442, "唯": 30443, "∙": 30444, "¤": 30445, "페": 30446, "让": 30447, "锁": 30448, "무": 30449, "思": 30450, "隔": 30451, "Ô": 30452, "\u0013": 30453, 
"ṃ": 30454, "ワ": 30455, "低": 30456, "션": 30457, "半": 30458, "较": 30459, "ត": 30460, "享": 30461, "积": 30462, "ˆ": 30463, "😊": 30464, "典": 30465, "ǔ": 30466, "六": 30467, "便": 30468, "ɐ": 30469, "简": 30470, "继": 30471, "仅": 30472, "尾": 30473, "‹": 30474, "வ": 30475, "կ": 30476, "ƒ": 30477, "영": 30478, "火": 30479, "湖": 30480, "書": 30481, "발": 30482, "ハ": 30483, "循": 30484, "术": 30485, "結": 30486, "ļ": 30487, "乐": 30488, "滤": 30489, "종": 30490, "ถ": 30491, "ὶ": 30492, "满": 30493, "╝": 30494, "わ": 30495, "ど": 30496, "็": 30497, "형": 30498, "國": 30499, "ự": 30500, "線": 30501, "블": 30502, "封": 30503, "確": 30504, "依": 30505, "ս": 30506, "永": 30507, "색": 30508, "歌": 30509, "數": 30510, "福": 30511, "삭": 30512, "実": 30513, "레": 30514, "ſ": 30515, "千": 30516, "\u000e": 30517, "母": 30518, "더": 30519, "임": 30520, "տ": 30521, "ے": 30522, "几": 30523, "双": 30524, "노": 30525, "ณ": 30526, "掉": 30527, "Ρ": 30528, "ἀ": 30529, "標": 30530, "長": 30531, "档": 30532, "태": 30533, "ペ": 30534, "본": 30535, "Œ": 30536, "底": 30537, "终": 30538, "請": 30539, "კ": 30540, "̯": 30541, "예": 30542, "▬": 30543, "報": 30544, "ピ": 30545, "๏": 30546, "暂": 30547, "李": 30548, "Υ": 30549, "\u0005": 30550, "\u0002": 30551, "替": 30552, "운": 30553, "射": 30554, "\u0018": 30555, "매": 30556, "\u0011": 30557, "🏼": 30558, "票": 30559, "附": 30560, "ノ": 30561, "ũ": 30562, "压": 30563, "阿": 30564, "Ò": 30565, "테": 30566, "∼": 30567, "万": 30568, "մ": 30569, "후": 30570, "普": 30571, "截": 30572, "속": 30573, "括": 30574, "😀": 30575, "ை": 30576, "▶": 30577, "까": 30578, "ট": 30579, "曲": 30580, "师": 30581, "钱": 30582, "栏": 30583, "Ы": 30584, "走": 30585, "ữ": 30586, "‬": 30587, "归": 30588, "점": 30589, "🔥": 30590, "었": 30591, "連": 30592, "私": 30593, "청": 30594, "刘": 30595, "免": 30596, "": 30597, "奖": 30598, "見": 30599, "ֹ": 30600, "☺": 30601, "ケ": 30602, "역": 30603, "际": 30604, "받": 30605, "望": 30606, "帝": 30607, "减": 30608, "두": 30609, "领": 30610, "„": 30611, "钟": 30612, "ガ": 30613, "架": 30614, "든": 30615, "ல": 30616, "松": 30617, "□": 30618, "越": 30619, "答": 30620, "ɕ": 30621, "ῦ": 30622, "染": 30623, "": 30624, "质": 30625, "顺": 30626, "气": 30627, "╗": 30628, "計": 30629, "ქ": 30630, "亮": 30631, "🤦": 30632, "̂": 30633, "ٹ": 30634, "座": 30635, "ˌ": 30636, "均": 30637, "\u000b": 30638, "官": 30639, "适": 30640, "护": 30641, "久": 30642, "春": 30643, "曹": 30644, "皇": 30645, "脚": 30646, "池": 30647, "延": 30648, "키": 30649, "품": 30650, "現": 30651, "檔": 30652, "ば": 30653, "ⴰ": 30654, "希": 30655, "玩": 30656, "固": 30657, "黄": 30658, "": 30659, "☽": 30660, "银": 30661, "\u0003": 30662, "┃": 30663, "👏": 30664, "불": 30665, "攻": 30666, "へ": 30667, "决": 30668, "⊙": 30669, "宁": 30670, "च": 30671, "機": 30672, "義": 30673, "ɲ": 30674, "\u0015": 30675, "했": 30676, "ẩ": 30677, "愛": 30678, "矩": 30679, "패": 30680, "ặ": 30681, "郎": 30682, "Ь": 30683, "绘": 30684, "负": 30685, "ổ": 30686, "ய": 30687, "汉": 30688, "編": 30689, "ێ": 30690, "്": 30691, "じ": 30692, "카": 30693, "似": 30694, "ں": 30695, "や": 30696, "認": 30697, "\u000f": 30698, "過": 30699, "통": 30700, "▪": 30701, "约": 30702, "香": 30703, "买": 30704, "住": 30705, "╚": 30706, "😁": 30707, "扩": 30708, "静": 30709, "려": 30710, "학": 30711, "钥": 30712, "증": 30713, "ỉ": 30714, "她": 30715, "食": 30716, "往": 30717, "點": 30718, "偏": 30719, "康": 30720, "\u0014": 30721, "į": 30722, "준": 30723, "\u0004": 30724, "ฟ": 30725, "♣": 30726, "戏": 30727, "ʂ": 30728, "井": 30729, "军": 30730, "爱": 30731, "ٱ": 30732, "七": 30733, "차": 30734, "币": 30735, "♠": 30736, "哈": 30737, "阅": 30738, "介": 30739, "观": 30740, "區": 30741, "˜": 30742, "ً": 30743, "又": 30744, "冲": 
30745, "朝": 30746, "姓": 30747, "课": 30748, "龍": 30749, "각": 30750, "∈": 30751, "米": 30752, "ƒ": 30753, "喜": 30754, "夜": 30755, "团": 30756, "⇒": 30757, "远": 30758, "\u001a": 30759, "ὐ": 30760, "承": 30761, "ಿ": 30762, "室": 30763, "ʀ": 30764, "ង": 30765, "अ": 30766, "罗": 30767, "🙏": 30768, "软": 30769, "🟡": 30770, "건": 30771, "؟": 30772, "း": 30773, "ᴇ": 30774, "ユ": 30775, "토": 30776, "策": 30777, "̄": 30778, "국": 30779, "ֶ": 30780, "协": 30781, "营": 30782, "関": 30783, "吉": 30784, "💀": 30785, "奇": 30786, "滚": 30787, "轴": 30788, "処": 30789, "土": 30790, "划": 30791, "ड": 30792, "临": 30793, "ֵ": 30794, "航": 30795, "浏": 30796, "ゴ": 30797, "別": 30798, "寺": 30799, "於": 30800, "進": 30801, "ὸ": 30802, "風": 30803, "ன": 30804, "班": 30805, "◼": 30806, "九": 30807, "̥": 30808, "號": 30809, "류": 30810, "础": 30811, "般": 30812, "︙": 30813, "̈": 30814, "番": 30815, "✨": 30816, "😎": 30817, "ো": 30818, "😍": 30819, "單": 30820, "帧": 30821, "授": 30822, "赋": 30823, "巴": 30824, "占": 30825, "假": 30826, "ṅ": 30827, "透": 30828, "項": 30829, "ħ": 30830, "馬": 30831, "🟢": 30832, "Ľ": 30833, "լ": 30834, "券": 30835, "같": 30836, "類": 30837, "對": 30838, "월": 30839, "激": 30840, "\u0017": 30841, "戦": 30842, "独": 30843, "訊": 30844, "ិ": 30845, "套": 30846, "ʷ": 30847, "跟": 30848, "ở": 30849, "渲": 30850, "顯": 30851, "降": 30852, "ာ": 30853, "尼": 30854, "血": 30855, "언": 30856, "牛": 30857, "將": 30858, "ศ": 30859, "拍": 30860, "刻": 30861, "ზ": 30862, "╔": 30863, "藤": 30864, "్": 30865, "ῶ": 30866, "🟠": 30867, "良": 30868, "김": 30869, "দ": 30870, "Ṣ": 30871, "録": 30872, "伊": 30873, "落": 30874, "雄": 30875, "雪": 30876, "映": 30877, "著": 30878, "른": 30879, "ფ": 30880, "対": 30881, "智": 30882, "译": 30883, "┬": 30884, "抽": 30885, "ῖ": 30886, "酒": 30887, "Ћ": 30888, "股": 30889, "់": 30890, "순": 30891, "직": 30892, "भ": 30893, "谷": 30894, "물": 30895, "ǒ": 30896, "⠄": 30897, "热": 30898, "終": 30899, "夹": 30900, "干": 30901, "彩": 30902, "敗": 30903, "ќ": 30904, "♯": 30905, "̣": 30906, "վ": 30907, "轮": 30908, "阵": 30909, "夏": 30910, "幕": 30911, "吧": 30912, "港": 30913, "益": 30914, "儿": 30915, "액": 30916, "售": 30917, "兵": 30918, "惠": 30919, "欢": 30920, "›": 30921, "零": 30922, "學": 30923, "ž": 30924, "員": 30925, "ỗ": 30926, "玉": 30927, "逻": 30928, "᥀": 30929, "吗": 30930, "沒": 30931, "≠": 30932, "너": 30933, "ச": 30934, "\u0016": 30935, "夫": 30936, "წ": 30937, "堂": 30938, "電": 30939, "≡": 30940, "陆": 30941, "져": 30942, "研": 30943, "荐": 30944, "健": 30945, "碼": 30946, "练": 30947, "検": 30948, "송": 30949, "ै": 30950, "哪": 30951, "圆": 30952, "Ա": 30953, "↩": 30954, "托": 30955, "̪": 30956, "ू": 30957, "缀": 30958, "네": 30959, "沙": 30960, "兴": 30961, "病": 30962, "\u0007": 30963, "ល": 30964, "ừ": 30965, "Ἀ": 30966, "강": 30967, "항": 30968, "\u0019": 30969, "換": 30970, "温": 30971, "帖": 30972, "ទ": 30973, "込": 30974, "削": 30975, "알": 30976, "征": 30977, "习": 30978, "법": 30979, "栈": 30980, "绝": 30981, "": 30982, "ڕ": 30983, "圖": 30984, "苏": 30985, "発": 30986, "ု": 30987, "町": 30988, "互": 30989, "়": 30990, "ც": 30991, "守": 30992, "새": 30993, "侧": 30994, "草": 30995, "ས": 30996, "扫": 30997, "‒": 30998, "恢": 30999, "ң": 31000, "ण": 31001, "ற": 31002, "째": 31003, "්": 31004, "拟": 31005, "派": 31006, "🏽": 31007, "呼": 31008, "Š": 31009, "演": 31010, "究": 31011, "교": 31012, "ɣ": 31013, "ए": 31014, "ី": 31015, "ף": 31016, "富": 31017, "駅": 31018, "ず": 31019, "♪": 31020, "😆": 31021, "접": 31022, "ғ": 31023, "▓": 31024, "존": 31025, "ಾ": 31026, "旋": 31027, "ゃ": 31028, "补": 31029, "ץ": 31030, "門": 31031, "ច": 31032, "날": 31033, "ภ": 31034, "ག": 31035, "傳": 31036, "∆": 31037, "†": 31038, "ׁ": 
31039, "缺": 31040, "頭": 31041, "怪": 31042, "組": 31043, "별": 31044, "Ъ": 31045, "發": 31046, "雷": 31047, "ರ": 31048, "ซ": 31049, "び": 31050, "翻": 31051, "ھ": 31052, "პ": 31053, "題": 31054, "居": 31055, "집": 31056, "🌍": 31057, "˚": 31058, "避": 31059, "줄": 31060, "ុ": 31061, "滑": 31062, "故": 31063, "ญ": 31064, "〜": 31065, "ನ": 31066, "양": 31067, "완": 31068, "ள": 31069, "倍": 31070, "宗": 31071, "択": 31072, "브": 31073, "ɴ": 31074, "効": 31075, "尺": 31076, "視": 31077, "ẽ": 31078, "覆": 31079, "ध": 31080, "骨": 31081, "달": 31082, "ᴛ": 31083, "蓝": 31084, "關": 31085, "額": 31086, "Õ": 31087, "∗": 31088, "卷": 31089, "갑": 31090, "르": 31091, "众": 31092, "ᴀ": 31093, "態": 31094, "ٰ": 31095, "暗": 31096, "君": 31097, "錯": 31098, "ɒ": 31099, "យ": 31100, "ḫ": 31101, "ῆ": 31102, "亚": 31103, "♡": 31104, "割": 31105, "鼠": 31106, "̶": 31107, "Ë": 31108, "読": 31109, "격": 31110, "ゲ": 31111, "眼": 31112, "Ý": 31113, "ژ": 31114, "雨": 31115, "宮": 31116, "쪽": 31117, "ष": 31118, "複": 31119, "剩": 31120, "早": 31121, "杂": 31122, "焦": 31123, "贝": 31124, "突": 31125, "워": 31126, "另": 31127, "摄": 31128, "\b": 31129, "‭": 31130, "府": 31131, "외": 31132, "盖": 31133, "\u001c": 31134, "ษ": 31135, "佛": 31136, "概": 31137, "與": 31138, "經": 31139, "-": 31140, "һ": 31141, "問": 31142, "ು": 31143, "ἰ": 31144, "話": 31145, "倒": 31146, "葛": 31147, "べ": 31148, "ろ": 31149, "\u001e": 31150, "।": 31151, "ေ": 31152, "ᴏ": 31153, "训": 31154, "體": 31155, "👌": 31156, "內": 31157, "က": 31158, "企": 31159, "약": 31160, "찾": 31161, "ོ": 31162, "破": 31163, "輸": 31164, "림": 31165, "塔": 31166, "턴": 31167, "杀": 31168, "』": 31169, "味": 31170, "浮": 31171, "┆": 31172, "ġ": 31173, "郡": 31174, "┐": 31175, "『": 31176, "阶": 31177, "雅": 31178, "┈": 31179, "园": 31180, ".": 31181, "吃": 31182, "남": 31183, " ": 31184, "ར": 31185, "帮": 31186, "毛": 31187, "耗": 31188, "举": 31189, "ర": 31190, "拿": 31191, "밀": 31192, "ご": 31193, "够": 31194, "礼": 31195, "ព": 31196, "ね": 31197, "‰": 31198, "兰": 31199, "❌": 31200, "折": 31201, "십": 31202, "💎": 31203, "業": 31204, "诸": 31205, "孙": 31206, "བ": 31207, "😳": 31208, "種": 31209, "Ï": 31210, "ึ": 31211, "⁣": 31212, "医": 31213, "拼": 31214, "↵": 31215, "⅓": 31216, "\u001f": 31217, "မ": 31218, "叫": 31219, "জ": 31220, "予": 31221, "寸": 31222, "梅": 31223, "醒": 31224, "津": 31225, "န": 31226, "ి": 31227, "厂": 31228, "屋": 31229, "ख": 31230, "師": 31231, "👀": 31232, "ỏ": 31233, "ヤ": 31234, "ὰ": 31235, "\u001d": 31236, "◆": 31237, "ដ": 31238, "材": 31239, "ホ": 31240, "張": 31241, "洞": 31242, "餐": 31243, "천": 31244, "হ": 31245, "達": 31246, "們": 31247, "斗": 31248, "横": 31249, "백": 31250, "ំ": 31251, "ۆ": 31252, "말": 31253, "গ": 31254, "佳": 31255, "랜": 31256, "仁": 31257, "陈": 31258, "飞": 31259, "极": 31260, "": 31261, "및": 31262, "仓": 31263, "⬛": 31264, "昌": 31265, "錢": 31266, "殊": 31267, "┴": 31268, "○": 31269, "길": 31270, "泉": 31271, "甲": 31272, "활": 31273, "ひ": 31274, "শ": 31275, "ን": 31276, "Ť": 31277, "ღ": 31278, "皮": 31279, "強": 31280, "赛": 31281, "ా": 31282, "預": 31283, "င": 31284, "튼": 31285, "플": 31286, "ყ": 31287, "⋆": 31288, "ք": 31289, "ા": 31290, "尚": 31291, "또": 31292, "բ": 31293, "┌": 31294, "節": 31295, "森": 31296, "आ": 31297, "办": 31298, "園": 31299, "牙": 31300, "庆": 31301, "隆": 31302, "😔": 31303, "叉": 31304, "գ": 31305, "피": 31306, "ギ": 31307, "啊": 31308, "続": 31309, "灵": 31310, "ヒ": 31311, "忽": 31312, "ʌ": 31313, "량": 31314, "油": 31315, "讯": 31316, "ⵉ": 31317, "릭": 31318, "刚": 31319, "氏": 31320, "ိ": 31321, "Ī": 31322, "誤": 31323, "齐": 31324, "末": 31325, "🙌": 31326, "̞": 31327, "圈": 31328, "念": 31329, "숫": 31330, "毫": 31331, "當": 31332, "規": 
31333, "판": 31334, "ు": 31335, "旧": 31336, "卖": 31337, "ฉ": 31338, "幸": 31339, "署": 31340, "근": 31341, "ই": 31342, "岛": 31343, "դ": 31344, "觉": 31345, "害": 31346, "毕": 31347, "ฐ": 31348, "威": 31349, "育": 31350, "呢": 31351, "峰": 31352, "职": 31353, "陽": 31354, "ි": 31355, "亞": 31356, "ұ": 31357, "₃": 31358, "따": 31359, "施": 31360, "泰": 31361, "載": 31362, "…": 31363, "笑": 31364, "華": 31365, "迎": 31366, "됩": 31367, "豆": 31368, "嘉": 31369, "🤡": 31370, "ĕ": 31371, "庄": 31372, "級": 31373, "Ψ": 31374, "ི": 31375, "気": 31376, "责": 31377, "հ": 31378, "អ": 31379, "乱": 31380, "休": 31381, "約": 31382, "ฆ": 31383, "∑": 31384, "察": 31385, "온": 31386, "😬": 31387, "ড": 31388, "乘": 31389, "람": 31390, "इ": 31391, "Ά": 31392, "ந": 31393, "ើ": 31394, "亲": 31395, "េ": 31396, "委": 31397, "赤": 31398, "됨": 31399, "勝": 31400, "怎": 31401, "감": 31402, "宋": 31403, "調": 31404, "짜": 31405, "ী": 31406, "难": 31407, "못": 31408, "티": 31409, "備": 31410, "塞": 31411, "វ": 31412, "险": 31413, "旅": 31414, "虚": 31415, "↳": 31416, "笔": 31417, "馆": 31418, "Қ": 31419, "⚡": 31420, "ೆ": 31421, "※": 31422, "唐": 31423, "律": 31424, "稍": 31425, "散": 31426, "ર": 31427, "ヴ": 31428, "副": 31429, "尽": 31430, "挂": 31431, "県": 31432, "⚠": 31433, "洋": 31434, "鬼": 31435, "암": 31436, "孩": 31437, "℃": 31438, "並": 31439, "ց": 31440, "ូ": 31441, "ℓ": 31442, "ⵏ": 31443, "扣": 31444, "铁": 31445, "闻": 31446, "ˆ": 31447, "戳": 31448, "む": 31449, "秀": 31450, "細": 31451, "ပ": 31452, "御": 31453, "拖": 31454, "좌": 31455, "ؤ": 31456, "绍": 31457, "ỹ": 31458, "참": 31459, "향": 31460, "Ď": 31461, "끝": 31462, "민": 31463, "ძ": 31464, "贵": 31465, "纪": 31466, "秋": 31467, "ಕ": 31468, "ӏ": 31469, "網": 31470, "铺": 31471, "恋": 31472, "fl": 31473, "兼": 31474, "羽": 31475, "창": 31476, "啟": 31477, "弟": 31478, "년": 31479, "慢": 31480, "효": 31481, "許": 31482, "硬": 31483, "잘": 31484, "템": 31485, "્": 31486, "න": 31487, "術": 31488, "ڈ": 31489, "溪": 31490, "": 31491, "暴": 31492, "混": 31493, "夢": 31494, "랑": 31495, "আ": 31496, "還": 31497, "探": 31498, "祖": 31499, "织": 31500, "軍": 31501, "թ": 31502, "務": 31503, "艺": 31504, "ད": 31505, "ት": 31506, "ṁ": 31507, "應": 31508, "擇": 31509, "🥰": 31510, "ķ": 31511, "渡": 31512, "葉": 31513, "령": 31514, "決": 31515, "刀": 31516, "從": 31517, "變": 31518, "올": 31519, "💪": 31520, "灣": 31521, "ር": 31522, "평": 31523, "衣": 31524, "😄": 31525, "ി": 31526, "ჩ": 31527, "ὁ": 31528, "ほ": 31529, "Û": 31530, "চ": 31531, "ර": 31532, "製": 31533, "隊": 31534, "₱": 31535, "纳": 31536, "赖": 31537, "农": 31538, "桥": 31539, "ỳ": 31540, "🏾": 31541, "阻": 31542, "ជ": 31543, "秘": 31544, "박": 31545, "伤": 31546, "稿": 31547, "ం": 31548, "拦": 31549, "넣": 31550, "💕": 31551, "₁": 31552, "宿": 31553, "錄": 31554, "镜": 31555, "채": 31556, "Ə": 31557, "ང": 31558, "⇔": 31559, "☼": 31560, "ུ": 31561, "党": 31562, "급": 31563, "洲": 31564, "ղ": 31565, "說": 31566, "ĭ": 31567, "尝": 31568, "담": 31569, "फ": 31570, "哥": 31571, "圣": 31572, "萨": 31573, "😏": 31574, "ʏ": 31575, "ெ": 31576, "丁": 31577, "虎": 31578, "권": 31579, "善": 31580, "岩": 31581, "커": 31582, "◦": 31583, "抛": 31584, "석": 31585, "Έ": 31586, "宣": 31587, "拳": 31588, "팅": 31589, "枚": 31590, "洛": 31591, "証": 31592, "陵": 31593, "佐": 31594, "館": 31595, "누": 31596, "돌": 31597, "₄": 31598, "稱": 31599, "聊": 31600, "車": 31601, "루": 31602, "״": 31603, "ಠ": 31604, "庫": 31605, "མ": 31606, "統": 31607, "련": 31608, "़": 31609, "ṯ": 31610, "ക": 31611, "旗": 31612, "励": 31613, "紀": 31614, "忠": 31615, "າ": 31616, "杨": 31617, "丹": 31618, "Ù": 31619, "ฝ": 31620, "却": 31621, "舞": 31622, "轉": 31623, "တ": 31624, "丽": 31625, "借": 31626, "ා": 31627, "ょ": 31628, "옵": 
31629, "편": 31630, "蒙": 31631, "衡": 31632, "ʋ": 31633, "叶": 31634, "̇": 31635, "⬜": 31636, "🇺": 31637, "Հ": 31638, "谢": 31639, "Ą": 31640, "ே": 31641, "ằ": 31642, "既": 31643, "济": 31644, "≯": 31645, "準": 31646, "답": 31647, "ಲ": 31648, "残": 31649, "虑": 31650, "̆": 31651, "┘": 31652, "急": 31653, "招": 31654, "막": 31655, "≮": 31656, "產": 31657, "Ṭ": 31658, "😢": 31659, "垂": 31660, "親": 31661, "ģ": 31662, "־": 31663, "猫": 31664, "ʟ": 31665, "☃": 31666, "✪": 31667, "刪": 31668, "胡": 31669, "☉": 31670, "晚": 31671, "군": 31672, "승": 31673, "న": 31674, "ὴ": 31675, "曾": 31676, "論": 31677, "ɯ": 31678, "త": 31679, "戰": 31680, "鱼": 31681, "ǧ": 31682, "寶": 31683, "특": 31684, "💯": 31685, "崎": 31686, "甘": 31687, "該": 31688, "링": 31689, "😡": 31690, "उ": 31691, "ែ": 31692, "頁": 31693, "큰": 31694, "➤": 31695, "총": 31696, "💰": 31697, "∂": 31698, "毁": 31699, "聖": 31700, "麻": 31701, "ʐ": 31702, "敏": 31703, "運": 31704, "될": 31705, "쓰": 31706, "ಸ": 31707, "စ": 31708, "✦": 31709, "젝": 31710, "復": 31711, "寻": 31712, "茶": 31713, "ਾ": 31714, "竹": 31715, "遇": 31716, "順": 31717, "며": 31718, "累": 31719, "ĝ": 31720, "ˇ": 31721, "覧": 31722, "এ": 31723, "株": 31724, "취": 31725, "ስ": 31726, "争": 31727, "势": 31728, "宇": 31729, "橋": 31730, "Ӏ": 31731, "堆": 31732, "ⵙ": 31733, "丶": 31734, "棋": 31735, "肉": 31736, "የ": 31737, "": 31738, "❶": 31739, "季": 31740, "ል": 31741, "殿": 31742, "優": 31743, "試": 31744, "첫": 31745, "Ό": 31746, "戶": 31747, "ண": 31748, "羅": 31749, "桃": 31750, "립": 31751, "浪": 31752, "脑": 31753, "😛": 31754, "弃": 31755, "炮": 31756, "轻": 31757, "울": 31758, "": 31759, "ヘ": 31760, "奥": 31761, "💜": 31762, "忘": 31763, "遠": 31764, "飛": 31765, "魏": 31766, "Ē": 31767, "汇": 31768, "央": 31769, "逆": 31770, "露": 31771, "須": 31772, "ѐ": 31773, "ḷ": 31774, "ದ": 31775, "✭": 31776, "寄": 31777, "盟": 31778, "财": 31779, "際": 31780, "ἔ": 31781, "ǫ": 31782, "थ": 31783, "ാ": 31784, "宫": 31785, "巨": 31786, "途": 31787, "ʹ": 31788, "ಗ": 31789, "帐": 31790, "‪": 31791, "拒": 31792, "药": 31793, "🙃": 31794, "ŕ": 31795, "亡": 31796, "壁": 31797, "ም": 31798, "參": 31799, "😩": 31800, "շ": 31801, "ವ": 31802, "ណ": 31803, "丰": 31804, "獲": 31805, "莉": 31806, "좋": 31807, "ရ": 31808, "₦": 31809, "겠": 31810, "👉": 31811, "吴": 31812, "岡": 31813, "诉": 31814, "읽": 31815, "🥺": 31816, "爆": 31817, "🇸": 31818, "ভ": 31819, "迭": 31820, "엔": 31821, "ἄ": 31822, "捷": 31823, "納": 31824, "邀": 31825, "ಯ": 31826, "爾": 31827, "船": 31828, "赞": 31829, "胜": 31830, "므": 31831, "သ": 31832, "構": 31833, "磁": 31834, "冰": 31835, "딩": 31836, "ે": 31837, "媒": 31838, "繁": 31839, "☠": 31840, "❒": 31841, "仪": 31842, "렬": 31843, "昭": 31844, "珠": 31845, "離": 31846, "ན": 31847, "ల": 31848, "ತ": 31849, "拷": 31850, "粉": 31851, "벤": 31852, "⇽": 31853, "乌": 31854, "拥": 31855, "ҳ": 31856, "ය": 31857, "ེ": 31858, "仙": 31859, "塊": 31860, "幅": 31861, "🎉": 31862, "Մ": 31863, "跨": 31864, "ٔ": 31865, "恩": 31866, "损": 31867, "养": 31868, "奈": 31869, "ǀ": 31870, "严": 31871, "卫": 31872, "迟": 31873, "様": 31874, "裡": 31875, "난": 31876, "았": 31877, "͜": 31878, "Ζ": 31879, "ਰ": 31880, "պ": 31881, "ং": 31882, "丢": 31883, "伝": 31884, "컨": 31885, "ව": 31886, "ြ": 31887, "冷": 31888, "遗": 31889, "銀": 31890, "̌": 31891, "ᴜ": 31892, "瑞": 31893, "ฌ": 31894, "❍": 31895, "ふ": 31896, "聚": 31897, "碎": 31898, "衛": 31899, "অ": 31900, "ញ": 31901, "퍼": 31902, "Ս": 31903, "ນ": 31904, "ẓ": 31905, "✌": 31906, "孝": 31907, "陳": 31908, "히": 31909, "ක": 31910, "黒": 31911, "💖": 31912, "ḩ": 31913, "応": 31914, "饰": 31915, "∪": 31916, "宜": 31917, "樂": 31918, "則": 31919, "勇": 31920, "徐": 31921, "ⵓ": 31922, "權": 31923, "鲁": 31924, "‟": 
31925, "庭": 31926, "苗": 31927, "🔴": 31928, "闲": 31929, "독": 31930, "ɹ": 31931, "ҽ": 31932, "ថ": 31933, "宏": 31934, "尊": 31935, "總": 31936, "裝": 31937, "ම": 31938, "▸": 31939, "測": 31940, "ಮ": 31941, "አ": 31942, "轩": 31943, "兄": 31944, "剑": 31945, "ન": 31946, "朱": 31947, "ǝ": 31948, "Ḩ": 31949, "担": 31950, "灰": 31951, "讲": 31952, "롤": 31953, "︎": 31954, "😤": 31955, "ោ": 31956, "애": 31957, "였": 31958, "질": 31959, "振": 31960, "灯": 31961, "ĉ": 31962, "ස": 31963, "閉": 31964, "램": 31965, "ಂ": 31966, "げ": 31967, "̧": 31968, "狂": 31969, "融": 31970, "仍": 31971, "實": 31972, "楽": 31973, "範": 31974, "ٌ": 31975, "వ": 31976, "嵌": 31977, "摩": 31978, "袁": 31979, "ষ": 31980, "乎": 31981, "규": 31982, "岗": 31983, "糊": 31984, "క": 31985, "雲": 31986, "심": 31987, "ई": 31988, "འ": 31989, "ἡ": 31990, "丝": 31991, "Ħ": 31992, "ٍ": 31993, "ٓ": 31994, "အ": 31995, "執": 31996, "벨": 31997, "ゼ": 31998, "梦": 31999 }, "merges": [ "▁ t", "i n", "e r", "▁ a", "h e", "o n", "r e", "▁ s", "e n", "a t", "o r", "▁t he", "▁th e", "▁ the", "e s", "▁ w", "a n", "▁ c", "i s", "i t", "o u", "▁ d", "a l", "a r", "▁ p", "▁ f", "e d", "▁ b", "in g", "i ng", "▁ o", "▁ m", "l e", "n d", "a s", "i c", "▁ h", "io n", "i on", "▁i n", "▁ in", "▁t o", "▁ to", "e t", "o m", "e l", "▁o f", "▁ of", "s t", "▁a nd", "▁an d", "▁ and", "▁ l", "▁t h", "▁ th", "▁ n", "en t", "e nt", "i l", "c t", "r o", "▁r e", "▁ re", "i d", "a m", "▁ I", "a d", "▁ e", "▁ S", "▁ g", "▁ T", "i m", "o t", "a c", "u r", "▁ (", "i g", "▁ =", "o l", "u t", "▁ A", "s e", "▁ u", "v e", "▁ C", "i f", "o w", "▁ y", "c h", "a y", "▁d e", "▁ de", "▁s t", "▁ st", "▁ |", "ve r", "v er", ") ;", "▁ \"", "l y", "▁b e", "▁ be", "* *", "▁i s", "▁ is", "o d", "▁ M", "at ion", "ati on", "atio n", "u l", "▁f or", "▁fo r", "▁ for", "▁o n", "▁ on", "a g", "c e", "te r", "t er", "i r", "t h", "▁ v", "q u", "▁ B", "e m", "▁ P", "▁y ou", "▁yo u", "▁ you", "▁t hat", "▁th at", "▁ that", "u n", "▁ {", "it h", "i th", "r i", "es t", "e st", "a b", "- -", "a p", "▁i t", "▁ it", "▁c on", "▁co n", "▁ con", "at e", "a te", "u s", "▁ H", "u m", "▁ D", "o s", "p e", "▁ -", "▁w h", "▁ wh", "▁a l", "▁ al", "▁a s", "▁ as", "an d", "a nd", "is t", "i st", "▁ L", "▁ W", "▁w ith", "▁ with", "▁a n", "▁ an", "er e", "e re", "▁ *", "▁ R", "▁h e", "▁ he", "▁ F", "o c", "▁w as", "▁wa s", "▁ was", "er s", "e rs", "k e", "ou t", "o ut", "h t", "▁ r", "es s", "e ss", "o p", "re s", "r es", "i e", "▁ E", "▁ \\", "▁T he", "▁Th e", "▁ The", "en d", "e nd", "l d", "▁ N", "or t", "o rt", "▁ G", "/ /", "▁ #", "ou r", "o ur", "t e", "il l", "i ll", "ai n", "a in", "▁s e", "▁ se", "▁ $", "▁p ro", "▁pr o", "▁ pro", "or e", "o re", "▁c om", "▁co m", "▁ com", "am e", "a me", "t r", "▁n e", "▁ ne", "ro m", "r om", "u b", "▁a t", "▁ at", "▁e x", "▁ ex", "an t", "a nt", "u e", "▁o r", "▁ or", "▁ }", "ar t", "a rt", "ct ion", "▁ k", "p t", "n t", "i v", "d e", "▁ O", "p l", "ur n", "u rn", "ig ht", "igh t", "i ght", "al l", "a ll", "▁t his", "▁th is", "▁ this", "se r", "s er", "av e", "a ve", "▁n ot", "▁no t", "▁ not", "▁a re", "▁ar e", "▁ are", "▁ j", "▁l e", "▁ le", "i z", "▁ '", "ag e", "a ge", "me nt", "men t", "m ent", "▁t r", "▁ tr", "ac k", "a ck", "us t", "u st", "( )", "- >", "it y", "i ty", "in e", "i ne", "ou ld", "oul d", "o uld", "▁ J", "o g", "▁f rom", "▁fr om", "▁fro m", "▁ from", "▁w e", "▁ we", "el l", "e ll", "▁s h", "▁ sh", "▁e n", "▁ en", "ur e", "u re", "por t", "po rt", "p ort", "▁c h", "▁ ch", "n e", "▁b y", "▁ by", "pe r", "p er", "ar d", "a rd", "as s", "a ss", "g e", "a k", "ar e", "a re", "o k", "a v", "iv 
e", "i ve", "f f", "ie s", "i es", "at h", "a th", "tu rn", "t urn", "▁ U", "in t", "i nt", "-- --", "--- -", "- ---", "▁i m", "▁ im", "os t", "o st", "ia l", "i al", "▁h ave", "▁ha ve", "▁hav e", "▁ have", "in d", "i nd", "i p", "an s", "a ns", "x t", "▁d o", "▁ do", "c l", "▁i f", "▁ if", "co n", "c on", "i a", "▁h is", "▁hi s", "▁ his", "ul t", "u lt", "ro u", "r ou", "▁s u", "▁ su", "r a", "▁u n", "▁ un", "ab le", "abl e", "a ble", "▁ <", "▁ K", "om e", "o me", "▁q u", "▁ qu", "ge t", "g et", "▁m e", "▁ me", "as t", "a st", "ec t", "e ct", "▁# #", "▁ ##", "t o", "▁c l", "▁ cl", "▁a b", "▁ ab", "ic e", "i ce", "ir e", "i re", "be r", "b er", "on e", "o ne", "ic h", "i ch", "he n", "h en", "▁c an", "▁ca n", "▁ can", "▁T h", "▁ Th", "▁l a", "▁ la", "▁a ll", "▁al l", "▁ all", "im e", "i me", "il e", "i le", "id e", "i de", "\" ,", "▁p l", "▁ pl", "▁ V", "r u", "or m", "o rm", "▁h ad", "▁ha d", "▁ had", "u d", "as e", "a se", "or d", "o rd", ") ,", "▁h er", "▁he r", "▁ her", "▁I n", "▁ In", "ac e", "a ce", "▁b ut", "▁bu t", "▁ but", "at a", "a ta", ": :", "** **", "*** *", "* ***", "on g", "o ng", "▁ &", ". .", "it e", "i te", "yp e", "y pe", "ac t", "a ct", "od e", "o de", "▁y our", "▁you r", "▁yo ur", "▁ your", "▁o ut", "▁ou t", "▁ out", "▁g o", "▁ go", "li c", "l ic", "al ly", "all y", "▁s o", "▁ so", "or k", "a u", "▁u p", "▁ up", "▁ _", "l l", "= =", "▁m y", "▁ my", "p p", "c c", "▁/ /", "▁ //", "▁the y", "▁th ey", "▁ they", "g h", "▁u s", "▁ us", "i b", "ion s", "io ns", "i ons", "ac h", "a ch", "en s", "e ns", "▁a r", "▁ ar", "o b", "el f", "oo k", "o ok", "at ed", "ate d", "a ted", "an g", "a ng", "ig n", "i gn", "▁re turn", "▁r eturn", "▁ret urn", "▁ return", "▁re s", "▁r es", "▁ res", "c k", "ou s", "o us", "с т", ") .", "▁ п", ". \"", "н а", "▁ i", "ai l", "a il", "e p", "▁a d", "▁ ad", "an ce", "anc e", "( \"", "▁* *", "▁ **", "th er", "the r", "t her", "ak e", "a ke", "▁w ill", "▁ will", "▁c omp", "▁com p", "▁co mp", "▁ comp", "▁o ne", "▁on e", "▁ one", "▁g et", "▁ge t", "▁ get", "o v", "▁ Y", "ar y", "a ry", "oc k", "o ck", "▁s he", "▁sh e", "▁ she", "ch e", "c he", "f t", "▁n ew", "▁ne w", "▁ new", "▁d es", "▁de s", "▁ des", "▁l i", "▁ li", "en ce", "enc e", "▁s a", "▁ sa", "re ss", "res s", "r ess", "▁e l", "▁ el", "▁u nd", "▁un d", "▁ und", "e g", "fe r", "f er", "r y", "ea r", "e ar", "os e", "o se", "ve ry", "ver y", "v ery", "' ,", "▁ +", "▁ в", "▁H e", "▁ He", "ub lic", "ubl ic", "u blic", "▁the ir", "iz e", "i ze", "▁w ere", "▁we re", "▁wer e", "▁ were", "in k", "ow n", "o wn", "I n", "{ \\", "▁h as", "▁ha s", "▁ has", "▁p er", "▁pe r", "▁ per", "▁I t", "▁ It", "▁S t", "▁ St", "he r", "h er", "je ct", "j ect", "р а", "il d", "i ld", "s o", "▁s p", "▁ sp", "н и", "d u", "ro w", "r ow", "al ue", "alu e", "se t", "s et", "fo rm", "for m", "f orm", "co m", "c om", "▁m an", "▁ma n", "▁ man", "on t", "o nt", "ul l", "u ll", "▁c ont", "▁con t", "▁co nt", "▁ cont", "▁m ore", "▁mor e", "▁mo re", "▁ more", "ic k", "i ck", "▁w ould", "▁wo uld", "▁e v", "▁ ev", "▁ab out", "▁ about", "it ion", "iti on", "▁ z", "ou nd", "oun d", "o und", "re e", "r ee", "▁C h", "▁ Ch", "▁wh ich", "▁ which", "i o", "() ;", "( );", "▁w ho", "▁wh o", "▁ who", "er r", "e rr", "or y", "o ry", "ou nt", "oun t", "o unt", "at ions", "ation s", "ati ons", "atio ns", "▁ с", "ri ng", "rin g", "r ing", "< /", "▁f e", "▁ fe", "к о", "н о", "▁d is", "▁di s", "▁ dis", "m a", "▁t hem", "▁the m", "▁th em", "▁a ny", "▁an y", "▁ any", "▁n o", "▁ no", "-- ------", "---- ----", "--- -----", "----- ---", "------ --", 
"------- -", "- -------", "▁p re", "▁pr e", "▁ pre", "▁t e", "▁ te", "▁r o", "▁ ro", "▁h im", "▁hi m", "▁ him", "▁ :", "u p", "▁in t", "▁i nt", "▁ int", "▁a g", "▁ ag", "S t", "ar k", "e x", "p h", "ie nt", "ien t", "i ent", "el y", "e ly", "▁p r", "▁ pr", "E R", "▁im port", "▁imp ort", "▁ import", "▁t ime", "▁tim e", "▁ti me", "▁ time", "р о", "pr o", "p ro", "Us er", "Use r", "U ser", "l o", "▁ /", "▁ [", "or s", "o rs", "= \"", "▁t here", "▁the re", "▁th ere", "▁ther e", "▁ there", "▁l ike", "▁li ke", "▁lik e", "▁ like", "ol d", "o ld", "▁w hen", "▁wh en", "▁whe n", "▁ when", "ve rs", "ver s", "v ers", "▁s ome", "▁so me", "▁som e", "▁ some", "in gs", "ing s", ") )", "▁p art", "▁par t", "▁pa rt", "▁ part", "ic al", "ica l", "i cal", "▁f un", "▁fu n", "▁ fun", "▁k n", "▁ kn", "ay s", "a ys", "ie r", "i er", "▁b een", "▁be en", "ov e", "o ve", "▁s c", "▁ sc", "ia n", "i an", "▁o ver", "▁ov er", "▁ over", "ie l", "i el", "▁p e", "▁ pe", "ri b", "r ib", "pu t", "p ut", "e c", "et h", "e th", "ar am", "ara m", "a ram", "ap p", "a pp", "▁ –", "▁s tat", "▁st at", "▁sta t", "▁ stat", "po n", "p on", "▁w hat", "▁wh at", "▁ what", "pt ion", "w e", "ad e", "a de", "▁w ork", "▁wor k", "▁ work", "te xt", "tex t", "t ext", "▁s aid", "▁sa id", "▁# ##", "▁## #", "▁ ###", "I N", "▁j ust", "▁ju st", "▁ just", "ir st", "irs t", "▁in to", "▁int o", "▁ into", "▁con st", "▁cons t", "▁ const", "our ce", "t t", "p s", "p r", "er v", "e rv", "it t", "i tt", "u g", "_ {", "en ts", "ent s", "is h", "i sh", "en er", "ene r", "e ner", "▁in ter", "▁int er", "▁inte r", "▁ inter", "pl e", "p le", "ol l", "o ll", "me r", "m er", "at er", "ate r", "a ter", "oo l", "o ol", "e f", "▁p ublic", "▁pub lic", "▁pu blic", "▁publi c", "▁ public", "▁o ther", "▁ot her", "▁ other", "р е", "▁d ef", "▁de f", "▁ def", "▁ @", "г о", "oin t", "oi nt", "o int", "▁o ff", "▁of f", "▁ off", "oi d", "o id", "re turn", "ret urn", "r eturn", "▁s et", "▁se t", "▁ set", "w o", "ft er", "fte r", "f ter", "s h", "** ******", "**** ****", "****** **", "▁o ur", "▁ou r", "▁ our", "ri v", "r iv", "is s", "i ss", "▁W e", "▁ We", "n g", "▁o b", "▁ ob", "s s", "g r", "▁t han", "▁th an", "▁ than", "pe ct", "pec t", "p ect", "ie d", "i ed", "s c", "ie w", "i ew", "de r", "d er", "ys t", "y st", "e v", "▁c ould", "▁co uld", "▁cou ld", "▁ could", "an n", "a nn", "en c", "e nc", "O N", "i x", "an c", "a nc", "▁al so", "▁als o", "▁ also", "re at", "rea t", "▁a m", "▁ am", "▁b ec", "▁be c", "▁ bec", "▁ и", "ua l", "u al", "pe c", "p ec", "▁ .", "▁b l", "▁ bl", "le ct", "l ect", "op le", "opl e", "o ple", "y s", "▁g r", "▁ gr", "ic t", "i ct", "i k", "tr ing", "tri ng", "t ring", "▁T his", "▁Th is", "▁ This", "▁b ack", "▁ba ck", "▁ back", "▁ о", "▁f in", "▁fi n", "▁ fin", "at ch", "Co n", "C on", "( '", "er m", "e rm", "▁= =", "▁ ==", "_ _", "na me", "nam e", "n ame", ", \"", "▁d id", "▁di d", "▁ did", "is e", "i se", "▁on ly", "▁ only", "ru ct", "r uct", "le s", "l es", "▁t hen", "▁the n", "▁th en", "▁ then", "au se", "aus e", "a use", "в а", "▁it s", "▁i ts", "▁ its", "ri t", "r it", "▁k now", "▁kn ow", "▁ know", "ie ld", "iel d", "i eld", "▁c lass", "▁cl ass", "▁clas s", "▁ class", "▁ >", "▁e m", "▁ em", "▁$ \\", "▁ $\\", "▁y ear", "▁ye ar", "▁ year", "w n", "} ,", "▁d el", "▁de l", "▁ del", "al e", "a le", "t y", "fi g", "f ig", "s p", "he d", "h ed", "ro und", "rou nd", "r ound", "e w", "▁d i", "▁ di", "▁d er", "▁de r", "▁ der", "р и", "re d", "r ed", "th is", "t his", "le t", "l et", "R E", "a x", "f r", "ess age", "essa ge", "ou gh", "o ugh", "▁c omm", 
"▁com m", "▁co mm", "▁ comm", "f o", "uc h", "u ch", "o y", "▁pe ople", "▁ people", "yst em", "ys tem", "▁f irst", "▁fir st", "▁ first", "▁f unction", "▁fun ction", "▁ function", "an ge", "ang e", "▁h ow", "▁ho w", "▁ how", "▁e t", "▁ et", "a h", "▁l ook", "▁lo ok", "▁ look", "т о", "un d", "u nd", "▁u nder", "▁un der", "▁und er", "▁ under", "к а", "▁ !", "ra y", "r ay", "S T", "if ic", "ifi c", "i fic", "л и", "re ad", "rea d", "r ead", "▁b et", "▁be t", "▁ bet", "io us", "i ous", "ar g", "a rg", "▁n eed", "▁ne ed", "▁ need", "ma th", "mat h", "m ath", "▁н а", "▁ на", "er t", "e rt", "▁o p", "▁ op", "▁a cc", "▁ac c", "▁ acc", "Pr o", "P ro", "▁e st", "▁es t", "▁ est", "▁U n", "▁ Un", "▁e nt", "▁en t", "▁ ent", "▁re c", "▁r ec", "▁ rec", "▁u se", "▁us e", "▁ use", "е н", "▁p ar", "▁pa r", "▁ par", "a z", "▁ д", "▁W h", "▁ Wh", "sel f", "s elf", "▁k e", "▁ ke", "т а", "▁w ant", "▁wa nt", "▁ want", "▁e nd", "▁en d", "▁ end", "▁d on", "▁do n", "▁ don", "e k", "re n", "r en", "Na me", "N ame", "▁= >", "▁ =>", "▁a pp", "▁ap p", "▁ app", "▁qu e", "▁q ue", "▁ que", "ig h", "i gh", "▁b u", "▁ bu", "eq u", "e qu", "ve l", "v el", "▁a ct", "▁ac t", "▁ act", "cr e", "c re", "A T", "▁v ar", "▁va r", "▁ var", "ce ss", "ces s", "c ess", "== ==", "=== =", "= ===", "E x", "▁a dd", "▁ad d", "▁ add", "▁m od", "▁mo d", "▁ mod", "un g", "u ng", "▁w here", "▁wh ere", "▁whe re", "▁ where", "ni ng", "n ing", "▁f l", "▁ fl", "al s", "a ls", "ter n", "te rn", "t ern", "} }", "▁A l", "▁ Al", "▁p os", "▁po s", "▁ pos", "an k", "▁a p", "▁ ap", "en g", "e ng", "▁ “", "bl e", "b le", "▁re g", "▁r eg", "▁ reg", "^ {", "▁S he", "▁Sh e", "▁ She", "▁* /", "▁ */", "ud e", "u de", "ad d", "a dd", "▁t wo", "▁tw o", "▁ two", "▁c ol", "▁co l", "▁ col", "▁s m", "▁ sm", "ai r", "a ir", "▁m ay", "▁ma y", "▁ may", "fo re", "for e", "f ore", "▁Y ou", "▁ You", "ro ugh", "rou gh", "r ough", "▁c he", "▁ch e", "▁ che", "▁a tt", "▁at t", "▁ att", "ot h", "o th", "л а", "▁c o", "▁ co", "at es", "ate s", "a tes", "▁re m", "▁r em", "▁ rem", "oo d", "o od", "Ty pe", "Typ e", "T ype", "le d", "l ed", "fu l", "f ul", "▁s elf", "▁sel f", "▁ self", "o f", "▁A r", "▁ Ar", "qu e", "q ue", "▁e very", "▁ev ery", "▁ever y", "▁ every", "re f", "r ef", "Th e", "T he", "▁A nd", "▁An d", "▁ And", "▁re l", "▁r el", "▁ rel", "O R", "I d", "▁e ven", "▁ev en", "▁ even", "E N", "▁h and", "▁ha nd", "▁han d", "▁ hand", "ai t", "a it", "▁sh ould", "▁ should", "▁a fter", "▁af ter", "▁ after", "▁d if", "▁di f", "gh t", "g ht", "if e", "i fe", "at or", "ato r", "a tor", "as h", "a sh", "ri but", "rib ut", "ribu t", "um ber", "umb er", "u mber", "▁s ee", "▁se e", "▁ see", "m s", "▁c all", "▁cal l", "▁ca ll", "▁ call", "y n", "d d", "▁e s", "▁ es", "▁m ake", "▁ma ke", "▁ make", "ot her", "oth er", "othe r", "o ther", "▁ —", "\") ;", "\" );", "st r", "s tr", "▁l ong", "▁lo ng", "▁lon g", "▁ long", "le ment", "lem ent", "l ement", "▁w or", "▁wo r", "▁ wor", "it s", "i ts", "▁I f", "▁ If", "al se", "als e", "л ь", "wa rd", "war d", "w ard", "▁п о", "▁ по", "va l", "v al", "on s", "o ns", "▁ Z", "▁n ow", "▁no w", "▁ now", "da ta", "dat a", "d ata", "am p", "a mp", "en se", "ens e", "▁th rough", "▁thr ough", "▁thro ugh", "▁ through", "▁d own", "▁do wn", "▁dow n", "▁ down", "at t", "a tt", "▁st atic", "▁stat ic", "▁ static", "ic s", "i cs", "# #", "po s", "p os", "▁v oid", "▁vo id", "▁ void", "a w", "ou n", "o un", "▁w ay", "▁wa y", "▁ way", "ib le", "i ble", "ve nt", "ven t", "v ent", "ow er", "owe r", "o wer", "▁th ink", "▁thin k", "▁ think", "t s", "* /", "▁a gain", 
"▁ag ain", "▁ again", "at ing", "ati ng", "atin g", "a ting", "т е", "ne r", "n er", "▁m ost", "▁mo st", "▁mos t", "▁ most", "li ne", "lin e", "l ine", "y m", "▁s ub", "▁su b", "▁ sub", "er son", "ers on", "▁re qu", "▁r equ", "▁req u", "▁ requ", "A L", "A R", "ab el", "abe l", "a bel", "on d", "o nd", ")) ;", ") );", "▁S e", "▁ Se", "▁B ut", "▁Bu t", "▁ But", "al k", "▁A n", "▁ An", "ne w", "n ew", "▁b ecause", "▁bec ause", "▁ because", "ge r", "g er", "ul ar", "ula r", "u lar", "ro up", "rou p", "r oup", "t a", ".. .", ". ..", "▁c ons", "▁con s", "▁co ns", "▁ cons", "▁r ight", "▁ri ght", "▁rig ht", "▁ right", "▁f r", "▁ fr", "b e", "il y", "i ly", "к и", "▁p h", "▁ ph", "ea d", "e ad", "? \"", "▁g u", "▁ gu", "▁el se", "▁els e", "▁ else", "▁s om", "▁so m", "▁ som", "re nt", "ren t", "r ent", "c o", "em ent", "eme nt", "emen t", "e ment", "▁s tr", "▁st r", "▁ str", "au lt", "aul t", "a ult", "▁ з", "л о", "se rt", "ser t", "s ert", "va r", "v ar", "ty pe", "typ e", "t ype", "▁C om", "▁Co m", "▁ Com", "л е", "in s", "i ns", "m e", "wa y", "w ay", "id ent", "ide nt", "iden t", "▁p rov", "▁pro v", "▁pr ov", "▁ prov", "▁ м", "▁tr ue", "▁ true", "▁P ro", "▁Pr o", "▁ Pro", "f l", "▁s l", "▁ sl", "▁A s", "▁ As", "} \\", "I D", "ue s", "u es", "▁in st", "▁ins t", "▁ inst", "▁n ame", "▁na me", "▁nam e", "▁ name", "o x", "▁ )", "l i", "am es", "ame s", "a mes", "Re s", "R es", "▁s ur", "▁su r", "▁ sur", "par am", "pa ram", "para m", "p aram", "▁st art", "▁star t", "▁sta rt", "▁ start", "a j", "S E", "as k", "a sk", "I T", "St ring", "Str ing", "S tring", "▁a ss", "▁as s", "▁ ass", "▁p lay", "▁pl ay", "▁ play", "ti ng", "t ing", "to n", "t on", "▁b efore", "▁be fore", "▁bef ore", "▁ before", "▁p ol", "▁po l", "▁ pol", "ar ch", "arc h", "▁w ell", "▁we ll", "▁wel l", "▁ well", "Co m", "C om", "an y", "a ny", "ol og", "olo g", "o log", "▁e rr", "▁er r", "▁ err", "▁the se", "▁th ese", "ar s", "a rs", "e b", "▁b r", "▁ br", "▁in cl", "▁inc l", "▁ incl", "▁h el", "▁he l", "▁ hel", "er n", "e rn", "od y", "o dy", "в о", "▁in d", "▁i nd", "▁ ind", "-- --------------", "---- ------------", "-------- --------", "--- -------------", "------------ ----", "----- -----------", "---------- ------", "------ ----------", "------------- ---", "-------------- --", "--------- -------", "------- ---------", "----------- -----", "▁d ata", "▁da ta", "▁dat a", "▁ data", "▁g ood", "▁go od", "▁ good", "L E", "] ,", "▁a v", "▁ av", "▁a c", "▁ ac", "id er", "ide r", "i der", "н е", "▁ Q", "▁m in", "▁mi n", "▁ min", "▁m uch", "▁mu ch", "c i", "el s", "e ls", "▁c ur", "▁cu r", "▁ cur", "▁v alue", "▁val ue", "▁ value", "er y", "e ry", "u f", "▁l oc", "▁lo c", "▁ loc", "re ak", "rea k", "at ive", "ati ve", "ativ e", "im es", "ime s", "i mes", "C l", "▁ ,", "▁s er", "▁se r", "▁ ser", "▁d ie", "▁di e", "▁ die", "▁tr ans", "▁tra ns", "▁ trans", "▁res ult", "▁ result", "ex t", "e xt", "▁a ut", "▁au t", "▁ aut", "la nd", "lan d", "l and", "▁& &", "▁ &&", "C h", "te n", "t en", "} $", "▁t ype", "▁typ e", "▁ty pe", "▁ type", "con d", "co nd", "c ond", "ic es", "ice s", "i ces", "▁v ery", "▁ver y", "▁ve ry", "▁ very", "▁o wn", "▁ own", "▁f il", "▁fi l", "▁ fil", "it ies", "iti es", "i ties", "▁p rodu", "▁pro du", "▁prod u", "▁ produ", "▁re ad", "▁r ead", "▁ read", "▁f orm", "▁for m", "▁fo rm", "▁ form", "▁c ase", "▁cas e", "▁ca se", "▁ case", "at her", "ath er", "a ther", "т и", "д а", "е р", "T h", "au t", "a ut", "▁s pec", "▁sp ec", "▁spe c", "▁ spec", "i j", "b l", "il ity", "ili ty", "▁ é", "▁e r", "▁ er", "▁d oes", "▁do es", "▁ does", 
"▁h ere", "▁he re", "▁her e", "▁ here", "th e", "t he", "ur es", "ure s", "u res", "▁ %", "mi n", "m in", "▁n ull", "▁nu ll", "▁ null", "ra p", "r ap", "\" )", "r r", "Li st", "L ist", "ri ght", "rig ht", "r ight", "▁U ser", "▁Us er", "▁Use r", "▁ User", "U L", "at ional", "ation al", "ati onal", "atio nal", "▁b eing", "▁be ing", "▁bei ng", "▁ being", "A N", "s k", "▁c ar", "▁ca r", "▁ car", "ol e", "o le", "▁d ist", "▁dis t", "▁di st", "▁ dist", "pl ic", "p lic", "ol low", "oll ow", "▁p res", "▁pre s", "▁pr es", "▁ pres", "▁s uch", "▁su ch", "▁suc h", "▁ such", "re am", "rea m", "in ce", "inc e", "ga n", "g an", "▁F or", "▁Fo r", "▁ For", "\" :", "so n", "s on", "riv ate", "▁y ears", "▁year s", "▁ye ars", "▁s erv", "▁se rv", "▁ser v", "▁ serv", "▁m ade", "▁ma de", "▁mad e", "▁ made", "de f", "d ef", "; \r", "▁g l", "▁ gl", "▁b el", "▁be l", "▁ bel", "▁l ist", "▁li st", "▁ list", "▁c or", "▁co r", "▁ cor", "▁d et", "▁de t", "▁ det", "ce ption", "cept ion", "eg in", "e gin", "▁ б", "▁c har", "▁ch ar", "▁cha r", "▁ char", "tr ans", "tra ns", "▁f am", "▁fa m", "▁! =", "▁ !=", "ou se", "ous e", "o use", "▁d ec", "▁de c", "▁ dec", "ic a", "i ca", "▁m any", "▁man y", "▁ma ny", "▁ many", "ak ing", "aki ng", "a king", "▁ à", "▁s im", "▁si m", "▁ sim", "ag es", "age s", "a ges", "uf f", "u ff", "as ed", "ase d", "a sed", "ma n", "m an", "▁S h", "▁ Sh", "ie t", "i et", "ir ect", "ire ct", "i rect", "▁R e", "▁ Re", "▁d iffer", "▁dif fer", "▁diff er", "▁f ind", "▁fin d", "▁fi nd", "▁ find", "eth od", "▁ \r", "in es", "ine s", "i nes", "▁in v", "▁i nv", "▁ inv", "▁p oint", "▁po int", "▁poi nt", "▁ point", "▁The y", "▁Th ey", "▁ They", "▁u sed", "▁us ed", "▁use d", "▁ used", "ct ions", "ction s", "▁st ill", "i ó", "in ed", "ine d", "i ned", "▁wh ile", "▁ while", "I t", "em ber", "emb er", "e mber", "▁s ay", "▁sa y", "▁ say", "▁he lp", "▁hel p", "▁ help", "▁c re", "▁cr e", "▁ cre", "▁ x", "▁T r", "▁ Tr", "um ent", "ume nt", "umen t", "u ment", "▁s k", "▁ sk", "ou ght", "ough t", "ual ly", "u ally", "m essage", "▁C on", "▁Co n", "▁ Con", "▁m on", "▁mo n", "▁ mon", "ar ed", "are d", "a red", "wor k", "w ork", ") :", "is ter", "ist er", "iste r", "i ster", "ar n", "a rn", "iz ed", "ize d", "i zed", "Dat a", "Da ta", "D ata", "or n", "o rn", "▁h ead", "▁he ad", "▁ head", "D E", "▁L e", "▁ Le", "▁p erson", "▁per son", "▁pers on", "▁ person", "ment s", "men ts", "m ents", "eng th", "e ngth", "▁f alse", "▁fal se", "▁fals e", "▁ false", "▁m ed", "▁me d", "▁ med", "▁D e", "▁ De", "ac he", "ach e", "a che", "it ed", "ite d", "i ted", "▁l et", "▁le t", "▁ let", "▁s how", "▁sh ow", "▁ show", "▁s ame", "▁sa me", "▁sam e", "▁ same", "us s", "u ss", "▁g ener", "▁gen er", "▁ge ner", "▁gene r", "▁ gener", "▁ у", "cu r", "c ur", "▁re al", "▁ real", "ce d", "c ed", "\" >", "st ruct", "str uct", "stru ct", "be gin", "b egin", "ce pt", "cep t", "▁b o", "▁ bo", "ir ed", "ire d", "i red", "▁F r", "▁ Fr", "▁st ud", "▁ stud", "de v", "d ev", "A r", "( \\", "▁C l", "▁ Cl", "we en", "w een", "▁t oo", "▁to o", "▁ too", "▁t est", "▁te st", "▁ test", "▁d ay", "▁da y", "▁ day", "o h", "▁f ollow", "▁fol low", "▁ follow", "at ure", "atur e", "atu re", "z e", "ie n", "i en", "re g", "r eg", "ce s", "c es", "ur ing", "uri ng", "u ring", "am b", "a mb", "in a", "i na", "cr i", "c ri", "▁e d", "▁ ed", "S S", "uc k", "u ck", "▁/ *", "▁ /*", "C T", "▁T here", "▁The re", "▁Th ere", "▁Ther e", "▁ There", "▁t ake", "▁tak e", "▁ta ke", "▁ take", "pa r", "p ar", "ul e", "u le", "ca l", "c al", "fo r", "f or", "** **************", "**** 
************", "******** ********", "************ ****", "************** **", "s ource", "▁th ose", "co l", "c ol", "▁e ff", "▁ eff", "mo d", "m od", "con t", "co nt", "c ont", "} {", "▁a round", "▁ar ound", "▁ around", "pr ess", "pre ss", "pres s", "p ress", "b y", "▁go ing", "▁ going", "pon se", "pons e", "▁ С", "▁l ine", "▁li ne", "▁lin e", "▁ line", "da te", "dat e", "d ate", "co de", "cod e", "c ode", "[ '", "▁l ife", "▁li fe", "▁lif e", "▁ life", "as on", "a son", "▁u sing", "▁us ing", "▁ using", "▁v al", "▁va l", "▁ val", "▁d u", "▁ du", "y p", "▁O n", "▁ On", "▁f ound", "▁fo und", "▁fou nd", "▁ found", "ol ut", "olu t", "' ]", "ar ent", "are nt", "aren t", "a rent", "▁s tring", "▁st ring", "▁str ing", "▁stri ng", "▁ string", "▁m et", "▁me t", "▁ met", "▁w r", "▁ wr", "us h", "u sh", "st ring", "str ing", "stri ng", "s tring", "si ze", "s ize", "▁v er", "▁ve r", "▁ ver", "▁e ach", "▁ each", "val ue", "v alue", "▁l ast", "▁la st", "▁las t", "▁ last", "▁g ot", "▁go t", "▁ got", "ve n", "v en", "ba ck", "b ack", "Se t", "S et", "e y", "ro l", "r ol", "▁c r", "▁ cr", "th ing", "t hing", "re t", "r et", "é s", "is m", "i sm", "▁bet ween", "▁ between", "O b", "et hing", "eth ing", "e thing", "m p", "▁l o", "▁ lo", "at s", "a ts", "▁N ew", "▁Ne w", "▁ New", "в и", "ad o", "a do", "de x", "d ex", "д и", "▁p ass", "▁pas s", "▁pa ss", "▁ pass", "w h", "▁d en", "▁de n", "▁ den", "Ge t", "G et", "ap t", "a pt", "▁a sk", "▁as k", "▁ ask", "▁s up", "▁su p", "▁ sup", "Val ue", "V alue", "н ы", "▁t ry", "▁tr y", "▁ try", "lat ion", "l ation", "da y", "d ay", "ne ss", "nes s", "n ess", "et s", "e ts", "▁ex per", "▁exp er", "▁ exper", "T r", "▁M ar", "▁Ma r", "▁ Mar", "se rv", "ser v", "s erv", "b r", "▁n umber", "▁num ber", "▁nu mber", "▁ number", "in al", "ina l", "i nal", "ce nt", "cen t", "c ent", "/ *", "no t", "n ot", "ion al", "io nal", "iona l", "i onal", "▁f inal", "▁fin al", "▁fi nal", "▁ final", "' )", "▁r un", "▁ru n", "▁ run", "ov er", "ove r", "o ver", "▁n ever", "▁ne ver", "▁ never", "u c", "▁h igh", "▁hig h", "▁hi gh", "▁ high", "yl e", "y le", "▁in s", "▁i ns", "▁ ins", "▁b est", "▁be st", "▁bes t", "▁ best", "it tle", "itt le", "ri c", "r ic", "▁s ign", "▁si gn", "▁sig n", "▁ sign", "▁d em", "▁de m", "▁ dem", "in ess", "ine ss", "ines s", "i ness", "g y", "▁w ar", "▁wa r", "▁ war", "is hed", "ish ed", "▁g iv", "▁gi v", "ke y", "k ey", "▁ X", "( $", "▁ch ild", "▁chi ld", "▁ child", "le ss", "les s", "l ess", "way s", "wa ys", "w ays", "in cl", "inc l", "ro p", "r op", "ra w", "r aw", ": //", "▁ «", "n o", "ind ow", "indo w", "f e", "ri end", "rie nd", "rien d", "▁l es", "▁le s", "▁ les", "▁l os", "▁lo s", "▁ los", "fil e", "fi le", "f ile", "form ation", "format ion", "cc ess", "c cess", "▁ В", "n a", "▁i l", "▁ il", "is ion", "isi on", "le r", "l er", "▁a rt", "▁ar t", "▁ art", "Con t", "Co nt", "C ont", "▁w orld", "▁wor ld", "▁ world", "▁t urn", "▁tu rn", "▁tur n", "▁ turn", "▁re ally", "▁real ly", "▁E x", "▁ Ex", "м а", "▁ П", "ter s", "te rs", "t ers", "ar get", "arg et", "arge t", "Er r", "E rr", "▁h app", "▁ha pp", "ti me", "tim e", "t ime", "▁S o", "▁ So", "di v", "d iv", "▁did n", "▁di dn", "ad a", "a da", "oo t", "o ot", "} )", "▁s ch", "▁sc h", "▁ sch", "▁c le", "▁cl e", "▁ cle", "▁some thing", "▁som ething", "▁somet hing", "▁ something", "() .", "( ).", "▁c our", "▁co ur", "▁cou r", "ev er", "eve r", "e ver", "an ts", "ant s", "▁ ?", "T o", "▁ `", "tr y", "t ry", "u x", "ai s", "a is", "ro ss", "ros s", "r oss", "hi p", "h ip", "▁re p", "▁r ep", "▁ rep", "la bel", "lab 
el", "l abel", "▁b oth", "▁bo th", "▁bot h", "▁ both", "* ,", "ot t", "o tt", "м и", "an e", "a ne", "▁o pen", "▁op en", "▁ open", "w w", "▁c ome", "▁com e", "▁co me", "▁ come", "▁e xt", "▁ex t", "▁ ext", "re m", "r em", "_{ \\", "_ {\\", "▁o ld", "▁ol d", "▁ old", "ch ed", "che d", "c hed", ". _", "M E", "if y", "i fy", "g g", "Co l", "C ol", "vi ew", "v iew", "▁b us", "▁bu s", "▁ bus", "▁m ust", "▁mus t", "▁mu st", "▁ must", "▁d ifferent", "▁differ ent", "lo g", "l og", "is ts", "ist s", "i sts", "ro ll", "rol l", "r oll", "a i", "▁з а", "▁ за", "▁s ystem", "▁sys tem", "▁syst em", "▁ system", "iv ers", "ive rs", "iver s", "i vers", "at us", "atu s", "ot e", "o te", "me d", "m ed", "] .", "ak es", "ake s", "a kes", "R O", "▁c ent", "▁ce nt", "▁ cent", "gr am", "gra m", "g ram", "▁p rivate", "▁priv ate", "▁ private", "▁g reat", "▁gre at", "\" ;", "op y", "o py", "▁fe el", "▁fee l", "▁H ow", "▁Ho w", "▁ How", "// //", "/// /", "/ ///", "I C", "▁d r", "▁ dr", "ain s", "ai ns", "a ins", "lo ck", "loc k", "l ock", "E n", "▁S ch", "▁Sc h", "▁ Sch", "▁m at", "▁ma t", "▁ mat", "▁h ome", "▁hom e", "▁ho me", "▁ home", "per ty", "pert y", "te st", "tes t", "t est", "lo c", "l oc", "▁w om", "▁wo m", "s w", "ar ly", "arl y", "▁E n", "▁ En", "▁к о", "▁ ко", "de n", "d en", "ст а", "с та", "▁ а", "et er", "ete r", "e ter", "▁incl ud", "▁inclu d", "UL L", "U LL", "▁m em", "▁me m", "▁ mem", "▁p o", "▁ po", "▁l ittle", "▁lit tle", "▁litt le", "▁a rg", "▁ar g", "▁ arg", "▁} ,", "▁ },", "in clude", "incl ude", "et a", "e ta", "▁p lace", "▁pl ace", "▁plac e", "▁ place", "id th", "us tom", "ust om", "▁| |", "▁ ||", "▁t em", "▁te m", "▁ tem", "ri ed", "rie d", "r ied", "▁f act", "▁fac t", "▁fa ct", "▁ fact", "ien ce", "i ence", "▁P l", "▁ Pl", "op t", "o pt", "el e", "e le", "g o", "A C", "in ter", "int er", "inte r", "==== ====", "() ,", "( ),", "ot s", "o ts", "ra l", "r al", "iqu e", "iq ue", "i que", "av ing", "avi ng", "a ving", "m l", "▁th ought", "▁though t", "▁thou ght", "fr ac", "f rac", "▁c are", "▁car e", "▁ca re", "▁ care", "() );", "()) ;", "( ));", "▁p ut", "▁pu t", "▁ put", "▁m ight", "▁mi ght", "▁mig ht", "▁A mer", "▁Am er", "▁ Amer", "▁( !", "▁ (!", "am ple", "amp le", "al th", "alt h", "▁f ew", "▁fe w", "▁st ate", "▁stat e", "▁sta te", "▁ state", "su b", "s ub", "▁O r", "▁ Or", "] ;", "▁s ize", "▁si ze", "▁ size", "▁S p", "▁ Sp", "▁with out", "▁ without", "▁p oss", "▁pos s", "▁po ss", "▁ poss", "e q", "pl ay", "p lay", "▁ex pect", "▁exp ect", "▁ expect", "▁se cond", "▁sec ond", "▁ second", "▁S tring", "▁St ring", "▁Str ing", "▁ String", "ui ld", "u ild", "▁n ext", "▁ne xt", "▁ next", "+ +", "re qu", "req u", "r equ", "▁A ll", "▁Al l", "▁ All", "▁m en", "▁me n", "▁ men", "▁W hen", "▁Wh en", "▁Whe n", "▁ When", "it er", "ite r", "i ter", "am ent", "ame nt", "amen t", "a ment", "ne t", "n et", "▁ К", "ro n", "r on", "ain t", "ai nt", "a int", "▁I s", "▁ Is", "в е", "pe nd", "pen d", "p end", "trans lation", "transl ation", "▁г о", "▁ го", "ч е", "▁v an", "▁va n", "▁ van", "▁an other", "▁ano ther", "▁re t", "▁r et", "▁ ret", "▁L a", "▁ La", "Mo d", "M od", "IO N", "I ON", "li st", "l ist", "▁p ost", "▁pos t", "▁po st", "▁ post", "d a", "wa re", "war e", "w are", "▁w ord", "▁wor d", "▁wo rd", "▁ word", "Err or", "Er ror", "▁se em", "▁see m", "▁cont in", "▁ contin", "at ic", "ati c", "▁th ree", "▁thr ee", "▁ three", "Ob ject", "Obj ect", "▁part ic", "▁parti c", "$ .", "▁m ark", "▁mar k", "▁ mark", "▁v is", "▁vi s", "▁ vis", "r c", "▁s w", "▁ sw", "pt ions", "ption s", "▁b reak", "▁bre ak", "▁ 
break", "▁th ings", "▁thing s", "▁thin gs", "ut e", "u te", "u i", "▁T hat", "▁Th at", "▁ That", "ur s", "u rs", "g l", "р у", "▁f ile", "▁fil e", "▁fi le", "▁ file", "us e", "u se", "ig ned", "ign ed", "igne d", "par t", "pa rt", "p art", "U n", "▁e qu", "▁eq u", "▁ equ", "( &", "▁l ead", "▁le ad", "r m", "ain ed", "ai ned", "aine d", "a ined", "▁B e", "▁ Be", "pat h", "pa th", "p ath", "▁sm all", "▁ small", "ag er", "age r", "a ger", "▁al ways", "▁ always", "▁E l", "▁ El", "▁or der", "▁ord er", "▁ order", "▁e y", "▁ ey", "▁w on", "▁wo n", "▁ won", "ap e", "a pe", "▁l eft", "▁le ft", "▁ left", "av a", "a va", "it em", "ite m", "i tem", "ho r", "h or", "▁a way", "▁aw ay", "▁ away", "b b", "fu n", "f un", "▁I nd", "▁In d", "▁ Ind", "m b", "▁st ruct", "▁str uct", "▁stru ct", "▁ struct", "▁pro cess", "▁proc ess", "▁proces s", "▁ process", "▁s upport", "▁sup port", "▁supp ort", "▁ support", "); \r", ") ;\r", "ió n", "i ón", "L O", "▁o per", "▁op er", "▁ oper", "U T", "▁ ·", "P E", "lo ad", "l oad", "of f", "o ff", "▁N o", "▁ No", "iv es", "ive s", "i ves", "ic an", "ica n", "i can", "▁v e", "▁ ve", "act ion", "a ction", "' ;", "▁v o", "▁ vo", "$ ,", "▁G r", "▁ Gr", "pr e", "p re", "n y", "ain ing", "ai ning", "a ining", "io r", "i or", "in it", "ini t", "i nit", "le ction", "lect ion", "l ection", "ar m", "a rm", "um n", "u mn", "ag s", "a gs", "ц и", "ск о", "с ко", "vers ion", "v ersion", "▁T o", "▁ To", "▁re f", "▁r ef", "▁ ref", "st and", "sta nd", "stan d", "▁A t", "▁ At", "if t", "i ft", "▁e in", "fa ce", "fac e", "f ace", "b o", "if ied", "ifi ed", "ve d", "v ed", "su m", "s um", "un e", "u ne", "it al", "ita l", "i tal", "um p", "u mp", "com m", "co mm", "c omm", "▁m ov", "▁mo v", "▁ mov", "el t", "e lt", "▁v on", "▁vo n", "vel op", "ct or", "c tor", "he ad", "h ead", "cl e", "c le", "▁b uild", "▁bu ild", "▁ build", "in c", "i nc", ". 
'", "b s", "in fo", "inf o", "ch n", "c hn", "▁we ek", "▁ week", "▁b ook", "▁bo ok", "▁ book", "H E", "ba r", "b ar", "ic ense", "▁W hat", "▁Wh at", "▁ What", "▁qu est", "▁que st", "▁q uest", "▁ quest", "ur ch", "at o", "a to", "le ft", "l eft", "▁m ar", "▁ma r", "▁ mar", "▁t op", "▁to p", "▁ top", "F F", "▁f riend", "▁ friend", "▁b eh", "▁be h", "▁f ield", "▁fi eld", "▁ field", "▁again st", "ra ct", "rac t", "r act", "iz ation", "us er", "use r", "u ser", "ch en", "che n", "c hen", "▁ke ep", "▁ keep", "A D", "it or", "ito r", "i tor", "▁n on", "▁no n", "▁ non", "ir d", "i rd", "op e", "o pe", "▁re st", "▁r est", "▁res t", "▁ rest", "▁d ev", "▁de v", "▁ dev", "▁_ _", "▁ __", "▁u na", "▁un a", "▁ una", "▁t erm", "▁te rm", "▁ter m", "▁ term", "I S", "▁p op", "▁po p", "▁ pop", "ri st", "ris t", "r ist", "▁s ince", "▁sin ce", "▁sinc e", "▁ since", "ve s", "v es", "▁h ard", "▁ha rd", "▁har d", "▁ hard", "p i", "ut il", "uti l", "u til", "▁s oc", "▁so c", "▁ soc", "en e", "e ne", "Ex ception", "▁l ocal", "▁loc al", "▁lo cal", "▁ local", "▁d irect", "▁di rect", "▁dire ct", "▁dir ect", "▁ direct", "▁s ure", "▁su re", "▁sur e", "▁ sure", "▁b ro", "▁br o", "▁ bro", "▁d a", "▁ da", "▁< /", "▁ ", "ai m", "a im", "▁s ervice", "▁serv ice", "▁ service", "▁with in", "an gu", "ang u", "▁ Д", "uf fer", "uff er", "A G", "▁D o", "▁ Do", "▁in cre", "▁inc re", "▁under stand", "} ^", "▁look ed", "▁lo oked", "ge n", "g en", "ail ed", "ai led", "a iled", "▁ е", "ay er", "aye r", "a yer", "▁O ne", "▁On e", "▁ One", "▁b as", "▁ba s", "▁ bas", "▁j ob", "▁jo b", "▁ job", "m u", "bu t", "b ut", "el ta", "elt a", "▁Ch rist", "▁Chris t", "▁ Christ", "ur ation", "▁re cord", "▁rec ord", "▁ record", "▁Un ivers", "▁ Univers", "iv id", "ivi d", "i vid", "val id", "▁ Р", "▁h old", "▁hol d", "▁ho ld", "▁ hold", "▁t able", "▁tab le", "▁ta ble", "▁ table", "on es", "one s", "o nes", "lin k", "l ink", "▁G e", "▁ Ge", "▁of fer", "▁off er", "st er", "ste r", "s ter", "For m", "F orm", "= {", "▁н е", "▁ не", "st ance", "stan ce", "▁g overn", "▁go vern", "▁gover n", "▁ govern", "▁te chn", "▁tech n", "▁ techn", "▁p rim", "▁pr im", "▁pri m", "▁ prim", "* .", "ch o", "c ho", "ma x", "m ax", "▁f ore", "▁for e", "▁fo re", "▁ fore", "▁C an", "▁Ca n", "▁ Can", "▁pol it", "▁po lit", "▁ polit", "or ies", "ori es", "orie s", "o ries", "▁t imes", "▁time s", "▁tim es", "▁ti mes", "▁ times", "▁d ans", "▁da ns", "▁dan s", "▁a ir", "▁ai r", "▁ air", "▁any thing", "▁s ever", "▁se ver", "ac y", "a cy", "} _", "H e", "▁l east", "▁le ast", "ip s", "i ps", "EN T", "E NT", "d o", "▁о т", "▁ от", "▁c ost", "▁co st", "▁cos t", "▁ cost", ". 
”", "▁child ren", "▁ children", "ab ility", "abil ity", "Bu t", "B ut", "▁p ath", "▁pat h", "▁pa th", "▁ path", "res ult", "ac ter", "act er", "▁e lement", "▁el ement", "▁ele ment", "▁elem ent", "▁ element", "e e", "▁w ait", "▁wa it", "▁ wait", "▁m oney", "▁mon ey", "▁mo ney", "Ma p", "M ap", "t d", "oi n", "o in", "iv ing", "ivi ng", "i ving", "ic ht", "ich t", "i cht", "ic y", "i cy", "sc h", "s ch", "st e", "s te", "д у", "or ed", "ore d", "o red", "ou d", "o ud", "il le", "ill e", "i lle", "is ed", "ise d", "i sed", "pl ication", "plic ation", "▁c ustom", "▁cust om", "▁ custom", "▁h aving", "▁ha ving", "▁hav ing", "pon ent", "po nent", "▁B y", "▁ By", "ul es", "ule s", "u les", "ue d", "u ed", "at ter", "att er", "atte r", "An d", "A nd", "it ive", "iti ve", "De f", "D ef", "▁m oment", "▁mom ent", "▁mo ment", "▁ moment", "at erial", "ate rial", "ater ial", "Cl ass", "C lass", "og raph", "ograp h", "o graph", "ik e", "i ke", "▁l arge", "▁larg e", "▁ large", "▁# ###", "▁## ##", "▁### #", "▁ ####", "▁e ither", "du ct", "duc t", "d uct", "▁T hen", "▁The n", "▁Th en", "▁ Then", "▁G u", "▁ Gu", "ole an", "o lean", "pe rt", "per t", "p ert", "▁G et", "▁Ge t", "▁ Get", "▁A b", "▁ Ab", "▁sh ort", "▁ short", "O n", "im ent", "ime nt", "imen t", "i ment", "▁pro ject", "▁ project", "cri pt", "cr ipt", "c ript", "▁incl uding", "▁includ ing", "▁inclu ding", "▁ including", "ни я", "▁m aking", "▁ma king", "▁ making", "▁some one", "▁F l", "▁ Fl", "▁s at", "▁sa t", "▁ sat", "▁comp any", "▁compan y", "▁ company", "oc us", "p u", "▁G od", "▁Go d", "▁ God", "if ication", "ific ation", "N o", "▁s n", "▁ sn", "an o", "a no", "g a", "▁a u", "▁ au", "▁c ou", "▁co u", "▁ cou", "á s", "en ded", "end ed", "ende d", "т у", "ob er", "obe r", "o ber", "▁n othing", "▁not hing", "▁no thing", "▁n et", "▁ne t", "▁ net", "▁p ot", "▁po t", "▁ pot", "▁t yp", "▁ty p", "▁ typ", "▁it em", "▁i tem", "▁ item", "re w", "r ew", "At t", "A tt", "▁you ng", "▁yo ung", "} \r", "nd er", "nde r", "n der", "st art", "sta rt", "star t", "▁S c", "▁ Sc", "* )", "▁e nc", "▁en c", "▁ enc", "▁w omen", "▁wom en", "▁wo men", "▁look ing", "▁lo oking", "▁ looking", "▁р о", "▁ ро", "▁he alth", "▁heal th", "▁ health", "Pat h", "P ath", "▁A fter", "▁Af ter", "▁ After", "▁m ult", "▁mu lt", "▁mul t", "▁ mult", "▁{ \\", "▁ {\\", "▁l and", "▁la nd", "▁lan d", "▁ land", "or ld", "▁D es", "▁De s", "▁ Des", "▁e ng", "▁en g", "▁ eng", "in put", "▁P ol", "▁Po l", "▁ Pol", "\" \"", "Co de", "C ode", "▁s upp", "▁su pp", "▁sup p", "▁ supp", "ain er", "ai ner", "aine r", "a iner", "he ck", "▁m or", "▁mo r", "▁ mor", "▁m ill", "▁mil l", "▁mi ll", "▁ mill", "▁a w", "▁ aw", "f s", "▁do ing", "ting s", "t ings", "ad es", "ade s", "a des", "▁to get", "▁c ertain", "▁cert ain", "▁cer tain", "▁t ogether", "▁toget her", "C E", "ide o", "▁Amer ican", "▁America n", "▁ American", "on y", "o ny", "id d", "i dd", "I I", "ge d", "g ed", "ab les", "able s", "abl es", "a bles", "▁ide nt", "▁id ent", "▁ ident", "io d", "i od", "▁p arent", "▁par ent", "▁pa rent", "▁pare nt", "▁ parent", "F or", "amb da", "an do", "and o", "= \\", "ag ed", "age d", "a ged", "en ding", "end ing", "In t", "I nt", "▁poss ible", "▁ possible", "▁с о", "▁ со", "iv ity", "ivi ty", "nu m", "n um", "r t", "aj or", "ajo r", "a jor", "cre ate", "creat e", "c reate", "ri de", "rid e", "r ide", "▁k new", "▁kn ew", "▁kne w", "bi t", "b it", "it ional", "ition al", "iti onal", "▁l ik", "▁li k", "▁ lik", "▁H er", "▁He r", "▁ Her", "ens ion", "\" .", "ot o", "o to", "▁ex ist", "▁ exist", "ak en", "ake n", "a ken", 
"▁act ually", "▁actual ly", "c a", "▁ Г", "х о", "in n", "i nn", "Al l", "A ll", "bu f", "b uf", "▁M e", "▁ Me", "▁s een", "▁se en", "▁see n", "▁ seen", "op s", "o ps", "No t", "N ot", "▁cont rol", "▁contr ol", "▁contro l", "▁ control", "▁res pon", "▁resp on", "▁ respon", "} ;", "il t", "i lt", "is k", "i sk", "▁b ad", "▁ba d", "▁ bad", "▁o ften", "▁of ten", "▁p ast", "▁pas t", "▁pa st", "ap er", "ape r", "a per", "▁re ason", "▁ reason", "et ers", "eter s", "ete rs", "e ters", "▁w anted", "▁want ed", "ur a", "u ra", "ta ble", "tab le", "t able", "or mal", "orm al", "wid th", "w idth", "г а", "pt r", "p tr", "▁d est", "▁de st", "▁des t", "▁ dest", "▁de sign", "▁des ign", "▁ design", "▁s ound", "▁so und", "▁sou nd", "▁ sound", "▁p lan", "▁pl an", "▁ plan", "▁b ase", "▁bas e", "▁ba se", "▁ base", "ha nd", "han d", "h and", "g s", "▁s ays", "▁sa ys", "▁say s", "fun ction", "f unction", "▁t ri", "▁tr i", "▁ tri", "m t", "▁in vest", "▁inv est", "▁av ailable", "▁ available", "ay out", "a yout", "▁o ch", "▁oc h", "▁ och", "▁l as", "▁la s", "▁ las", "il led", "ill ed", "ille d", "V al", "▁ ф", "ie ty", "iet y", "i ety", "mo n", "m on", "Ha nd", "H and", "F r", "ia m", "i am", "pa ce", "p ace", "▁O b", "▁ Ob", "▁p ara", "▁par a", "▁pa ra", "▁ para", "▁me et", "▁s um", "▁su m", "▁ sum", "M essage", "ic i", "i ci", "▁k nown", "▁kn own", "▁know n", "▁ known", "▁g en", "▁ge n", "▁ gen", "am ma", "amm a", "a mma", "ar r", "a rr", "▁t re", "▁tr e", "▁ tre", "ok e", "o ke", "ut h", "u th", "~ \\", "▁exper ience", "▁experi ence", "ic le", "icl e", "i cle", "▁I l", "▁ Il", "▁s ent", "▁se nt", "▁sen t", "▁ sent", "▁o thers", "▁other s", "▁ others", "▁s oft", "▁so ft", "▁ soft", "I P", "▁m ax", "▁ma x", "▁ max", "ba ll", "bal l", "b all", "▁mark et", "▁mar ket", "▁ market", "▁p our", "▁po ur", "▁pou r", "pr ession", "press ion", "p ression", "ep s", "e ps", "▁s aw", "▁sa w", "▁a cross", "▁ac ross", "▁S u", "▁ Su", "O ver", "ни е", "ul ation", "u lation", "▁R eg", "▁Re g", "▁ Reg", "▁+ =", "▁ +=", "bo dy", "b ody", ") \\", "▁pr int", "▁pri nt", "▁prin t", "▁ print", "▁п ри", "▁пр и", "▁ при", "d b", "our ces", "ource s", "ward s", "war ds", "w ards", "▁bl ack", "▁ black", "с о", "il i", "i li", "▁E d", "▁ Ed", "▁com plet", "▁comp let", "▁compl et", "▁s ingle", "▁sing le", "▁sin gle", "▁ single", "▁I N", "▁ IN", "ac hed", "ach ed", "ache d", "a ched", "b t", "▁c ode", "▁co de", "▁cod e", "▁ code", "▁b ool", "▁bo ol", "▁ bool", "▁a rea", "▁are a", "▁ar ea", "▁ area", "▁re quire", "▁requ ire", "▁ require", "▁pro blem", "▁proble m", "▁prob lem", "ac ed", "ace d", "a ced", "Eq u", "E qu", "▁con fig", "▁conf ig", "▁ config", "ve c", "v ec", "ne y", "n ey", "c y", "A l", "▁acc ount", "▁ac count", "▁ account", "ym bol", "▁s te", "▁st e", "▁ ste", "ge s", "g es", "Ar ray", "Arr ay", "em pl", "emp l", "con text", "cont ext", "De s", "D es", "Res ult", "ec ut", "e cut", "▁t arget", "▁tar get", "▁ target", "▁get ting", "\" />", "og le", "o gle", "▁him self", "▁was n", "▁wa sn", "▁b lock", "▁bl ock", "▁blo ck", "▁ block", "▁a nt", "▁an t", "▁ ant", "▁Y ork", "▁be come", "▁bec ome", "if f", "i ff", "port s", "por ts", "p orts", "re ate", "reat e", "rea te", "= '", "c d", "loc ation", "l ocation", "е т", "▁a ccess", "▁acc ess", "▁ac cess", "▁ access", "gr ess", "gre ss", "gres s", "g ress", "ro s", "r os", "U p", "▁work ing", "▁wor king", "▁ working", "▁A m", "▁ Am", "iq u", "i qu", "ce r", "c er", "▁( (", "▁ ((", "▁P er", "▁Pe r", "▁ Per", "▁f unc", "▁fun c", "▁fu nc", "▁ func", "▁g irl", "▁gi rl", "▁gir l", "▁ girl", "▁ab 
ove", "pe n", "p en", "п и", "id o", "i do", "▁v ersion", "▁vers ion", "▁ version", "T Y", "▁ ;", "ma ry", "mar y", "m ary", "ab led", "able d", "abl ed", "a bled", "an nel", "ann el", "anne l", "▁ex ample", "▁exam ple", "▁ example", "▁con text", "▁cont ext", "▁ context", "O P", "▁re d", "▁r ed", "▁ red", "▁c ir", "▁ci r", "▁ cir", "s m", "Lo g", "L og", "▁s pace", "▁sp ace", "▁ space", "▁f ut", "▁fu t", "▁G ener", "▁Ge ner", "▁Gen er", "▁Gene r", "▁ Gener", "il ls", "ill s", "▁d ri", "▁dr i", "_ .", "▁f elt", "▁fe lt", "▁fel t", "▁o ffic", "▁of fic", "▁off ic", "▁= ==", "▁== =", "▁ ===", "i i", "▁start ed", "▁star ted", "▁ Т", "▁} );", "▁}) ;", "▁ });", "j s", "▁fr ont", "▁fro nt", "▁ front", "▁al most", "ir m", "i rm", "! \"", "sign ed", "sig ned", "s igned", "▁y et", "▁ye t", "▁t rad", "▁tr ad", "▁tra d", "ient s", "ien ts", "i ents", "am a", "a ma", "▁in put", "▁ input", "li m", "l im", "п а", "▁к а", "▁ ка", "▁c amp", "▁cam p", "▁ca mp", "▁ camp", "ib r", "i br", "fe ct", "f ect", "un t", "u nt", "▁h alf", "▁hal f", "▁ half", "▁c over", "▁co ver", "▁cov er", "▁ cover", "angu age", "▁b en", "▁be n", "▁ ben", "h a", "▁d iff", "▁di ff", "▁dif f", "▁ diff", "_ \\", "▁о б", "▁ об", "] )", "od es", "ode s", "o des", "he l", "h el", "io s", "i os", "▁ О", "▁m ot", "▁mo t", "▁ mot", "▁s ocial", "▁so cial", "▁soc ial", "▁soci al", "▁ social", "//// ////", "▁s tre", "▁st re", "▁str e", "▁ stre", "gr ound", "gro und", "g round", "і в", "ob ject", "obj ect", "pl es", "ple s", "p les", "re ed", "ree d", "r eed", "▁e en", "▁ een", "▁b ased", "▁bas ed", "▁base d", "▁ba sed", "▁ based", "▁r ange", "▁ran ge", "▁rang e", "▁ range", "A n", "ur g", "u rg", "▁le arn", "▁lear n", "▁ learn", "▁e xc", "▁ex c", "▁ exc", "▁im p", "▁i mp", "▁ imp", "▁me ans", "▁mean s", "▁w ur", "en ds", "end s", "vo id", "v oid", "▁s td", "▁st d", "▁ std", "▁part icular", "▁partic ular", "▁particul ar", "▁parti cular", "j a", "▁s ource", "▁sour ce", "▁ source", "def ault", "p y", "▁a ls", "▁al s", "▁ als", "sc ri", "scr i", "s cri", "st atus", "stat us", "▁st ory", "▁stor y", "▁sto ry", "▁ story", "▁b egin", "▁be gin", "▁beg in", "▁ begin", "▁pos ition", "▁posit ion", "▁ position", "▁spec ial", "▁spe cial", "▁ special", "ph p", "p hp", "▁b ar", "▁ba r", "▁ bar", "▁p ract", "▁pr act", "▁pra ct", "▁prac t", "cal l", "ca ll", "c all", "▁d as", "▁da s", "▁ das", "▁r ad", "▁ra d", "▁ rad", "▁cl ose", "▁clos e", "▁clo se", "▁ close", "ww w", "w ww", "ер е", "е ре", "g u", "▁E r", "▁ Er", "▁d om", "▁do m", "▁ dom", "A M", "▁b ed", "▁be d", "▁ bed", "▁sever al", "au l", "a ul", "bo x", "b ox", "▁l ow", "▁lo w", "▁ low", "pa ck", "p ack", "Re g", "R eg", "O f", "at ures", "ature s", "atur es", "atu res", "é n", "ed er", "ede r", "e der", "uild er", "ca st", "cas t", "c ast", "con om", "co nom", "c onom", "ra ft", "raf t", "r aft", "▁m akes", "▁make s", "▁ma kes", "Lo c", "L oc", "ht tp", "htt p", "h ttp", "▁a bs", "▁ab s", "▁ abs", "re sh", "res h", "r esh", "▁W ill", "▁Wil l", "▁Wi ll", "▁ Will", "bre ak", "b reak", "▁o ptions", "▁opt ions", "▁option s", "▁ options", "fo rt", "for t", "f ort", "▁и з", "▁ из", "▁a nal", "▁an al", "▁ anal", "▁e nv", "▁en v", "▁ env", "( {", "ev ent", "even t", "eve nt", "e vent", "▁p age", "▁pa ge", "▁pag e", "▁ page", "ter nal", "tern al", "▁d istribut", "▁dist ribut", "▁f ood", "▁fo od", "▁foo d", "▁ food", "che ck", "c heck", "C K", "▁в о", "▁ во", "as sert", "ass ert", "asse rt", "á n", "ba se", "bas e", "b ase", "▁w hole", "▁wh ole", "▁who le", "ac ión", "ació n", "aci ón", "a ción", "O D", "▁turn 
ed", "▁tur ned", "ig ma", "▁res ponse", "▁respon se", "▁respons e", "▁ response", "▁Univers ity", "▁d iv", "▁di v", "▁ div", "ap ter", "apt er", "▁result s", "▁ results", "▁re present", "▁rep resent", "▁every thing", "▁C ent", "▁Ce nt", "▁ Cent", "ut es", "ute s", "u tes", "ri x", "r ix", "▁S ome", "▁So me", "▁Som e", "▁ Some", "▁be hind", "▁beh ind", "▁c reat", "▁cre at", "▁ creat", "pl ace", "plac e", "p lace", "s u", "▁P art", "▁Par t", "▁Pa rt", "▁ Part", "um b", "u mb", "math bb", "pi ng", "pin g", "p ing", "▁m atch", "▁mat ch", "▁ match", "O ut", "do m", "d om", "▁s itu", "▁sit u", "▁si tu", "d r", "ar a", "a ra", "▁w indow", "▁wind ow", "▁ window", "n s", "lish ed", "l ished", "▁V er", "▁Ve r", "▁ Ver", "▁m essage", "▁mess age", "▁ message", "▁E m", "▁ Em", "▁h uman", "▁hum an", "▁ human", "per ties", "pert ies", "л у", "le m", "l em", "OR T", "O RT", "▁e arly", "▁ear ly", "▁qu ick", "▁qui ck", "▁ quick", "▁т а", "▁ та", "ro id", "r oid", "▁c ountry", "▁coun try", "▁count ry", "▁countr y", "▁ country", "▁d ue", "▁du e", "▁ due", "▁D ie", "▁Di e", "▁ Die", "▁t rying", "▁tr ying", "▁try ing", "▁l ive", "▁li ve", "▁liv e", "▁ live", "▁p ress", "▁pre ss", "▁pr ess", "▁pres s", "▁ press", "IN T", "I NT", "W ith", "ov ed", "ove d", "o ved", "▁spec ific", "▁ specific", "▁f all", "▁fa ll", "▁fal l", "▁ fall", "u k", "y l", "▁gener al", "▁gen eral", "▁gene ral", "▁ general", "м у", "н у", "▁n ames", "▁name s", "▁na mes", "▁nam es", "▁ names", "wh ere", "whe re", "w here", "▁The se", "▁Th ese", "▁ These", "▁s il", "▁si l", "▁ sil", "é t", "▁e ner", "▁en er", "▁ ener", "▁N ow", "▁No w", "▁ Now", "▁add ress", "▁addr ess", "▁ address", "Res ponse", "▁M r", "▁ Mr", "▁an sw", "▁ans w", "▁fil m", "▁fi lm", "▁ film", "▁str ong", "▁stro ng", "▁ strong", "▁b ring", "▁br ing", "▁Un ited", "▁Unit ed", "▁g e", "▁ ge", "▁w oman", "▁wom an", "▁wo man", "▁ woman", "Ne w", "N ew", "et t", "e tt", ". 
)", "en ame", "ena me", "e name", "▁A N", "▁ AN", "▁de scrib", "▁desc rib", "з а", "is ing", "isi ng", "i sing", "E L", "q l", "▁f ur", "▁fu r", "▁ fur", "y ing", "▁C al", "▁Ca l", "▁ Cal", "▁D r", "▁ Dr", "ER R", "E RR", "▁\\ \\", "▁ \\\\", "an gle", "ang le", "ur ope", "uro pe", "urop e", "▁c ity", "▁cit y", "▁ci ty", "▁ city", "▁in dex", "▁ind ex", "▁inde x", "▁ index", "▁a ction", "▁act ion", "▁ action", "▁How ever", "▁ However", "▁f ig", "▁fi g", "▁ fig", "ia s", "i as", "▁quest ion", "▁ question", "▁J an", "▁Ja n", "▁ Jan", "▁M ed", "▁Me d", "▁ Med", "▁C ont", "▁Con t", "▁Co nt", "▁ Cont", "am ed", "ame d", "a med", "Cal l", "C all", "pl ied", "tt y", "t ty", "▁ind ivid", "pa ge", "pag e", "p age", "▁c omb", "▁com b", "▁co mb", "▁ comb", "se ction", "sect ion", "s ection", "▁C omm", "▁Com m", "▁Co mm", "▁ Comm", "ue l", "u el", "▁h et", "▁he t", "▁ het", "▁B ar", "▁Ba r", "▁ Bar", "ag ement", "age ment", "agem ent", "fi n", "f in", "▁m ajor", "▁ma jor", "▁maj or", "▁ major", "op er", "ope r", "o per", "ap i", "a pi", "ro om", "r oom", "▁ „", "▁h ab", "▁ha b", "▁ hab", "з и", "▁a uf", "▁au f", "▁ auf", "cur rent", "curr ent", "n i", "▁in clude", "▁incl ude", "▁includ e", "▁inclu de", "▁ include", "▁qu i", "▁q ui", "v a", "U E", "▁ide a", "▁id ea", "▁ idea", ", '", "▁requ ired", "▁require d", "▁ required", "▁he art", "▁hear t", "▁ heart", "ib ility", "ibil ity", "ict ion", "i ction", "Mod el", "Mode l", "Mo del", "wr ite", "writ e", "w rite", "▁cont ent", "▁conten t", "▁ content", "▁w er", "▁we r", "▁ wer", "▁h ands", "▁hand s", "▁han ds", "ze n", "z en", "ch ar", "cha r", "c har", "}^ {", "} ^{", "▁m ass", "▁ma ss", "▁mas s", "▁ mass", "pl y", "p ly", "▁n at", "▁na t", "▁ nat", "re l", "r el", "▁d at", "▁da t", "▁ dat", "==== ============", "======== ========", "============ ====", "im al", "ima l", "i mal", "▁pro bably", "▁prob ably", "un ch", "unc h", "▁m er", "▁me r", "▁ mer", "il ar", "ila r", "i lar", "ir es", "ire s", "i res", "▁w atch", "▁wat ch", "▁ watch", "S I", "▁c ult", "▁cu lt", "▁cul t", "▁m other", "▁mot her", "▁mo ther", "▁ mother", "▁govern ment", "or ding", "ord ing", "▁( )", "▁ ()", "▁p ri", "▁pr i", "▁l ink", "▁lin k", "▁ link", "gr oup", "gro up", "g roup", "O L", "▁n ear", "▁ne ar", "▁S er", "▁Se r", "▁ Ser", "Se r", "S er", "it o", "i to", "▁value s", "▁val ues", "▁ values", "▁j ava", "▁ja va", "▁ java", "ful ly", "full y", "f ully", "Co unt", "C ount", "++ )", "▁v i", "▁ vi", "▁wh ite", "▁ white", "ma t", "m at", "ct x", "c tx", "▁con c", "▁co nc", "▁ conc", "▁st ay", "▁sta y", "gi ng", "gin g", "g ing", "▁c lear", "▁cl ear", "▁cle ar", "▁ clear", "▁c opy", "▁co py", "▁cop y", "▁ copy", "sel ves", "▁prov ide", "▁w ords", "▁wor ds", "▁word s", "▁ words", "com p", "co mp", "c omp", "ar gs", "arg s", "▁p ick", "▁pi ck", "▁pic k", "▁ pick", "ul y", "u ly", "▁v ari", "▁var i", "▁va ri", "▁ vari", "▁bel ieve", "▁belie ve", "▁C o", "▁ Co", "Pro perty", "Gr oup", "G roup", "▁t en", "▁te n", "▁ ten", "is chen", "isch en", "ische n", "isc hen", "i schen", "et urn", "e turn", "iv al", "iva l", "i val", "Sys tem", "S ystem", "C L", "be d", "b ed", "▁t otal", "▁to tal", "▁tot al", "▁ total", "▁is t", "▁i st", "▁ ist", "In put", "um ents", "ument s", "umen ts", "u ments", "Man ager", "ш и", "▁w in", "▁ win", "le ep", "lee p", "P I", "но го", "н ого", "ru ction", "ruct ion", "r uction", "▁in te", "▁i nte", "▁int e", "▁ inte", "Ap p", "A pp", "av or", "avo r", "a vor", "▁re spect", "▁res pect", "▁resp ect", "▁ respect", "at ors", "ator s", "ato rs", "▁c omo", "▁com o", "▁co 
mo", "▁c ut", "▁cu t", "▁ cut", "F A", "▁s us", "▁su s", "▁A pp", "▁Ap p", "▁ App", "re ct", "rec t", "r ect", "F I", "▁be gan", "▁beg an", "op h", "o ph", "▁s ort", "▁so rt", "▁sor t", "▁ sort", "th ough", "ј е", "ic ro", "i cro", "Tr ans", "Tra ns", "л і", "▁In st", "▁Ins t", "▁ Inst", "re quest", "requ est", "req uest", "о р", "▁rel ations", "▁relation s", "- \\", "St atus", "Stat us", "ж и", "▁f ather", "▁fa ther", "▁fat her", "▁ father", "c s", "▁s ex", "▁se x", "▁ sex", "is ch", "isc h", "i sch", "v o", "}_ {", "} _{", "ave n", "av en", "a ven", "▁N e", "▁ Ne", "AT E", "A TE", "it ten", "itt en", "itte n", "▁e ss", "▁es s", "▁ ess", "T H", "ight s", "igh ts", "▁h om", "▁ho m", "▁ hom", "▁t oday", "▁to day", "▁tod ay", "▁toda y", "▁z u", "▁ zu", "it a", "i ta", "▁is n", "▁i sn", "▁o pt", "▁op t", "▁ opt", "og n", "o gn", "é r", "▁wh ether", "▁whe ther", "ix ed", "ph i", "p hi", "id ence", "iden ce", "al d", "a ld", "Cl ient", "A t", "▁de ath", "▁L et", "▁Le t", "▁ Let", "iu s", "i us", "г и", "▁р е", "▁ ре", "be n", "b en", ") \r", "b a", ">< /", "> ", "▁ ->", "▁J ust", "▁Ju st", "▁ Just", "Wh at", "W hat", "at al", "ata l", "a tal", "▁M in", "▁Mi n", "▁ Min", "▁C or", "▁Co r", "▁ Cor", "▁d ark", "▁dar k", "▁ dark", "r l", "▁l arg", "▁la rg", "▁ larg", "di ng", "d ing", "ó n", "ou ch", "o uch", "▁u m", "▁ um", "▁e lect", "▁el ect", "▁ele ct", "▁ elect", "▁d am", "▁da m", "▁ dam", "▁ne eds", "▁need s", "▁m atter", "▁mat ter", "▁matt er", "▁r ather", "▁rat her", "▁ra ther", "fr om", "f rom", "ra m", "r am", "▁ і", "▁t aken", "▁take n", "▁tak en", "▁ta ken", "▁de al", "▁per iod", "▁ period", "▁M on", "▁Mo n", "▁ Mon", "▁ Л", "▁A ug", "▁Au g", "▁ Aug", "ru n", "r un", "m m", "el le", "ell e", "e lle", "▁ex port", "▁exp ort", "▁ export", "S c", "vi s", "v is", "ab or", "a bor", "▁aut hor", "▁auth or", "▁ author", "è re", "▁re member", "▁rem ember", "▁remem ber", "▁re du", "▁r edu", "▁red u", "▁ redu", "▁L ist", "▁Li st", "▁Lis t", "▁ List", "▁f ocus", "▁ focus", "▁char acter", "▁ character", "Tab le", "T able", "▁individ ual", "▁need ed", "bu m", "b um", "▁st yle", "▁sty le", "▁ style", "in ary", "ina ry", "inar y", "ers ion", "ou te", "out e", "o ute", "▁P e", "▁ Pe", "▁h on", "▁ho n", "▁ hon", "mu t", "m ut", "se e", "s ee", "▁bec ame", "▁d ire", "▁di re", "▁dir e", "▁ dire", "▁d ocument", "▁doc ument", "▁ document", "se c", "s ec", "en ing", "eni ng", "e ning", "▁vis it", "▁ visit", "▁f ac", "▁fa c", "▁ fac", "t x", "do wn", "d own", "pl it", "p lit", "▁ph ys", "▁ phys", "it ting", "itt ing", "jo y", "j oy", "▁h ig", "▁hi g", "Th is", "T his", "A d", "▁B rit", "▁Br it", "▁em ploy", "▁r é", "▁ ré", "▁ т", "l ambda", "▁im pro", "▁imp ro", "▁B o", "▁ Bo", "id ing", "idi ng", "i ding", "▁on line", "▁ online", "me m", "m em", "at form", "▁W ar", "▁Wa r", "▁ War", "▁c as", "▁ca s", "▁ cas", "as ure", "a sure", "▁p ur", "▁pu r", "▁ pur", "me di", "med i", "m edi", "Di s", "D is", "▁G erm", "▁Ge rm", "▁Ger m", "p c", "с а", "▁friend s", "▁M c", "▁ Mc", "D I", "▁pl us", "▁ plus", "▁S et", "▁Se t", "▁ Set", "idd le", "it ut", "itu t", "▁de pend", "▁dep end", "▁ depend", "re st", "res t", "r est", "▁J e", "▁ Je", "▁h or", "▁ho r", "▁ hor", "▁ent ire", "Qu ery", "Que ry", "▁re fer", "▁ref er", "▁ refer", "▁h ot", "▁ho t", "▁ hot", "▁A ust", "▁Aus t", "▁Au st", "▁com mon", "▁comm on", "▁ common", "ц і", "▁p ull", "▁pu ll", "▁pul l", "▁ pull", "▁A dd", "▁Ad d", "▁ Add", "▁se ason", "▁sea son", "▁seas on", "▁ season", "▁in vol", "▁inv ol", "▁W orld", "▁Wor ld", "▁ World", "cl ient", "cli ent", "no w", 
"n ow", "tr ue", "ap pend", "app end", "appe nd", "appen d", "it ted", "itt ed", "itte d", "em pt", "emp t", ") {", "// /", "/ //", "▁p rop", "▁pro p", "▁pr op", "▁ prop", "im ate", "ima te", "imat e", "i mate", "S C", "▁h ours", "▁hour s", "▁ho urs", "▁h ope", "▁hop e", "▁ho pe", "an dom", "and om", "ando m", "і д", "ist ic", "isti c", "▁pro perty", "▁proper ty", "▁ property", "s g", "> (", "▁w rite", "▁wr ite", "▁writ e", "▁ write", "mar k", "m ark", "fin d", "fi nd", "f ind", "▁person al", "▁pers onal", "▁persona l", "▁ personal", "] [", "ro wn", "row n", "r own", "P h", "▁f oot", "▁fo ot", "▁foo t", "▁ foot", "▁re search", "▁res earch", "iron ment", "▁n om", "▁no m", "▁ nom", "▁in stance", "▁inst ance", "▁ instance", "▁h eld", "▁he ld", "▁hel d", "▁ held", "D e", "▁mem bers", "▁member s", "▁ members", "▁f ire", "▁fi re", "▁fir e", "▁ fire", "▁hist ory", "▁histor y", "▁hi story", "▁ history", "▁m ap", "▁ma p", "▁ map", "▁dis cuss", "▁disc uss", "▁e spec", "▁es pec", "▁esp ec", "▁ espec", "▁t aking", "▁tak ing", "▁ta king", "▁s ervices", "▁serv ices", "▁service s", "▁ services", "▁ind ust", "▁indu st", "▁ indust", "ig en", "ige n", "i gen", "▁A ss", "▁As s", "▁ Ass", "▁e xpected", "▁ex pected", "▁expect ed", "▁ expected", "▁wur de", "di r", "d ir", "▁a mong", "▁am ong", "▁s ugg", "▁su gg", "▁sug g", "re c", "r ec", "In ter", "Int er", "bl ock", "blo ck", "b lock", "▁R ep", "▁Re p", "▁ Rep", "▁p ain", "▁pa in", "▁f ive", "▁fi ve", "▁ five", "▁f und", "▁fun d", "▁fu nd", "▁ fund", "ri d", "r id", "ar row", "arr ow", "▁t reat", "▁tre at", "▁he ard", "▁hear d", "▁de term", "▁det erm", "▁deter m", "ic ult", "▁s ense", "▁sens e", "▁sen se", "es e", "e se", "F un", "▁month s", "▁mont hs", "js on", "j son", ", ”", "T I", "or age", "ora ge", "o rage", "▁ У", "▁every one", "▁c los", "▁cl os", "▁clo s", "▁ clos", "ie rs", "ier s", "i ers", "air s", "ai rs", "a irs", "def ine", "I f", "os p", "o sp", "▁w onder", "▁won der", "▁wo nder", "N A", "qu ery", "que ry", "quer y", "p g", "it es", "ite s", "i tes", "▁m aterial", "▁mat erial", "▁mate rial", "▁mater ial", "▁ material", "y d", "Re ad", "R ead", "ht ml", "h tml", "T E", "P r", "^{ \\", "^ {\\", "▁g ave", "▁ga ve", "▁I S", "▁ IS", "▁s uggest", "▁sugg est", "▁sug gest", "Over ride", "ro du", "rod u", "Fr om", "F rom", "▁E urope", "▁Europ e", "▁Euro pe", "▁ Europe", "P O", "▁s oon", "▁so on", "ho st", "hos t", "h ost", "▁B er", "▁Be r", "▁ Ber", ".. ..", "... .", ". 
...", "▁H ar", "▁Ha r", "▁ Har", "▁e nergy", "▁ener gy", "▁energ y", "▁ energy", "> <", "ave s", "av es", "a ves", "▁e asy", "▁eas y", "▁b re", "▁br e", "▁ bre", "fr ame", "▁g round", "▁gr ound", "▁gro und", "▁ ground", "wi th", "w ith", "▁in side", "▁ins ide", "ie f", "i ef", "▁m o", "▁ mo", "p m", "pa n", "p an", "ig r", "i gr", "▁o m", "▁ om", "ne xt", "nex t", "n ext", "om et", "ome t", "o met", "▁st atus", "▁stat us", "▁ status", "▁} \r", "▁ }\r", "▁mus ic", "or a", "o ra", "il es", "ile s", "i les", "k i", "▁e sc", "▁es c", "▁ esc", "▁b es", "▁be s", "▁ bes", "▁D is", "▁Di s", "▁ Dis", "▁h ost", "▁ho st", "▁ host", "▁c omes", "▁com es", "▁co mes", "▁come s", "▁ comes", "us ed", "use d", "u sed", "▁f uture", "▁fut ure", "▁ future", "lic k", "li ck", "l ick", "ai d", "a id", "▁com pet", "▁comp et", "▁ compet", "▁v oice", "▁vo ice", "▁ voice", "▁l oad", "▁lo ad", "▁ load", "ev el", "eve l", "e vel", "▁n eg", "▁ne g", "▁ neg", "▁com mand", "▁comm and", "▁ command", "▁f ür", "▁p ie", "▁pi e", "▁ pie", "▁qu ite", "▁qui te", "▁quit e", "▁b lo", "▁bl o", "▁ blo", "ag n", "a gn", "il on", "ilo n", "i lon", "▁cl aim", "▁ claim", "▁t each", "▁te ach", "▁tea ch", "▁pre vious", "▁prev ious", "▁ previous", "▁s ite", "▁sit e", "▁si te", "▁ site", "co lor", "col or", "colo r", "at tr", "att r", "▁ac cept", "▁ accept", "▁ex act", ") }", "af t", "a ft", "rol ler", "roll er", "о н", "o o", "Dat e", "Da te", "D ate", "▁o u", "▁ ou", "s y", "▁pre tty", "▁pret ty", "▁im age", "▁imag e", "▁ image", "B U", "▁term s", "▁ter ms", "▁s earch", "▁se arch", "▁sear ch", "▁ search", "▁ è", "▁V al", "▁Va l", "▁ Val", "▁ ‘", "▁D av", "▁Da v", "M S", "sr c", "s rc", "ma r", "m ar", "in cip", "inc ip", "▁could n", "ad os", "ado s", "▁d ro", "▁dr o", "▁ dro", "be ta", "bet a", "b eta", "im um", "▁min utes", "▁minute s", "▁minut es", "▁g rand", "▁gr and", "▁gran d", "▁gra nd", "▁ grand", "▁ »", "▁O ur", "▁ Our", "St r", "S tr", "VE R", "V ER", "ma z", "m az", "▁or iginal", "▁orig inal", "▁origin al", "▁ original", "in i", "i ni", "▁c oll", "▁col l", "▁co ll", "▁ coll", "lo at", "▁o s", "▁ os", "}) ;", "} );", "sum mary", "▁w all", "▁wa ll", "▁wal l", "▁ wall", "Col or", "Co lor", "▁v ers", "▁ver s", "▁ve rs", "▁ vers", "▁d ella", "▁de lla", "▁del la", "▁dell a", "▁\" \"\"", "▁\"\" \"", "▁ \"\"\"", "math bf", "ze r", "z er", "au r", "a ur", "▁tr ack", "▁tra ck", "▁ track", "▁ass oci", "▁ associ", "▁s uff", "▁su ff", "▁in de", "▁i nde", "▁ind e", "▁ inde", "ag ue", "agu e", "a gue", "▁A pr", "▁Ap r", "▁ Apr", "L e", "ro ups", "rou ps", "roup s", "bo ard", "b oard", "▁att ack", "▁s eries", "▁se ries", "▁ser ies", "▁serie s", "▁ series", "▁in stead", "▁inst ead", "ha m", "h am", "bo ok", "b ook", "▁s ix", "▁si x", "▁ six", "▁R ec", "▁Re c", "▁ Rec", "▁c oming", "▁com ing", "▁co ming", "▁ coming", "ur t", "u rt", "▁gl obal", "▁glob al", "▁glo bal", "▁ global", "▁ne cess", "▁neces s", "▁ necess", "le ge", "leg e", "Po s", "P os", "▁le ave", "▁ leave", "▁p od", "▁po d", "▁ pod", "ateg ory", "ategor y", "u z", "▁de ep", "▁ deep", "▁k m", "▁ km", "▁out side", "▁outs ide", "ha s", "h as", "opt ions", "option s", "o ptions", "▁S m", "▁ Sm", "Su b", "S ub", "ro ws", "row s", "r ows", "▁в и", "▁ ви", "▁St ates", "▁State s", "▁Stat es", "▁Sta tes", "▁ States", "▁wr ong", "▁how ever", "▁s em", "▁se m", "▁ sem", "▁c atch", "▁cat ch", "▁ catch", "\") ,", "\" ),", "mod el", "mode l", "mo del", "▁h ttp", "▁htt p", "▁ http", "▁o ption", "▁opt ion", "▁ option", "ri e", "r ie", "▁с та", "▁ст а", "▁ ста", "▁ä r", "▁ är", "▁en joy", "▁enjo y", 
"n u", "▁p as", "▁pa s", "▁ pas", "▁a mount", "▁am ount", "▁ amount", "▁res pons", "▁respon s", "▁resp ons", "▁ respons", "▁In tern", "▁Inter n", "▁Int ern", "▁ Intern", "▁my self", "▁o pp", "▁op p", "▁ opp", "▁S im", "▁Si m", "▁ Sim", "▁s ens", "▁se ns", "▁sen s", "E d", "▁( \\", "▁ (\\", "▁stud ents", "▁student s", "но в", "н ов", "▁point s", "▁ points", "ar ning", "arn ing", "U P", "el ling", "ell ing", "elli ng", "▁c annot", "▁can not", "B e", "▁l ength", "▁le ngth", "▁ length", "nu ll", "n ull", "ui nt", "u int", "wi se", "w ise", "▁d ouble", "▁dou ble", "▁doub le", "▁ double", "ig e", "i ge", "is ta", "ist a", "i sta", "▁est ab", "▁es tab", "▁esta b", "an ch", "anc h", "▁a go", "▁ag o", "▁ ago", "▁b ound", "▁bo und", "▁bou nd", "▁ bound", "▁f a", "▁ fa", "▁c lean", "▁cle an", "▁ clean", "▁sim ple", "▁simpl e", "▁ simple", "m i", "#### ####", "if ier", "ifi er", "▁Gener al", "▁Gen eral", "▁Gene ral", "▁ General", "▁se emed", "▁see med", "▁seem ed", "en a", "e na", "▁a ge", "▁ag e", "▁ age", "но й", "end if", "A A", "▁c aus", "▁ca us", "▁e duc", "▁ed uc", "▁ educ", "▁c ell", "▁ce ll", "▁cel l", "▁ cell", "Ge ner", "Gen er", "G ener", "sp ace", "s pace", "▁Y our", "▁You r", "▁ Your", "▁be aut", "g t", "▁l imit", "▁li mit", "▁lim it", "▁ limit", "▁d ate", "▁da te", "▁dat e", "▁ date", "Ut il", "U til", "▁N ational", "▁Nat ional", "▁Nation al", "▁ National", "ow s", "o ws", "pa t", "p at", "qu ad", "▁o k", "▁ ok", "▁ И", "ar th", "art h", "ha t", "h at", "▁comm unity", "▁commun ity", "ou l", "o ul", "▁e conom", "▁ec onom", "▁ econom", "Com ponent", "bo r", "b or", "us ion", "▁be low", "▁bel ow", "ear ch", "e arch", "or es", "ore s", "o res", "ba n", "b an", "▁Aug ust", "▁fur ther", "sig ma", "s igma", "▁h a", "▁ ha", "j i", "▁com put", "▁comp ut", "▁ comput", "г ра", "▁N one", "▁No ne", "▁Non e", "▁ None", "▁t er", "▁te r", "▁ ter", "▁any one", "▁t ask", "▁ta sk", "▁ task", "en te", "ent e", "e nte", "pos ition", "pp ed", "ppe d", "p ped", "▁a us", "▁au s", "▁ aus", "Att ribute", "Attrib ute", "re q", "r eq", "ad dr", "add r", "li ght", "lig ht", "l ight", "ш е", "▁a rm", "▁ar m", "▁ arm", "co ver", "cov er", "c over", "up port", "upp ort", "▁G l", "▁ Gl", "▁S an", "▁Sa n", "▁ San", "▁wr iting", "▁writ ing", "▁ writing", "▁l ost", "▁lo st", "▁los t", "▁M ark", "▁Mar k", "▁ Mark", "▁g re", "▁gr e", "▁ gre", "TY PE", "T YPE", "▁S outh", "▁So uth", "▁Sou th", "▁Sout h", "▁ South", "▁per fect", "▁perf ect", "▁pack age", "▁ package", "▁in fl", "▁inf l", "▁ infl", "ha ps", "h aps", "▁A ng", "▁An g", "▁ Ang", "res pon", "resp on", "ri s", "r is", "pt ember", "pte mber", "▁build ing", "▁ building", "VA L", "V AL", "fr ee", "fre e", "f ree", "▁c e", "▁ ce", "H T", "▁F rom", "▁Fr om", "▁Fro m", "▁ From", "d s", "ro y", "r oy", "ach ine", "achi ne", "no wn", "now n", "n own", "▁sa ying", "▁say ing", "▁б ы", "▁ бы", "o e", "Re f", "R ef", "▁net work", "▁ network", "par ent", "pa rent", "pare nt", "paren t", "p arent", "ug e", "u ge", "▁sim ilar", "> \r", "Build er", "B uilder", "▁l iving", "▁li ving", "▁liv ing", "▁contin ue", "▁continu e", "▁ continue", "an ger", "ang er", "ange r", "▁R ed", "▁Re d", "▁ Red", "▁h air", "▁ha ir", "an ced", "ance d", "anc ed", "ia ns", "ian s", "i ans", "▁d ead", "▁de ad", "▁ dead", "▁bo olean", "▁ boolean", "ic ation", "▁д е", "▁ де", "▁cl ient", "▁ client", "uc t", "u ct", "▁ •", "S P", "ol der", "old er", "п е", "ud io", "udi o", "▁d eg", "▁de g", "▁ deg", "as ing", "asi ng", "a sing", "▁st ep", "▁ste p", "▁ step", "▁p ers", "▁per s", "▁pe rs", "▁ pers", "ç ão", 
"ob j", "o z", "ul a", "u la", "▁r ound", "▁ro und", "▁rou nd", "▁ round", "▁u pon", "▁up on", "▁re source", "▁res ource", "▁ resource", "▁val id", "▁ valid", "▁I I", "▁ II", "bu g", "b ug", "st d", "s td", "▁a ng", "▁an g", "▁ ang", "sp an", "s pan", "po l", "p ol", "ial og", "ia log", "▁p hot", "▁ph ot", "? '", "D B", "▁F in", "▁Fi n", "▁ Fin", "V E", "E m", "▁c am", "▁ca m", "▁ cam", "tar get", "t arget", "pe cted", "pect ed", "pec ted", "He l", "H el", "▁u t", "▁ ut", "▁T est", "▁Te st", "▁Tes t", "▁ Test", "▁t own", "▁to wn", "▁tow n", "▁ town", "al ign", "ali gn", "▁we bs", "▁web s", "in ner", "inn er", "au gh", "aug h", "a ugh", "▁ex cept", "▁ except", "▁init ial", "▁initi al", "▁ initial", "en ty", "ent y", "lic h", "li ch", "l ich", "▁A ut", "▁Au t", "▁ Aut", "to p", "t op", "▁f ail", "▁fa il", "▁ fail", "on a", "o na", "▁ben ef", "an ks", "ank s", "is che", "isch e", "isc he", "i sche", ". *", "▁sign ific", "▁cont act", "▁ contact", "Re c", "R ec", "ar io", "ari o", "a rio", "ot tom", "ott om", "otto m", "▁rel ationship", "▁relations hip", "▁relation ship", "]) ;", "] );", "▁Н а", "▁ На", "He ad", "H ead", "form at", "for mat", "▁é t", "▁ ét", "▁M ore", "▁Mor e", "▁Mo re", "▁ More", "act ory", "actor y", "port un", "+ \\", "▁sim ply", "▁simpl y", "▁e p", "▁ ep", "▁R uss", "▁Ru ss", "▁Rus s", "n í", "u a", "er c", "e rc", "▁long er", "▁lon ger", "in ition", "init ion", "ect or", "ec tor", "e ctor", "apt ion", "a ption", "▁prof ess", "▁profes s", "▁M us", "▁Mu s", "▁ Mus", "il ities", "ili ties", "è s", "▁A ct", "▁Ac t", "▁ Act", "off set", "offs et", "▁i ll", "▁il l", "▁ ill", "ba nd", "ban d", "b and", "▁A g", "▁ Ag", "▁П о", "▁ По", "б и", "cont ent", "ic on", "ico n", "i con", "▁work s", "▁wor ks", "▁ works", "yn am", "yna m", "y nam", "pl ement", "ple ment", "p lement", "Res ource", "Re source", "Act ion", "A ction", "▁diff icult", "▁W est", "▁We st", "▁Wes t", "▁ West", "▁v ideo", "▁vide o", "▁ video", "▁T HE", "▁TH E", "▁ THE", "▁de cl", "▁dec l", "▁ decl", "on don", "ond on", "ondo n", "de d", "d ed", "}{ \\", "} {\\", "oc r", "o cr", "▁C ity", "▁Cit y", "▁Ci ty", "▁ City", "▁ я", "ue r", "u er", "c z", "▁im ag", "▁i mag", "▁ imag", "c r", "et e", "e te", "id get", "idge t", "▁M od", "▁Mo d", "▁ Mod", "▁for ward", "▁ forward", "▁p ict", "▁pi ct", "▁pic t", "or ge", "org e", "▁sub ject", "▁ subject", "up date", "at tle", "att le", "s a", "▁A nt", "▁An t", "▁ Ant", "▁r unning", "▁run ning", "▁ running", "▁s al", "▁sa l", "▁ sal", "con ne", "conn e", "c onne", "▁out put", "▁ output", "ad ata", "ada ta", "a data", "M L", "Che ck", "C heck", "led ge", "l edge", "▁p aper", "▁pa per", "▁pap er", "▁ paper", "param s", "par ams", "para ms", "av y", "a vy", "▁a f", "▁ af", "▁e ine", "▁ein e", "▁j our", "▁jo ur", "▁jou r", "▁ jour", "A Y", "▁it self", "▁its elf", "▁S tr", "▁St r", "▁ Str", "st yle", "sty le", "Th at", "T hat", "▁m illion", "▁mill ion", "▁l anguage", "▁ language", "O S", "vi ng", "vin g", "v ing", "▁м а", "▁ ма", "▁т о", "▁ то", ") (", "▁b uy", "▁bu y", ". /", "▁. ..", "▁.. 
.", "▁ ...", "▁t ried", "▁tr ied", "▁tri ed", "▁com pl", "▁comp l", "▁act iv", "▁ activ", "ap ped", "app ed", "appe d", "a pped", "But ton", "B utton", "To ken", "Tok en", "T oken", "▁prov ided", "▁provide d", "ib er", "ibe r", "i ber", "▁c reated", "▁cre ated", "▁create d", "▁creat ed", "▁ created", "cur ity", "c urity", "En d", "E nd", "a ł", "us ter", "ust er", "u ster", "iz ing", "izi ng", "i zing", "om b", "o mb", "▁s ich", "▁si ch", "▁com pon", "▁comp on", "▁S ee", "▁Se e", "▁ See", "▁u int", "▁ui nt", "▁ uint", "▁l abel", "▁la bel", "▁lab el", "▁ label", "vo l", "v ol", "ó w", "oc ol", "oco l", "o col", "▁re ceived", "▁rece ived", "▁receive d", "▁in tern", "▁int ern", "▁inter n", "▁inte rn", "▁ intern", "ц е", "R un", "▁r oad", "▁ro ad", "▁ road", "▁O ct", "▁ Oct", "▁C omp", "▁Com p", "▁Co mp", "▁ Comp", "▁stud y", "▁т е", "▁ те", "Ac t", "A ct", "▁t our", "▁to ur", "▁tou r", "▁St ate", "▁Stat e", "▁Sta te", "▁ State", "▁ad ded", "▁add ed", "▁ added", "htt ps", "http s", "st ream", "stre am", "▁l ower", "▁lo wer", "▁low er", "▁ lower", "▁b ox", "▁bo x", "▁ box", "▁S k", "▁ Sk", "▁them selves", "▁c ross", "▁cr oss", "▁cro ss", "▁ cross", "▁e cho", "▁ec ho", "▁ echo", "▁dev ice", "▁ device", "pos e", "po se", "p ose", "▁g ames", "▁game s", "▁gam es", "▁ga mes", "P L", "W indow", "is es", "ise s", "i ses", "ti tle", "tit le", "t itle", "St ream", "z t", "▁S w", "▁ Sw", "▁r ole", "▁ro le", "▁ role", "ia nt", "ian t", "i ant", "k u", "se qu", "seq u", "s equ", "▁l ate", "▁la te", "▁lat e", "▁ late", "▁s old", "▁so ld", "▁sol d", "р я", "Com m", "Co mm", "C omm", "▁en tre", "▁ent re", "▁entr e", "▁ entre", "▁d og", "▁do g", "▁ dog", "dev ice", "P ar", "▁like ly", "▁lik ely", "▁ likely", "^{ -", "^ {-", "▁l en", "▁le n", "▁ len", "▁P aul", "▁Pa ul", "▁ Paul", "▁t ool", "▁to ol", "▁too l", "▁ tool", "Of f", "O ff", "▁f amil", "▁fam il", "▁fa mil", "▁d raw", "▁dr aw", "▁ draw", "ap ping", "app ing", "a pping", "▁ev ents", "▁even ts", "▁event s", "▁ events", "cre t", "cr et", "c ret", "rou ght", "rough t", "r ought", "Cont ent", "▁soft ware", "ri a", "r ia", "ms g", "m sg", "ga mma", "g amma", "▁h ear", "▁he ar", "Op er", "O per", "▁your self", "▁yours elf", "▁l iter", "▁li ter", "▁lit er", "▁ liter", "em p", "e mp", "▁se par", "▁sep ar", "▁ separ", "▁ З", "▁t itle", "▁tit le", "▁ti tle", "▁ title", "M ethod", "math rm", "▁s low", "▁sl ow", "▁R om", "▁Ro m", "▁ Rom", "! 
!", "▁t ax", "▁ta x", "▁ tax", "ск а", "с ка", "empl ate", "emp late", "o i", "▁A rt", "▁Ar t", "▁ Art", "f alse", "ast ic", "ст ь", "с ть", "oc ket", "ock et", "▁e ns", "▁en s", "▁ ens", "T O", "am ente", "ame nte", "ament e", "amen te", "a mente", "lo cal", "loc al", "l ocal", "ch ie", "chi e", "▁p an", "▁pa n", "▁ pan", "ни й", "ch ema", "che ma", "chem a", "▁N orth", "▁Nor th", "▁Nort h", "з о", "▁> =", "▁ >=", "A ut", "▁d ig", "▁di g", "▁ dig", "▁se ems", "▁see ms", "▁seem s", "▁mor ning", "so le", "sol e", "s ole", "um er", "ume r", "u mer", "del ta", "d elta", "it é", "i té", "ab ase", "aba se", "a base", "ra f", "r af", "▁ob serv", "▁obs erv", "▁ observ", "▁E st", "▁Es t", "▁ Est", "▁s eg", "▁se g", "▁ seg", "▁[ ]", "▁ []", "▁P res", "▁Pr es", "▁Pre s", "▁ Pres", "if ul", "i ful", "pu sh", "pus h", "p ush", "▁O ff", "▁Of f", "▁ Off", "ip e", "i pe", "at i", "a ti", "▁d im", "▁di m", "▁ dim", "ce ed", "c eed", "En t", "E nt", "__ __", "___ _", "_ ___", "en try", "ent ry", "entr y", "▁f ight", "▁fig ht", "▁fi ght", "▁c red", "▁cre d", "▁cr ed", "▁ cred", "▁O R", "▁ OR", "▁D ep", "▁De p", "▁ Dep", "$ {", "ле н", "л ен", "Creat e", "C reate", "▁Apr il", "▁Ap ril", "min istr", "F L", "▁A p", "▁ Ap", "▁H ere", "▁He re", "▁Her e", "▁ Here", "priv ate", "p rivate", "In stance", "Inst ance", "ie m", "i em", "▁off ice", "▁offic e", "▁th ird", "▁ third", "▁up date", "▁ update", "Lin e", "Li ne", "L ine", "ta g", "t ag", "▁e specially", "▁espec ially", "▁especial ly", "▁ especially", "▁го да", "▁год а", "▁c u", "▁ cu", "▁k ill", "▁kil l", "▁ki ll", "▁ kill", "au ght", "augh t", "aug ht", "▁s we", "▁sw e", "Option s", "Opt ions", "O ptions", "I M", "C C", "▁com pan", "▁comp an", "ju st", "j ust", "▁Wh ile", "▁ While", "iz er", "ize r", "i zer", "▁м о", "▁ мо", "к е", "▁a uto", "▁aut o", "▁au to", "▁ auto", "▁b and", "▁ban d", "▁ba nd", "▁ band", "ме н", "м ен", "ique s", "iqu es", "iq ues", "i ques", "▁p le", "▁pl e", "▁ ple", "N O", "▁O F", "▁ OF", "▁s ong", "▁so ng", "▁son g", "▁A cc", "▁Ac c", "▁ Acc", "EX T", "E XT", "en sor", "ens or", "enso r", "in ing", "ini ng", "i ning", "▁l at", "▁la t", "▁ lat", "bi g", "b ig", "▁K ing", "▁Ki ng", "▁Kin g", "▁ King", "oc h", "o ch", "s i", "▁H ist", "▁His t", "▁Hi st", "▁ Hist", "▁qu ality", "▁qual ity", "▁ quality", "mod e", "mo de", "m ode", "▁op portun", "▁would n", ":* *", ": **", "out put", "▁fe et", "▁fee t", "▁m is", "▁mi s", "d f", "ag ing", "agi ng", "a ging", "▁м е", "▁ ме", "▁t ro", "▁tr o", "▁d efined", "▁def ined", "▁define d", "▁defin ed", "▁ defined", "▁re view", "▁rev iew", "▁ review", "▁F il", "▁Fi l", "▁ Fil", "> >", "▁pr incip", "▁prin cip", "Bas e", "B ase", "di ct", "d ict", "ve rage", "ver age", "ic ient", "ici ent", "I F", "▁h it", "▁hi t", "▁ hit", "Pag e", "P age", "▁p erm", "▁per m", "▁pe rm", "▁ perm", "ce l", "c el", "í t", "▁ex press", "▁exp ress", "▁expr ess", "▁ express", "▁ind ic", "▁Se ptember", "▁Sept ember", "im age", "ima ge", "imag e", "▁product s", "▁ products", "▁m edia", "▁med ia", "▁medi a", "▁ media", "ch ange", "chan ge", "ig ger", "igg er", "▁s end", "▁se nd", "▁sen d", "▁ send", "la st", "las t", "l ast", "min g", "mi ng", "m ing", "p a", "ua ry", "uar y", "u ary", "▁spe ak", "ны й", "щ е", "ys is", "y sis", "ly ing", "l ying", "▁ ч", "li ke", "lik e", "l ike", "р ы", "в і", "▁M ich", "▁Mic h", "▁Mi ch", "M O", "▁J ah", "▁Ja h", "ens ive", "▁sh are", "▁shar e", "▁sha re", "▁ share", "▁develop ment", "C P", "sp ec", "spe c", "s pec", "▁f ast", "▁fa st", "▁ fast", "he t", "h et", "H O", "▁part icip", 
"▁partic ip", "▁parti cip", "Bl ock", "Blo ck", "B lock", "▁vi ol", "▁fr ame", "▁fra me", "▁fram e", "▁ frame", "▁qu al", "▁q ual", "▁ qual", "tr e", "t re", "▁ Ф", "▁to ward", "▁tow ard", "f g", "Bo x", "B ox", "Col umn", "▁mil it", "▁mi lit", "▁M arch", "▁Mar ch", "▁Marc h", "▁var ious", "▁vari ous", "pa ss", "pas s", "p ass", "▁P ark", "▁Par k", "▁B en", "▁Be n", "▁ Ben", "Fr ame", "▁n ormal", "▁nor mal", "▁norm al", "▁ normal", "op en", "ope n", "o pen", "p x", "▁ph one", "▁ phone", "▁E ven", "▁Ev en", "▁Eve n", "▁ Even", "▁m a", "▁ ma", "ibr ary", "St art", "Star t", "id den", "idd en", "rh o", "r ho", "gr aph", "gra ph", "g raph", "ac ing", "aci ng", "a cing", "' .", "ar ter", "art er", "arte r", "me s", "m es", "in st", "ins t", "▁i r", "▁ ir", "act ive", "activ e", "▁f em", "▁fe m", "▁ fem", "▁m oved", "▁mov ed", "▁move d", "▁mo ved", "▁st ore", "▁stor e", "▁sto re", "▁ store", "▁p rice", "▁pr ice", "▁pri ce", "▁ price", "\") .", "\" ).", "ber g", "be rg", "b erg", "▁n ov", "▁no v", "▁ nov", "▁c ard", "▁car d", "▁ca rd", "▁ card", "el low", "ell ow", "ello w", "▁part y", "▁par ty", "▁ party", "▁M or", "▁Mo r", "ae l", "a el", "▁per cent", "▁ percent", "▁tr aining", "▁tra ining", "▁train ing", "▁ training", "▁in g", "▁i ng", "▁ ing", "im er", "ime r", "i mer", "▁S am", "▁Sa m", "▁ Sam", "Def ault", "▁f uck", "▁fu ck", "▁com plete", "▁comp lete", "▁complet e", "▁compl ete", "▁ complete", "ui d", "u id", "▁det ails", "▁detail s", "▁ details", "▁l ed", "▁le d", "▁ led", "Po int", "P oint", "▁C ount", "▁Co unt", "▁Coun t", "▁Cou nt", "▁ Count", "▁reg ard", "z o", "▁B ro", "▁Br o", "▁ Bro", "▁rec ogn", "▁ recogn", "▁H ol", "▁Ho l", "▁ Hol", "U M", "el ement", "ele ment", "elem ent", "e lement", "Mod e", "Mo de", "M ode", "▁ex am", "▁E X", "▁ EX", "Im age", "ver se", "vers e", "ri ter", "rit er", "rite r", "r iter", "so ft", "s oft", "▁int rodu", "▁intro du", "▁sur pr", "Buf fer", "Buff er", "B uffer", "le ctor", "lect or", "l ector", "ar en", "are n", "a ren", "an ged", "ang ed", "ange d", "▁P at", "▁Pa t", "▁ Pat", "▁P al", "▁Pa l", "▁ Pal", "▁con tr", "▁cont r", "▁ contr", "Hand ler", "Handle r", "▁fe atures", "▁feature s", "▁feat ures", "▁ features", "ip le", "i ple", "▁C ON", "▁CO N", "▁ CON", "Fi l", "F il", "▁P ort", "▁Po rt", "▁Por t", "▁ Port", "▁th inking", "▁think ing", "▁thin king", "do c", "d oc", "we r", "w er", "▁work ed", "▁wor ked", "P C", "c m", "da t", "d at", "PR O", "P RO", "▁E very", "▁Ev ery", "▁Ever y", "▁Eve ry", "▁ Every", "▁e ra", "▁er a", "▁ era", "▁F irst", "▁ First", "g n", "▁im medi", "▁imm edi", "ov ember", "ove mber", "ap an", "apa n", "a pan", "▁ex tra", "▁ext ra", "▁extr a", "▁ extra", "▁s ection", "▁se ction", "▁sect ion", "▁ section", "▁J une", "▁Jun e", "▁Ju ne", "▁v ia", "▁vi a", "▁ via", "▁g one", "▁go ne", "com e", "co me", "c ome", "▁s tri", "▁st ri", "▁str i", "▁ stri", "^ \\", "ant ly", "▁ar ch", "▁arc h", "▁ arch", "S ource", "▁con v", "▁co nv", "▁ conv", "▁L ondon", "▁Lond on", "▁ London", "Num ber", "N umber", "▁quest ions", "▁question s", "an did", "and id", "▁play ed", "en v", "e nv", "▁Sch ool", "▁nat ural", "▁natur al", "▁ natural", "ca n", "c an", "▁ne ws", "▁new s", "▁ news", "D R", "▁c hall", "▁ch all", "▁cha ll", "▁S oc", "▁So c", "▁ э", "▁att empt", "* }", "N ull", "ro te", "rot e", "r ote", "▁b i", "▁ bi", "▁wr itten", "▁writ ten", "▁ written", "▁bl ood", "▁blo od", "▁happ ened", "▁happen ed", "▁c ause", "▁caus e", "▁ca use", "as hing", "ash ing", "ashi ng", "▁Will iam", "ad em", "ade m", "a dem", "▁b rought", "▁br ought", "▁dis 
play", "▁displ ay", "▁disp lay", "▁ display", "im a", "i ma", "▁fin ally", "▁final ly", "ta b", "t ab", "▁return ed", "ны х", "ni e", "n ie", "▁ q", "▁h ers", "▁he rs", "▁her s", "▁P re", "▁Pr e", "▁ Pre", "▁d ou", "▁do u", "buf fer", "buff er", "b uffer", "▁eff ort", "ain e", "ai ne", "a ine", "x y", "▁his tor", "▁hist or", "en u", "e nu", "▁ar riv", "▁arr iv", "▁D em", "▁De m", "▁ Dem", "▁f avor", "▁fa vor", "▁fav or", "▁hand le", "▁ handle", "SE T", "S ET", "▁P ublic", "▁Pub lic", "▁Pu blic", "▁ Public", "ru pt", "rup t", "r upt", "▁u r", "▁ ur", "▁for ce", "▁ force", "▁é s", "▁ és", "ub e", "u be", "Pr e", "P re", "р і", "in y", "i ny", "th eta", "the ta", "is f", "i sf", "▁n ational", "▁nat ional", "▁nation al", "Equ al", "Eq ual", "E qual", "ren ch", "▁w ife", "▁c apt", "▁cap t", "▁ca pt", "▁In ter", "▁Int er", "▁ Inter", "ta u", "t au", "▁s leep", "▁sle ep", "▁ sleep", "../ ../", "▁iss ue", "▁ issue", "▁m ember", "▁me mber", "▁mem ber", "▁ member", "▁a wait", "▁aw ait", "▁ await", "▁D an", "▁Da n", "▁ Dan", "z i", "in ate", "ina te", "i nate", "▁s ym", "▁sy m", "▁ sym", "ch an", "cha n", "c han", "▁J ack", "▁Jac k", "▁Ja ck", "▁ Jack", "▁Eng lish", "▁ English", "▁s z", "▁ sz", "rib utes", "ribut es", "ribute s", "ribu tes", "▁i gn", "▁ig n", "▁ ign", "á l", "▁app ear", "▁appe ar", "ra d", "r ad", "id ge", "▁co uple", "▁cou ple", "▁coup le", "▁s hip", "▁sh ip", "▁ ship", "li g", "l ig", "we b", "w eb", "▁us ually", "▁usual ly", "▁re ady", "▁read y", "▁ ready", "▁v ill", "▁vi ll", "▁vil l", "▁W hy", "▁Wh y", "▁ Why", "eb ru", "e bru", "▁g rad", "▁gr ad", "▁gra d", "▁ grad", "or ds", "ord s", "▁in f", "▁i nf", "▁ inf", "▁l oss", "▁lo ss", "▁los s", "▁ loss", "▁o d", "▁ od", "▁Ph il", "▁ Phil", "ser ver", "serv er", "serve r", "▁U p", "▁ Up", "▁b uff", "▁bu ff", "▁buf f", "▁ buff", "▁fil ename", "▁file name", "▁ filename", "AB LE", "it ing", "iti ng", "i ting", "ef ore", "e fore", "() ->", "( )->", "▁cond itions", "▁condition s", "▁ conditions", "v m", "el d", "e ld", "it z", "i tz", "▁Tr ans", "▁Tra ns", "▁ Trans", "▁w eight", "▁we ight", "▁weigh t", "▁ weight", "▁high er", "▁hig her", "▁r ate", "▁rat e", "▁ra te", "▁ rate", "▁acc om", "▁ac com", "vi der", "vid er", "v ider", "O M", "▁w ays", "▁way s", "▁wa ys", "▁ ways", "com ing", "co ming", "c oming", "▁l ock", "▁loc k", "▁lo ck", "▁ lock", "▁e tc", "▁et c", "▁ etc", "▁a vec", "▁av ec", "▁ave c", "▁t akes", "▁take s", "▁tak es", "▁ta kes", "▁C har", "▁Ch ar", "▁Cha r", "▁ Char", "▁N ovember", "▁Nov ember", "m ethod", "▁A ustral", "▁Aust ral", "▁ Austral", "▁Amer ica", "▁ America", "lo ng", "lon g", "l ong", "ce mber", "c ember", "▁polit ical", "fl ow", "f low", "▁may be", "▁ maybe", "▁a mb", "▁am b", "▁ amb", "La yout", "L ayout", "il ed", "ile d", "i led", "om en", "ome n", "o men", "ol a", "o la", "ic ip", "ici p", "i cip", "part ial", "Tr ue", "▁f loor", "▁fl oor", "▁flo or", "▁ floor", "▁D ef", "▁De f", "▁ Def", "▁conc ern", "▁conce rn", "▁concer n", "y r", "▁sh ows", "▁show s", "i h", "▁an swer", "▁answ er", "▁ans wer", "▁ answer", "ac c", "a cc", "▁b all", "▁bal l", "▁ba ll", "▁ ball", "▁R ev", "▁Re v", "▁ Rev", "▁s un", "▁su n", "▁ sun", "▁quick ly", "▁s omet", "▁so met", "▁some t", "▁som et", "ment e", "me nte", "men te", "m ente", "▁M al", "▁Ma l", "▁ Mal", "und red", "▁iss ues", "▁issue s", "▁ issues", "ec ause", "eca use", "pe s", "p es", "▁p layer", "▁pl ayer", "▁play er", "▁ player", "▁par ents", "▁parent s", "▁ parents", "▁pop ular", "▁popula r", "▁popul ar", "▁m ode", "▁mod e", "▁mo de", "▁ mode", "▁m ention", "▁ment 
ion", "N E", "Lo ad", "L oad", "▁reg ular", "▁regul ar", "▁ regular", "ave d", "av ed", "a ved", "? :", "ye ar", "y ear", "fun c", "fu nc", "f unc", "▁per formance", "▁perform ance", "▁J uly", "▁Jul y", "▁Ju ly", "th ern", "ther n", "the rn", "▁we bsite", "▁webs ite", "▁web site", "fo rd", "for d", "f ord", "P R", "el a", "e la", "le vel", "lev el", "l evel", "ui t", "u it", "fl ags", "flag s", "▁w orth", "▁wor th", "▁ worth", "▁cor respon", "▁Brit ish", "si m", "s im", "▁al one", "▁ alone", "▁h ar", "▁ha r", "▁ har", "▁o nes", "▁on es", "▁one s", "▁ ones", "ob ile", "obi le", "obil e", "▁d ru", "▁dr u", "▁ dru", "ch i", "c hi", "▁D avid", "▁Dav id", "▁Da vid", "▁ David", "▁proble ms", "▁problem s", "▁col umn", "▁ column", "() ;\r", "(); \r", "( );\r", "Z E", "▁re lig", "▁rel ig", "▁reli g", "olog ical", "▁reg ion", "▁ region", "ad y", "a dy", "I O", "an der", "and er", "ande r", "a nder", "Ne t", "N et", "▁bu ilt", "▁ built", "▁inst all", "▁ install", "▁appro ach", "C ur", "▁f ine", "▁fin e", "▁fi ne", "▁talk ing", "▁tal king", "▁ch anges", "▁chang es", "▁change s", "▁ changes", "St yle", "▁M art", "▁Mar t", "▁Ma rt", "▁ Mart", "л ю", "res ponse", "respon se", "respons e", "te ger", "{ \r", "ir it", "iri t", "i rit", "▁prote cted", "▁protect ed", "▁ protected", "▁re le", "▁r ele", "▁rel e", "er ship", "ers hip", "те ль", "тел ь", "un signed", "uns igned", "ial ize", "▁htt ps", "▁http s", "▁ https", "T ag", "▁$ (", "▁ $(", "mo re", "mor e", "m ore", "ype s", "yp es", "y pes", "▁st ream", "▁stre am", "▁ stream", "et ch", "etc h", "▁eng ine", "▁ engine", "K E", "cm d", "c md", "sc ript", "scri pt", "scr ipt", "s cript", "tt p", "t tp", "▁a void", "▁av oid", "▁t err", "▁te rr", "▁ter r", "▁r ock", "▁ro ck", "▁ rock", "▁f ul", "▁fu l", "▁ ful", "Up date", "▁env ironment", "▁environ ment", "▁ environment", "▁p rec", "▁pre c", "▁pr ec", "▁ prec", "▁с а", "▁ са", "▁c ases", "▁case s", "▁cas es", "▁ca ses", "▁ cases", "▁off set", "▁ offset", "▁r ais", "▁ra is", "▁ rais", "li b", "l ib", "ée s", "é es", "a a", "y t", "▁a rr", "▁ar r", "▁ arr", "opy right", "f irst", "▁u til", "▁ut il", "▁ util", "▁fe ature", "▁feat ure", "▁ feature", "pos ed", "po sed", "pose d", "p osed", "ff ect", "f fect", "ж а", "it ude", "itu de", "itud e", "em ents", "ement s", "emen ts", "e ments", "as c", "a sc", "ad or", "ado r", "le ctions", "lect ions", "lection s", "▁cl ub", "▁ club", "] {", "▁* )", "▁ *)", "ст во", "ств о", "с тво", "▁im m", "▁i mm", "▁ imm", "▁for mer", "▁form er", "▁forme r", "▁ former", "▁r ights", "▁right s", "▁dec ided", "▁decide d", "▁decid ed", "▁re v", "▁r ev", "▁ rev", "▁m ent", "▁me nt", "▁men t", "▁ ment", "an i", "a ni", "▁st ru", "▁str u", "▁ stru", "▁att ention", "art ment", "▁I tal", "▁It al", "al le", "all e", "a lle", "▁b is", "▁bi s", "▁ bis", "ge ner", "gen er", "g ener", "▁in tegr", "▁int egr", "▁inte gr", "▁ integr", "el lo", "ell o", "ry pt", "▁a chie", "ne s", "n es", "▁s tra", "▁st ra", "▁str a", "▁ stra", "s b", "▁t ypes", "▁type s", "▁typ es", "▁ty pes", "▁ types", "▁R E", "▁ RE", "In it", "I nit", "▁com ment", "▁comm ent", "▁comme nt", "▁ comment", "▁add ition", "▁I D", "▁ ID", "AR T", "A RT", "F O", "щ и", "Con ne", "Conn e", "C onne", "▁s qu", "▁sq u", "▁consider ed", "▁consid ered", "id ad", "ida d", "▁Oct ober", "ci al", "cia l", "c ial", "▁O f", "▁ Of", "▁tr avel", "▁tra vel", "▁trav el", "▁b oy", "▁bo y", "▁ boy", "') .", "' ).", "u y", "il la", "ill a", "i lla", "is try", "ist ry", "istr y", "▁v a", "▁ va", "▁C he", "▁Ch e", "▁ Che", "ER T", "E RT", "en de", "end e", 
"e nde", "un gen", "ung en", "unge n", "ab y", "a by", "▁R ober", "▁Ro ber", "▁Rob er", "▁play ing", "il s", "i ls", "▁s am", "▁sa m", "▁ sam", "▁ex ecut", "▁exec ut", "▁ execut", "▁U s", "▁ Us", "▁m ut", "▁mu t", "▁ mut", "▁b al", "▁ba l", "▁ bal", "as se", "ass e", "▁k ids", "▁kid s", "▁ki ds", "▁fin anc", "go r", "g or", "▁S ec", "▁Se c", "▁ Sec", "ber t", "be rt", "b ert", "▁H igh", "▁Hig h", "▁Hi gh", "▁ High", "▁ је", "▁ke pt", "but ton", "b utton", "it ory", "itor y", "ito ry", "▁R em", "▁Re m", "▁ Rem", "▁D E", "▁ DE", "▁re ach", "▁r each", "▁ reach", "▁b ur", "▁bu r", "▁ bur", "La bel", "L abel", "á t", "ag o", "a go", "▁pass ed", "▁pas sed", "▁be hav", "▁beh av", "xF F", "x FF", "▁R eturn", "▁Re turn", "▁Ret urn", "▁ Return", "ST R", "S TR", "▁L es", "▁Le s", "▁ Les", "▁o rd", "▁or d", "▁ ord", "al a", "a la", "in ger", "ing er", "inge r", "▁S ince", "▁Sin ce", "▁ Since", "▁exper i", "▁exp eri", "▁s hall", "▁sh all", "▁sha ll", "▁ shall", "▁s tar", "▁st ar", "▁sta r", "▁ star", "no n", "n on", "▁g un", "▁gu n", "▁ gun", "▁B el", "▁Be l", "▁ Bel", "▁ob j", "▁ obj", "ar es", "are s", "a res", "r s", "▁we eks", "▁week s", "ne n", "n en", "▁S tre", "▁St re", "▁Str e", "or ing", "ori ng", "o ring", "▁ î", "▁ser ious", "time s", "ti mes", "tim es", "t imes", "▁H ouse", "▁Ho use", "▁Hou se", "▁r oll", "▁ro ll", "▁ roll", "▁reg ister", "▁ register", "▁mod ule", "▁mo dule", "▁ module", "▁app lic", "▁ap plic", "▁appl ic", "I R", "▁c ook", "▁co ok", "▁ cook", "au x", "a ux", "▁s ave", "▁sa ve", "▁sav e", "▁ save", "▁C r", "▁ Cr", ", \r", "▁st ates", "▁stat es", "▁state s", "▁sta tes", "▁ states", "▁em pty", "▁emp ty", "▁empt y", "▁ empty", "▁aut om", "▁au tom", "▁auto m", "▁ autom", "fig ure", "ian ce", "i ance", "▁h appy", "▁happ y", "▁f n", "▁ fn", "▁j ud", "▁ju d", "▁ jud", "▁h at", "▁ha t", "▁ hat", "AC K", "A CK", "▁F e", "▁ Fe", "$ -", "iv il", "ivi l", "i vil", "ot ed", "ote d", "o ted", "▁size of", "▁ sizeof", "▁sit uation", "▁situ ation", "▁l ives", "▁li ves", "▁live s", "▁liv es", "▁fe eling", "▁feel ing", "▁fee ling", "▁r isk", "▁ri sk", "▁ris k", "▁Jan uary", "▁Januar y", "▁Ob ject", "▁ Object", "▁re comm", "▁rec omm", "▁в ы", "▁ вы", "▁pot ential", "ea h", "e ah", "▁com plex", "▁comp lex", "▁compl ex", "▁ complex", "print f", "ist ance", "istan ce", "i stance", "ir th", "irt h", "li k", "l ik", "as te", "ast e", "a ste", "▁wh ose", "▁who se", "Ar g", "A rg", "▁mod ern", "▁mo dern", "▁mode rn", "▁moder n", "ion es", "io nes", "ione s", "i ones", "▁ч е", "▁ че", "▁s ett", "▁se tt", "▁set t", "▁M ag", "▁Ma g", "▁ Mag", "a e", "▁cond ition", "▁ condition", "Le ngth", "L ength", "▁f it", "▁fi t", "▁ fit", "ound s", "oun ds", "▁ch anged", "▁chang ed", "▁change d", "▁ changed", "▁g uy", "▁gu y", "fil ter", "at ever", "ate ver", "é d", "re move", "rem ove", "▁h op", "▁ho p", "▁ hop", "▁O ut", "▁ Out", "▁R ich", "▁Ric h", "▁ Rich", "ch ild", "chi ld", "▁in cluded", "▁incl uded", "▁includ ed", "▁include d", "▁inclu ded", "$ \\", "▁T om", "▁To m", "▁ Tom", "el ine", "eli ne", "elin e", "e line", "▁s ometimes", "▁some times", "▁somet imes", "▁sometime s", "▁dr ink", "▁qu ant", "▁ quant", "▁p lease", "▁ple ase", "▁I nt", "▁In t", "▁ Int", "ri ef", "rie f", "r ief", "▁ex actly", "▁exact ly", "ci ng", "cin g", "c ing", "▁all owed", "▁allow ed", "▁ allowed", "bu ild", "b uild", "▁beaut iful", "▁W ell", "▁We ll", "▁Wel l", "▁ Well", "▁look s", "▁lo oks", "▁ ü", "▁ch ance", "▁w rote", "▁wr ote", "▁n or", "▁no r", "▁ nor", "▁f ailed", "▁fa iled", "▁fail ed", "▁ failed", "Me t", "M et", "▁p rior", 
"▁pr ior", "▁pri or", "▁h undred", "ско й", "с кой", "or ia", "ori a", "o ria", "▁c y", "▁ cy", "▁w eb", "▁we b", "▁ web", "▁m ess", "▁me ss", "▁mes s", "le q", "l eq", "d y", "te x", "t ex", "▁a nim", "▁an im", "▁ anim", "at ur", "atu r", "▁str ucture", "▁struct ure", "▁ structure", "opt ion", "o ption", "▁act ual", "▁ actual", "▁Fr anc", "▁Fra nc", "▁Fran c", "en ced", "ence d", "enc ed", ".< /", ". ", "▁ />", "▁p roduction", "▁produ ction", "▁product ion", "▁prod uction", "▁ production", "ig er", "ige r", "i ger", "▁с т", "▁ ст", "sh ow", "s how", "▁pop ulation", "▁popul ation", "▁p ark", "▁par k", "▁ park", "▁Z e", "▁necess ary", "▁ necessary", "▁t rust", "▁tr ust", "▁sh own", "▁show n", "mod ule", "mo dule", "G E", "▁l ay", "▁la y", "▁ lay", "▁ann oun", "▁class Name", "▁ className", "▁cal cul", "▁calc ul", "Fun ction", "F unction", "▁S al", "▁Sa l", "▁ Sal", "O K", "T P", "▁en try", "▁ent ry", "▁entr y", "▁ entry", "▁St ud", "▁ Stud", "▁it ems", "▁item s", "▁ items", "▁se curity", "▁sec urity", "▁secur ity", "▁ security", "En try", "Ent ry", "f loat", "l s", "ib ly", "▁cont ribut", "▁C heck", "▁Che ck", "▁ Check", "M D", "▁impro ve", "Par t", "P art", "▁system s", "▁syst ems", "B l", "▁pol icy", "▁polic y", "▁ policy", "▁s creen", "▁sc reen", "▁scr een", "▁ screen", "▁A ny", "▁An y", "▁ Any", "▁op ened", "▁open ed", "al loc", "all oc", "allo c", "▁De cember", "▁Dec ember", "▁ É", "▁e mail", "▁em ail", "▁ email", "ad er", "ade r", "a der", "= >", "▁H en", "▁He n", "▁ Hen", "▁in fo", "▁inf o", "▁ info", "▁f loat", "▁flo at", "▁ float", "▁sw itch", "▁ switch", "ра н", "р ан", "ur ance", "▁as sum", "▁ass um", "us tr", "ust r", "u str", "▁g roups", "▁group s", "▁gro ups", "▁ groups", "▁R ead", "▁Re ad", "▁ Read", "▁w at", "▁wa t", "S p", "ве р", "в ер", "RA N", "R AN", "hi b", "h ib", "AL L", "A LL", "▁h us", "▁ hus", "Sp ec", "Spe c", "S pec", "\") )", "\" ))", "▁F rench", "▁C lass", "▁Cl ass", "▁ Class", "▁pres ident", "▁presid ent", "▁def init", "▁defin it", "▁N or", "▁No r", "▁T hom", "▁Th om", "ai gn", "a ign", "W idth", "D o", "▁{ @", "ag on", "ago n", "a gon", "▁L u", "▁ Lu", "▁follow ed", "M M", "as ons", "ason s", "tm p", "t mp", "▁th rows", "▁throw s", "▁thr ows", "▁thro ws", "▁ throws", "IT Y", "I TY", "но м", "▁f air", "▁fa ir", "▁p en", "▁pe n", "▁ pen", "é g", "▁inter face", "▁ interface", "▁s af", "▁sa f", "oo n", "o on", "B ack", "▁s peed", "▁sp eed", "▁spe ed", "▁ speed", "▁ext ends", "▁extend s", "em pty", "empt y", "emp ty", "▁п ере", "▁пер е", "▁пе ре", "▁pro per", "▁pr oper", "▁prop er", "▁d riv", "▁dr iv", "▁dri v", "ф и", "▁c enter", "▁cent er", "▁ center", "he ader", "head er", "▁} )", "▁ })", "w a", "▁m iddle", "▁ middle", "▁ch oose", "▁cho ose", "▁St ad", "▁Sta d", "S O", "Fact ory", "Factor y", "F actory", "De v", "D ev", "ic les", "icle s", "icl es", "i cles", "▁ap plication", "▁applic ation", "▁appl ication", "▁ application", "▁mod els", "▁model s", "▁mode ls", "▁ models", "pi te", "pit e", "p ite", "ca p", "c ap", "x i", "osp ital", "▁d ream", "▁dre am", "EN D", "E ND", "▁con tract", "▁cont ract", "▁contr act", "▁contra ct", "▁ contract", "icro soft", "▁th ous", "▁thou s", "iz es", "ize s", "i zes", "▁д а", "▁ да", "▁C O", "▁ CO", "▁d irection", "▁di rection", "▁direct ion", "▁dire ction", "▁dir ection", "▁ direction", "▁` `", "▁ ``", "▁d rive", "▁dr ive", "▁dri ve", "▁driv e", "▁ drive", "Ma x", "M ax", "ci a", "c ia", "▁contin u", "▁A lex", "▁Al ex", "▁Ale x", "▁ Alex", "▁g old", "▁go ld", "▁gol d", "▁ gold", "▁p rep", "▁pre p", "▁pr ep", "▁or igin", "▁orig 
in", "▁ origin", "▁r ap", "▁ra p", "▁ rap", "O p", "ous ly", "▁are as", "▁area s", "PO RT", "P ORT", "он а", "о на", "▁sa fe", "▁saf e", "▁ safe", "▁profess ional", "▁profession al", "ap ache", "apa che", "▁t emper", "▁tem per", "▁temp er", "s z", "▁u nit", "▁un it", "▁ unit", "▁c op", "▁co p", "▁ cop", "eq n", "List ener", "Listen er", "▁for mat", "▁form at", "▁forma t", "▁ format", "se lect", "sel ect", "s elect", "▁com fort", "▁ comfort", "▁me ant", "▁mean t", "id ay", "ida y", "i day", "em e", "e me", "▁act ive", "▁activ e", "▁ active", "▁n ote", "▁not e", "▁no te", "▁ note", "▁M il", "▁Mi l", "▁ Mil", "on ly", "▁< =", "▁ <=", "▁ne igh", "▁nei gh", "a o", "▁bl ue", "▁ blue", "▁T V", "▁ TV", "Ch ild", "▁re ached", "▁reach ed", "Add ress", "Addr ess", "ст в", "▁cl osed", "▁close d", "▁clos ed", "▁clo sed", "▁ closed", "in der", "ind er", "inde r", "i nder", "ol o", "o lo", "▁a lt", "▁al t", "▁ alt", "▁a dm", "▁ad m", "Form at", "For mat", "U I", "▁H am", "▁Ha m", "▁f requ", "▁fr equ", "▁fre qu", "▁in depend", "▁inde pend", "▁ independ", "▁eas ily", "▁L and", "▁La nd", "▁Lan d", "▁ Land", "▁t or", "▁to r", "▁ tor", "ograph y", "ograp hy", "in fty", "inf ty", "▁W ork", "▁Wor k", "▁ Work", "iv en", "ive n", "i ven", "▁Count y", "▁Coun ty", "▁s rc", "▁ src", "}$ ,", "} $,", "par se", "pars e", "p arse", "C D", "▁C our", "▁Co ur", "▁Cou r", "▁f ol", "▁fo l", "▁ fol", "Ent ity", "pg f", "▁Ch ina", "▁Chi na", "▁S ub", "▁Su b", "▁ Sub", "ho od", "h ood", "▁field s", "▁ fields", "▁y es", "▁ye s", "▁ yes", "re nd", "ren d", "r end", "▁to wards", "▁toward s", "▁tow ards", "▁st aff", "▁sta ff", "▁ staff", "▁A ir", "▁ Air", "▁st ation", "▁stat ion", "▁ station", "at ives", "ative s", "ati ves", "ativ es", "▁imp act", "в ы", "▁direct ly", "iss ions", "ission s", "iv a", "i va", "| \\", "Pt r", "P tr", "▁S ant", "▁San t", "▁Sa nt", "Po l", "P ol", "▁pro gress", "▁ progress", "it ar", "ita r", "i tar", "▁p arts", "▁part s", "▁par ts", "▁ parts", "▁pl ant", "▁plan t", "▁ plant", "▁abs olut", "▁gu ess", "eq ref", "▁t im", "▁ti m", "▁ tim", "▁L ou", "▁Lo u", "▁ Lou", "▁c ool", "▁co ol", "al u", "a lu", "▁m outh", "▁mo uth", "▁mou th", "▁ mouth", "ни х", "▁h eight", "▁he ight", "▁ height", "ge st", "ges t", "g est", "▁P ost", "▁Po st", "▁Pos t", "▁ Post", "▁b oard", "▁bo ard", "▁ board", "▁t it", "▁ti t", "▁ tit", "▁h our", "▁ho ur", "▁ hour", "▁ser ver", "▁serv er", "▁serve r", "▁ server", "▁p layers", "▁play ers", "▁player s", "ri er", "rie r", "r ier", "Lin k", "L ink", "▁Pres ident", "] (", "▁con struct", "▁const ruct", "▁constr uct", "▁constru ct", "▁ construct", "hand le", "}$ .", "} $.", "ry ing", "r ying", "▁s hop", "▁sh op", "▁ shop", "ia na", "ian a", "i ana", "ex p", "e xp", "Hel per", "Help er", "Off set", "ac hes", "ach es", "ache s", "a ches", "▁conne ction", "▁connect ion", "▁conn ection", "▁ connection", "▁d ifference", "▁dif ference", "▁differ ence", "serv ice", "s ervice", "▁g as", "▁ga s", "▁ gas", "▁p riv", "▁pr iv", "▁pri v", "▁ priv", "▁un ivers", "▁ univers", "▁w ish", "▁wis h", "Re m", "R em", "U rl", "ge b", "g eb", "S o", "ens ions", "ension s", "Mod ule", "Mo dule", "SI ZE", "▁p rem", "▁pre m", "▁pr em", "wind ow", "w indow", "▁d ies", "▁di es", "▁die s", "de l", "d el", "▁r ow", "▁ro w", "▁ row", "▁a verage", "▁aver age", "▁ave rage", "xi m", "x im", "▁p u", "▁ pu", "an ç", "De t", "D et", "ke r", "k er", "y a", "▁D et", "▁De t", "▁ Det", "▁p å", "▁n amed", "▁name d", "▁na med", "▁nam ed", "▁ named", "▁dec ision", "▁decis ion", "wi n", "w in", "▁Ge orge", "▁Georg e", "ar ily", 
"ari ly", "▁s olution", "▁sol ution", "▁mult iple", "▁multi ple", "▁multip le", "▁ multiple", "at egy", "ate gy", "ateg y", "▁le arning", "▁learn ing", "▁lear ning", "▁ learning", "▁se cret", "▁sec ret", "▁secre t", "▁ secret", "D O", "▁n ice", "▁ni ce", "▁nic e", "▁ nice", "//////// ////////", "S u", "it ation", "itat ion", "▁j oin", "▁jo in", "▁ join", "▁el ements", "▁element s", "▁ele ments", "▁elem ents", "▁ elements", "▁e mer", "▁em er", "til de", "t ilde", "▁d ep", "▁de p", "▁ dep", "▁s hot", "▁sh ot", "▁ shot", "▁pl atform", "▁plat form", "▁ platform", "ot hing", "oth ing", "o thing", "M y", "ed ia", "edi a", "om s", "o ms", "ail y", "ai ly", "a ily", "( [", "▁d ress", "▁dr ess", "▁dre ss", "▁off icial", "▁offic ial", "es tern", "est ern", "ester n", "este rn", "▁dis cover", "▁disc over", "▁disco ver", "▁m i", "▁ mi", "ны е", "C A", "od ing", "odi ng", "o ding", "▁F ound", "▁Fou nd", "▁Fo und", "▁ Found", "▁a ffect", "▁aff ect", "▁af fect", "Vi s", "V is", "st ract", "str act", "stra ct", "s tract", "ic ed", "ice d", "i ced", "de bug", "d ebug", "▁rel ated", "▁relate d", "▁ related", "▁s pect", "▁sp ect", "▁spec t", "▁spe ct", "▁ spect", "us hed", "ush ed", "сь ко", "▁b ank", "▁ban k", "▁ bank", "▁c ele", "▁ce le", "▁cel e", "AN D", "A ND", "ol f", "е м", "▁f ill", "▁fil l", "▁fi ll", "▁ fill", "▁g ives", "▁giv es", "▁give s", "▁gi ves", "▁б у", "▁ бу", "ar on", "aro n", "a ron", "▁J es", "▁Je s", "RE G", "▁s udd", "▁su dd", "▁sud d", "date d", "da ted", "dat ed", "d ated", "v i", "▁g i", "▁ gi", "se nd", "sen d", "s end", "cp p", "c pp", "▁s pent", "▁sp ent", "▁spe nt", "an de", "and e", "a nde", "▁oper ation", "▁ operation", "pro cess", "proc ess", "▁in form", "▁inf orm", "▁info rm", "▁F ree", "▁Fr ee", "▁Fre e", "▁ Free", "yo nd", "y ond", "▁per haps", "▁su rv", "▁sur v", "▁L oc", "▁Lo c", "▁ Loc", "▁con cl", "▁conc l", "▁ра з", "▁ раз", "▁O ver", "▁ Over", "ho l", "h ol", "ra z", "r az", "Wr ite", "Writ e", "W rite", "▁g iving", "▁giv ing", "▁gi ving", "r d", "in stance", "inst ance", "▁re leased", "▁rele ased", "▁release d", "▁R o", "▁ Ro", "R A", "▁pract ice", "▁g raph", "▁gr aph", "▁gra ph", "▁grap h", "▁ graph", "▁incre ase", "▁fig ure", "▁ figure", "Fil ter", "HE CK", "id x", "i dx", "▁g lass", "▁gl ass", "▁ glass", "sk i", "s ki", "com es", "co mes", "come s", "c omes", "▁c at", "▁ca t", "▁ cat", "▁c old", "▁col d", "▁co ld", "go to", "got o", "g oto", "uf act", "u fact", "▁C opyright", "▁Copy right", "▁ Copyright", "}} \\", "} }\\", "▁str eng", "▁stre ng", "▁d ir", "▁di r", "▁ dir", "to ken", "tok en", "t oken", "▁occ ur", "▁oc cur", "arl ier", "▁me asure", "▁meas ure", "▁ measure", "▁s ec", "▁se c", "▁ sec", "▁m ás", "▁má s", "▁N et", "▁Ne t", "▁ Net", "▁arg ument", "▁ argument", "▁s ou", "▁so u", "▁m oving", "▁mov ing", "▁mo ving", "▁p refer", "▁pre fer", "▁pref er", "ma sk", "mas k", "m ask", "< <", "▁bre ath", "▁breat h", "▁phys ical", "▁pos itive", "▁posit ive", "▁s or", "▁so r", "▁ sor", "▁de part", "▁dep art", "▁re move", "▁rem ove", "▁ remove", "▁k it", "▁ki t", "▁ kit", "▁me eting", "▁meet ing", "▁D ata", "▁Da ta", "▁Dat a", "▁ Data", "og raf", "act ions", "action s", "a ctions", "▁param eters", "▁parameter s", "▁ parameters", "▁A tt", "▁At t", "▁ Att", "es ch", "esc h", "e sch", "▁inv olved", "▁invol ved", "▁involve d", "ä t", "L L", "B ar", "▁с и", "▁ си", "ec h", "e ch", "GE T", "G ET", "▁pre vent", "▁pr event", "▁prev ent", "▁ prevent", "▁be yond", "▁O ther", "▁Ot her", "▁ Other", "ä n", "by te", "▁sudd en", "▁sud den", "ol ve", "olv e", "▁н о", "▁ но", "LO 
G", "L OG", "un it", "uni t", "u nit", "▁tr uth", "ra t", "r at", "S D", "▁e at", "▁M ad", "▁Ma d", "▁ Mad", "▁prov ides", "▁provide s", "▁s ession", "▁ session", "De le", "Del e", "D ele", "▁con vers", "▁conv ers", "▁conver s", "▁conve rs", "cent er", "cen ter", "c enter", "▁contin ued", "▁continue d", "▁continu ed", "ot ion", "oti on", "ca che", "c ache", "dis play", "disp lay", "▁prote ct", "▁prot ect", "am s", "a ms", "▁p ow", "▁po w", "▁ pow", "CT ION", "C TION", "▁M ac", "▁Ma c", "▁ Mac", "m o", "х а", "▁d istance", "▁di stance", "▁dist ance", "▁ distance", "▁T ime", "▁Tim e", "▁Ti me", "▁ Time", "g i", "▁s equ", "▁se qu", "▁seq u", "▁ sequ", "T arget", "с ле", "Ser ver", "Serv er", "▁w ide", "▁wid e", "▁ wide", "cl ose", "clos e", "▁c ru", "▁cr u", "Ex t", "E xt", "▁s elect", "▁se lect", "▁sel ect", "▁sele ct", "▁ select", "▁pat tern", "▁ pattern", "\") );", "\")) ;", "\" ));", "Pro vider", "Prov ider", "UR L", "U RL", "▁g reen", "▁gr een", "▁gre en", "▁ green", "▁wait ing", "▁wa iting", "pro to", "pr oto", "prot o", "▁immedi ately", "▁immediate ly", "com mon", "comm on", "az ione", "azi one", "a zione", "ri ver", "riv er", "rive r", "r iver", "▁s en", "▁se n", "▁ sen", "▁! ==", "▁!= =", "▁Febru ary", "▁Februar y", "ur b", "u rb", "▁S en", "▁Se n", "de st", "des t", "d est", "< ?", "▁ed ge", "▁ edge", "▁m ais", "▁ma is", "▁mai s", "gor ith", "cp u", "c pu", "▁educ ation", "▁associ ated", "▁associate d", "No ne", "Non e", "N one", "h i", "▁p oor", "▁po or", "se m", "s em", "▁W il", "▁Wi l", "▁b ud", "▁bu d", "▁ bud", "▁a uch", "▁au ch", "▁ auch", "el ler", "ell er", "elle r", "▁L ife", "▁Li fe", "▁ Life", "▁f iles", "▁fil es", "▁file s", "▁fi les", "▁ files", "▁le ading", "▁lead ing", "▁ leading", "▁ob tain", "▁obt ain", "▁J ul", "▁Ju l", "at ory", "ator y", "ato ry", "г у", "it able", "ita ble", "i table", "▁on to", "▁ont o", "▁ onto", "▁b orn", "▁bo rn", "▁bor n", "▁ born", "or em", "ore m", "o rem", "▁Stre et", "▁m aint", "▁main t", "▁ma int", "▁mai nt", "Param s", "Par ams", "ri p", "r ip", "▁S T", "▁ ST", "u v", "ma in", "m ain", "▁re cent", "▁rec ent", "▁rece nt", "We b", "W eb", "ov a", "o va", "ц а", "ais e", "ai se", "a ise", "yle s", "yl es", "y les", "▁de scribed", "▁desc ribed", "▁describ ed", "▁describe d", "▁begin ning", "▁D ay", "▁Da y", "▁ Day", "▁V ol", "▁Vo l", "▁ Vol", "▁h uge", "▁hug e", "Ha s", "H as", "an cy", "anc y", "He ader", "Head er", "▁a ren", "▁are n", "▁ar en", "▁ aren", "ва н", "в ан", "▁en sure", "▁ens ure", "▁ ensure", "▁p et", "▁pe t", "▁ pet", "mu lt", "mul t", "m ult", "▁L ike", "▁Li ke", "▁ Like", "▁man agement", "▁manage ment", "▁ management", "P S", "wh ile", "▁back ground", "▁ background", "ount er", "oun ter", "o unter", "bo ol", "b ool", "F C", "N um", "R L", "▁ex cl", "▁exc l", "▁e ye", "▁ey e", "im g", "i mg", "▁r om", "▁ro m", "▁ rom", "▁H el", "▁He l", "▁ Hel", "Opt ion", "O ption", "▁stop ped", "▁sto pped", "▁th read", "▁thr ead", "▁ thread", "to type", "tot ype", "t otype", ")) )", ") ))", "▁st age", "▁stag e", "▁sta ge", "▁ stage", "▁ü ber", "▁ über", "▁al though", "▁ although", "Type s", "Ty pes", "Typ es", "T ypes", "▁O h", "▁ Oh", "▁e ight", "▁ eight", "▁de scription", "▁des cription", "▁ description", "' '", "ö n", "▁sur face", "▁surf ace", "▁ surface", "▁Intern ational", "▁ch arg", "▁char g", "▁cha rg", "▁ charg", "▁col lection", "▁coll ection", "▁collect ion", "▁colle ction", "▁ collection", "▁us ers", "▁use rs", "▁user s", "▁ users", "▁ob vious", "▁cent ury", "▁ century", "ic ks", "ick s", "i cks", "▁art icle", "▁artic le", "▁ 
article", "▁\" \\", "▁ \"\\", "di m", "d im", "▁s in", "▁si n", "▁ sin", "en ge", "eng e", "Cont rol", "▁com mit", "▁comm it", "▁ commit", "ens ity", "▁t ra", "▁tr a", "▁ tra", "cript or", "▁N OT", "▁NO T", "▁ NOT", "we ll", "w ell", "▁M ichael", "▁Mich ael", "▁n od", "▁no d", "▁ nod", "▁m ort", "▁mor t", "▁mo rt", "iv o", "i vo", "is ation", "▁P o", "▁ Po", "▁P aris", "▁Par is", "▁Pa ris", "▁ad ministr", "▁admin istr", "▁ administr", "bu rg", "bur g", "b urg", "cd ot", "c dot", "▁mil itary", "▁milit ary", "▁militar y", "▁B est", "▁Be st", "▁Bes t", "▁ Best", "▁К а", "▁ Ка", "IN E", "I NE", "▁through out", "S l", "▁im pl", "▁imp l", "▁ impl", "cont rol", "contr ol", "▁ Ч", "▁u it", "▁ui t", "▁ uit", "▁un signed", "▁uns igned", "▁ unsigned", "▁M ary", "▁Mar y", "▁Ma ry", "Ch ar", "C har", "м і", "▁th reat", "▁c ourt", "▁co urt", "▁cour t", "▁cou rt", "▁ court", "vi lle", "vil le", "v ille", "▁ ш", "▁C am", "▁Ca m", "▁ Cam", ". \r", "▁current ly", "▁curr ently", "ro t", "r ot", "▁D ate", "▁Da te", "▁Dat e", "▁ Date", "▁s hit", "▁sh it", "▁ shit", "▁$ {\\", "▁${ \\", "un n", "u nn", "U s", "▁b uffer", "▁buff er", "▁buf fer", "▁ buffer", "▁s ont", "▁so nt", "▁son t", "▁let ter", "▁lett er", "▁ letter", "in ated", "ina ted", "inate d", "Ch ange", "▁h ref", "▁hr ef", "▁ href", "▁l ack", "▁la ck", "▁lac k", "▁o il", "▁C ons", "▁Con s", "▁Co ns", "▁ Cons", "▁J er", "▁Je r", "BU G", "B UG", "if orn", "▁pro perties", "▁proper ties", "▁ properties", "▁r andom", "▁ran dom", "▁rand om", "▁ random", "▁br other", "▁bro ther", "▁p iece", "▁pie ce", "▁ piece", "б у", "ist ics", "istic s", "isti cs", "▁techn ology", "gl obal", "glob al", "▁trans form", "▁ transform", "er d", "e rd", "▁B ecause", "▁ Because", "PE CT", "P ECT", "pr et", "pre t", "p ret", "▁го ду", "▁год у", "▁M et", "▁Me t", "▁ Met", "▁p sy", "▁ps y", "▁ psy", "▁о д", "▁g od", "▁go d", "▁ god", "▁D el", "▁De l", "▁ Del", "base d", "ba sed", "bas ed", "b ased", "▁v oor", "▁vo or", "▁C all", "▁Cal l", "▁Ca ll", "▁ Call", "S A", "▁fil ter", "▁ filter", "▁incl udes", "▁includ es", "▁include s", "▁inclu des", "▁ includes", "olut ions", "olution s", "f d", "▁w ind", "▁win d", "▁ wind", "▁б о", "▁ бо", "▁ab ility", "▁ ability", "ca rd", "car d", "c ard", "▁n umer", "▁num er", "▁nu mer", "▁ numer", "add ress", "addr ess", "▁go al", "ash ington", "ashing ton", "▁s light", "▁sl ight", "ab a", "a ba", "▁L og", "▁Lo g", "▁ Log", "Set tings", "Setting s", "ad ow", "ado w", "▁p i", "▁ pi", "ir ing", "iri ng", "i ring", "F T", "▁number s", "▁num bers", "con f", "co nf", "ta sk", "t ask", "▁î n", "т ы", "▁re ceive", "▁rece ive", "▁r oot", "▁ro ot", "▁ root", "▁Ind ia", "pat ch", "p atch", "é l", "▁sum mer", "▁method s", "▁ methods", "▁pl aces", "▁place s", "▁plac es", "▁М а", "▁ Ма", "▁cap ital", "▁capit al", "▁ev idence", "▁G erman", "▁Germ an", "▁Ger man", "\\ ,", "D A", "ec ute", "ecut e", "col umn", "▁fun ctions", "▁function s", "▁ functions", "▁c ounter", "▁co unter", "▁coun ter", "▁count er", "▁ counter", "▁ar ms", "▁arm s", "▁ arms", "▁f eed", "▁fe ed", "▁fee d", "▁ feed", "ve y", "v ey", "he nt", "hen t", "h ent", "MA X", "M AX", "▁ac qu", "▁app ly", "▁ap ply", "▁appl y", "▁ apply", "▁hus band", "▁k illed", "▁kill ed", "▁kil led", "▁S pec", "▁Sp ec", "▁Spe c", "▁ Spec", "ent ity", "enti ty", "▁e arlier", "▁M iss", "▁Mi ss", "▁Mis s", "▁ Miss", "▁set ting", "▁sett ing", "▁ setting", "it ect", "ite ct", "▁d ed", "▁de d", "▁ ded", "Ro w", "R ow", "▁r an", "▁ra n", "▁ ran", "▁Y es", "▁Ye s", "▁ Yes", "▁fin ancial", "▁financ ial", "s ession", "le ar", "l 
ear", "is hing", "ish ing", "ishi ng", "▁ne arly", "▁near ly", "▁d ur", "▁du r", "▁m achine", "▁mach ine", "▁ machine", "xf f", "x ff", "br o", "b ro", "▁s ymbol", "▁sym bol", "▁ symbol", "land s", "lan ds", "l ands", "Ac c", "A cc", "d i", "▁Rober t", "▁Ro bert", "▁Rob ert", "pro p", "pr op", "p rop", "ur ity", "uri ty", "▁# ####", "▁## ###", "▁### ##", "▁#### #", "▁walk ed", "▁wal ked", "▁intern ational", "▁internation al", "▁ Е", "Y es", "▁re lease", "▁rele ase", "▁ release", "▁start ing", "▁star ting", "st atic", "stat ic", "▁b ei", "▁be i", "al low", "all ow", "allo w", "▁Pe ople", "▁ People", "e z", "▁param eter", "▁ parameter", "C ache", "▁$ $", "▁ $$", "amp ions", "ampion s", "▁M er", "▁Me r", "▁ Mer", "▁k om", "▁ko m", "▁ kom", "le ted", "let ed", "lete d", "l eted", "oi s", "o is", "▁O pen", "▁Op en", "▁ Open", "ty pes", "type s", "typ es", "t ypes", "▁f ue", "▁fu e", "ac ters", "act ers", "acter s", "▁re ference", "▁refer ence", "▁ reference", "Equ als", "Equal s", "Eq uals", "▁a ware", "▁aw are", "▁ aware", "▁h ol", "▁ho l", "▁ hol", "▁de mand", "▁dem and", "lo r", "l or", "▁v eh", "▁ve h", "▁ veh", "▁not ice", "▁ notice", "▁com ponent", "▁compon ent", "▁ component", "f n", "▁anal ysis", "▁analy sis", "▁analys is", "▁ analysis", "mat ch", "m atch", "▁effect ive", "▁ effective", "pro duct", "produ ct", "prod uct", "ни к", "▁le gal", "▁leg al", "▁ legal", "е й", "se mb", "sem b", "s emb", "▁loc ated", "▁locate d", "▁с у", "▁ су", "Q L", "in ct", "inc t", "et o", "e to", "Dr aw", "D raw", "▁sc ale", "▁scal e", "▁ scale", "ро в", "р ов", "▁w ants", "▁want s", "H ow", "▁w el", "▁we l", "is ions", "ision s", "isi ons", "▁de liver", "▁del iver", "un der", "und er", "unde r", "u nder", "▁d eb", "▁de b", "▁j u", "▁ ju", "val ues", "value s", "▁s ister", "▁si ster", "▁sist er", "ко в", "к ов", "▁C reate", "▁Creat e", "▁Cre ate", "▁ Create", "▁I nc", "▁In c", "▁a ux", "▁au x", "▁ aux", "▁Wh ite", "▁Whit e", "▁ White", "Me nu", "Men u", "M enu", "au d", "a ud", "re source", "res ource", "▁c ab", "▁ca b", "▁l if", "▁li f", "▁ lif", "▁c ulture", "▁cult ure", "ic he", "ich e", "i che", "▁wh atever", "▁what ever", "▁de signed", "▁des igned", "▁design ed", "▁re pe", "▁rep e", "▁M ont", "▁Mon t", "▁Mo nt", "▁ Mont", "▁ch arge", "▁char ge", "▁charg e", "▁ charge", "Name s", "Na mes", "N ames", "▁in sp", "▁ins p", "▁custom ers", "▁customer s", "os a", "o sa", "▁d aughter", "▁E ast", "E Q", "▁o pin", "▁op in", "▁F re", "▁Fr e", "▁se ek", "▁see k", "▁ seek", "▁p ush", "▁pu sh", "▁ push", "▁n av", "▁na v", "▁ nav", "▁b urn", "▁bu rn", "▁bur n", "▁ burn", "ar den", "ard en", "arde n", "ha sh", "has h", "h ash", "▁opportun ity", "▁M at", "▁Ma t", "▁ Mat", "oy al", "oya l", "o yal", "▁p un", "▁pu n", "sc ale", "scal e", "yn amic", "ynam ic", "yna mic", "▁T ype", "▁Ty pe", "▁Typ e", "▁ Type", "il ing", "ili ng", "i ling", "▁qu ery", "▁que ry", "▁quer y", "▁ query", "▁m ist", "▁mis t", "▁mi st", "ro r", "r or", "for ce", "▁On ce", "▁ Once", "▁med ical", "▁medic al", "▁medi cal", "li e", "l ie", "▁stud ent", "▁ student", "ed eral", "eder al", "ede ral", "▁l ov", "▁lo v", "▁ lov", "if orm", "i form", "▁al tern", "▁alt ern", "▁alter n", "▁ altern", "bi n", "b in", "od er", "ode r", "o der", "▁return s", "▁ returns", "reg ister", "ut s", "u ts", "C I", "▁T or", "▁To r", "▁ Tor", "C R", "▁L os", "▁Lo s", "▁ Los", "am ily", "ami ly", "amil y", "air e", "ai re", "a ire", "++ ;", "Cont roller", "Control ler", "wi de", "wid e", "w ide", "x x", "row ser", "rows er", "▁B ook", "▁Bo ok", "▁ Book", "Cont ainer", "pl 
oad", "plo ad", "p load", "▁E v", "▁ Ev", "▁t al", "▁ta l", "▁ tal", "▁the ory", "eqn array", "б е", "▁rep orted", "▁report ed", "▁me aning", "▁mean ing", "▁s y", "▁ sy", "ri be", "rib e", "r ibe", "ic ate", "ica te", "ho ld", "hol d", "h old", "▁of fers", "▁off ers", "▁offer s", "▁t empl", "▁tem pl", "▁temp l", "cs s", "c ss", "▁p icture", "▁pict ure", "▁ picture", "▁a sync", "▁as ync", "▁ async", "▁st ock", "▁sto ck", "▁ stock", "▁in ternal", "▁inter nal", "▁intern al", "▁ internal", "t i", "B O", "V er", "с по", "▁d emon", "▁de mon", "▁dem on", "▁demo n", "▁l augh", "▁la ugh", "▁laug h", "▁E nd", "▁En d", "▁ End", "▁k on", "▁ko n", "▁ kon", "▁ide as", "▁idea s", "▁c andid", "▁can did", "▁cand id", "Me m", "M em", "iz z", "i zz", "re fix", "ref ix", "▁A ND", "▁AN D", "▁ AND", "eg en", "e gen", "E l", "▁camp aign", "H ttp", "▁R ob", "▁Ro b", "▁ Rob", "д і", "▁b ul", "▁bu l", "▁ bul", "▁К о", "▁ Ко", "▁count ries", "▁countr ies", "» .", "▁ex pression", "▁exp ression", "▁express ion", "▁expr ession", "▁ expression", "▁Eng land", "s f", "▁certain ly", "ag en", "age n", "a gen", "▁ч а", "▁ ча", "▁A NY", "▁AN Y", "▁ ANY", "▁conne ct", "▁conn ect", "▁ connect", "F E", "▁and roid", "▁ android", "▁G old", "▁Go ld", "▁Gol d", "▁ Gold", "▁op pos", "▁opp os", "ov ern", "ove rn", "over n", "o vern", "▁Com mun", "▁Comm un", ", _", "as ion", "asi on", "L a", "▁f irm", "▁fi rm", "▁fir m", "▁Al though", "▁G ood", "▁Go od", "▁ Good", "▁L aw", "▁La w", "er ve", "erv e", "▁b rand", "▁br and", "▁bra nd", "▁ brand", "M in", "fil l", "fi ll", "f ill", "'] ,", "' ],", "▁J ew", "▁Je w", "il er", "ile r", "i ler", "in gle", "ing le", "it hub", "ith ub", "▁D iv", "▁Di v", "▁ Div", "▁c ert", "▁ce rt", "▁cer t", "▁ cert", "He ight", "H eight", "ra el", "r ael", "The re", "Th ere", "T here", "it ute", "itut e", "itu te", "▁a maz", "▁am az", "▁ amaz", "lo ok", "l ook", "▁S E", "▁ SE", "▁j o", "▁ jo", "▁pull ed", "▁pul led", "▁re sources", "▁res ources", "▁resource s", "▁ resources", "▁M ax", "▁Ma x", "▁ Max", "▁ag reed", "▁agree d", "▁agre ed", "as y", "a sy", "▁treat ment", "\"> < /", "\" > >", "▁ >>", "com mand", "comm and", "at z", "a tz", "▁m al", "▁ma l", "▁ mal", "ста в", "▁P ress", "▁Pr ess", "▁Pres s", "▁Pre ss", "▁ Press", "▁char acters", "▁character s", "▁z ero", "▁ze ro", "▁ zero", "AG E", "A GE", "rap per", "▁kit chen", "am ing", "ami ng", "amin g", "a ming", "▁re str", "▁r estr", "▁res tr", "▁rest r", "X X", "▁Col lege", "▁Ar ray", "▁Arr ay", "▁ Array", "▁f resh", "▁fr esh", "▁fre sh", "▁fres h", "▁sh ift", "▁ shift", "▁spec ified", "pl ete", "ple te", "plet e", "p lete", "IT E", "I TE", "▁C amp", "▁Cam p", "▁Ca mp", "▁ Camp", "ri al", "ria l", "r ial", "c b", "▁T H", "▁ TH", "I B", "os en", "ose n", "o sen", "▁ ú", "▁par ams", "▁param s", "▁para ms", "▁ params", "ign ment", "ad ding", "add ing", "▁deg ree", "▁ degree", "Loc al", "Lo cal", "L ocal", "O h", "▁z ur", "▁zu r", "▁level s", "▁lev els", "C S", "fin ished", "finish ed", "C ase", "ri age", "ria ge", "Vec tor", "V ector", "▁s ea", "▁se a", "▁ sea", "ant ic", "anti c", "▁Le ague", "▁there fore", "▁ther efore", "On e", "O ne", "Re turn", "Ret urn", "R eturn", "Acc ess", "Ac cess", "A ccess", "va s", "v as", "▁о с", "▁r at", "▁ra t", "▁ rat", "Bi g", "B ig", "▁be havior", "▁behav ior", "▁behavi or", "k r", "▁un defined", "▁und efined", "▁ undefined", "▁E s", "▁ Es", "▁appe ared", "▁appear ed", "el es", "ele s", "e les", "▁W AR", "▁WA R", "▁ WAR", "St at", "S tat", "▁Go ogle", "▁ Google", "▁c redit", "▁cre dit", "▁cr edit", "▁cred it", "▁F ile", "▁Fil 
e", "▁Fi le", "▁ File", "an ging", "ang ing", "ho use", "hou se", "h ouse", "rom ise", "ge nt", "gen t", "g ent", "▁hab it", "▁ha bit", "▁soc iety", "▁soci ety", "▁societ y", "▁enc our", "▁p aint", "▁pain t", "▁pa int", "pe t", "p et", "▁U K", "▁ UK", "aw s", "a ws", "on om", "ono m", "o nom", "G l", "}_ {\\", "}_{ \\", "} _{\\", "el ess", "ele ss", "eles s", "e less", "em y", "e my", "▁C ong", "▁Con g", "▁Co ng", "▁develop ed", "▁im ages", "▁image s", "▁imag es", "▁ images", "▁ ö", "▁f ont", "▁fo nt", "▁fon t", "▁ font", "cl ear", "cle ar", "c lear", "gi n", "g in", "▁L ord", "▁Lo rd", "▁Lor d", "▁trans port", "▁ transport", "▁: :", "▁ ::", "▁c up", "▁cu p", "▁ cup", "ul ate", "ula te", "u late", "▁D uring", "▁Du ring", "▁Dur ing", "pr iv", "p riv", "▁ext rem", "▁extr em", "▁D i", "▁ Di", "▁d oubt", "▁dou bt", "▁doub t", "P y", "if ying", "ify ing", "sp lit", "spl it", "s plit", "eg o", "e go", "git hub", "g ithub", "▁) ,", "▁ ),", "RO M", "R OM", "▁ch air", "▁cha ir", "▁ chair", "▁t rade", "▁tr ade", "▁trad e", "▁tra de", "▁n icht", "▁ni cht", "▁nic ht", "To p", "T op", "St ore", "▁p arte", "▁part e", "▁par te", "pro ject", "ni a", "n ia", "▁в ід", "▁ві д", "wa r", "w ar", "▁Pro f", "▁Pr of", "▁c aught", "Th read", "ст ва", "ств а", "с тва", "aut hor", "auth or", "▁d oll", "▁do ll", "▁dol l", "▁h arm", "▁ha rm", "▁har m", "▁ harm", "▁G en", "▁Ge n", "▁ Gen", "tr ee", "tre e", "t ree", "et ime", "eti me", "e time", "cf g", "c fg", "▁gu ys", "▁guy s", "▁Cal ifornia", "▁G reen", "▁Gr een", "▁Gre en", "▁Gree n", "▁ Green", "▁mov ement", "▁move ment", "▁mo vement", "ie j", "i ej", "▁stat ement", "▁state ment", "▁ statement", "▁se eing", "▁see ing", "▁h aven", "▁have n", "▁ha ven", "▁hav en", "vent ion", "v ention", "S L", "ched ul", "ie rt", "ier t", "i ert", "▁pr imary", "▁prim ary", "▁pri mary", "▁prima ry", "▁ primary", "▁c ivil", "▁ci vil", "▁civ il", "ri an", "ria n", "r ian", "▁b utton", "▁but ton", "▁butt on", "▁ button", "▁l ived", "▁li ved", "▁live d", "▁liv ed", "P ass", "so r", "s or", "▁watch ing", "▁wat ching", "▁sk ills", "▁skill s", "te e", "t ee", "Le vel", "L evel", "▁sc ient", "h s", "▁a gre", "▁ag re", "ca t", "c at", "▁t end", "▁te nd", "▁ten d", "▁M ill", "▁Mil l", "▁Mi ll", "▁ Mill", "▁C ap", "▁Ca p", "▁ Cap", "OR D", "O RD", "gl e", "g le", "▁с во", "» ,", "▁a head", "▁ah ead", "ve st", "ves t", "v est", "▁J ose", "▁Jo se", "▁Jos e", "is cher", "isch er", "ische r", "isc her", "ș i", "▁le aving", "▁д ля", "▁s outh", "▁so uth", "▁sou th", "▁sout h", "▁con sum", "▁cons um", "▁ consum", "R ange", "▁activ ities", "Se c", "S ec", "▁s ales", "▁sa les", "▁sal es", "▁sale s", "▁f ix", "▁fi x", "▁ fix", "▁j ed", "▁je d", "▁ jed", "ru m", "r um", "ve ctor", "vec tor", "v ector", "▁s pot", "▁sp ot", "▁spo t", "▁ spot", "▁man ufact", "к т", "or row", "orr ow", "si gn", "sig n", "s ign", "▁col lege", "▁colle ge", "▁colleg e", "▁d river", "▁dr iver", "▁dri ver", "▁driv er", "▁drive r", "▁ driver", "▁def initely", "▁definit ely", "▁s pend", "▁sp end", "▁spe nd", "miss ion", "m ission", "з у", "at ively", "ative ly", "ativ ely", "b i", "Call back", "▁particular ly", "▁particul arly", "▁h ell", "▁he ll", "▁hel l", "▁ hell", "▁p ool", "▁po ol", "▁ pool", "PR E", "P RE", "▁cle arly", "▁clear ly", "P T", "ot hes", "oth es", "othe s", "▁I d", "▁ Id", "Loc ation", "L ocation", "▁R un", "▁Ru n", "▁ Run", "▁f ixed", "▁fix ed", "▁ fixed", "▁H and", "▁Ha nd", "▁Han d", "▁ Hand", "ba l", "b al", "d ouble", "C an", "Om ega", "▁chall eng", "▁stand ing", "▁stan ding", "▁ standing", "it en", "ite n", 
"i ten", "▁me chan", "▁d urch", "▁dur ch", "▁d ell", "▁de ll", "▁del l", "▁rais ed", "▁raise d", "▁ra ised", "▁we ak", "▁ weak", "▁D u", "▁ Du", "gr ad", "gra d", "g rad", "▁sc ene", "▁scen e", "▁ scene", "pos s", "po ss", "p oss", "▁t on", "▁to n", "▁ ton", "▁e arth", "▁ear th", "ul ations", "ulation s", "▁str ength", "▁stre ngth", "▁streng th", "ak ed", "ake d", "a ked", "▁re main", "▁rem ain", "▁B i", "▁ Bi", "▁custom er", "▁cust omer", "▁ customer", "ran ge", "r ange", "▁inter ested", "▁interest ed", "ON E", "O NE", "▁c off", "▁co ff", "re quire", "requ ire", "▁On ly", "▁ Only", "▁W eb", "▁We b", "▁ Web", "▁f arm", "▁far m", "▁fa rm", "▁act ivity", "▁activ ity", "▁ activity", "▁r out", "▁ro ut", "▁rou t", "bl ing", "b ling", "S Y", "▁Rich ard", "▁Ric hard", "▁R ef", "▁Re f", "▁ Ref", "▁ко н", "▁к он", "▁ кон", "▁j un", "▁ju n", "bo rn", "bor n", "b orn", "ij n", "Config uration", "um an", "uma n", "u man", "E E", "▁mar ried", "▁З а", "▁ За", "▁f at", "▁fa t", "▁k id", "▁ki d", "▁T ur", "▁Tu r", "▁ Tur", "▁off ered", "▁offer ed", "ni c", "n ic", "▁B ig", "▁Bi g", "▁ Big", "Ga mma", "G amma", "▁He alth", "▁ Health", "▁T R", "▁ TR", "▁s ię", "▁si ę", "▁const ruction", "▁construct ion", "▁constr uction", "▁constru ction", "▁ construction", "▁Ch urch", "▁B et", "▁Be t", "▁ Bet", "bu s", "b us", "▁e arn", "▁ear n", "ri ct", "ric t", "r ict", "▁п ра", "▁пр а", "▁ пра", "▁br ain", "▁bra in", "▁f ra", "▁fr a", "▁O p", "▁ Op", "FI G", "F IG", "em a", "e ma", "▁Europe an", "▁S aint", "▁Sa int", "▁ Saint", "AR E", "A RE", "ur i", "u ri", "▁R iver", "{ }", "▁s itting", "▁sit ting", "▁under standing", "▁understand ing", "▁pl ans", "▁plan s", "rop ri", "▁old er", "▁ol der", "▁ older", "▁pres sure", "▁press ure", "Im pl", "Imp l", "▁pe ace", "Conne ction", "Conn ection", "Connect ion", "▁f i", "▁ fi", "ri ch", "ric h", "r ich", "▁sh ut", "ap ers", "ape rs", "aper s", "a pers", "Po rt", "P ort", "▁L ook", "▁Lo ok", "▁ Look", "ri m", "r im", "au th", "aut h", "a uth", "au to", "aut o", "a uto", "▁high ly", "▁un less", "▁W al", "▁Wa l", "▁re n", "▁r en", "▁ ren", "w s", "▁c ore", "▁co re", "▁cor e", "▁ core", "( -", "▁c lim", "▁cl im", "ru it", "r uit", "▁call back", "▁ callback", "he st", "hes t", "h est", "▁Char les", "▁Charl es", "▁L ong", "▁Lo ng", "▁ Long", "} =", "ъ р", "▁sh ared", "▁share d", "▁shar ed", "▁sha red", "▁ shared", "ul ated", "ula ted", "ulate d", "gorith m", "▁H ome", "▁Ho me", "▁Hom e", "▁ Home", "▁vill age", "▁vil lage", "ee s", "e es", "s v", "▁rest aur", "re y", "r ey", "▁C ast", "▁Cas t", "▁Ca st", "▁ Cast", "▁P erson", "▁Per son", "▁Pers on", "▁ Person", "ки й", "▁organ iz", "▁R ad", "▁Ra d", "▁ Rad", "pon ents", "ponent s", "▁wer den", "▁werd en", "▁b ow", "▁bo w", "▁ bow", "se n", "s en", "am i", "a mi", "Inter face", "▁b asis", "▁bas is", "▁ba sis", "▁Comp any", "▁Compan y", "▁ Company", "er nel", "ern el", "erne l", "it u", "i tu", "Has h", "Ha sh", "H ash", "▁a an", "▁ х", "▁s mile", "▁sm ile", "x ml", "▁s cen", "▁sc en", "am m", "a mm", "to ol", "too l", "t ool", "ar ia", "ari a", "a ria", "▁acc ur", "▁ac cur", "▁ accur", "set tings", "setting s", "▁Jes us", "ac ement", "ace ment", "po wer", "pow er", "p ower", "( !", "▁c alls", "▁call s", "▁cal ls", "▁ calls", "▁bas ic", "▁ basic", "▁set tings", "▁sett ings", "▁setting s", "▁ settings", "ri pt", "rip t", "r ipt", "po ol", "p ool", "ct ors", "ctor s", "▁Found ation", "▁ Foundation", "▁we ap", "KE Y", "K EY", "fo ot", "foo t", "f oot", "▁r adio", "▁rad io", "▁radi o", "▁ radio", "▁hel ped", "▁help ed", "ma nn", "man n", "m 
ann", "▁j ump", "▁ju mp", "▁t ick", "▁ti ck", "▁ tick", "▁gr owing", "▁grow ing", "▁gro wing", "at en", "ate n", "a ten", "re al", "rea l", "▁incre asing", "Dev ice", "var epsilon", "vare psilon", "▁s ets", "▁se ts", "▁set s", "▁ sets", "▁adv ant", "Op en", "O pen", "▁re asons", "▁reason s", "▁sup posed", "▁supp osed", "▁suppose d", "oe s", "o es", "ed e", "e de", "te en", "tee n", "t een", "if def", "▁de lete", "▁del ete", "▁delet e", "▁ delete", "▁& =", "▁ &=", "▁B ill", "▁Bi ll", "▁Bil l", "▁ Bill", "▁a im", "▁ai m", "▁ aim", "▁O k", "▁ Ok", "▁A v", "▁ Av", "re ci", "rec i", "ac ks", "ack s", "a cks", "is te", "ist e", "i ste", "Pro perties", "▁t mp", "▁tm p", "▁ tmp", "▁d ei", "▁de i", "PE R", "P ER", "D C", "st a", "s ta", "ни и", "▁lim ited", "▁limit ed", "▁ limited", "▁great er", "▁gre ater", "de scription", "des cription", "or i", "o ri", "ain ts", "aint s", "▁h y", "▁ hy", "▁M el", "▁Me l", "▁C H", "▁ CH", "con s", "co ns", "c ons", "▁sur round", "▁W ho", "▁Wh o", "▁ Who", "ar c", "a rc", "▁te lev", "▁tele v", "▁tel ev", "it ution", "itut ion", "▁e qual", "▁equ al", "▁eq ual", "▁ equal", "к і", "▁Is rael", "ä h", "▁C aption", "▁Capt ion", "▁Ca ption", "▁ex erc", "em por", "emp or", "▁+ +", "▁ ++", "▁l ib", "▁li b", "▁ lib", "ma ke", "m ake", "▁M A", "▁ MA", "co py", "cop y", "c opy", "f riend", "▁ко то", "▁ кото", "▁dam age", "▁\\ ,", "▁ \\,", "od ed", "ode d", "o ded", "▁n one", "▁no ne", "▁non e", "▁ none", "▁ev alu", "▁eval u", "▁ evalu", "st on", "sto n", "s ton", "> ,", "FO R", "F OR", "▁n orm", "▁no rm", "▁nor m", "▁ norm", "ap pe", "app e", "a ppe", "S ession", "▁ad ult", "▁h ospital", "▁hosp ital", "▁recomm end", "pro perty", "ste in", "fin al", "fi nal", "f inal", "▁n u", "▁ nu", "se cond", "sec ond", "▁a spect", "▁as pect", "▁asp ect", "\") ]", "\" )]", "же н", "ж ен", "am ento", "ament o", "amen to", "▁r ac", "▁ra c", "▁ rac", "sa ve", "s ave", "▁foot ball", "A b", "un gs", "ung s", "ab il", "abi l", "a bil", "▁Ar ch", "▁Arc h", "▁ Arch", "sys tem", "s ystem", "hi st", "his t", "h ist", "▁l uck", "▁lu ck", "▁luc k", "re nder", "ren der", "rend er", "r ender", "▁se in", "▁sei n", "ion i", "io ni", "i oni", "▁r ot", "▁ro t", "▁ rot", "▁cor ner", "▁corn er", "▁app ropri", "▁ap propri", "▁ appropri", "▁Soft ware", "▁t ele", "▁te le", "▁tel e", "▁ tele", "De lete", "Dele te", "Del ete", "▁Acc ording", "▁pr ison", "▁pri son", "▁ prison", "▁l ic", "▁li c", "▁ lic", "▁м и", "▁ ми", "ter m", "te rm", "t erm", "se ts", "set s", "s ets", "▁v el", "▁ve l", "▁ vel", "▁r ank", "▁ran k", "▁ rank", "▁ex isting", "▁exist ing", "▁ existing", "▁V ir", "▁Vi r", "▁t rip", "▁tr ip", "▁tri p", "▁м у", "▁ му", "av ax", "ava x", "▁r is", "▁ri s", "▁ ris", "▁def ine", "▁defin e", "▁ define", "▁he at", "ca r", "c ar", "▁con vert", "▁conv ert", "▁conver t", "▁conve rt", "▁ convert", "em ail", "ema il", "e mail", "▁U nder", "▁Un der", "▁Und er", "▁ Under", "▁ Ш", "▁G rand", "▁Gr and", "▁Gran d", "▁Gra nd", "▁ex ists", "▁exist s", "▁ exists", "sy s", "s ys", "ef f", "e ff", "▁T op", "▁To p", "▁ Top", "▁ č", "▁t empor", "▁tem por", "▁temp or", "▁tempo r", "▁arg uments", "▁argument s", "▁ arguments", "▁support ed", "▁supp orted", "▁ supported", "en sed", "ens ed", "ense d", "▁Franc is", "▁co ord", "▁ coord", "▁achie ve", "▁N ame", "▁Na me", "▁Nam e", "▁ Name", "▁J ahr", "▁Jah r", "▁Ja hr", "▁G i", "sh e", "s he", "▁D ev", "▁De v", "▁ Dev", "▁a lla", "▁al la", "▁all a", "▁ alla", "▁W IT", "ag ment", "c ustom", "al ls", "all s", "& &", "W E", "▁h olding", "▁hold ing", "▁hol ding", "pro totype", "proto 
type", "prot otype", "▁f ing", "▁fin g", "▁fi ng", "▁b ag", "▁ba g", "▁ bag", "▁Par ty", "▁Part y", "st ack", "sta ck", "▁econom ic", "▁G al", "▁Ga l", "id ents", "ident s", "iden ts", "▁J un", "▁Ju n", "▁sh owed", "▁show ed", "os h", "o sh", "▁B ay", "▁Ba y", "▁ Bay", "ma il", "m ail", "▁S O", "▁ SO", "▁\" <", "graph ics", "▁f u", "▁ fu", "cl ick", "cli ck", "c lick", "▁b attle", "▁batt le", "▁bat tle", "{ {", "▁E vent", "▁Even t", "▁Ev ent", "▁Eve nt", "▁ Event", "ri or", "rio r", "r ior", "ch aft", "cha ft", "▁f avorite", "▁favor ite", "us ive", "sup port", "supp ort", "s upport", "b m", "K ind", "▁saf ety", "▁safe ty", "▁E nt", "▁En t", "▁ Ent", "cu p", "c up", "▁Austral ia", "▁dest roy", "▁destro y", "▁ destroy", "▁organ ization", "▁organiz ation", "id en", "ide n", "i den", "######## ########", "de c", "d ec", "▁z a", "▁ za", "▁s even", "▁se ven", "▁ seven", "ar ely", "are ly", "arel y", "▁f lag", "▁fl ag", "▁ flag", "Di r", "D ir", "▁C arl", "▁Car l", "▁Ca rl", "▁do ctor", "▁doc tor", "▁var iety", "▁vari ety", "▁L in", "▁Li n", "▁ Lin", "▁t om", "▁to m", "▁ tom", "^{ (", "^ {(", "B o", "an tes", "ant es", "ante s", "▁m ine", "▁min e", "▁mi ne", "▁ mine", "▁M it", "▁Mi t", "▁de scribe", "▁desc ribe", "▁describ e", "Ar gs", "Arg s", "L S", "AP I", "A PI", "▁L uc", "▁Lu c", "▁ Luc", "ph one", "▁sc ience", "▁ science", "▁O per", "▁Op er", "▁ Oper", "Ne xt", "N ext", "▁invest ig", "▁demon str", "▁G overn", "▁Go vern", "▁object s", "▁ objects", "▁Lou is", "▁Lo uis", "▁Return s", "▁ Returns", "▁h an", "▁ha n", "▁ han", "na m", "n am", "▁com me", "▁comm e", "▁pres ence", "▁p el", "▁pe l", "▁ pel", "▁det ect", "▁ detect", ") =", "▁Ch inese", "▁r ich", "▁ri ch", "▁ric h", "▁ rich", "▁class es", "▁classe s", "▁clas ses", "▁ classes", "▁exp and", "▁ expand", "▁D om", "▁Do m", "▁ Dom", "▁D ec", "▁De c", "▁ Dec", "s n", "pe ed", "p eed", "▁J im", "▁Ji m", "sh ould", "▁Sm ith", "▁p ages", "▁page s", "▁pa ges", "▁pag es", "▁ pages", "▁Je an", "ri cs", "ric s", "r ics", "▁S und", "▁Su nd", "▁Sun d", "ad s", "a ds", "▁The ir", "un icip", "uni cip", "unic ip", "в у", "▁down load", "▁ download", "▁st ress", "▁str ess", "▁stre ss", "▁P et", "▁Pe t", "▁ Pet", "me nu", "men u", "m enu", "re me", "rem e", "r eme", "▁com pared", "▁comp ared", "▁compar ed", "▁compare d", "St e", "S te", "IN D", "I ND", "cont ainer", "▁Ind ian", "▁India n", "or en", "ore n", "o ren", "▁s es", "▁se s", "▁ ses", "▁W he", "▁Wh e", "▁ Whe", "▁r oku", "▁ro ku", "▁estab lished", "▁establish ed", "▁gener ally", "▁general ly", "▁f le", "▁fl e", "__ (", "_ _(", "=\" +", "= \"+", "V ar", "▁M ake", "▁Ma ke", "▁Mak e", "▁ Make", "▁rem oved", "▁remove d", "▁ removed", "z z", "ü n", "▁m ix", "▁mi x", "▁ mix", "er k", "iat ion", "i ation", "ou ter", "out er", "oute r", "o uter", "S K", "▁be comes", "▁bec omes", "▁become s", "▁H all", "▁Ha ll", "▁Hal l", "sc ious", "▁w atched", "▁watch ed", "▁wat ched", "▁g ather", "▁ga ther", "▁ gather", "▁Res ult", "▁ Result", "pro of", "pa y", "p ay", "▁produ ced", "▁produce d", "▁prod uced", "▁| =", "▁b order", "▁bord er", "▁bor der", "▁ border", "▁d in", "▁di n", "▁s cript", "▁sc ript", "▁scr ipt", "▁ script", "▁a ctions", "▁act ions", "▁action s", "▁ actions", "▁m as", "▁ma s", "▁ mas", "щ а", "oot h", "oo th", "o oth", "▁Te chn", "▁Tech n", "Js on", "J son", "▁f illed", "▁fil led", "▁fill ed", "▁ filled", "де н", "д ен", "und le", "ст у", "с ту", "To ol", "Too l", "T ool", "▁k ing", "▁ki ng", "▁kin g", "▁ king", "▁v en", "▁ve n", "▁ ven", "st ra", "str a", "s tra", "▁pre dict", "▁pred ict", "▁ 
predict", "▁l ui", "▁lu i", "▁WAR RAN", "▁F un", "▁Fu n", "▁ Fun", "Sc ript", "S cript", "▁power ful", "▁l ose", "▁lo se", "▁los e", "at ically", "atic ally", "▁d aily", "▁da ily", "▁dai ly", "▁r ing", "▁ri ng", "▁ ring", "▁ar rived", "▁arriv ed", "▁arr ived", "▁arrive d", "St ack", "sc ope", "s cope", "▁B ack", "▁Ba ck", "▁ Back", "el ij", "eli j", "e lij", "▁z e", "▁ ze", "ke ys", "key s", "{ \"", "VI D", "V ID", "▁l icense", "▁lic ense", "▁ license", "wh at", "w hat", "▁pro ced", "▁proc ed", "ra nt", "ran t", "r ant", "est ival", "ag ram", "agr am", "agra m", "a gram", "▁L O", "▁ LO", "▁Hen ry", "▁fl ags", "▁flag s", "▁ flags", "Do wn", "D own", "scri ption", "script ion", "s cription", "▁famil ies", "▁familie s", "is se", "iss e", "bo ur", "b our", "▁B ur", "▁Bu r", "— \"", "▁b rief", "▁br ief", "▁ brief", "▁cre ating", "▁creat ing", "▁cl ients", "▁client s", "ran gle", "r angle", "▁amaz ing", "▁s ind", "▁si nd", "▁sin d", "▁cover ed", "▁cov ered", "▁ covered", "We ll", "W ell", "ст е", "с те", "то р", "т ор", "▁B as", "▁Ba s", "▁ Bas", "to tal", "tot al", "t otal", "▁I nit", "▁In it", "▁ Init", "▁s and", "▁sa nd", "▁san d", "Un it", "U nit", "▁mur der", "▁b right", "▁br ight", "▁brig ht", "▁t rav", "▁tr av", "▁tra v", "ic ans", "ica ns", "ican s", "▁att ribute", "▁attribut e", "▁ attribute", "f c", "▁pl aced", "▁place d", "▁plac ed", "ES T", "E ST", "Var i", "V ari", "▁c os", "▁co s", "▁ cos", "▁at tract", "▁att ract", "▁attr act", "▁attra ct", "an el", "ane l", "a nel", "}) .", "} ).", "by tes", "byte s", "▁p arse", "▁par se", "▁ parse", "▁be long", "▁bel ong", "B N", "▁S ol", "▁So l", "P o", "` ,", "▁c alling", "▁call ing", "▁cal ling", "▁? >", "▁ ?>", "▁it er", "▁i ter", "▁ iter", "▁u rl", "▁ur l", "▁ url", "▁ev ening", "▁even ing", "re ek", "ree k", "▁hon est", "▁direct or", "▁dire ctor", "▁dir ector", "R C", "▁s olid", "▁sol id", "▁ solid", "▁ph il", "ie ne", "ien e", "i ene", "FA ULT", "co pe", "cop e", "c ope", "▁Hist ory", "▁Histor y", "▁Hi story", "▁ History", "▁Te am", "▁ Team", "ree dom", "reed om", "▁r u", "▁ ru", "U B", "▁w orse", "▁wor se", "im o", "i mo", "Ma t", "M at", "▁M ex", "▁Me x", "ac tor", "act or", "a ctor", "▁v or", "▁vo r", "▁ vor", "ть ся", "▁exper iment", "▁experi ment", "▁P lay", "▁Pl ay", "▁ Play", "▁An other", "▁happ ens", "▁happen s", "ua n", "u an", "▁pat ients", "▁patient s", "▁re nd", "▁r end", "▁ren d", "▁ rend", "▁M o", "▁ Mo", "▁T ex", "▁Te x", "▁ Tex", "▁w ed", "▁we d", "▁ wed", "t n", "in sert", "ins ert", "▁п а", "▁ па", "▁an ti", "▁ant i", "▁ anti", "Mat ch", "M atch", "ampions hip", "ampion ship", "▁for ces", "▁force s", "▁H ot", "▁Ho t", "▁ Hot", "▁ph ase", "▁ phase", "▁t emplate", "▁templ ate", "▁temp late", "▁ template", "st op", "sto p", "s top", "ic ated", "ica ted", "icate d", "▁man aged", "▁manage d", "▁ managed", "wa it", "w ait", "▁* (", "▁ *(", "G B", "▁app oint", "▁ap point", "▁ appoint", "ł a", "▁s tick", "▁st ick", "▁ stick", "▁F OR", "▁FO R", "▁ FOR", "▁V is", "▁Vi s", "▁ Vis", "to r", "t or", "▁p ř", "qu est", "que st", "ques t", "q uest", "us es", "use s", "u ses", "\"); \r", "\") ;\r", "\" );\r", "▁sudden ly", "▁sud denly", "é c", "N D", "ur op", "uro p", "u rop", "ре д", "▁ins urance", "ac cess", "acc ess", "a ccess", "un finished", "▁t amb", "▁ta mb", "▁tam b", "▁s ac", "▁sa c", "▁C ourt", "▁Co urt", "▁Cour t", "▁Cou rt", "▁miss ing", "▁mis sing", "▁ missing", "▁W here", "▁Wh ere", "▁Whe re", "▁ Where", "▁S um", "▁Su m", "▁ Sum", "}^ {\\", "}^{ \\", "} ^{\\", "▁s ua", "▁su a", "_ ,", "▁th ick", "▁Tr ump", "▁Tru mp", "▁oper 
ations", "▁operation s", "▁ operations", "F S", "▁de ux", "d z", "Temp late", "T emplate", "▁\" /", "▁o dd", "▁od d", "▁ odd", "▁re ality", "▁real ity", "▁te ams", "▁team s", "▁tea ms", "▁c er", "▁ce r", "▁ cer", "om a", "o ma", "▁ și", "▁cl oud", "▁clo ud", "▁ cloud", "▁Dep artment", "N e", "▁requ ires", "▁require s", "it ems", "ite ms", "item s", "▁I II", "▁II I", "▁ III", "right arrow", ")- >", ") ->", "▁w riter", "▁wr iter", "▁writ er", "▁write r", "▁ writer", "re place", "rep lace", "▁t hr", "▁th r", "je n", "j en", "▁o t", "▁ ot", "▁occ up", "▁oc cup", "▁ occup", "▁event ually", "▁M ath", "▁Mat h", "▁Ma th", "▁ Math", "▁con serv", "▁cons erv", "▁conse rv", "am er", "ame r", "a mer", "▁F ort", "▁For t", "▁Fo rt", "▁d ry", "▁dr y", "▁sex ual", "▁co sts", "▁cost s", "▁cos ts", "▁for ms", "▁form s", "▁ forms", "▁V ict", "▁Vi ct", "▁Vic t", "PA R", "P AR", "frame work", "▁д и", "▁ ди", "Oper ation", "з на", "wh ich", "▁t ight", "▁ti ght", "In valid", "▁part ner", "▁п ред", "▁пре д", "▁th ank", "▁than k", "▁gu ard", "▁ guard", "he m", "h em", "Bo dy", "B ody", "▁e mot", "▁em ot", "I X", "fa st", "fas t", "f ast", "щ о", "ñ o", "ni ght", "n ight", "▁S ci", "▁Sc i", "ни ка", "ник а", "▁T O", "▁ TO", "▁individ uals", "▁individual s", "сс и", "с си", "}) ,", "} ),", "F alse", "(\" %", "( \"%", "▁op tim", "▁opt im", "▁ optim", "▁- ->", "▁-- >", "▁ -->", "▁f actor", "▁fact or", "▁fac tor", "▁fa ctor", "▁ factor", "▁sm aller", "▁small er", "▁con tain", "▁cont ain", "sp ect", "spec t", "spe ct", "s pect", "Eng ine", "▁ann ounced", "▁announ ced", "▁announce d", "▁Dem ocr", "▁r ob", "▁ro b", "▁ rob", "▁f lat", "▁fl at", "▁ flat", "os oph", "oso ph", "Se arch", "S earch", "ah l", "a hl", "▁Ex ception", "▁Except ion", "▁ Exception", "▁O l", "equ als", "eq uals", "equal s", "▁un ter", "▁unt er", "▁ unter", "sh ape", "sha pe", "N S", "Ob j", "▁spec ies", "▁spe cies", "we ight", "wei ght", "w eight", "yo u", "y ou", "▁e ste", "▁est e", "▁es te", "▁ este", "▁V iew", "▁Vi ew", "▁ View", "▁m ission", "▁miss ion", "▁ mission", "▁j ournal", "▁jour nal", "▁ journal", "Value s", "Val ues", "▁ein em", "▁eine m", "is mo", "ism o", "▁project s", "▁ projects", "▁D as", "▁Da s", "ri ble", "rib le", "r ible", "▁s erve", "▁ser ve", "▁serv e", "▁ serve", "▁op ening", "▁open ing", "▁h ur", "▁program s", "▁U SA", "▁US A", "▁ USA", "il iar", "ili ar", "ilia r", "id os", "ido s", "B r", "est amp", "esta mp", "▁t ools", "▁to ols", "▁too ls", "▁tool s", "▁ tools", "an ner", "ann er", "anne r", "R T", "▁St art", "▁Star t", "▁Sta rt", "▁ Start", "▁b ath", "▁bat h", "▁ba th", "▁coff ee", "or ter", "ort er", "orte r", "in ternal", "inter nal", "intern al", "file s", "fil es", "fi les", "f iles", "IN VAL", "ak o", "a ko", "d t", "▁Se cond", "▁Sec ond", "▁ Second", "▁al loc", "▁all oc", "▁ alloc", "▁en ded", "▁end ed", "▁ende d", "▁ ended", "ac ional", "aci onal", "acion al", "acio nal", "▁man ager", "▁manage r", "▁ manager", "▁S un", "▁Su n", "▁ Sun", "ag g", "a gg", "▁le ader", "▁lead er", "ol ved", "olve d", "olv ed", "▁ч то", "▁trad itional", "▁tradition al", "sh ot", "s hot", "ru p", "r up", "C F", "▁E ach", "▁ Each", "w r", "▁S om", "▁So m", "▁ Som", "▁material s", "▁mater ials", "▁m sg", "▁ms g", "▁ msg", "▁s yn", "▁sy n", "▁ syn", "▁produ ce", "▁prod uce", "▁st orage", "▁stor age", "▁sto rage", "▁ storage", "sub section", "▁S ie", "▁Si e", "▁I P", "▁ IP", "CE SS", "▁w a", "▁ wa", "Re cord", "Rec ord", "▁mark eting", "▁market ing", "pl et", "ple t", "p let", "D ialog", "▁mention ed", "▁ment ioned", "▁N a", "▁ Na", "▁Un 
ion", "▁ Union", "▁A PI", "▁AP I", "▁ API", "▁neg ative", "▁ negative", "tx t", "t xt", "▁eas ier", "le gal", "leg al", "De p", "D ep", "▁no vel", "▁nov el", "▁nove l", "eu r", "e ur", "ac ió", "aci ó", "a ció", "▁B ud", "▁Bu d", "▁c arry", "▁car ry", "sch aft", "s chaft", "▁br oken", "▁bro ken", "▁broke n", "▁t rees", "▁tr ees", "▁tre es", "▁tree s", ">( );", ">() ;", "> ();", "▁e mb", "▁em b", "▁ emb", "ie der", "ied er", "i eder", "▁r oute", "▁ro ute", "▁rout e", "▁rou te", "▁ route", "ik el", "ike l", "i kel", "▁l isten", "▁li sten", "▁list en", "▁ listen", "ash ion", "ashi on", "▁M rs", "▁Mr s", "▁equip ment", "ag ger", "agg er", "▁T hus", "▁Th us", "▁mat rix", "▁ matrix", "al la", "all a", "a lla", "▁T our", "▁To ur", "▁con versation", "▁convers ation", "Mo n", "M on", "our nal", "▁min ute", "▁minut e", "▁ minute", "A m", "Ap i", "A pi", "▁for get", "▁forg et", "M e", "lev ant", "te mp", "tem p", "t emp", "▁t elling", "▁tell ing", "▁tel ling", "mo ve", "mov e", "m ove", "▁in dependent", "▁independ ent", "to String", "ed it", "edi t", "e dit", "▁J ac", "▁Ja c", "az z", "a zz", "re act", "rea ct", "▁c in", "▁ci n", "▁ cin", "▁P rov", "▁Pro v", "▁Pr ov", "▁ Prov", "is ted", "ist ed", "iste d", "i sted", "▁h ash", "▁has h", "▁ha sh", "▁ hash", "on na", "ik i", "i ki", "▁gener ated", "▁generate d", "▁gene rated", "▁ generated", "Re nder", "Rend er", "R ender", "▁psy ch", "▁ps ych", "na v", "n av", "▁en tr", "▁ent r", "▁ entr", "п ра", "r x", "AT H", "A TH", "▁ass ume", "▁assum e", "Tr ee", "T ree", "semb ly", "sembl y", "▁M att", "▁Mat t", "▁Ma tt", "ca ption", "c aption", "▁s olutions", "▁solution s", "▁fa ith", "▁fait h", "▁dig ital", "▁digit al", "▁ex cell", "▁exc ell", "▁V ersion", "▁Vers ion", "▁ Version", "De bug", "D ebug", "▁ж и", "▁ жи", "▁car ried", "re set", "res et", "▁slow ly", "an cing", "anc ing", "▁own er", "▁ owner", "▁T er", "▁Te r", "▁D id", "▁Di d", "▁ Did", "▁g est", "▁ge st", "▁ges t", "▁ gest", "▁é té", "▁ét é", "▁ été", "▁pro of", "▁ proof", "F ont", "▁n ob", "▁no b", "▁ nob", "C o", "▁G NU", "▁l iber", "▁li ber", "▁lib er", "it ness", "▁h ij", "▁hi j", "▁v ert", "▁ver t", "▁ve rt", "▁ vert", "ш а", "FL AG", "ME NT", "M ENT", "▁S on", "▁So n", "Mu lt", "M ult", "▁d istrict", "▁di strict", "▁dist rict", "conne ct", "conn ect", "ject ion", "je ction", "j ection", "ly mp", "▁real ized", "▁realize d", "▁realiz ed", "mo s", "m os", "y e", "▁re nder", "▁r ender", "▁ren der", "▁rend er", "▁ render", "ri o", "r io", "▁inter pret", "▁ interpret", "▁slight ly", "fi x", "f ix", "▁stud ies", "▁r id", "▁ri d", "▁ rid", "at re", "atr e", "a tre", "▁benef its", "▁benefit s", "▁F ace", "▁Fa ce", "▁Fac e", "▁ Face", "iv ery", "ive ry", "iver y", "i very", "ри я", "doc ument", "d ocument", "▁as king", "▁ask ing", "La st", "L ast", "ar ante", "ara nte", "aran te", "▁Mart in", "▁E ll", "▁El l", "▁v ector", "▁ve ctor", "▁vec tor", "▁ vector", "▁for ced", "▁force d", "▁ forced", "о ло", "P H", "W R", "▁K l", "▁s ky", "▁sk y", "▁ sky", "▁str ategy", "▁strateg y", "▁strat egy", "oc ked", "ock ed", "▁ne ck", "ś ci", "O UT", ")) ,", ") ),", "C ustom", "▁w ie", "▁ wie", "▁s weet", "▁swe et", "▁t emp", "▁te mp", "▁tem p", "▁ temp", "▁fore ign", "▁h all", "▁ha ll", "▁hal l", "▁ hall", "as tr", "ast r", "a str", "As s", "A ss", "MO DE", "MOD E", "▁max imum", "▁maxim um", "an nels", "ann els", "annel s", "anne ls", "▁t ip", "▁ti p", "▁ tip", "▁second s", "▁sec onds", "▁ seconds", "▁st ack", "▁sta ck", "▁ stack", "ig a", "i ga", "▁r aise", "▁rais e", "▁ra ise", "▁ raise", "en able", "ena ble", 
"oi r", "o ir", "▁s oul", "▁so ul", "▁sou l", "K e", ")$ .", ") $.", "▁T im", "▁Ti m", "▁ Tim", "AL SE", "is er", "ise r", "i ser", "cont in", "be l", "b el", "▁m ad", "▁ma d", "▁ mad", "lic hen", "li chen", "lich en", "liche n", "l ichen", "ab e", "a be", "sa fe", "▁con cent", "▁conc ent", "▁conce nt", "bo und", "b ound", "▁R equ", "▁Re qu", "▁ Requ", "sw itch", "▁st one", "▁sto ne", "▁ stone", "▁trans l", "▁ transl", "▁v ac", "▁va c", "an don", "and on", "ando n", "▁F ore", "▁For e", "▁Fo re", "▁ Fore", "▁s ounds", "▁sound s", "▁P op", "▁Po p", "▁ Pop", "▁H T", "▁ HT", "li a", "l ia", "en ter", "ent er", "ente r", "▁hel ps", "▁help s", "ed y", "e dy", "ст вен", "ств ен", "стве н", "an ted", "ant ed", "ante d", "▁I ts", "▁It s", "▁St ep", "▁Ste p", "▁ Step", "I con", "▁EX PECT", "▁ EXPECT", "ial ized", "ialize d", "Pos t", "Po st", "P ost", "az e", "a ze", "▁Car ol", "▁Ca rol", "▁re q", "▁r eq", "▁ req", "▁crit ical", "▁critic al", "D S", "▁se at", "▁sea t", "ap ed", "ape d", "a ped", "▁up per", "▁upp er", "▁ upper", "▁S y", "▁ Sy", "▁ex plain", "▁expl ain", "▁' ./", "▁'. /", "ut ils", "util s", "uti ls", "poss ible", "▁d ont", "▁do nt", "▁don t", "H ost", "▁appro xim", "▁approx im", "As ync", "A sync", "▁g rab", "▁gr ab", "▁gra b", "▁s ources", "▁source s", "▁sour ces", "▁ sources", "▁M os", "▁Mo s", "▁Germ any", "▁German y", "▁Ger many", "▁r ub", "▁ru b", "▁ rub", "CH AN", "▁r ain", "▁ra in", "▁tr uly", "▁join ed", "▁jo ined", "▁< ?", "▁ ", "_ ->", "ag nost", "agn ost", "▁pro posed", "▁prop osed", "▁propos ed", "▁propose d", "▁G ame", "▁Ga me", "▁Gam e", "▁ Game", "▁eff orts", "▁effort s", "в я", "t c", "с к", "▁int ent", "▁inte nt", "▁ intent", "▁B re", "▁Br e", "is c", "i sc", "▁pro test", "▁prote st", "▁prot est", "▁h olds", "▁hold s", "▁hol ds", "▁ holds", "om etry", "ome try", "omet ry", "o metry", "▁H ave", "▁Ha ve", "▁Hav e", "▁ Have", "▁de tail", "▁det ail", "▁ detail", "▁WIT HOUT", "▁WITH OUT", "ye r", "y er", "▁K on", "▁Ko n", "▁not iced", "▁notice d", "▁require ments", "▁requirement s", "DE BUG", "ki ns", "kin s", "k ins", "▁S pan", "▁Sp an", "▁ Span", "▁c ars", "▁car s", "▁ca rs", "me ta", "met a", "m eta", "▁k il", "▁ki l", "▁ kil", "▁B ron", "▁Br on", "▁Bro n", "▁experience d", "▁experi enced", "▁re mind", "▁rem ind", "our se", "ours e", "▁W estern", "▁West ern", "▁Wes tern", "ter ed", "te red", "tere d", "t ered", "▁dev ices", "▁device s", "▁ devices", "▁pict ures", "▁picture s", "▁t ut", "▁tu t", "\" `", "▁im possible", "▁r ail", "▁ra il", "▁fe els", "▁feel s", "▁fee ls", "ic as", "ica s", "i cas", "il ling", "ill ing", "▁acc ident", "▁' @", "____ ____", "▁n otes", "▁not es", "▁no tes", "▁note s", "▁ notes", "om an", "oma n", "o man", "Par ser", "Parse r", "Pars er", "▁dis covered", "▁discover ed", "▁R oman", "▁Rom an", "▁Ro man", "▁Roma n", "▁bud get", "▁gu ide", "▁guid e", "ki ng", "kin g", "k ing", "▁in cred", "▁inc red", "▁incre d", "ol ar", "ola r", "o lar", "en den", "end en", "ende n", "Des c", "De sc", "D esc", "▁w ave", "▁wa ve", "▁ wave", "б ли", "ig t", "i gt", "▁re strict", "▁rest rict", "▁restr ict", "▁R et", "▁Re t", "▁ Ret", "▁m ac", "▁ma c", "▁ mac", "у р", "B S", "í s", "▁gener ation", "de m", "d em", "al o", "a lo", "б ра", "▁order ed", "▁ord ered", "▁ ordered", "dr op", "dro p", "d rop", "▁p p", "▁ pp", "▁Re view", "▁Rev iew", "▁ Review", "▁liter ally", "▁literal ly", "▁S ir", "▁Si r", "▁ Sir", "▁Y eah", "▁Ye ah", "▁ Yeah", "▁d ensity", "▁dens ity", "▁ density", "ri z", "r iz", "in de", "ind e", "i nde", "▁g ain", "▁ga in", "▁ gain", "▁p anel", "▁pan 
el", "▁pa nel", "▁ panel", "je t", "j et", "▁T imes", "▁Time s", "▁Tim es", "▁Ti mes", "▁ Times", "▁n ella", "▁ne lla", "▁nel la", "▁nell a", "▁pre viously", "▁previous ly", "▁prev iously", "point s", "Se nd", "S end", "▁B rown", "▁Br own", "▁Bro wn", "▁Brow n", "ea ch", "e ach", "▁tr igger", "▁ trigger", "ome times", "omet imes", "ic os", "ico s", "i cos", "G R", "Pane l", "Pan el", "P anel", "og en", "oge n", "o gen", "▁c m", "▁ cm", "ru ctions", "ruct ions", "ruction s", "▁k iss", "▁ki ss", "▁s olo", "▁so lo", "▁sol o", "▁f amous", "▁fam ous", "ra n", "r an", "п ро", "▁th ro", "▁thr o", "Gr aph", "G raph", "im it", "imi t", "i mit", "▁V alue", "▁Val ue", "▁ Value", "▁st arts", "▁start s", "▁star ts", "ip eline", "ipe line", "h d", "T C", "▁dis cussion", "▁discuss ion", "▁tr uck", "ak a", "a ka", "On ly", "▁E qu", "▁Eq u", "▁ Equ", "▁k ö", "▁ kö", "▁B es", "▁Be s", "▁crit ic", "▁pro pos", "▁prop os", "▁b att", "▁bat t", "▁ba tt", "▁S ection", "▁Se ction", "▁ Section", "Sh ow", "S how", "g p", "ST ATE", "STAT E", "PO ST", "POS T", "P OST", "▁N ord", "▁No rd", "▁Nor d", "▁in nov", "▁inn ov", "▁c rim", "▁cr im", "▁cri m", "▁ crim", "ax is", "a xis", "▁T urn", "▁Tur n", "▁Tu rn", "▁ Turn", "con n", "co nn", "Run time", "▁rem aining", "▁remain ing", "os ton", "ost on", "osto n", "o ston", "▁ Э", "▁window s", "▁wind ows", "▁ windows", "▁R oyal", "▁Ro yal", "▁Roy al", "▁v ide", "▁vi de", "▁vid e", "P P", "ch ron", "chr on", "▁s an", "▁sa n", "▁ san", "▁r ise", "▁ri se", "▁ris e", "▁ rise", "▁d elle", "▁de lle", "▁del le", "▁dell e", "▁D ur", "▁Du r", "▁rap id", "▁ra pid", "ce rt", "cer t", "c ert", "L A", "ed ge", "▁\\ ]", "▁ \\]", "▁en tered", "▁ent ered", "▁enter ed", "▁l aws", "▁la ws", "▁law s", "▁ph oto", "▁phot o", "▁ photo", "▁ap plications", "▁applic ations", "▁application s", "▁appl ications", "▁Ber lin", "▁ar rest", "▁arr est", "▁f ederal", "▁fed eral", "▁feder al", "▁R ussia", "▁Russ ia", "▁us ual", "▁r aw", "▁ra w", "▁ raw", "▁pi ù", "êt re", "ê tre", "JS ON", "J SON", "SI ON", "S ION", "xt ure", "ist ent", "iste nt", "isten t", "▁P ower", "▁Po wer", "▁Pow er", "▁ Power", "Bi t", "B it", "▁cap acity", "▁capac ity", "▁ capacity", "▁c ards", "▁car ds", "▁card s", "▁ cards", "UI D", "U ID", "im ents", "iment s", "imen ts", "i ments", "▁d ar", "▁da r", "▁ dar", "▁Ch icago", "▁comfort able", "ti p", "t ip", "ba s", "b as", "▁m u", "▁ mu", "▁en emy", "▁enem y", "ya n", "y an", "▁ф и", "▁ фи", "▁up dated", "▁update d", "▁ updated", "an go", "ang o", "E v", "E ffect", "os ing", "osi ng", "o sing", "ren ce", "r ence", "▁Con gress", "▁Cong ress", "▁d efe", "▁de fe", "▁def e", "▁i p", "▁ ip", "▁t out", "▁to ut", "▁tou t", "▁f reedom", "▁free dom", "▁freed om", "▁a o", "▁ ao", "▁There fore", "▁Ther efore", "Ed it", "E dit", "▁Vir gin", "RE E", "R EE", "ar go", "arg o", "▁D am", "▁Da m", "▁ Dam", "▁tra ffic", "▁traff ic", "ño s", "ñ os", "▁a lle", "▁al le", "▁all e", "▁ alle", "▁dep th", "▁ depth", "No w", "N ow", "▁s ides", "▁side s", "▁si des", "▁sid es", "▁го ди", "▁год и", "Des criptor", "▁art ikel", "▁n arrow", "▁narr ow", "▁nar row", "__ _", "_ __", "k w", "ut o", "u to", "▁Face book", "▁Fac ebook", "te gr", "t egr", "bo olean", "ni k", "n ik", "b d", "Tr ack", "Tra ck", "▁g ran", "▁gr an", "▁gra n", "res hold", "resh old", "ве т", "в ет", "wr ap", "w rap", "▁n oise", "▁no ise", "ig u", "i gu", "▁B on", "▁Bo n", "▁ Bon", "▁w y", "▁ wy", "lin ux", "ck s", "c ks", "▁f ans", "▁fa ns", "▁fan s", "▁m ach", "▁ma ch", "▁mac h", "▁p rices", "▁pr ices", "▁pri ces", "▁price s", "é v", "ou ts", "out 
s", "o uts", "stand ing", "stan ding", "▁c ateg", "▁cat eg", "; \\", "▁de cre", "▁dec re", "▁S aturday", "▁m enu", "▁me nu", "▁men u", "▁ menu", "▁N ov", "▁No v", "▁Y et", "▁Ye t", "▁та к", "lic he", "li che", "lich e", "l iche", "▁Ac adem", "▁commun ication", "us ing", "u sing", "▁Soc iety", "▁Soci ety", "▁n uc", "▁nu c", "pect ive", "or ial", "oria l", "ori al", "o rial", "▁af raid", "▁an imal", "▁anim al", "▁turn ing", "▁tur ning", "ds t", "d st", "math frak", "le rs", "ler s", "l ers", "▁l ots", "▁lo ts", "▁lot s", "▁ á", "▁T ra", "▁Tr a", "▁ Tra", "n p", "▁r ose", "▁ro se", "▁ rose", "▁G L", "▁ GL", "▁hel ping", "▁help ing", "▁w inter", "▁win ter", "▁ко м", "▁ ком", "Mo ck", "M ock", "▁invest ment", "Us e", "U se", "▁Can ad", "н д", "Co py", "Cop y", "C opy", "▁f ly", "▁fl y", "▁ fly", "SE R", "S ER", "▁F ar", "▁Fa r", "▁R os", "▁Ro s", "am il", "ami l", "a mil", "▁fight ing", "▁rel igious", "▁relig ious", "su per", "sup er", "s uper", "sc reen", "scr een", "s creen", "▁f urn", "▁fur n", "▁fu rn", "▁surpr ised", "▁surprise d", "▁re plied", "▁repl ied", "Act ivity", "Activ ity", "▁D own", "▁Do wn", "▁Dow n", "▁ Down", "▁in sert", "▁ins ert", "▁ insert", "▁O lymp", "▁point ed", "▁po inted", "▁C ard", "▁Car d", "▁Ca rd", "▁ Card", "dr iver", "drive r", "d river", "▁D a", "▁ Da", "! --", "ro ud", "rou d", "r oud", "un do", "und o", "▁m essages", "▁message s", "▁mess ages", "▁ messages", "▁P oint", "▁Po int", "▁ Point", "V M", "▁p lane", "▁pl ane", "▁plan e", "▁ plane", "x c", "▁telev ision", "▁tele vision", "▁televis ion", "ё н", "▁thous ands", "▁thousand s", "▁c ris", "▁cr is", "▁cri s", "▁de lay", "▁del ay", "▁ delay", "▁N ext", "▁Ne xt", "▁ Next", "▁no mbre", "▁nom bre", "▁t u", "▁ tu", "▁sk ip", "▁ski p", "▁ skip", "ro ad", "r oad", "istr ation", "▁t ur", "▁tu r", "▁De velop", "▁Devel op", "▁П а", "▁д ру", "▁др у", "▁wonder ful", "> &", "▁L iber", "▁Li ber", "▁Lib er", "▁s cope", "▁sc ope", "▁ scope", "▁man age", "▁ma nage", "▁d ass", "▁da ss", "▁das s", "▁re call", "▁rec all", "P M", "▁re levant", "▁relev ant", "▁E arth", "▁ка к", "▁a pr", "▁ap r", "▁A SS", "▁AS S", "▁ ASS", "ié n", "i én", "▁S H", "▁ SH", "oo m", "o om", "it et", "ite t", "no ne", "non e", "n one", "as i", "a si", "▁mot or", "▁mo tor", "▁S how", "▁Sh ow", "▁ Show", "n b", "▁fact ors", "▁fa ctors", "▁factor s", "▁f orest", "▁for est", "▁fore st", "▁fo rest", "▁в ре", "th m", "t hm", "▁m unicip", "▁turn s", "▁tur ns", "▁Div ision", "▁Di vision", "E C", "▁dis appe", "struct or", "stru ctor", "▁some where", "▁Afr ican", "▁Africa n", "▁Inst itute", "▁Institut e", "Gr id", "G rid", "▁te acher", "▁teach er", "▁tea cher", "ur ies", "uri es", "u ries", "▁respect ively", "▁respective ly", "▁S D", "▁ SD", "▁a live", "▁al ive", "▁ali ve", "▁p ou", "▁po u", "▁W ater", "▁Wat er", "▁Wa ter", "▁ Water", "ф е", "▁ch anging", "▁chang ing", "▁ changing", "▁after noon", "▁or ders", "▁order s", "▁ord ers", "▁ orders", "Re t", "R et", "Point er", "Po inter", "▁s av", "▁sa v", "er g", "e rg", "ok ed", "oke d", "o ked", "ess ions", "ession s", "▁F ire", "▁Fi re", "▁ Fire", "ar et", "are t", "a ret", "im m", "i mm", "▁des ire", "▁ що", "▁De sign", "▁Des ign", "▁ Design", "ut ure", "▁Off ice", "▁c md", "▁cm d", "▁ cmd", "▁e ating", "▁eat ing", "Net work", "▁r ough", "▁ro ugh", "▁rou gh", "▁ rough", "oper ator", "IG N", "I GN", "▁s ports", "▁sp orts", "▁sport s", "▁w eren", "▁we ren", "▁were n", "▁wer en", "▁n oted", "▁not ed", "▁no ted", "▁note d", "▁tw ice", "II I", "I II", "▁a nx", "▁an x", "▁e lim", "▁el im", "▁а в", "▁i o", "▁ io", "▁spe 
ech", "▁con du", "▁cond u", "el les", "ell es", "elle s", "id ade", "ida de", "idad e", "▁adv ance", "R I", "oc a", "o ca", "/ \\", "ap shot", "aps hot", "▁t ail", "▁ta il", "▁ tail", "mod els", "model s", "mode ls", "og y", "o gy", "▁J eff", "▁Je ff", "ir ation", "irat ion", "▁K ore", "▁Ko re", "▁Kor e", "▁le ads", "▁lead s", "ba t", "b at", "Ad apter", "c ategory", "ang ular", "angu lar", "▁s aved", "▁sa ved", "▁save d", "▁sav ed", "▁ saved", "▁un iform", "▁ uniform", "▁n é", "▁ né", "▁business es", "His t", "Hi st", "H ist", "▁а р", "▁ ар", "do main", "dom ain", "▁S i", "▁ Si", "ra ise", "rais e", "rai se", "r aise", "▁w arn", "▁war n", "▁wa rn", "▁ warn", "het ic", "h etic", "▁G ro", "▁Gr o", ")) .", ") ).", "} >", "з е", "▁Amaz on", "▁Or gan", "▁ Organ", "▁L ake", "▁La ke", "▁ag reement", "▁agree ment", "▁agre ement", "x a", "▁p erman", "▁per man", "▁perm an", "▁cont aining", "▁contain ing", "▁st range", "▁str ange", "▁strang e", "ст і", "с ті", "▁st upid", "▁spe aking", "▁speak ing", "▁Intern et", "▁Inter net", "pre fix", "pref ix", "p refix", "es c", "e sc", "As sert", "Ass ert", "pro te", "pr ote", "prot e", "p rote", "▁m anner", "▁man ner", "▁S z", "un te", "unt e", "u nte", "io t", "i ot", "Pro file", "ov en", "ove n", "o ven", "▁for med", "▁form ed", "▁forme d", "▁ formed", "▁l it", "▁li t", "▁ lit", "▁econom y", "▁ec onomy", "▁c z", "▁ cz", "wi d", "w id", "RE Q", "R EQ", "▁ch osen", "▁cho sen", "▁chose n", "▁P rodu", "▁Pro du", "▁ Produ", "os ter", "ost er", "o ster", "st ances", "stance s", "stan ces", "aw a", "a wa", "▁R en", "▁Re n", "▁conf irm", "▁ confirm", "▁Б о", "▁b illion", "▁bill ion", "▁d éc", "▁dé c", "ý ch", "▁ill ustr", "TI ES", "T IES", "▁P ub", "▁Pu b", "▁ Pub", "▁b an", "▁ba n", "▁ ban", "ad ed", "ade d", "a ded", "ah n", "a hn", "▁C ath", "▁Cat h", "▁Ca th", "no number", "non umber", "▁wor st", "▁М е", "▁sugg ested", "▁suggest ed", "st ats", "stat s", "sta ts", "▁c ant", "▁can t", "▁ca nt", "▁al ign", "▁ali gn", "▁ align", "kap pa", "k appa", "▁h en", "▁he n", "▁ hen", "▁in iti", "▁init i", "'] )", "' ])", "B I", "▁g arden", "▁gar den", "▁gard en", "▁sec ure", "▁secur e", "▁ secure", "▁\\ [", "▁ \\[", "hand ler", "handle r", "el li", "ell i", "e lli", "ld ots", "l dots", "se cut", "sec ut", "s ecut", "▁ext ended", "▁extend ed", "} -", "an ie", "ani e", "a nie", "▁F ind", "▁Fin d", "▁Fi nd", "▁ Find", "▁M useum", "▁Muse um", "▁C onne", "▁Con ne", "▁ Conne", "y y", "▁pass ion", "ak ers", "ake rs", "aker s", "a kers", "ah r", "a hr", "olog ies", "ologie s", "▁equ ation", "▁eq uation", "▁ equation", "▁occ asion", "▁occas ion", "Le t", "L et", "'] ['", "'][ '", "' ]['", "Pr int", "an es", "ane s", "a nes", "ie nte", "ient e", "ien te", "i ente", "▁T oday", "▁To day", "▁Tod ay", "LE CT", "L ECT", "▁A f", "▁ Af", ", ,", "▁Т а", "▁` ``", "▁`` `", "ev en", "eve n", "e ven", "si n", "s in", "ur er", "ure r", "u rer", "▁ °", "ot imes", "oti mes", "o times", "▁I O", "▁ IO", "▁po et", "() ));", "()) );", "())) ;", "( )));", "▁ −", "▁ad opt", "ph ere", "pher e", "p here", "# [", "▁c entre", "▁cent re", "ov es", "ove s", "o ves", "▁a ns", "▁an s", "▁ ans", "d p", "▁K ir", "▁Ki r", "▁applic able", "f p", "▁vis ual", "▁ok ay", "or o", "o ro", "▁opportun ities", "Re pository", "Rep ository", "▁l l", "▁ ll", "▁R od", "▁Ro d", "▁s hel", "▁sh el", "▁she l", "▁la unch", "▁con ven", "▁conv en", "▁conve n", "▁S pe", "▁Sp e", "▁ Spe", "Am er", "A mer", "▁c ette", "▁cet te", "Con d", "Co nd", "C ond", "de p", "d ep", "O wn", "▁h ook", "▁ho ok", "▁ hook", "▁d ict", "▁di ct", "▁dic t", 
"▁ dict", "▁Th ose", "▁f ellow", "▁fell ow", "▁fel low", "▁phil osoph", "▁philos oph", "vi n", "v in", "fer ences", "ference s", "ha v", "h av", "▁ad ding", "▁add ing", "▁ adding", "ivers e", "iver se", "i verse", "ga me", "g ame", "▁Bl ue", "▁ Blue", "▁c lin", "▁cl in", "not e", "no te", "n ote", "▁R am", "▁Ra m", "ме р", "м ер", "co very", "cover y", "cov ery", "c overy", "ñ a", "▁б и", "▁ би", "▁f ashion", "▁b roke", "▁br oke", "▁bro ke", "▁' \\", "▁ '\\", "▁re ader", "▁read er", "▁ reader", "но е", "но сти", "ност и", "▁pay ment", "▁ payment", "▁L ic", "▁Li c", "▁l ips", "▁li ps", "▁lip s", "▁ac adem", "▁M ot", "▁Mo t", "el ls", "ell s", "C HECK", "▁р у", "▁ ру", "▁M S", "▁ MS", "Ed itor", "Edit or", "▁z one", "▁zo ne", "▁ zone", "it ure", "itu re", "▁I T", "▁ IT", "run time", "▁pro ceed", "▁proc eed", "ло в", "л ов", "▁M aria", "▁Mar ia", "▁Ma ria", "ol ver", "olve r", "olv er", "▁Th anks", "▁Thank s", "▁ Thanks", "▁should n", "▁J oh", "▁Jo h", "▁Mod el", "▁Mo del", "▁Mode l", "▁ Model", "▁S ov", "▁So v", "! '", "D i", "▁c ancer", "▁can cer", "Id ent", "▁ex change", "il ler", "ill er", "ille r", "in f", "i nf", "LE N", "L EN", "() {", "( ){", "ag a", "a ga", "\"] ,", "\" ],", "u h", "▁K en", "▁Ke n", "▁ph otos", "▁phot os", "▁photo s", "▁t iny", "▁ti ny", "▁tin y", "▁ tiny", "▁g ent", "▁gen t", "▁ge nt", "▁ gent", "ü l", "▁T ake", "▁Ta ke", "▁Tak e", "▁ Take", "id el", "ide l", "i del", "ou ting", "out ing", "In ternal", "Inter nal", "Intern al", "▁c ells", "▁cell s", "▁cel ls", "ни м", "н им", "ha rd", "har d", "h ard", "▁T own", "▁To wn", "▁Tow n", "ob e", "o be", "pl ex", "ple x", "p lex", "те р", "т ер", "to ns", "ton s", "t ons", "▁conc entr", "▁concent r", "mo ck", "m ock", "v c", "á z", "▁Ch ampionship", "▁Champion ship", "▁Champions hip", "▁б е", "▁ бе", "? 
?", "ér i", "é ri", "al y", "a ly", "▁ Ц", "ier te", "iert e", "▁tot ally", "▁total ly", "▁A uf", "▁Au f", "▁our selves", "▁S elf", "▁Sel f", "▁ Self", "Form s", "For ms", "ight er", "igh ter", "▁is land", "fm t", "f mt", "▁r c", "▁ rc", "▁t ells", "▁tell s", "▁tel ls", "B B", "di t", "d it", "▁vari ables", "▁variable s", "▁ variables", "▁int ended", "▁intend ed", "iz ont", "izon t", "izo nt", "▁pl ays", "▁play s", "da m", "d am", "se q", "s eq", "▁S up", "▁Su p", "▁ Sup", "▁c ultural", "▁cult ural", "▁sc ream", "__ ,", "_ _,", "ci pl", "cip l", "Time out", "▁ ж", "or te", "ort e", "▁repl aced", "▁replace d", "E M", "▁ab andon", "▁Spec ial", "▁Spe cial", "▁ Special", "el len", "ell en", "elle n", "▁B ru", "▁Br u", "ir med", "irm ed", "T e", "ol t", "o lt", "j u", "Arg ument", "▁ne ut", "▁neu t", "▁ neut", "sc ape", "▁R ay", "▁Ra y", "▁ Ray", "▁Pol it", "▁Po lit", "▁crow d", "▁cro wd", "▁Window s", "▁Wind ows", "▁ Windows", "ie go", "ieg o", "i ego", "▁e scape", "▁esc ape", "▁ escape", "▁Ap ache", "sy nc", "syn c", "s ync", "eb en", "e ben", "if ies", "ifi es", "et her", "eth er", "ethe r", "e ther", "Met a", "Me ta", "M eta", "▁big gest", "Ga me", "G ame", "▁trans action", "▁ transaction", "En v", "E nv", "▁М о", "▁pl enty", "▁m el", "▁me l", "▁ mel", "п ре", "▁mot iv", "▁о р", "▁ ор", "or gan", "org an", "▁m ock", "▁mo ck", "▁ mock", "▁$ _", "▁ $_", "ен е", "е не", "▁N umber", "▁Num ber", "▁Nu mber", "▁ Number", "ck now", "c know", "▁Up date", "▁ Update", "ze ro", "zer o", "z ero", "▁sur prise", "▁surpr ise", "ce an", "pd f", "p df", "Gl obal", "▁att end", "▁f ond", "▁fo nd", "▁fon d", "▁under stood", "Na v", "N av", "▁M ic", "▁Mi c", "▁ Mic", "= $", "ok ing", "oki ng", "o king", "▁Stad ium", "Cl ose", "▁compet ition", "▁sold iers", "▁soldier s", "▁O P", "▁ OP", "ag ne", "agn e", "▁An ton", "▁Ant on", "Ma in", "M ain", "á k", "▁# [", "▁ #[", "▁Com mit", "▁Comm it", "▁ Commit", "py x", "▁e ast", "▁eas t", "▁ east", "▁Or der", "▁Ord er", "▁ Order", "F loat", "▁accept ed", "▁mon itor", "▁ monitor", "▁p ad", "▁pa d", "▁ pad", "on ic", "oni c", "o nic", "▁p ushed", "▁push ed", "▁re place", "▁rep lace", "▁repl ace", "▁ replace", "CR E", "C RE", "▁r ide", "▁ri de", "▁rid e", "▁ ride", "fo und", "f ound", "= %", "во й", "▁mat ches", "▁match es", "▁ matches", "▁L ie", "▁Li e", "▁exper iences", "▁experience s", "▁experi ences", "Po ol", "P ool", "up s", "u ps", "A V", "▁ex istence", "▁exist ence", "▁t hin", "▁th in", "▁m agn", "▁mag n", "▁ma gn", "CO MP", "COM P", "ho me", "hom e", "h ome", "▁n i", "▁ ni", "▁wur den", "▁wurde n", "ла в", "▁te eth", "▁S tan", "▁St an", "▁Sta n", "ap pro", "app ro", "an ny", "ann y", "if ts", "ift s", "▁un known", "▁ unknown", "▁h omes", "▁home s", "▁hom es", "▁ho mes", "▁ent ity", "▁ entity", "ci e", "c ie", "ле ние", "ia r", "i ar", "▁compl iance", "▁focus ed", "uz z", "u zz", "=\\ \"", "= \\\"", "com ponents", "component s", "Att r", "At tr", "all ery", "alle ry", "aller y", "▁ident ify", "O k", "pi e", "p ie", "▁St ill", "▁off ering", "▁offer ing", "▁bu sy", "▁bus y", "ct l", "c tl", "it ors", "itor s", "ito rs", "▁concern ed", "▁concer ned", "▁b rown", "▁br own", "▁bro wn", "▁brow n", "cl k", "Se lected", "Select ed", "▁B lock", "▁Bl ock", "▁Blo ck", "▁ Block", "▁e gy", "▁eg y", "▁ egy", "ic ing", "ici ng", "i cing", "▁U RL", "▁ URL", "▁t opic", "▁to pic", "▁top ic", "▁ topic", "▁Pro duct", "▁Produ ct", "▁ Product", "▁ч и", "▁ чи", "▁t rial", "▁tr ial", "▁tri al", "▁week end", "l u", "▁I V", "▁ IV", "▁E gy", "▁Eg y", "x C", "▁n ove", "▁no ve", "▁nov e", "▁l 
ett", "▁le tt", "▁let t", "▁ lett", "en ne", "enn e", "() ).", "()) .", "( )).", ".* *", ". **", "▁p romise", "▁prom ise", "el ection", "ele ction", "elect ion", "e lection", "Aut h", "A uth", "r v", "ri l", "r il", "▁con duct", "▁cond uct", "▁condu ct", "▁ conduct", "▁main tain", "▁maint ain", "▁bo at", "▁ boat", "▁op posite", "▁oppos ite", "sp in", "spi n", "s pin", "web pack", "an ta", "ant a", "▁o rient", "▁or ient", "▁ orient", "▁s uc", "▁su c", "▁ex ercise", "▁exerc ise", "▁eff icient", "▁ efficient", "▁trad ition", "▁z w", "▁ zw", "▁S ud", "▁Su d", "go ing", "▁P ier", "▁Pi er", "in v", "i nv", "ip es", "ipe s", "i pes", "ensure math", "▁con ver", "▁conv er", "▁conve r", "cre en", "cr een", "c reen", "▁t error", "▁ter ror", "▁terr or", "▁D ou", "▁Do u", "▁in valid", "▁ invalid", "ce ived", "ceive d", "▁A rab", "▁Ar ab", "▁w ire", "▁wir e", "▁ wire", "ap plication", "sh ift", "Gener ic", "▁P lan", "▁Pl an", "▁ Plan", "▁W all", "▁Wal l", "▁Wa ll", "▁ Wall", "▁direct ory", "▁director y", "▁ directory", "▁e gg", "▁eg g", "▁we alth", "▁ wealth", "ran dom", "rand om", "r andom", "att ribute", "▁h ide", "▁hi de", "▁hid e", "▁ hide", "Se rial", "Ser ial", "S erial", "ca m", "c am", "▁it al", "▁i tal", "▁ ital", "▁L ine", "▁Lin e", "▁Li ne", "▁ Line", "▁C HECK", "▁ CHECK", "ploy ment", "▁mass ive", "▁ex tract", "▁ext ract", "▁extra ct", "▁extr act", "▁ extract", "ch ain", "cha in", "Res t", "Re st", "R est", "▁L as", "▁La s", "▁b ear", "▁be ar", "▁ bear", "▁l inks", "▁link s", "▁lin ks", "▁ links", "▁new sp", "▁news p", "▁F C", "▁ FC", "Car d", "C ard", "ak s", "a ks", "▁v isible", "▁vis ible", "▁ visible", "▁M arc", "▁Mar c", "▁Ma rc", "▁B oston", "▁Bo ston", "▁Bos ton", "▁res erved", "▁reserv ed", "▁reserve d", "▁ro of", "lic enses", "license s", "d c", "▁In formation", "▁ Information", "▁w itness", "S k", "*) ,", "* ),", "Sc ope", "S cope", "'] ;", "' ];", "▁M ir", "▁Mi r", "▁ Mir", "ud ing", "udi ng", "u ding", "▁t rend", "▁tr end", "▁tre nd", "▁tren d", "re p", "r ep", "▁mus ical", "▁music al", "▁ne ither", "▁nei ther", "▁C reat", "▁Cre at", "▁ Creat", "▁pos itions", "▁position s", "▁posit ions", "L C", "rid ge", "r idge", "▁offic ers", "▁office rs", "▁officer s", "▁vi olence", "▁viol ence", "▁T em", "▁Te m", "▁S us", "▁Su s", "▁W ay", "▁Wa y", "Af ter", "A fter", "ac ket", "ack et", "▁S ou", "▁So u", "ac er", "ace r", "a cer", "| |", "▁re mark", "▁r emark", "▁rem ark", "▁ remark", "wa ter", "w ater", "n ě", "▁С а", "▁s ed", "▁se d", "▁ sed", "E ach", "▁phot ograph", "▁photo graph", "▁let ters", "▁letter s", "▁lett ers", "▁in vent", "▁inv ent", "▁M as", "▁Ma s", "▁s ongs", "▁son gs", "▁song s", "ó l", "ki nd", "kin d", "k ind", "▁N on", "▁No n", "▁ Non", "▁d ust", "▁du st", "** :", "* *:", "nab la", ".\" ,", ". 
\",", "Loc k", "Lo ck", "L ock", "▁Д о", "▁cl uster", "▁ cluster", "lo ss", "los s", "l oss", "▁ASS ERT", "▁ ASSERT", "fa ll", "f all", "▁re ject", "▁ reject", "▁Sp ring", "▁Spr ing", "▁ Spring", "▁wed ding", "▁g rav", "▁gr av", "▁gra v", "▁ grav", "ress ion", "r ession", "li mit", "lim it", "l imit", "RE S", "R ES", "] }", "▁l isted", "▁li sted", "▁list ed", "▁ listed", "▁T ele", "▁Te le", "▁Tel e", "▁ Tele", "hl ine", "h line", "▁ch ief", "▁chi ef", "ME M", "M EM", "да р", "д ар", "▁exp ensive", "tr ace", "tra ce", "▁R og", "▁Ro g", "▁C oll", "▁Col l", "▁Co ll", "▁ Coll", "▁Aut hor", "▁Auth or", "▁ Author", "▁B oard", "▁Bo ard", "▁ Board", "▁C apt", "▁Cap t", "▁Ca pt", "▁ Capt", "TE XT", "T EXT", "▁re con", "▁rec on", "es ta", "est a", "e sta", "▁proper ly", "▁& \\", "▁ &\\", "le ton", "let on", "l eton", "ik er", "ike r", "i ker", "G u", "▁K om", "▁Ko m", "oc o", "o co", "▁any more", "▁t aste", "▁ta ste", "▁tast e", "▁S anta", "▁San ta", "▁Sant a", "ge x", "g ex", "▁Se cret", "▁Sec ret", "▁ Secret", "▁tal ent", "▁tale nt", "▁mom ents", "▁moment s", "▁mo ments", "▁B a", "▁ex tr", "▁ext r", "▁ extr", "▁Com mission", "▁Comm ission", "▁mod ify", "▁Fig ure", "▁ Figure", "▁d omin", "▁do min", "▁dom in", "▁ domin", "▁p lot", "▁pl ot", "▁ plot", "en ger", "eng er", "enge r", "ut ch", "▁c ities", "▁cit ies", "▁ci ties", "▁n ut", "▁nu t", "▁ nut", "pro file", "prof ile", "▁S tat", "▁St at", "▁Sta t", "▁ Stat", "▁n odes", "▁no des", "▁node s", "▁nod es", "▁ nodes", "▁n s", "▁ ns", "ess ages", "essage s", "essa ges", "im pl", "imp l", "ic ker", "ick er", "i cker", "▁ex amples", "▁example s", "▁exam ples", "ab eth", "abe th", "abet h", "▁st ated", "▁stat ed", "▁state d", "▁sta ted", "fi re", "f ire", "bu l", "b ul", "▁danger ous", "▁P ay", "▁Pa y", "▁ Pay", "▁G re", "▁Gr e", "▁ Gre", "▁Mon day", "▁Mond ay", "es ome", "eso me", "e some", "ig an", "iga n", "i gan", "ru nd", "run d", "r und", "pr ise", "p rise", "fa il", "f ail", "▁N ever", "▁Ne ver", "▁Nev er", "▁ Never", "A v", "▁line ar", "▁lin ear", "▁ linear", "▁u l", "▁ ul", "WA R", "W AR", "ре н", "р ен", "▁A T", "▁ AT", "▁d op", "▁do p", "▁n ou", "▁no u", "Des t", "De st", "D est", "▁claim s", "en da", "end a", "▁c razy", "▁cr azy", "ge l", "g el", "og gle", "ogg le", "▁rep resentation", "▁represent ation", "in en", "ine n", "i nen", "▁altern ative", "▁alter native", "D M", "AB ILITY", "face s", "fa ces", "fac es", "f aces", "▁do ors", "▁door s", "▁ doors", "at iv", "ati v", "Lo ok", "L ook", "▁J SON", "▁JS ON", "▁ JSON", "▁appe arance", "▁appear ance", "б ря", "S QL", "▁sil ence", "ud o", "u do", "▁Direct or", "▁Dire ctor", "▁Dir ector", "State ment", "Stat ement", "se lected", "select ed", "hi gh", "h igh", "pr ime", "prim e", "▁ign ore", "▁ignor e", "▁ ignore", "▁col ors", "▁color s", "▁ colors", "us hing", "ush ing", "▁v irt", "▁vi rt", "▁vir t", "▁ virt", "man ager", "▁rem ote", "▁remot e", "▁ remote", "ł o", "sm all", "▁cr ime", "▁crim e", "▁cri me", "r b", "▁c reation", "▁cre ation", "▁creat ion", "▁f light", "▁fl ight", "▁S ign", "▁Si gn", "▁Sig n", "▁ Sign", "IL E", "I LE", "▁D O", "▁ DO", "com ment", "comm ent", "▁C ost", "▁Co st", "▁Cos t", "▁ Cost", "._ _", ". 
__", "▁C op", "▁Co p", "▁ Cop", "▁v om", "▁vo m", "▁Sc ience", "▁Sci ence", "ле ния", "oo p", "o op", "inter face", "▁WARRAN TIES", "▁P age", "▁Pa ge", "▁ Page", "** ****", "**** **", "*** ***", "ско м", "с ком", "TR UE", "▁re peated", "▁repe ated", "▁repeat ed", "▁е го", "ш о", "▁r oz", "▁ro z", "▁ roz", "P e", "▁IS BN", "ir ts", "irt s", "pos es", "po ses", "pose s", "p oses", "}) $", "} )$", "▁ І", "child ren", "ble s", "bl es", "b les", "EC T", "E CT", "▁i z", "▁ iz", "▁b uilder", "▁build er", "▁ builder", "▁M edia", "▁Med ia", "▁ Media", "ia t", "i at", "▁contr ast", "▁contra st", "” ,", "▁L ink", "▁Lin k", "▁ Link", "▁Educ ation", "▁j oint", "▁join t", "▁jo int", "▁ joint", "▁ex ternal", "▁extern al", "▁ external", "▁ро з", "▁b its", "▁bit s", "▁bi ts", "▁ bits", "FO RM", "FOR M", "F ORM", "er man", "erm an", "w p", "▁M ike", "▁Mi ke", "▁Mik e", "▁M aster", "▁Ma ster", "▁Mas ter", "▁ Master", "▁sen ior", "▁N av", "▁Na v", "▁ Nav", "▁record ed", "el ing", "eli ng", "elin g", "e ling", "es h", "e sh", "f x", "ка н", "к ан", "▁t all", "▁tal l", "▁ta ll", "▁John son", "▁s ono", "▁so no", "▁son o", "▁an che", "▁anc he", "▁anch e", "▁ anche", "ic ken", "ick en", "i cken", "lo op", "l oop", "ici ency", "empor ary", "▁D oes", "▁Do es", "▁ Does", "▁re lation", "▁rel ation", "▁ relation", "м ы", "wa s", "w as", "lo w", "l ow", "ich te", "icht e", "i chte", "▁J ones", "▁Jo nes", "▁Jon es", "▁bed room", "DI S", "D IS", "▁mag net", "▁magn et", "▁Eng ine", "▁ Engine", "▁feel ings", "▁feeling s", "▁fee lings", "G C", "▁t orn", "▁to rn", "▁tor n", "▁relationship s", "▁relation ships", "▁Р е", "▁p roud", "▁pro ud", "▁pr oud", "▁t we", "▁tw e", "ov al", "ova l", "o val", "▁w aste", "▁was te", "▁wa ste", "▁red uced", "▁redu ced", "▁reduce d", "il ton", "ilt on", "B P", "▁for got", "▁forg ot", "▁bod ies", "▁H aw", "▁Ha w", "la g", "l ag", "▁w ww", "▁ www", "do or", "d oor", "▁s ufficient", "▁suff icient", "▁doll ars", "▁dollar s", "Le n", "L en", "▁talk ed", "▁tal ked", "▁b ond", "▁bo nd", "▁bon d", "▁B or", "▁Bo r", "}} {", "} }{", "ro d", "r od", "Pass word", "qu are", "▁l ights", "▁light s", "▁ lights", "er en", "ere n", "e ren", "▁th irty", "N C", "▁T ODO", "▁TO DO", "▁res pond", "▁respon d", "▁resp ond", "▁ respond", "ки х", "dir ect", "di rect", "dire ct", "d irect", "a ção", "▁he av", "Med ia", "M edia", "ex it", "e xit", "L icense", "` .", "▁m ixed", "▁mix ed", "▁d esk", "▁de sk", "▁des k", "▁te aching", "▁teach ing", "▁tea ching", "▁m aj", "▁ma j", "▁n erv", "▁ne rv", "▁ner v", "in ations", "ination s", "type of", "▁co ast", "▁ж е", "▁ же", "▁be side", "▁bes ide", "um my", "umm y", "Do c", "D oc", "▁sche dule", "▁schedul e", "▁sched ule", "▁ schedule", "▁re cover", "▁rec over", "▁Fur ther", "▁ste el", "bo ot", "b oot", "▁Per haps", "▁с ъ", "▁O s", "▁ Os", "ri ck", "ric k", "r ick", "▁В и", "Supp ort", "Sup port", "S upport", "▁( _", "▁ (_", "ni l", "n il", "pi s", "p is", "x pected", "▁process ing", "▁proces sing", "▁ processing", "Bu ild", "B uild", "ar ian", "ari an", "aria n", "a rian", "▁i con", "▁ic on", "▁ icon", "▁C A", "▁ CA", "wi ck", "w ick", "= (", "▁al gorithm", "▁ algorithm", "▁You ng", "▁Man agement", "▁ Management", "▁anc ient", "▁anci ent", "но сть", "ност ь", "ot i", "o ti", "▁comb ination", "wor ld", "w orld", "n n", "▁d ram", "▁dr am", "en abled", "ena bled", "enable d", "A c", "C CESS", "ar ation", "▁bl ocks", "▁block s", "▁blo cks", "▁ blocks", "▁Ang eles", "▁Angel es", "▁Q ual", "▁Qu al", "▁ Qual", "▁suc ceed", "▁succ eed", "net work", "▁ob lig", "spring framework", "▁T 
re", "▁Tr e", "ok es", "oke s", "o kes", "mu n", "m un", "▁Net work", "▁ Network", "De l", "D el", "▁e state", "▁est ate", "▁esta te", "▁l iqu", "▁li qu", "▁p ob", "▁po b", "▁d ad", "▁da d", "▁dist inct", "▁T it", "▁Ti t", "▁L ear", "▁Le ar", "fer red", "and roid", "andro id", "▁sub sequ", "▁subs equ", "▁Flor ida", "sub set", "▁whis per", "Vo l", "V ol", "ul ous", "ulo us", "▁c rew", "▁cre w", "▁cr ew", "▁l ug", "▁lu g", "pi d", "p id", "oc ity", "oci ty", "o city", "sk b", "s kb", "▁t ea", "▁te a", "у н", "▁hon or", "▁ho nor", "▁I ns", "▁In s", "▁ Ins", "▁g ew", "▁ge w", "▁ gew", "Det ails", "Detail s", "ene ath", "e neath", "at ar", "ata r", "a tar", "▁_ {", "▁ _{", "am en", "ame n", "a men", "▁set up", "▁ setup", "Trans action", "▁bl ank", "▁ blank", "Fail ed", "F ailed", "jo b", "j ob", "▁p ret", "▁pre t", "▁pr et", "▁ pret", "ß e", "lo or", "l oor", "ř í", "nc ia", "n cia", "▁any where", "▁L ight", "▁Li ght", "▁ Light", "▁A k", "B D", "▁exc ited", "▁excit ed", "ag ers", "age rs", "ager s", "a gers", "▁w arning", "▁war ning", "▁warn ing", "▁ warning", "▁process es", "▁proces ses", "h u", "▁y outh", "▁you th", "▁yo uth", "▁d ogs", "▁do gs", "▁dog s", "▁o ct", "▁oc t", "▁ oct", "▁n ine", "▁ni ne", "▁nin e", "Write r", "Wr iter", "Writ er", "W riter", "gr id", "g rid", "▁import ance", "est ic", "▁care fully", "▁careful ly", "ma ster", "mas ter", "m aster", "▁dec isions", "▁decision s", "▁decis ions", "▁p in", "▁pi n", "▁ pin", "▁cr ack", "TE ST", "TES T", "T EST", "▁L ocal", "▁Loc al", "▁Lo cal", "▁ Local", "▁R ight", "▁ Right", "▁v ast", "▁va st", "▁vas t", "▁f aster", "▁fa ster", "▁fast er", "▁inst itut", "▁ann ual", "LA N", "L AN", "▁e pisode", "▁epis ode", "▁X V", "▁del ivery", "▁deliver y", "t l", "F P", "ci rc", "cir c", "▁typ ically", "▁typical ly", "ig o", "i go", "▁int el", "▁inte l", "▁ intel", "na t", "n at", "x b", "ст ро", "с тро", ") -", "▁B al", "▁Ba l", "▁ Bal", "▁J os", "▁Jo s", "▁g onna", "▁R est", "▁Re st", "▁Res t", "▁ Rest", "jo r", "j or", "on ia", "oni a", "o nia", "or ship", "ors hip", "ov ery", "ove ry", "over y", "o very", "LI NE", "LIN E", "L INE", "] :", "Que ue", "▁com pare", "▁comp are", "▁compar e", "▁ compare", "▁ap artment", "▁apart ment", "▁r ul", "▁ru l", "D r", "gen cy", "g ency", "▁ob viously", "▁obvious ly", "zi e", "z ie", "yc l", "y cl", "fort unately", "fortun ately", "fortunate ly", "▁ste pped", "▁step ped", "▁S eg", "▁Se g", "▁ Seg", "▁Wh ich", "▁ Which", "▁P C", "▁ PC", "▁a st", "▁as t", "▁ ast", "end or", "endo r", "▁per mission", "▁perm ission", "▁ permission", "CO L", "C OL", "▁T EST", "▁TE ST", "▁ TEST", "P ay", "ère s", "è res", "▁stud ied", "▁accom pl", "▁accomp l", "ro le", "rol e", "r ole", "Wh ere", "Whe re", "W here", "proto buf", "met adata", "meta data", "Jo b", "J ob", "▁F our", "▁Fou r", "▁Fo ur", "pl ements", "ple ments", "plement s", "dis able", "▁l oud", "▁lo ud", "▁lou d", "▁happ ening", "▁happen ing", "▁U sing", "▁Us ing", "▁ Using", "ro g", "r og", "▁depend s", "▁dep ends", "í m", "' \\", "▁t aught", "sh ared", "sha red", "share d", "▁att ributes", "▁attribute s", "▁attribut es", "▁ attributes", "▁A ction", "▁Act ion", "▁ Action", "▁d ess", "▁de ss", "▁des s", "▁ dess", "▁h ouses", "▁house s", "▁hous es", "▁ho uses", "▁re set", "▁res et", "▁ reset", "▁b ien", "▁bi en", "▁ex plicit", "▁expl icit", "LO W", "-> _", "▁P M", "▁ PM", "C ategory", "oi ce", "o ice", "in to", "int o", "▁m ail", "▁ma il", "▁mai l", "▁ mail", "▁author ity", "▁un able", "▁una ble", "file name", "fil ename", "é k", "ле й", "л ей", "▁s ector", "▁se ctor", 
"▁sec tor", "▁sect or", "ap point", "app oint", "▁h ang", "▁ha ng", "▁han g", "▁ hang", "▁c el", "▁ce l", "▁ cel", "rel ated", "it ate", "ita te", "itat e", "▁' <", "am ber", "amb er", "a mber", "▁c heap", "▁che ap", "▁en abled", "▁enable d", "▁ enabled", "▁di vision", "▁div ision", "▁divis ion", "An y", "A ny", "▁h ier", "▁hi er", "▁H ead", "▁He ad", "▁ Head", "nt ax", "n tax", "ud a", "u da", "▁lim itations", "▁limit ations", "▁limitation s", "▁st udio", "▁stud io", "med ia", "medi a", "m edia", "▁cir cle", "▁circ le", "▁ circle", "но ва", "нов а", "▁l aug", "▁la ug", "ac ts", "act s", "▁В о", "ó d", "pl ed", "ple d", "p led", "LO C", "L OC", "Ex pr", "Exp r", "> :", "▁pr és", "▁pré s", "▁ prés", "▁laugh ed", "▁laug hed", "▁Th ree", "▁ Three", "л ы", "▁en ds", "▁end s", "▁ ends", "▁fund ament", "▁in her", "▁ inher", "▁l iv", "▁li v", "▁ liv", "bi d", "b id", "▁respons ibility", "▁check ed", "▁ checked", "▁P ac", "▁Pa c", "▁f ault", "▁fa ult", "▁y ellow", "▁s alt", "▁sa lt", "▁sal t", "▁Franc isco", "▁Francis co", "▁ ^", "▁O N", "▁ ON", "▁beaut y", "y g", "▁A ff", "▁Af f", "▁ Aff", "▁E q", "▁ Eq", "▁mag ic", "▁hand ler", "▁handle r", "▁ handler", "x E", "▁numer ous", "▁numero us", "▁h ole", "▁hol e", "▁ho le", "▁ hole", "▁ro oms", "▁room s", "▁ rooms", "cc ión", "cció n", "c ción", "▁A rm", "▁Ar m", "▁ Arm", "per son", "pers on", "p erson", "▁build ings", "▁building s", "▁p late", "▁pl ate", "▁plat e", "ble d", "bl ed", "b led", "er rors", "err ors", "error s", "▁A gain", "▁Ag ain", "▁Def ault", "▁ Default", "▁H ard", "▁Har d", "▁Ha rd", "▁ Hard", "t ó", "hu s", "h us", "▁dim ension", "ial e", "ia le", "i ale", "▁M ult", "▁Mu lt", "▁Mul t", "▁ Mult", "▁Govern ment", "Fun c", "F unc", "▁b low", "▁bl ow", "▁blo w", "▁re ct", "▁r ect", "▁rec t", "▁ rect", "er ra", "err a", "conne ction", "connect ion", "conn ection", "▁pass ing", "▁pas sing", "ße n", "ß en", "ph as", "pha s", "p has", "ens ional", "ension al", "re cord", "rec ord", "co hol", "▁H arry", "▁Har ry", "▁Harr y", "izont al", "izon tal", "▁f inger", "▁fin ger", "▁fing er", "▁young er", "▁S C", "▁ SC", "oper ation", "B Y", "he im", "▁B ad", "▁Ba d", "▁ Bad", "▁st orm", "▁stor m", "▁sto rm", "▁ storm", "▁N at", "▁Na t", "▁bu ying", "▁buy ing", "▁S ometimes", "▁Some times", "▁С та", "es sed", "ess ed", "esse d", "▁da mn", "▁dam n", "▁m eg", "▁me g", "um es", "ume s", "u mes", "ün d", "ü nd", "т ра", "▁sil ver", "w d", "hid den", "h idden", "ar do", "ard o", "▁commun ities", "▁d iet", "▁di et", "▁die t", "ot ted", "ott ed", "otte d", "▁b at", "▁ba t", "▁ bat", "an cer", "ance r", "anc er", "▁f mt", "▁ fmt", "▁P en", "▁Pe n", "▁ Pen", "▁t il", "▁ti l", "▁ til", "En um", "E num", "PA TH", "P ATH", "▁mat ters", "▁matter s", "▁matt ers", "time out", "-- ----------", "---- --------", "-------- ----", "--- ---------", "----- -------", "---------- --", "------ ------", "--------- ---", "------- -----", "----------- -", "- -----------", "ka n", "k an", "▁Cor por", "=\" ../../", "=\"../ ../", "▁A le", "▁Al e", "hent ication", "hentic ation", "▁com plic", "▁comp lic", "▁compl ic", "▁Se curity", "▁Sec urity", "▁ Security", "OF F", "O FF", "R ad", "ap se", "aps e", "a pse", "▁d ance", "▁dan ce", "▁perm issions", "▁permission s", "▁war rant", "▁l ad", "▁la d", "▁ lad", "▁is ol", "▁i sol", "d l", "▁A u", "ye s", "y es", "▁t v", "▁ tv", "▁pro vider", "▁prov ider", "▁provide r", "▁ provider", "▁ter rible", "▁terr ible", "▁dep artment", "▁depart ment", "er al", "era l", "e ral", "▁implement ation", "S R", "▁h earing", "▁he aring", "▁hear ing", "▁K n", 
"F R", "t v", "▁d iss", "▁dis s", "▁di ss", "F UN", "▁dur ante", "▁durant e", "os is", "osi s", "o sis", "▁task s", "▁ tasks", "▁B lo", "▁Bl o", "▁ Blo", "во д", "▁br anch", "▁ branch", "▁polit ics", "▁E lle", "▁El le", "▁Ell e", "▁lead ership", "▁leader ship", "▁leaders hip", "ex pr", "exp r", "▁techn iques", "▁technique s", "pr ec", "pre c", "p rec", "Sig ma", "S igma", "im ately", "imate ly", "imat ely", "t k", "ach ment", "▁En ter", "▁Ent er", "▁ Enter", "▁cre ative", "▁creat ive", "▁з на", "▁ зна", "ap py", "app y", "un ched", "unch ed", "unc hed", "▁' ',", "▁'' ,", "on der", "ond er", "onde r", "o nder", "{ -", "NU M", "N UM", "▁n arr", "▁na rr", "▁nar r", "Mem ory", "▁win ning", "▁ winning", "▁F ollow", "▁Fol low", "▁ Follow", "*/ \r", "vis ion", "v ision", "res ents", "resent s", "zi one", "z ione", "▁l atter", "▁lat ter", "▁requ ests", "▁request s", "▁ requests", "▁m argin", "▁mar gin", "▁marg in", "▁ margin", "▁{ \"", "▁ {\"", "v ideo", "c n", "▁Im age", "▁ Image", "T im", "CON FIG", "CONF IG", "▁all owing", "▁allow ing", "▁comb ined", "▁combine d", "PU T", "P UT", "▁instance of", "ig in", "igi n", "i gin", "▁p ero", "▁per o", "▁pe ro", "▁' '", "▁ ''", "▁conf idence", "▁equ ivalent", "▁equival ent", "pa d", "p ad", "ef fect", "eff ect", "e ffect", "R X", "▁l ang", "▁la ng", "▁lan g", "▁ lang", "str ong", "▁b ridge", "▁br idge", "▁ bridge", "ay a", "a ya", "▁t reated", "▁tre ated", "▁treat ed", "▁f orth", "▁for th", "▁fort h", "S W", "▁account s", "▁P O", "▁ PO", "▁list ening", "▁listen ing", "Ro ute", "R oute", "() ))", "()) )", "( )))", "cp y", "c py", "▁re form", "▁ref orm", "▁g ate", "▁ga te", "▁ gate", "▁W alk", "▁Wal k", "▁ Walk", "▁some how", "t f", "▁l ayout", "▁la yout", "▁lay out", "▁ layout", "um in", "umi n", "u min", "▁consider ing", "▁consid ering", "▁pre mi", "▁pr emi", "▁prem i", "▁M om", "▁Mo m", "at han", "ath an", "a than", "Ge n", "G en", "▁plan et", "▁plane t", "am ples", "amp les", "ample s", "▁M O", "▁ MO", "sh op", "s hop", "▁prem ier", "▁premi er", "▁s impl", "▁sim pl", "▁s egu", "▁se gu", "▁seg u", "L Y", "Su m", "S um", "▁t ables", "▁table s", "▁tab les", "▁ta bles", "▁ tables", "sk a", "s ka", "▁ ž", "p d", "▁s ous", "▁so us", "▁sou s", "▁con ference", "▁confer ence", "▁D at", "▁Da t", "▁ Dat", "Sc roll", "▁stand ards", "▁standard s", "▁г ру", "es se", "ess e", "▁citiz ens", "▁citizen s", "▁occur red", "▁dem ocr", "▁demo cr", "▁e lev", "▁el ev", "▁ele v", "▁S em", "▁Se m", "▁ Sem", "ens us", "he aders", "head ers", "header s", "▁Ch ris", "im ento", "iment o", "imen to", "ko m", "k om", "Co r", "C or", "MI N", "M IN", "us her", "ush er", "Data base", "Dat abase", "▁f ormal", "▁for mal", "▁form al", "▁forma l", "ig ne", "ign e", "▁organ izations", "▁organiz ations", "▁organization s", "▁I re", "▁Ir e", "X ml", "и з", "▁p ray", "▁pr ay", "▁pra y", "▁b omb", "▁bo mb", "▁bom b", "▁m and", "▁man d", "▁ma nd", "▁ mand", "er ts", "ert s", "▁c lock", "▁cl ock", "▁clo ck", "▁ clock", "▁b uck", "▁bu ck", "ва ли", "вал и", "в али", "en sch", "ens ch", "▁v olt", "▁vo lt", "▁vol t", "▁ volt", "▁fil ms", "▁film s", "▁pl ants", "▁plan ts", "▁plant s", "in ode", "ino de", "i node", "Bo olean", "▁restaur ant", "ía n", "í an", "▁de but", "▁deb ut", "page s", "pa ges", "pag es", "p ages", "▁wor dt", "▁word t", "▁Б а", "▁great est", "(\" /", "▁c opyright", "▁copy right", "▁ copyright", "▁r it", "▁ri t", "▁ rit", "size of", "Tr ace", "Tra ce", "ue nt", "uen t", "u ent", "ту р", "т ур", "▁k o", "▁ ko", ": \\", "▁b igger", "▁big ger", "▁perfect ly", "ten ance", "MA SK", "M 
ASK", "r é", "▁e tt", "▁et t", "▁ ett", "▁n ose", "▁no se", "▁nos e", "▁c raft", "▁cr aft", "▁ craft", "it eral", "ite ral", "iter al", "▁discuss ed", "▁Jew ish", "C ap", "▁Un less", "▁Jack son", "Att ributes", "Attribute s", "Attrib utes", "▁l unch", "▁lun ch", "ö l", "at r", "a tr", "▁pay ing", "▁pa ying", "Par se", "Pars e", "P arse", "() \r", "( )\r", "la d", "l ad", "▁r are", "▁ra re", "▁[ ];", "▁[] ;", "▁ [];", "st one", "ston e", "sto ne", "▁u nc", "▁un c", "▁ unc", "▁def ense", "▁defens e", "} +", "▁Gl obal", "▁ Global", "▁Sov iet", "▁Austral ian", "▁Australia n", "▁g li", "▁gl i", "var iant", "vari ant", "▁R on", "▁Ro n", "▁lo an", "St ep", "Ste p", "me mber", "mem ber", "m ember", "Sc h", "S ch", "▁Commit tee", "▁s pending", "▁sp ending", "▁spend ing", "▁T ri", "▁Tr i", "▁ Tri", "▁J ournal", "▁Jour nal", "▁ Journal", "▁s ugar", "▁su gar", "▁sug ar", "el ly", "ell y", "HT ML", "▁ad vent", "▁adv ent", "win g", "wi ng", "w ing", "▁Wh ether", "▁Whe ther", "or ation", "▁N E", "▁ NE", "iv eness", "ive ness", "iven ess", "▁h av", "▁ha v", "▁ hav", "▁con scious", "▁ conscious", "ee n", "e en", "Sym bol", "S ymbol", "▁к у", "▁ ку", "Log ger", "▁L ittle", "▁Lit tle", "wide t", "wi det", "wid et", "oc ation", "pi n", "p in", "▁sym met", "▁A D", "▁ AD", "▁pos ts", "▁po sts", "▁post s", "▁ posts", "sh al", "sha l", "s hal", "▁Con f", "▁Co nf", "▁ Conf", "▁ch ose", "▁cho se", "ma l", "m al", "ul o", "u lo", "▁M ethod", "▁ Method", "▁miss ed", "▁mis sed", "Re move", "Rem ove", "Aut o", "A uto", "VAL UE", "th let", "▁For ce", "▁ Force", "p f", "▁ Я", "la te", "lat e", "l ate", "▁p ul", "▁pu l", "▁ pul", "Po p", "P op", "▁adv anced", "▁advance d", "air es", "ai res", "aire s", "a ires", "res sed", "ress ed", "resse d", "r essed", "AM E", "A ME", "be ll", "bel l", "b ell", "ac hing", "ach ing", "achi ng", "a ching", "i ć", "ec ho", "ech o", "e cho", "H S", "▁fun ny", "ри и", "▁e er", "▁ve get", "▁four th", "c f", "trans form", "▁g rown", "▁gr own", "▁grow n", "▁gro wn", "▁Mc C", "si te", "s ite", "▁b eneath", "▁be neath", "▁s hell", "▁sh ell", "▁she ll", "▁shel l", "▁ shell", "x d", "Pl ay", "P lay", "sh ort", "Ro le", "R ole", "▁relig ion", "in ator", "ina tor", "} <", "\" ><", "as p", "a sp", "aj o", "a jo", "ex ports", "exp orts", "export s", "▁N ode", "▁No de", "▁ Node", "▁j ako", "▁ja ko", "▁jak o", "▁y a", "▁ ya", "▁success fully", "▁successful ly", "▁friend ly", "▁ friendly", "buf f", "bu ff", "b uff", "DE FAULT", "▁pre gn", "▁preg n", "Requ ired", "Require d", "▁b inary", "▁bin ary", "▁ binary", "is ting", "ist ing", "isti ng", "▁st ared", "▁star ed", "▁stare d", "▁sta red", "▁circum stances", "▁х о", "▁ хо", "re i", "r ei", "▁Г о", "Trans form", "cn t", "c nt", "▁E xt", "▁Ex t", "▁ Ext", "re port", "rep ort", "repo rt", "VER SION", "▁an aly", "▁anal y", "▁ analy", "▁M arg", "▁Mar g", "▁Ma rg", "▁al leg", "▁all eg", "▁alle g", "build er", "b uilder", "To String", "La yer", "L ayer", "ís t", "í st", "Pro p", "Pr op", "P rop", "▁E mp", "▁Em p", "▁ Emp", "} ]", "▁s elling", "▁sell ing", "▁sel ling", "▁ selling", "▁que ue", "▁ queue", "▁ser iously", "▁serious ly", "▁L ead", "▁Le ad", "▁ Lead", "text it", "tex tit", "test ing", "tes ting", "▁П ре", "se curity", "sec urity", "ia ł", "i ał", "ú n", "ch ip", "chi p", "c hip", "▁c andidate", "▁candid ate", "▁min ister", "▁mini ster", "▁minist er", "▁ minister", "er ia", "eri a", "e ria", "▁H et", "▁He t", "ди н", "д ин", "▁Brit ain", "▁b arely", "▁bar ely", "▁bare ly", "▁s ty", "▁st y", "▁ sty", "▁Span ish", "▁V en", "▁Ve n", "time r", "ti mer", 
"tim er", "t imer", "кі в", "к ів", "▁document s", "▁doc uments", "(' .", "( '.", "▁d ebug", "▁de bug", "▁deb ug", "▁ debug", "▁cont ro", "▁contr o", "сто я", "▁j oy", "▁jo y", "▁ joy", "S n", "In v", "I nv", "▁pro tocol", "▁proto col", "▁prot ocol", "▁ protocol", "▁f aces", "▁face s", "▁fac es", "▁fa ces", "▁ faces", "▁Des pite", "se d", "s ed", "Con f", "Co nf", "AR G", "A RG", "▁e volution", "▁ev olution", "▁t od", "▁to d", "▁P romise", "▁Prom ise", "▁ Promise", "▁pos ted", "▁po sted", "▁post ed", "Per m", "Pe rm", "P erm", "be t", "b et", "An g", "A ng", "J ust", "▁r um", "▁ru m", "▁ rum", "la yer", "lay er", "l ayer", "▁beh avi", "▁behav i", "ip ping", "ipp ing", "ippi ng", "i pping", "▁d ynam", "▁dy nam", "▁dyn am", "▁sch eme", "▁sche me", "▁ scheme", "▁pro to", "▁pr oto", "▁prot o", "▁ proto", ") /", "Col lections", "Collection s", "Collect ions", "ri ev", "rie v", "r iev", "▁C lick", "▁Cl ick", "▁ Click", "▁u ns", "▁un s", "▁ uns", "wide tilde", "widet ilde", "▁remember ed", "г і", "in ates", "ina tes", "inate s", "▁incor por", "▁De scription", "▁Des cription", "▁ Description", "▁pre pare", "▁prep are", "▁prepar e", "▁ prepare", "▁F inal", "▁Fin al", "▁Fi nal", "▁ Final", "u ation", "▁Qu een", "▁Que en", "> ;", "▁autom atically", "▁automatic ally", "▁sh arp", "▁shar p", "▁sha rp", "▁me at", "at eur", "ate ur", "as tern", "ast ern", "aster n", "aste rn", "▁st uck", "ASS ERT", "▁pl anned", "▁plan ned", "do ts", "dot s", "d ots", "ook ie", "oo kie", "▁His tor", "▁Hist or", "▁re views", "▁review s", "IM P", "I MP", "▁answ ered", "▁answer ed", "To tal", "T otal", "▁s au", "▁sa u", "▁Me xico", "▁Mex ico", "contin ue", "▁App le", "▁Ap ple", "like ly", "lik ely", "з ва", "us ers", "use rs", "user s", "▁ident ified", "▁L ev", "▁Le v", "▁m ol", "▁mo l", "▁Is lam", "▁com mitted", "▁comm itted", "▁commit ted", "wr it", "w rit", "бе р", "б ер", "ri ft", "rif t", "r ift", "▁inter rupt", "▁ interrupt", "▁read only", "sch ema", "sche ma", "s chema", "S m", "D ouble", "az a", "a za", "▁H al", "▁Ha l", "▁ Hal", "Mo ve", "M ove", "▁S eries", "▁Se ries", "▁Ser ies", "▁Serie s", "▁ Series", "in line", "▁кото ры", "so c", "s oc", "▁t ent", "▁te nt", "▁ten t", "▁a mer", "▁am er", "▁ amer", "ak i", "a ki", "▁l ady", "▁la dy", "▁lad y", "▁t ired", "▁ti red", "▁tire d", "▁tir ed", "if i", "i fi", "▁m ême", "▁ même", "ou ver", "▁a side", "▁as ide", "Di d", "D id", "', \r", "' ,\r", "▁br inging", "▁bring ing", "Draw ing", "ar o", "a ro", "▁R h", "▁N az", "▁Na z", "es so", "ess o", "▁re action", "▁react ion", "mit ted", "mitt ed", "m itted", "▁abs olute", "▁absolut e", "▁ absolute", "ha ust", "haus t", "(( )", "( ()", "▁T ask", "▁Ta sk", "▁ Task", "ER S", "E RS", "▁^ {", "▁ ^{", "V D", "▁t one", "▁to ne", "▁ton e", "dis t", "di st", "d ist", "v s", "▁whe el", "▁ wheel", "▁administr ation", "▁admin istration", "▁inter ests", "▁interest s", "▁point er", "▁po inter", "▁ pointer", "▁en counter", "▁enc ounter", "ave r", "av er", "a ver", "▁n ord", "▁no rd", "▁nor d", "ke t", "k et", "▁b each", "▁be ach", "▁enjoy ed", "cont ains", "▁app end", "▁ap pend", "▁appe nd", "▁ append", "W ait", "▁s quad", "▁squ ad", "ze l", "z el", "▁med ium", "▁medi um", "▁ medium", "▁s ending", "▁send ing", "▁sen ding", "▁L ady", "▁La dy", "▁Lad y", "ç ões", "▁dest ination", "▁destin ation", "▁ destination", "ny ch", "n ych", "▁conf lict", "▁conflic t", "▁L y", "▁v ul", "▁vu l", "▁bas ically", "▁basic ally", "re ated", "reat ed", "reate d", "rea ted", "bl ack", "ug ins", "ugin s", "▁cal m", "▁ca lm", "ér ie", "éri e", "é rie", "ha r", "h 
ar", "ла н", "л ан", "▁С е", "w atch", "▁P ut", "▁Pu t", "▁ Put", "▁d ump", "▁du mp", "▁ dump", "ac her", "ach er", "ache r", "a cher", "sc roll", "scr oll", "▁cl aimed", "▁claim ed", "▁ claimed", "▁Cont rol", "▁ Control", "▁bl ind", "en ti", "ent i", "▁Ke ep", "▁ Keep", "▁Develop ment", "im ages", "image s", "ima ges", "imag es", "▁t ough", "▁to ugh", "▁tou gh", "ge bra", "geb ra", "▁se pt", "▁sep t", "he w", "h ew", "▁s kill", "▁sk ill", "▁ski ll", "▁ skill", "▁T ay", "▁Ta y", "▁k tó", "ow ner", "own er", "par e", "pa re", "p are", "▁f ee", "▁fe e", "▁ fee", "▁contin ues", "▁continue s", "▁continu es", "▁k an", "▁ka n", "▁ kan", "be s", "b es", "▁c ha", "▁ch a", "▁ cha", "ov o", "o vo", "▁N ight", "▁Ni ght", "ict ure", "sh ire", "s hire", "▁es say", "▁ess ay", "▁sup pose", "▁supp ose", "et ic", "eti c", "Ar t", "A rt", "ac on", "aco n", "a con", "ll a", "l la", "word s", "wor ds", "w ords", "▁compar ison", "▁B E", "▁ BE", "▁challeng es", "▁challenge s", "▁o l", "▁ ol", "cite p", "cit ep", "▁F oot", "▁Fo ot", "▁ Foot", "▁S uch", "▁Su ch", "▁ Such", "▁p apers", "▁paper s", "▁pa pers", "▁pap ers", "act iv", "qu er", "que r", "q uer", "т я", "▁Т о", "сь кий", "th ur", "do ne", "don e", "d one", "▁sh ock", "▁ded icated", "▁dedic ated", "▁cor respond", "▁correspon d", "Se cond", "Sec ond", "▁b ull", "▁bu ll", "▁bul l", "li fe", "lif e", "l ife", "ind ent", "inde nt", "inden t", "▁fig ures", "▁figure s", "▁And rew", "▁Andre w", "▁Andr ew", "is p", "i sp", "▁fav our", "зд а", "з да", "▁E lect", "▁El ect", "▁Ele ct", "F ull", "▁near by", "▁Reg ister", "▁ Register", "Sc ale", "Scal e", "ic ations", "ication s", "и н", "▁A M", "▁ AM", "pa ir", "p air", "▁pers pective", "▁n os", "▁no s", "▁ nos", "ap a", "a pa", "ost ał", "osta ł", "▁P ers", "▁Per s", "▁Pe rs", "▁ Pers", "ic er", "ice r", "i cer", "▁pl astic", "до в", "д ов", "ci ples", "cipl es", "cip les", "z ą", "cl os", "c los", "▁у ча", "▁ Á", "pl ugin", "plug in", "▁an gle", "▁ang le", "▁angl e", "▁ angle", "▁com mission", "▁comm ission", "▁fun ds", "▁fund s", "▁in du", "▁ind u", "▁d rawn", "▁dr awn", "▁draw n", "á m", "▁develop ing", "▁seg ment", "▁ segment", "is me", "ism e", "sc r", "s cr", "▁l ies", "▁li es", "▁lie s", "▁I L", "▁ IL", "▁a pi", "▁ap i", "▁ api", "Ext ension", "▁s cal", "▁sc al", "▁ scal", "inst all", "▁We ek", "▁ Week", "▁gen tle", "▁gent le", "▁Canad ian", "▁d ialog", "▁dial og", "▁dia log", "▁ dialog", "▁art icles", "▁article s", "▁artic les", "The me", "Th eme", "S M", "▁B ul", "▁Bu l", "▁ Bul", "▁l eur", "▁le ur", "▁s tom", "▁st om", "▁sto m", "Pl ugin", "▁по сле", "▁пос ле", "▁st ead", "▁ste ad", "▁ stead", "▁ ś", "ip her", "iph er", "i pher", "▁pr ze", "▁prz e", "▁d raft", "▁dr aft", "▁ draft", "bot tom", "b ottom", "▁{ };", "▁{} ;", "▁stay ed", "fe ature", "feat ure", "▁v ot", "▁vo t", "▁fab ric", "ç a", "(' #", "re a", "r ea", "▁re put", "▁rep ut", "▁C ir", "▁Ci r", "▁ Cir", "▁A L", "▁ AL", "▁assert Equals", "▁ assertEquals", "result s", "▁C ross", "▁Cr oss", "▁Cro ss", "▁ Cross", "urs day", "▁a udio", "▁aud io", "▁ audio", "▁g ap", "▁ga p", "▁stre ets", "▁street s", "▁scient ific", "pl atform", "▁a uss", "▁au ss", "▁aus s", "▁C ro", "▁Cr o", "▁part ial", "▁parti al", "▁ partial", "un c", "u nc", "▁cho ices", "▁choice s", "▁и ли", "pr ed", "pre d", "p red", "▁he ads", "▁head s", "▁ heads", "ter day", "▁N ick", "▁Nic k", "▁Ni ck", "▁we ird", "as ant", "asa nt", "▁represent ed", "▁п и", "▁ пи", "D P", "or ders", "ord ers", "order s", "cl ock", "c lock", "▁H o", "ar ters", "art ers", "arter s", "arte rs", "C md", "og 
a", "o ga", "Key s", "Ke ys", "Re port", "Rep ort", "Repo rt", "▁V ill", "▁Vi ll", "▁Vil l", "▁M u", "▁ Mu", "▁own ed", "▁ owned", "SU CCESS", "▁type of", "▁ typeof", "hd r", "h dr", "ua ble", "u able", "▁neighbor hood", "▁A P", "▁ AP", "▁result ing", "▁sh adow", "▁ shadow", "STR ING", "▁video s", "▁vide os", "ле ння", "лен ня", "ex pect", "exp ect", "▁Val ley", "▁Vall ey", "▁g oto", "▁go to", "▁got o", "▁ goto", "▁S her", "▁She r", "▁Sh er", "fr astr", "▁oper ating", "▁opera ting", "▁э то", "▁License d", "▁Lic ensed", "Var iable", "Vari able", "▁P R", "▁ PR", "▁H ans", "▁Ha ns", "▁Han s", "cl one", "▁G esch", "▁Ge sch", "▁Ges ch", "▁B and", "▁Ba nd", "▁Ban d", "▁ Band", "... .....", ".... ....", "..... ...", "ui ng", "u ing", "▁hundred s", "▁о к", "▁emot ional", "▁emotion al", "▁Ind ust", ") +", "▁Egy pt", "▁fr anç", "▁ š", "▁f asc", "▁fa sc", "on to", "ont o", "▁A dam", "▁Ad am", "▁l aid", "▁la id", "▁r ig", "▁ri g", "▁ rig", "▁det ailed", "▁detail ed", "▁im plements", "▁implement s", "▁impl ements", "▁univers ity", "▁H y", "▁ Hy", "▁g rid", "▁gr id", "▁gri d", "▁ grid", "▁reg ions", "▁region s", "St op", "S top", "▁s lot", "▁sl ot", "▁ slot", "▁ang ry", "▁- =", "▁wait ed", "▁wa ited", "Ver t", "V ert", "\": \"", "\" :\"", "▁e lem", "▁el em", "▁ele m", "▁ elem", "▁r ég", "▁ré g", "ow ed", "owe d", "o wed", "Mem ber", "Me mber", "M ember", "▁r atio", "▁rat io", "▁ ratio", "is en", "ise n", "i sen", "▁L em", "▁Le m", "ge ry", "ger y", "g ery", "▁c ream", "▁cre am", "▁ét ait", "▁ était", "▁g eb", "▁ge b", "▁ geb", "un ique", "uni que", "▁D eb", "▁De b", "▁f actory", "▁fact ory", "▁factor y", "▁ factory", "ż e", "d ialog", "▁Con fig", "▁Conf ig", "▁ Config", "Sy nc", "S ync", "an gers", "ang ers", "ange rs", "anger s", "▁gover ning", "▁govern ing", "▁H un", "▁Hu n", "Sp ace", "S pace", "▁j est", "▁je st", "ic ious", "ici ous", "icio us", "▁em phas", "▁emp has", "um ps", "ump s", "▁E sp", "▁Es p", "▁ Esp", "▁s ul", "▁su l", "▁histor ical", "▁historic al", "ij a", "i ja", "▁l ying", "▁ly ing", "▁ lying", "▁St eve", "▁Ste ve", "▁me asures", "▁measure s", "▁meas ures", "os to", "ost o", "o sto", "? 
”", "▁p ocket", "▁poc ket", "▁S at", "▁Sa t", "▁p itch", "▁pit ch", "▁n atur", "▁nat ur", "▁hum ans", "▁human s", "▁Sim on", "▁Si mon", "ad ores", "ado res", "ador es", "(\" \\", "( \"\\", "in king", "ink ing", "▁ex pos", "▁exp os", "mat erial", "mate rial", "m aterial", "▁app arently", "▁apparent ly", "▁appar ently", "▁C amb", "▁Cam b", "▁Ca mb", "▁B ox", "▁Bo x", "▁ Box", "▁s paces", "▁sp aces", "▁space s", "ex ists", "exist s", "▁act ing", "▁ac ting", "OR Y", "зо ва", "Go od", "G ood", "ien ne", "i enne", "▁William s", "▁f ruit", "▁fr uit", "▁fru it", "ie ra", "ier a", "i era", "▁L im", "▁Li m", "▁ Lim", "▁t rait", "▁tr ait", "▁tra it", "▁ trait", "▁art ists", "▁artist s", "▁ab sor", "▁abs or", "ra it", "rai t", "r ait", "LO AD", "▁mov ies", "▁movie s", "▁d ynamic", "▁dynam ic", "▁dyn amic", "▁ dynamic", "as ts", "ast s", "a sts", "▁In teger", "▁ Integer", "▁sm oke", "п і", "an gel", "ang el", "ange l", ">( \"", "> (\"", "▁in strument", "▁instr ument", "▁f uel", "▁fue l", "▁fu el", "но ї", "atal ogue", "atalog ue", "▁s erial", "▁se rial", "▁ser ial", "▁ serial", "File s", "Fil es", "Fi les", "F iles", "▁bath room", "il o", "i lo", "es to", "est o", "e sto", "▁p m", "▁ pm", "ent ials", "ential s", "enti als", "▁On line", "wh ite", "▁t ips", "▁tip s", "▁ti ps", "▁cap able", "Fi g", "F ig", "T V", "▁о н", "▁ он", "k é", "bit r", "bi tr", "b itr", "Map ping", "Ma pping", "M apping", "▁t ak", "▁ta k", "ю щи", "в ля", ")\" ,", ") \",", "▁K arl", "▁Kar l", "▁Ka rl", "▁H uman", "▁Hu man", "▁Hum an", "▁P ot", "▁Po t", "▁rep resents", "▁represent s", "▁cons istent", "▁consist ent", "_ (", "we n", "w en", "▁R ose", "▁Ro se", "▁Ros e", "la w", "l aw", "▁F ROM", "▁FR OM", "▁ FROM", "▁beg ins", "▁begin s", "▁e dit", "▁ed it", "▁ edit", "▁mount ain", "▁ch apter", "▁chap ter", "▁wonder ed", "▁indust rial", "▁M ajor", "▁Ma jor", "▁Maj or", "▁g es", "▁ge s", "▁ ges", "▁direct ed", "▁dire cted", "er os", "ero s", "e ros", "▁W ild", "▁Wil d", "▁Wi ld", "li ament", "lia ment", "Bo ok", "B ook", "user name", "ho t", "h ot", "▁n am", "▁na m", "▁ nam", "▁le ague", "br a", "b ra", "ко н", "к он", "▁T al", "▁Ta l", "▁В а", "▁ex ports", "▁exp orts", "▁export s", "▁ exports", "( @", "▁sh aring", "▁shar ing", "▁sha ring", "▁T ro", "▁Tr o", "ś ć", "ues day", "yl v", "y lv", "▁gu itar", "el en", "ele n", "e len", "Se lection", "Select ion", "S election", "▁conf ident", "ry pto", "rypt o", "▁h ors", "▁hor s", "▁ho rs", "ed itor", "edit or", "edi tor", "▁should ers", "▁shoulder s", "get Name", "en cing", "enc ing", "enci ng", "SE LECT", "SEL ECT", "в ши", "▁kind s", "▁kin ds", "▁W el", "▁We l", "▁pur poses", "▁purpose s", "Mat rix", "in valid", "▁own ers", "▁owner s", "▁ owners", "▁Rec ords", "▁Record s", "▁ Records", "▁Pro cess", "▁ Process", "▁c hat", "▁ch at", "▁cha t", "▁ chat", "▁D or", "▁Do r", "▁b in", "▁bi n", "▁ bin", "re dit", "red it", "r edit", "oi re", "oir e", "o ire", "▁T otal", "▁To tal", "▁Tot al", "▁ Total", "▁F amily", "▁Famil y", "▁ Family", "AR Y", "▁b read", "▁br ead", "▁bre ad", "▁ bread", "▁com pre", "▁comp re", "▁compr e", "▁sh oes", "▁shoe s", "▁r az", "▁ra z", "▁ raz", "▁tr ace", "▁tra ce", "▁ trace", "ne j", "n ej", "or ted", "ort ed", "orte d", "h n", "▁pro cedure", "▁proced ure", "pro perties", "pl ier", "▁h ero", "▁he ro", "▁her o", "▁ hero", "pan el", "pa nel", "p anel", "▁mark ed", "▁mar ked", "▁wor ried", "\\ |", "pt s", "p ts", "▁S upport", "▁Sup port", "▁Supp ort", "▁ Support", "▁ser ving", "▁serv ing", "F ail", "▁dis appoint", "▁Sc ot", "▁ple asure", "▁j udge", "▁jud ge", "▁judg e", 
"ze ich", "▁for ever", "▁fore ver", "▁Ze it", "uo us", "u ous", "in ent", "ine nt", "inen t", "i nent", "▁d w", "▁ dw", "▁w aren", "▁war en", "▁wa ren", "▁ware n", "▁fl ash", "▁ flash", "▁tro ops", "▁dr ugs", "▁dru gs", "▁drug s", "▁d iam", "▁di am", "▁dia m", ". ~", "im p", "i mp", "in ned", "inn ed", "▁E V", "▁ EV", "St ruct", "Str uct", "▁just ice", "▁offic ials", "▁official s", "ff ff", "fff f", "f fff", "▁Com mon", "▁Comm on", "▁ Common", "▁C at", "▁Ca t", "▁ Cat", "▁tom orrow", "▁é l", "▁ él", "Text ure", "Te xture", "qp oint", "q point", "▁F ried", "▁Fr ied", "▁T erm", "▁Te rm", "▁Ter m", "▁ Term", "pgf qpoint", "▁n em", "▁ne m", "▁ nem", "no rm", "nor m", "n orm", "▁hard ly", "od a", "o da", "ze ta", "zet a", "z eta", "em ic", "emi c", "e mic", "▁по лу", "▁пол у", "▁lo aded", "▁load ed", "▁ loaded", "ke s", "k es", "ci ó", "c ió", "▁f ool", "▁fo ol", "▁foo l", "▁t rick", "▁tr ick", "▁tri ck", "▁d st", "▁ds t", "▁ dst", "Fin d", "Fi nd", "F ind", "▁в се", "}} ,", "} },", "▁frame work", "▁ framework", "▁mer ely", "▁mere ly", "▁un ion", "▁ union", "▁Ed ward", "ri f", "r if", "Fl ag", "F lag", "▁cris is", "▁cri sis", "▁fin ite", "▁ finite", "▁l ol", "▁lo l", "▁K im", "▁Ki m", "на та", "sin ce", "s ince", "▁com pat", "▁comp at", "▁ compat", "▁p ert", "▁per t", "▁pe rt", "▁ pert", "ib ilities", "ibil ities", "▁tamb ién", "ib li", "▁t een", "▁te en", "▁ teen", "▁sym pt", "or al", "ora l", "o ral", "de rs", "der s", "d ers", "ot te", "ott e", "п ри", "▁J ane", "▁Jan e", "▁Ja ne", "▁original ly", "▁origin ally", "▁thro at", "ma g", "m ag", "su p", "s up", "un i", "u ni", "$ $", "▁L ibrary", "▁ Library", "▁att acks", "▁attack s", "in gen", "ing en", "inge n", "(' /", "▁h es", "▁he s", "▁ hes", "co in", "c oin", "oun ce", "▁Academ y", "MOD ULE", "is ms", "ism s", "▁A dv", "▁Ad v", "▁ Adv", "▁B ol", "▁Bo l", "▁inc ident", ")^ {", ") ^{", "▁b ij", "▁bi j", "▁R ome", "▁Rom e", "▁Ro me", "▁It aly", "▁Ital y", "ev ents", "event s", "even ts", "▁F ern", "▁Fe rn", "▁Fer n", "▁b er", "▁be r", "▁ ber", "▁sil ent", "▁p ier", "▁pie r", "▁pi er", "▁Y O", "▁pl ain", "▁ plain", "B as", "▁p ill", "▁pi ll", "▁pil l", "ra se", "ras e", "r ase", "▁car rying", "▁carry ing", "▁re sp", "▁r esp", "▁res p", "▁ resp", "ну ю", "▁typ ical", "Wrap per", "W rapper", "▁g au", "▁ga u", "▁chem ical", "▁h al", "▁ha l", "▁ hal", "th row", "Cl uster", "▁G ab", "▁Ga b", "▁G irl", "▁Gi rl", "▁Gir l", "qu ir", "▁A rg", "▁Ar g", "▁ Arg", "▁rel ief", "▁relie f", "▁reli ef", "▁В е", "d m", "▁fr ustr", "▁fru str", "\\ %", "▁st ores", "▁store s", "▁stor es", "▁sto res", "▁bott le", "▁bot tle", "▁L ew", "▁Le w", "tw o", "t wo", "st ad", "sta d", "▁che ek", "▁concern s", "▁concer ns", "▁help ful", "▁co verage", "▁cover age", "is i", "i si", "AD D", "A DD", "as ync", "asy nc", "a sync", "▁approxim ately", "▁approx imately", "▁approximate ly", "if fer", "iff er", "iffe r", "ho ok", "h ook", "▁e num", "▁en um", "▁ enum", "ov á", "o vá", "▁e vil", "▁ev il", "▁const antly", "▁constant ly", "ap ply", "app ly", "▁si è", "▁pract ices", "▁practice s", "▁te achers", "▁teach ers", "▁teacher s", "▁S n", "▁ Sn", "▁A wards", "▁Award s", "▁Aw ards", "▁sub stant", "▁subst ant", "▁$ .", "▁ $.", "d k", "▁m ob", "▁mo b", "▁ mob", "▁ing red", "ve re", "ver e", "v ere", "Mult i", "пе р", "п ер", "st al", "sta l", "s tal", "ya rd", "yar d", "y ard", "requ ired", "require d", "ve ment", "v ement", "▁int elligence", "▁intellig ence", "▁th inks", "▁think s", "▁thin ks", "▁person ally", "▁personal ly", "▁tr ained", "▁tra ined", "▁train ed", "▁ trained", "or 
ney", "orn ey", "orne y", ") \\", "an al", "ana l", "a nal", "Se ction", "S ection", "pl us", "ü t", "▁em bed", "▁emb ed", "▁ embed", "▁st rings", "▁str ings", "▁string s", "▁ strings", "Be fore", "B efore", "pro c", "pr oc", "p roc", "▁с по", "▁сп о", "▁ спо", "tr l", "t rl", "v r", "Back ground", "log ger", "ag raph", "agr aph", "agra ph", "a graph", "ie st", "ies t", "i est", "▁good s", "bat ch", "b atch", "▁opt ional", "▁option al", "▁ optional", "▁Tay lor", "▁recogn ize", "wal k", "w alk", "▁H it", "▁Hi t", "▁ Hit", "▁Eliz abeth", "} :", "▁care ful", "кра ї", "▁loc ations", "▁location s", "▁struct ures", "▁structure s", "▁d isk", "▁dis k", "▁di sk", "▁ disk", "▁sh ips", "▁ship s", "▁ ships", "▁s uo", "▁su o", "▁s owie", "▁so wie", "▁sow ie", "▁E ss", "▁Es s", "▁H ash", "▁Ha sh", "▁Has h", "▁ Hash", "▁reason able", "▁More over", "▁form ula", "▁C entre", "▁Cent re", "▁res idents", "▁resident s", "▁resid ents", "R S", "Id s", "I ds", "▁K now", "▁Kn ow", "▁t rib", "▁tr ib", "▁tri b", "▁r és", "▁ré s", "▁s table", "▁st able", "▁sta ble", "▁stab le", "▁ stable", "▁W ould", "▁Wo uld", "▁ Would", "▁break ing", "▁bre aking", "▁ breaking", "▁me al", "▁p hen", "▁ph en", "▁f el", "▁fe l", "▁ fel", "▁F red", "▁Fr ed", "▁Fre d", "Aut hor", "Auth or", "▁c apture", "▁capt ure", "▁ capture", "op ts", "opt s", "o pts", "▁every where", "▁s que", "▁squ e", "▁sq ue", "▁m oder", "▁mod er", "▁mo der", "▁mode r", "set up", "▁S upp", "▁Su pp", "▁Sup p", "▁ Supp", "▁when ever", "▁whe never", "{ (", "wa rt", "war t", "w art", "▁t oe", "▁to e", "Pre fix", "Pref ix", "P refix", "ho u", "h ou", "ga ge", "g age", "> \"", "▁f rag", "▁fr ag", "▁fra g", "▁ frag", "▁The orem", "mem ory", "▁cont ents", "▁content s", "▁conten ts", "▁ contents", "do cs", "doc s", "} '", "▁Ir ish", "The n", "Th en", "T hen", "aa ts", "aat s", "a ats", "Sa ve", "S ave", "▁a gency", "▁ag ency", "▁и ме", "▁им е", "до ва", "дов а", "▁F unction", "▁Fun ction", "▁ Function", "N N", "dest roy", "▁M essage", "▁Mess age", "▁ Message", "▁c ancel", "▁can cel", "▁ cancel", "▁super ior", "▁e c", "▁ ec", "▁liter ature", "▁P ART", "▁PA RT", "▁PAR T", "▁ PART", "I l", "▁C ab", "▁Ca b", "eng ine", "▁b asket", "▁bas ket", "wor th", "wort h", "w orth", "▁S el", "▁Se l", "f etch", "▁St adt", "▁Stad t", "▁Sta dt", "▁К и", "▁con j", "▁se iner", "▁sein er", "▁seine r", "▁sei ner", "▁conf irmed", "▁confirm ed", "▁Ar gent", "▁Arg ent", "am ar", "ama r", "a mar", "pgf path", "▁strugg le", "Pat tern", "▁M iddle", "it an", "ita n", "i tan", "▁m oon", "▁mo on", "or ough", "oro ugh", "o rough", "▁Cath olic", "▁str uck", "▁stru ck", "] ->", "▁we apon", "▁weap on", "▁su bst", "▁sub st", "▁subs t", "▁inst ructions", "▁instruct ions", "▁instruction s", "▁occ as", "▁oc cas", "prote cted", "▁L ess", "▁Le ss", "▁Les s", "▁ Less", "▁b atch", "▁bat ch", "▁ batch", "▁con tra", "▁cont ra", "▁contr a", "▁de ck", "▁dec k", "▁ deck", "▁ign ored", "▁ignore d", "▁ignor ed", "▁ref used", "▁refuse d", "tr igger", "▁crim inal", "G A", "ol ly", "oll y", "▁B ell", "▁Be ll", "▁Bel l", "▁ Ю", "for ward", "▁p refix", "▁pre fix", "▁pref ix", "▁ prefix", "▁im mediate", "▁immedi ate", "▁as signed", "▁ass igned", "▁assign ed", "▁e lected", "▁elect ed", "▁ele cted", "▁to night", "▁ton ight", "▁D ies", "▁Die s", "▁Di es", "▁B each", "▁Be ach", "▁pre ced", "▁prec ed", "ow ał", "owa ł", "▁gal ax", "▁log ic", "en za", "enz a", "▁Cap tain", "▁Capt ain", "▁H ay", "▁Ha y", "▁f acts", "▁fact s", "▁fac ts", "▁н и", "▁ ни", "t é", "▁s b", "▁ sb", "op ed", "ope d", "o ped", "▁com bat", "▁comb at", "▁expl 
ore", "▁explo re", "▁( -", "▁ (-", "Load er", "Lo ader", "▁Wil son", "▁l ocked", "▁loc ked", "▁lock ed", "▁ locked", ": )", "▁qu el", "▁que l", "▁q uel", "▁ quel", "▁Г а", "T y", "▁tem ps", "▁temp s", "▁g host", "Mat erial", "M aterial", "ER CHANT", "point er", "po inter", "ж да", "ah a", "a ha", "ul f", "▁sup plement", "▁supp lement", "▁d ismiss", "▁dis miss", "▁cl osing", "▁clos ing", "▁clo sing", "▁vul ner", "▁ap rès", "▁apr ès", "▁over whel", "ско е", "▁dis ag", "ac ia", "aci a", "a cia", "ou red", "our ed", "o ured", "ru ption", "rupt ion", "▁P S", "▁ PS", "End point", "Re al", "▁T ag", "▁Ta g", "▁ Tag", "▁st airs", "▁sta irs", "▁stair s", "▁ stairs", "ly n", "l yn", "▁e leg", "▁el eg", "▁ele g", "▁v eter", "▁ve ter", "▁vet er", "factor y", "fact ory", "f actory", "an ne", "ann e", "▁B at", "▁Ba t", "▁fr anc", "▁fra nc", "lu ng", "l ung", "▁\" '", ".' ,", ". ',", "▁C ountry", "▁Count ry", "▁Coun try", "▁ Country", "^{ [", "▁y ours", "▁you rs", "▁your s", "▁yo urs", "ail ability", "Cl ear", "C lear", "ät t", "ä tt", "пи с", "п ис", "▁j oke", "▁jo ke", "▁ann oy", "▁r ag", "▁ra g", "▁ rag", "var i", "va ri", "v ari", "ле кс", "лек с", "▁P sy", "il ty", "ilt y", "mo unt", "m ount", "▁c ual", "▁cu al", "▁s olar", "▁so lar", "▁sol ar", "}^ {(", "}^{ (", "} ^{(", "Sh ort", "▁tax es", "App end", "Ap pend", "Appe nd", "W in", "est yle", "esty le", "e style", "▁fac il", "▁fa cil", "в ро", "▁s ought", "▁sou ght", "▁b are", "▁bar e", "▁ba re", "▁re act", "▁ react", "ja r", "j ar", "MA C", "M AC", "lo v", "l ov", "wa rn", "war n", "w arn", "▁cru cial", "▁m useum", "ни ц", "▁K ent", "▁Ke nt", "▁Ken t", "May be", "▁b ike", "▁bi ke", "▁Add ress", "▁ Address", "X ML", "▁ad mitted", "▁adm itted", "▁admit ted", "▁$ (\\", "▁$( \\", "▁sp ell", "▁spe ll", "▁spel l", "▁ spell", "▁v ic", "▁vi c", "gr e", "g re", "▁p roc", "▁pro c", "▁pr oc", "▁ proc", "th eless", "the less", "▁N om", "▁No m", "▁R ail", "▁Ra il", "▁acc eler", "▁con vin", "▁conv in", "▁Pro perty", "▁ Property", "▁D A", "▁ DA", "▁cl ip", "▁ clip", "▁pl ugin", "▁plug in", "▁ plugin", "Lim it", "Li mit", "L imit", "view s", "br u", "b ru", "▁p ra", "▁pr a", "▁a k", "▁ ak", "▁e j", "▁ ej", "▁o pts", "▁op ts", "▁opt s", "▁ opts", "▁sl ip", "▁g ang", "▁gan g", "▁ga ng", "▁ gang", "as ted", "ast ed", "aste d", "a sted", "ual s", "ua ls", "u als", "▁d ying", "▁dy ing", "Col l", "Co ll", "C oll", "am men", "amm en", "▁Pol icy", "▁ Policy", "ERCHANT ABILITY", "▁Col lection", "▁Coll ection", "▁Collect ion", "▁ Collection", "▁v ec", "▁ve c", "▁ vec", "▁D ick", "▁Di ck", "st ud", "▁la yers", "▁lay ers", "▁layer s", "▁ layers", "▁t ied", "▁tie d", "▁ti ed", "}\\ \\", "} \\\\", "▁al ors", "▁j ou", "▁jo u", "▁ch icken", "▁chi cken", "▁chick en", "▁perman ent", "▁Every thing", "▁L ow", "▁Lo w", "▁ Low", "▁C ook", "▁Co ok", "▁pe ak", "▁PARTIC ULAR", "▁d ear", "▁de ar", "i č", "▁introdu ce", "▁caus ing", "▁ca using", "пи са", "пис а", "Bo und", "B ound", "hu nd", "h und", "mult i", "mul ti", "▁p are", "▁par e", "▁pa re", "▁ pare", "an nt", "ann t", "▁b reat", "▁bre at", "▁commit ment", "▁increasing ly", "ко й", "▁F riend", "▁ Friend", "▁stat istics", "▁statist ics", "▁Man ager", "▁ Manager", "pl icate", "plic ate", "plica te", "Cl oud", "ac i", "a ci", "▁Con ference", "Sp an", "S pan", "▁C EO", "▁CE O", "▁W ait", "▁Wa it", "▁ Wait", "▁O ber", "▁Ob er", "if ting", "ift ing", "im iento", "imi ento", "get Element", "▁g le", "▁gl e", "▁ gle", "ли я", "▁w ieder", "▁wie der", "▁inst ruction", "▁instr uction", "▁instruct ion", "gl y", "g ly", "▁bl ame", "▁list ade", 
"▁lista de", "▁a apt", "▁Lew is", "Fr agment", "▁g ear", "▁ge ar", "mi ll", "mil l", "m ill", "pro d", "pr od", "p rod", "▁bur ning", "▁burn ing", "є ться", "▁m é", "▁ mé", "è ne", "▁com plicated", "▁compl icated", "▁complic ated", "b h", "▁Just ice", "▁t ested", "▁te sted", "▁test ed", "▁st aring", "▁star ing", "▁sta ring", "▁surv ive", "▁surviv e", "▁c ous", "▁co us", "▁cou s", "▁r ib", "▁ri b", "▁ rib", "am l", "a ml", "▁T rust", "▁Tr ust", "▁Tru st", "▁c ad", "▁ca d", "▁T err", "▁Te rr", "▁Ter r", "▁m apping", "▁map ping", "▁ma pping", "▁ mapping", "▁tw elve", "▁g rant", "▁gr ant", "▁gran t", "▁gra nt", "▁th orough", "▁ Ü", "▁fol ks", "▁folk s", "▁Cont ent", "▁ Content", "▁child hood", "ck er", "cke r", "c ker", "с но", "RE CT", "REC T", "R ECT", "▁f inale", "▁fin ale", "▁final e", "▁sh ower", "▁show er", "ér ic", "éri c", "é ric", "▁s pat", "▁sp at", "od ge", "р ь", "▁p es", "▁pe s", "▁ pes", "ed a", "e da", "D b", "▁Ant onio", "▁Anton io", "▁eng aged", "▁engage d", "▁v ess", "▁ve ss", "val s", "va ls", "v als", "▁elect ronic", "▁electron ic", "▁electro nic", "le mma", "lem ma", "▁W y", "ma d", "m ad", "mer ge", "ap on", "a pon", "▁priv ile", "▁nov embre", "▁nove mbre", "▁S ports", "▁Sp orts", "▁Sport s", "wi ll", "w ill", "▁control s", "▁contr ols", "▁contro ls", "▁ controls", "▁c ategories", "▁categ ories", "▁categor ies", "▁ categories", "▁Georg ia", "ip edia", "▁A V", "▁ AV", "at ori", "ator i", "ato ri", "▁_ __", "▁__ _", "▁ ___", "▁ À", "▁R yan", "▁Ry an", "▁Char lie", "▁Charl ie", "▁и сто", "▁ис то", "▁em otion", "▁emot ion", "▁co oking", "▁cook ing", "▁attempt s", "▁FIT NESS", "ät er", "ä ter", "En able", "D T", "▁Ch ange", "▁ Change", "Asp Net", "▁г а", "▁ га", "▁ord inary", "▁ordin ary", "▁S QL", "▁ SQL", "pl ane", "plan e", "p lane", "% .", "▁Sum mer", "▁av ait", "up p", "u pp", "▁ill ness", "UI NT", "U INT", "> {", "▁zw ischen", "▁hard ware", "▁sound ed", "equ iv", "▁p iano", "▁pi ano", "▁pian o", "us et", "use t", "u set", "k n", "TR Y", "▁b ab", "▁ba b", "не н", "н ен", "▁rel iable", "▁reli able", "▁Bron nen", "▁St ore", "▁Sto re", "▁ Store", "A z", "▁» ,", "▁ »,", "St atic", "Stat ic", "d w", "gr een", "gre en", "g reen", "▁' ';", "▁'' ;", "li j", "l ij", "ev a", "e va", "ні й", "▁S yd", "▁Sy d", "in ois", "ino is", "con vert", "conv ert", "▁decl are", "▁declar e", "br es", "bre s", "b res", "IN K", "it led", "itle d", "▁acc ord", "▁ac cord", "▁m ars", "▁mar s", "▁ma rs", "Sequ ence", "zi p", "z ip", "▁Braz il", "▁meet ings", "▁meeting s", "▁accur acy", "▁M achine", "▁Mach ine", "▁ Machine", "▁aut or", "▁au tor", "▁auto r", "▁ autor", "▁a insi", "▁ain si", "Sim ple", "Res ources", "Re sources", "Resource s", "ка за", "каз а", "▁M P", "▁ MP", "th ey", "the y", "▁B ang", "▁Ba ng", "▁Ban g", "▁e ing", "▁ein g", "▁ eing", "ate ful", "▁Some thing", "▁Som ething", "▁ Something", "▁up set", "Hist ory", "Hi story", "dim ensional", "▁explan ation", "▁c iv", "▁ci v", "▁c once", "▁con ce", "▁conc e", "▁kö z", "▁prom ised", "▁promise d", "ж ду", "we d", "w ed", "For e", "F ore", "Am ount", "A mount", "ab b", "a bb", "▁cl othing", "▁cloth ing", "▁clo thing", "ли сь", "oe n", "o en", "▁Pr int", "▁Pri nt", "▁Prin t", "▁ Print", "▁s izes", "▁size s", "▁si zes", "▁b anks", "▁bank s", "▁ban ks", "ri bed", "rib ed", "ribe d", "▁' ../", "▁'. 
./", "FI X", "F IX", "▁H ug", "▁Hu g", "▁z n", "▁ zn", "▁I NT", "▁IN T", "▁ INT", "▁in stances", "▁inst ances", "▁instance s", "▁along side", "Name space", "Names pace", "▁re new", "▁ren ew", "▁a sc", "▁as c", "▁ asc", "▁w aves", "▁wa ves", "▁wave s", "▁p om", "▁po m", "D uration", "day s", "da ys", "d ays", "$ (", "▁grab bed", "▁sur gery", "▁surge ry", "▁surg ery", "▁re store", "▁rest ore", "▁ restore", "Norm al", "N ormal", "▁L eb", "▁Le b", "▁anal yt", "▁analy t", "Lite ral", "L iteral", "H A", "▁sh ares", "▁share s", "▁shar es", "▁sha res", "il let", "ill et", "ille t", "ol s", "o ls", "▁D og", "▁Do g", "or no", "orn o", "▁man ip", "ja v", "j av", "▁ess entially", "▁essential ly", "▁cas ual", "op l", "o pl", "▁ р", "▁S U", "▁ SU", "▁engine ering", "▁engineer ing", "▁Pr ime", "▁Pri me", "▁Prim e", "▁S W", "▁ SW", "▁re aching", "▁reach ing", "▁в ла", "▁Ро сси", "▁K re", "▁Kr e", "er ry", "err y", "▁op pon", "▁opp on", "pro gram", "pr ogram", "em per", "emp er", "is Empty", "▁U nit", "▁Un it", "▁ Unit", "IN TER", "INT ER", "INTE R", "et he", "eth e", "e the", "z d", "CU R", "C UR", "▁v m", "▁ vm", "con v", "co nv", "ro pol", "rop ol", "r opol", "▁Co ast", "▁S elect", "▁Se lect", "▁Sel ect", "▁ Select", "▁бы ла", "▁был а", "▁V e", "ow y", "o wy", "▁my th", "ce ptions", "ception s", "cept ions", "class es", "▁w orden", "▁wor den", "▁word en", "▁ass ault", "▁d ual", "▁du al", "OR K", "▁in ches", "▁inc hes", "▁inch es", "▁F A", "▁ FA", "▁St ation", "▁Stat ion", "▁ Station", "▁person ality", "▁personal ity", "▁s car", "▁sc ar", "▁ scar", "▁reg ime", "▁not en", "▁no ten", "▁note n", "▁r ural", "▁ru ral", "iz a", "i za", "Aud io", "A udio", "▁dis put", "▁disp ut", "▁a ver", "▁av er", "▁ave r", "▁ aver", "▁o bst", "▁ob st", "▁obs t", "▁Reg ion", "▁ Region", "ut f", "u tf", "▁C ass", "▁Cas s", "▁Ca ss", "hs pace", "h space", "▁sh ipping", "▁ship ping", "ik o", "i ko", "ic ked", "ick ed", "num er", "nu mer", "n umer", "д на", "ri el", "rie l", "r iel", "dis abled", "disable d", "op ol", "o pol", "lo oking", "look ing", "▁class ical", "▁classic al", "▁construct ed", "▁constru cted", "▁refer enties", "] +", "▁capt ured", "▁capture d", "▁min imal", "▁minim al", "▁mini mal", "▁s ock", "▁so ck", "▁soc k", "▁ sock", "fa ther", "f ather", "is ión", "isi ón", "▁equ ally", "▁equal ly", "▁eq ually", "▁red uction", "▁redu ction", "An t", "A nt", "ais on", "ai son", "a ison", "▁ar gue", "▁arg ue", "cir cle", "circ le", "▁t oler", "▁to ler", "}\" ,", "} \",", "▁prim arily", "us al", "usa l", "u sal", "▁al gebra", "▁gather ed", "▁Re member", "▁Rem ember", "_) ;", "_ );", "UT E", "U TE", "▁K it", "▁Ki t", "▁ Kit", "S y", "HE AD", "▁re cipe", "▁rec ipe", "▁recip e", "▁sc enario", "▁scen ario", "▁Follow ing", "VA R", "V AR", "▁y ard", "▁ya rd", "▁ yard", "▁st ad", "▁sta d", "▁ stad", "* (", "▁valid ate", "▁ validate", "DE X", "D EX", "▁commit tee", "▁t emporary", "▁tempor ary", "▁consequ ences", "▁consequence s", "▁égal ement", "кти в", "к тив", "▁r a", "▁ ra", "▁dis pl", "▁di spl", "▁disp l", "▁app s", "▁ap ps", "▁ apps", "▁Te il", "▁» .", "▁ ».", "▁adopt ed", "ten sor", "t ensor", "▁fe min", "▁fem in", "▁м ар", "▁ма р", "ло ги", "te ch", "t ech", "▁R ot", "▁Ro t", "▁ Rot", "▁kn ees", "▁kne es", "▁knee s", "ph ys", "phy s", "ow ej", "owe j", "▁Ox ford", "ан д", "а нд", "he ll", "hel l", "h ell", "ograf ia", "▁ex posed", "▁exp osed", "▁expos ed", "▁expose d", "kt op", "k top", "ob y", "o by", "lo wer", "low er", "l ower", "▁Se nate", "▁Sen ate", "▁s word", "▁sw ord", "▁swo rd", "Fl ow", "F low", "▁Un fortunately", 
"▁box es", "▁ boxes", "▁cu ando", "▁pi lot", "▁pil ot", "▁Al bum", "▁Alb um", "B al", "So rt", "S ort", "FI ELD", "▁de sert", "▁des ert", "CO MM", "COM M", "ro ns", "ron s", "r ons", "ad ows", "ado ws", "adow s", "▁l oyal", "▁lo yal", "▁as set", "▁ass et", "▁ asset", "▁m ud", "▁mu d", "ф а", "▁second ary", "▁ secondary", "▁А р", "▁c ul", "▁cu l", "▁ cul", "▁As ian", "▁Asia n", "▁stay ing", "▁sta ying", "▁data set", "▁dat aset", "▁ dataset", "▁U SE", "▁US E", "▁ USE", "▁l oves", "▁lo ves", "▁love s", "▁lov es", "▁vel ocity", "▁veloc ity", "á v", "▁purch ased", "▁purchase d", "SO C", "S OC", "▁compet itive", "▁Foot ball", "is ka", "isk a", "i ska", "▁kn ock", "st airs", "sta irs", "az y", "a zy", "▁v end", "▁ve nd", "▁ven d", "▁ar ts", "▁art s", "▁ arts", "▁B ras", "▁Br as", "▁Bra s", "ue la", "uel a", "u ela", "кт о", "к то", "tr im", "tri m", "t rim", "▁d irty", "▁dir ty", "▁dirt y", "▁ dirty", "▁webs ites", "▁website s", "▁In dep", "▁Ind ep", "▁с тра", "▁ст ра", "▁ стра", "s r", "▁t icket", "▁tick et", "at ile", "ati le", "a tile", "▁implement ed", "▁вре мя", "▁bo wl", "▁bow l", "DA TE", "DAT E", "D ATE", "▁al ter", "▁alt er", "▁ alter", "▁S pace", "▁Sp ace", "▁ Space", "▁accom pan", "▁accomp an", "or don", "ord on", "▁do ctors", "▁doctor s", "ist as", "ista s", "C ast", "до м", "CT L", "C TL", "ur ers", "ure rs", "urer s", "▁ingred ients", "▁calcul ated", "▁calculate d", "▁calc ulated", "▁le ather", "▁s ensitive", "▁sens itive", "▁sus pic", "▁susp ic", "st an", "sta n", "s tan", "▁an ni", "▁ann i", "▁ anni", "aw ait", "awa it", "a wait", "▁Fr anç", "▁Fran ç", "▁ab ort", "▁ abort", "▁Sp irit", "▁W alter", "▁Wal ter", "▁Walt er", "un kt", "unk t", "▁vert ical", "▁ vertical", "OR S", "O RS", "be st", "bes t", "b est", "▁Cl ient", "▁ Client", "it ated", "ita ted", "itate d", "itat ed", "▁в а", "▁ ва", "▁ Č", "▁v ille", "▁vi lle", "▁vill e", "▁vil le", "▁ ville", "▁di plom", "or ne", "orn e", "▁b ars", "▁bar s", "▁ba rs", "▁ bars", "U ri", "AP TER", "pon s", "po ns", "p ons", "ut z", "u tz", "Pro to", "Pr oto", "▁st ir", "▁ц е", "▁ це", "▁pr imer", "▁prim er", "▁pri mer", "▁prime r", "ig ible", "igi ble", "ex tra", "ext ra", "extr a", "▁Bo oks", "▁Book s", "▁B os", "▁Bo s", "▁E t", "▁W elt", "▁We lt", "▁Wel t", "▁Kore a", "▁Ko rea", "▁Kor ea", "ри то", "р ито", "▁v ibr", "▁vi br", "S elf", "line ar", "lin ear", "о б", "▁L ang", "▁La ng", "▁Lan g", "▁ Lang", "▁de eper", "▁deep er", "▁term in", "▁ter min", "▁ termin", "en schaft", "ens chaft", "ensch aft", "▁ро ці", "am med", "amm ed", "vis ible", "v isible", "▁IO Exception", "▁ IOException", "▁W ind", "▁Win d", "▁Wi nd", "us qu", "▁S top", "▁St op", "▁Sto p", "▁ Stop", "▁ор га", "IN VALID", "INVAL ID", "▁c ub", "▁cu b", "▁j ew", "▁je w", "▁cap tain", "▁capt ain", "з і", "ch unk", "apt ure", "ash board", "▁div ided", "▁divid ed", "▁divide d", "▁ext ensive", "▁extens ive", "▁s uffer", "▁suff er", "▁he ading", "▁head ing", "▁ heading", "cre ated", "create d", "creat ed", "c reated", "▁quiet ly", "▁n y", "▁ ny", "▁по л", "▁ пол", "\" +", "ik an", "ika n", "i kan", "▁design s", "z u", "}+ \\", "} +\\", "Oper ator", "▁Le mma", "▁Lem ma", "▁на у", "ac ji", "ло ве", "лов е", "Serv let", "▁K evin", "▁Ke vin", "st age", "sta ge", "b n", "text width", "fa iled", "fail ed", "f ailed", "▁St aff", "▁Sta ff", "▁e nem", "▁en em", "un de", "und e", "u nde", "ен ь", "е нь", "Pack et", "P acket", "▁A ls", "▁Al s", "ka r", "k ar", "][ '", "] ['", "ke d", "k ed", "Per s", "Pe rs", "P ers", ">: :", "> ::", "▁a rc", "▁ar c", "▁ arc", "▁sy nt", "▁syn t", "SP E", "S 
PE", "▁Д а", "▁M i", "▁M oh", "▁Mo h", "▁De ath", "b rowser", "▁D ave", "▁Dav e", "▁Da ve", "▁s ucc", "▁su cc", "▁suc c", "t oggle", "▁t ack", "▁ta ck", "Com ment", "Comm ent", "er on", "ero n", "e ron", "▁aware ness", "▁h ug", "▁cont emporary", "▁contempor ary", "ul ating", "ula ting", "▁T itle", "▁Tit le", "▁Ti tle", "▁ Title", "▁TH IS", "hav ior", "ran k", "r ank", "▁do zen", "▁che ese", "co ln", "col n", "▁rad ius", "▁radi us", "▁ radius", "▁dim ensions", "▁dimension s", "rodu ction", "rod uction", "▁ad ds", "▁add s", "▁house hold", "▁D avis", "▁Dav is", "▁Da vis", "pk g", "p kg", "{ $", "▁cas ino", "▁P ierre", "▁Pier re", "▁Pi erre", "▁object ive", "tr ain", "tra in", "▁Mich igan", "pay load", "▁r ug", "▁ru g", "▁ rug", "▁se vere", "▁sever e", "me an", "▁t oss", "▁to ss", "▁embar rass", "▁V ery", "▁Ver y", "▁Ve ry", "▁ Very", "▁appe al", "▁Com put", "▁Comp ut", "▁ Comput", "▁forgot ten", "▁k ernel", "▁ker nel", "▁ kernel", "▁car bon", "▁carb on", "f w", "▁С у", "▁Emp ire", "▁qu ote", "▁quot e", "▁ quote", "et z", "e tz", "▁m ini", "▁min i", "▁mi ni", "▁p ipe", "▁pi pe", "▁pip e", "▁ pipe", "▁n ous", "▁no us", "▁nou s", "▁M ove", "▁Mo ve", "▁Mov e", "▁ Move", "▁д у", "▁ ду", "▁nerv ous", "▁М ар", "▁Ма р", "* \r", "▁B ush", "▁Bus h", "▁Bu sh", "▁pe er", "▁ peer", "▁W rit", "▁Wr it", "▁ Writ", "▁satisf ied", "▁pull ing", "▁pul ling", "▁P ur", "▁Pu r", "▁M iller", "▁Mil ler", "▁Mill er", "▁F L", "▁ FL", "am az", "ama z", "a maz", "▁m ile", "▁mil e", "▁mi le", "▁ mile", "▁N eed", "▁Ne ed", "▁ Need", "▁sup plies", "▁a ño", "▁p ace", "▁pa ce", "▁pac e", "▁ pace", "▁Vict oria", "▁Victor ia", "▁ou ght", "▁ ought", "▁P layer", "▁Pl ayer", "▁Play er", "▁ Player", "agnost ic", "▁v iv", "▁vi v", "▁ viv", "▁Pat rick", "▁Patri ck", "▁ Š", "▁St ory", "▁Sto ry", "ac a", "a ca", "▁mount ains", "▁mountain s", "CL ASS", "▁fr agment", "▁frag ment", "▁ fragment", "▁sett lement", "▁settle ment", "▁Further more", "▁dr ivers", "▁dri vers", "▁driv ers", "▁drive rs", "▁driver s", "▁J u", "▁бы ли", "▁был и", "Row s", "Ro ws", "R ows", "▁im pression", "▁imp ression", "▁impress ion", "▁in fer", "▁inf er", "▁Ex pl", "▁Exp l", "ol ute", "olut e", "olu te", "ov an", "ova n", "o van", "ar ance", "aran ce", "CA P", "C AP", "▁en force", "▁B urn", "▁Bur n", "▁Bu rn", "Res et", "Re set", "mo ther", "mot her", "m other", "▁B attle", "▁Bat tle", "▁Batt le", "▁ Battle", "pad ding", "p adding", "ia te", "iat e", "i ate", "▁c ried", "▁cr ied", "▁cri ed", "A K", "un s", "u ns", "▁siè cle", "▁Cont in", "▁ Contin", "ban k", "b ank", "ju nit", "j unit", "object s", "Ro t", "R ot", "is sa", "iss a", "▁be gun", "▁beg un", "* -", "▁vis iting", "▁visit ing", "ж де", "▁target s", "▁tar gets", "▁L atin", "▁Lat in", "у т", "▁E sc", "▁Es c", "* ;", "ån g", "å ng", "▁( {", "▁ ({", "▁di agram", "▁dia gram", "Mod els", "Model s", "Mode ls", "▁part nership", "▁partner ship", "▁partners hip", "▁fr ån", "ul ty", "ult y", "Po d", "P od", "CA LL", "CAL L", "C ALL", "mod al", "mo dal", "si g", "s ig", "it zer", "itz er", "it el", "ite l", "▁convin ced", "▁convince d", "ab l", "a bl", "ст ве", "ств е", "▁c ot", "▁co t", "▁re peat", "▁repe at", "▁ repeat", "▁l ists", "▁li sts", "▁list s", "▁ lists", "so und", "s ound", "▁r oyal", "▁ro yal", "▁gr ace", "▁gra ce", "▁o raz", "▁or az", "Not ification", "pr ite", "prit e", "p rite", "▁arriv al", "▁arr ival", "an cell", "ance ll", "anc ell", "ancel l", "hent ic", "de code", "dec ode", "▁fant astic", "pro gress", "pro xy", "pr oxy", "z ő", "ke l", "k el", "▁conven ient", "aqu e", "a que", "ri et", "rie t", 
"r iet", "▁Dig ital", "io rs", "ior s", "i ors", "▁B udd", "▁Bud d", "▁Bu dd", "and ra", "ad dy", "add y", "▁o vers", "▁over s", "▁ov ers", "▁consum ers", "▁consumer s", "▁consume rs", "p n", "mo use", "m ouse", "▁B C", "▁ BC", "de g", "d eg", "pe rm", "per m", "p erm", "it és", "ité s", "▁и спо", "▁ис по", "he ast", "h east", "ho ur", "hou r", "h our", "PAR AM", "con scious", "▁w ing", "▁win g", "▁ wing", "▁atmos phere", "▁g ig", "▁gi g", "▁con tre", "▁cont re", "▁contr e", "▁dr ama", "▁dram a", "я т", "▁Fr ont", "▁Fro nt", "▁ Front", "▁philosoph y", "▁H art", "▁Har t", "▁Ha rt", "▁n urs", "▁nu rs", "▁nur s", "ur as", "ura s", "u ras", "▁T ru", "▁Tr u", "▁s ud", "▁su d", "▁per forming", "▁perform ing", "п ы", "▁conf used", "▁che cks", "▁check s", "am t", "a mt", "Ma ke", "M ake", "▁R O", "▁ RO", "▁d f", "▁ df", "iz ations", "ization s", "▁deg li", "▁architect ure", "Render er", "▁Л а", "▁p tr", "▁pt r", "▁ ptr", "▁die ser", "▁dies er", "▁diese r", "sub mit", "▁top ics", "▁topic s", "▁princip les", "▁prin ciples", "▁principle s", "var s", "va rs", "v ars", "so ck", "soc k", "s ock", "▁ton gue", "▁tong ue", "▁percent age", "▁S S", "▁ SS", "▁d ol", "▁do l", "▁r ice", "▁ri ce", "▁ric e", "▁ rice", "í o", "▁E astern", "▁East ern", "▁Easter n", "▁recogn ition", "▁E rn", "▁Er n", "▁U t", "▁ Ut", "▁c aut", "▁ca ut", "▁Cl oud", "▁ Cloud", "▁con version", "▁conv ersion", "▁convers ion", "▁Oh io", "▁M E", "▁ ME", "▁sur ely", "▁sure ly", "▁g ard", "▁gar d", "▁ga rd", "pu is", "p uis", "▁u rg", "▁ur g", "▁ urg", "im i", "i mi", "▁abs ence", "▁w inner", "▁win ner", "L anguage", "▁HT TP", "▁ HTTP", "w t", "▁trans lation", "▁transl ation", "▁ translation", "с с", "▁K ind", "▁Ki nd", "▁Kin d", "▁ Kind", "Tw o", "T wo", "▁Re volution", "▁Rev olution", "In sert", "Ins ert", "Ev ery", "E very", "or ient", "ori ent", "orie nt", "o rient", "▁т ра", "▁ тра", "▁emot ions", "▁emotion s", "det ails", "detail s", "▁f lu", "▁fl u", "▁ flu", "▁oper ate", "▁opera te", "A g", "un ning", "unn ing", "▁part ie", "▁parti e", "tr i", "t ri", "▁gold en", "▁gol den", "▁Б и", "▁found ation", "is ten", "ist en", "iste n", "i sten", "▁Car los", "▁Carl os", "▁Carlo s", "Child ren", "▁neigh b", "▁C art", "▁Car t", "▁Ca rt", "▁ Cart", "Be gin", "B egin", "г да", "▁s cheduled", "▁schedule d", "▁schedul ed", "' >", "▁observ ations", "▁observation s", "▁produ cer", "▁produce r", "ath ers", "ather s", "a thers", "но му", "ном у", "▁expect ations", "▁expectation s", "os o", "o so", "z h", "mu table", "mut able", "▁wr ites", "▁writ es", "▁write s", "▁p ushing", "▁push ing", "▁se ats", "▁sea ts", "▁seat s", "▁br east", "▁bre ast", "ap ing", "api ng", "a ping", "▁Sim ple", "▁ Simple", "▁s ocket", "▁soc ket", "▁sock et", "▁ socket", "▁sl ave", "▁sla ve", "▁ slave", "il ey", "ile y", "i ley", "▁ass istant", "▁assist ant", "▁t rim", "▁tr im", "▁tri m", "▁ trim", "▁land scape", "▁landsc ape", "▁associ ation", "qu ant", "▁Pal est", "▁swe at", "en gers", "eng ers", "enge rs", "enger s", "? _", "é p", "> .", "▁c urious", "▁cur ious", "▁Com ponent", "▁ Component", "▁re placement", "▁repl acement", "▁replace ment", "ра ль", "рал ь", "р аль", "▁Tr ack", "▁Tra ck", "▁ Track", "▁Re move", "▁Rem ove", "▁ Remove", "▁S ize", "▁Si ze", "▁ Size", "pe ror", "per or", "▁cal culate", "▁calcul ate", "▁calc ulate", "▁s essions", "▁session s", "▁type d", "▁typ ed", "▁ty ped", "▁sub mit", "▁subm it", "▁ submit", "!! !", "! 
!!", "▁part ition", "▁ partition", "ed ing", "edi ng", "e ding", "-- ---", "---- -", "--- --", "- ----", "az ioni", "azi oni", "lie ß", "on al", "ona l", "o nal", "▁sh ru", "▁shr u", "▁RE G", "▁ REG", "▁F ac", "▁Fa c", "▁ Fac", "config uration", "▁бы ло", "▁был о", "▁A mong", "▁Am ong", "__ );", "__) ;", "_ _);", "▁Ser ver", "▁Serv er", "▁ Server", "▁L OG", "▁LO G", "▁ LOG", "▁c and", "▁can d", "▁ca nd", "'] );", "']) ;", "' ]);", "go v", "g ov", "▁S ix", "▁Si x", "un defined", "und efined", "undef ined", "▁t y", "▁ ty", "as a", "a sa", "▁part icles", "▁partic les", "▁particle s", "▁parti cles", "▁ф ор", "▁фо р", "▁ фор", "` `", "T ube", "el and", "ela nd", "e land", "fo ld", "fol d", "f old", "og o", "o go", "▁appro aches", "▁approach es", "on da", "ond a", "ag r", "a gr", ", $", "▁{ {", "▁ {{", "▁Mod ern", "▁Mo dern", "▁Mode rn", "▁W inter", "▁Win ter", "av ailable", "▁L ud", "▁Lu d", "▁c asa", "▁cas a", "▁ca sa", "▁C ould", "▁Co uld", "▁Cou ld", "▁ Could", "▁fif teen", "▁pot entially", "▁potential ly", "^ ^", "▁se it", "▁sei t", "An imation", "Anim ation", "ко го", "к ого", "Z one", "el if", "eli f", "e lif", "▁acknow led", "▁own ership", "▁owner ship", "▁owners hip", "▁describ es", "▁describe s", "▁re verse", "▁revers e", "▁rever se", "▁ reverse", "▁con test", "▁cont est", "▁sc ored", "▁score d", "▁op posed", "▁opp osed", "▁oppos ed", "fl ex", "f lex", "kr e", "k re", "▁mer ge", "▁ merge", "▁cover ing", "▁cov ering", "▁hon estly", "▁honest ly", "▁M ess", "▁Me ss", "▁Mes s", "▁r arely", "▁rare ly", "▁incred ible", "it age", "ita ge", "▁vict ims", "▁victim s", "ны ми", "ным и", "w l", "iz za", "izz a", "i zza", "d n", "on de", "ond e", "o nde", "▁pr zy", "▁prz y", "▁HT ML", "▁ HTML", "▁pay load", "▁ payload", "Bu s", "B us", "us b", "u sb", "F n", "▁display ed", "▁o cean", "▁A venue", "▁Av enue", "ac ion", "aci on", "acio n", "gh an", "g han", "met ric", "m etric", "ie ties", "iet ies", "▁attract ive", "▁attr active", "▁f ö", "▁ fö", "C reat", "ver ter", "vert er", "▁Al ice", "▁Ali ce", "по л", "▁f raction", "▁fr action", "▁fra ction", "▁fract ion", "▁behav iour", "▁behavi our", "▁Jer sey", "▁re venue", "▁rev enue", "▁reven ue", "▁t res", "▁tr es", "▁tre s", "▁ tres", "IL D", "I LD", "▁É t", "▁s ync", "▁sy nc", "▁syn c", "▁ sync", "wi ch", "w ich", "▁anc est", "ъ т", "om o", "o mo", "▁I de", "▁Id e", "▁g ained", "▁gain ed", "▁ga ined", "▁moment um", "▁K o", "ie u", "i eu", "ie lt", "iel t", "i elt", "▁bon us", "▁te xture", "▁text ure", "▁ texture", "Mod al", "Mo dal", "NE XT", "N EXT", "▁годи не", "▁l anguages", "▁language s", "v t", "▁represent ing", "▁D ream", "▁Dre am", "cur r", "cu rr", "qu al", "q ual", "▁j s", "▁ js", "bu rn", "bur n", "b urn", "▁contribut ions", "▁contribution s", "▁r ic", "▁ri c", "▁ ric", "}- \\", "} -\\", "={ {", "= {{", "ca rt", "car t", "c art", "F B", "ju d", "j ud", "es p", "e sp", "▁elect ron", "▁electro n", "▁e ll", "▁el l", "▁ ell", "▁Run time", "▁ Runtime", "ac hel", "ach el", "ache l", "a chel", "\\ _", "we ek", "pack et", "p acket", "▁Secret ary", "▁Jahr hund", "▁th reshold", "▁ threshold", "ba ge", "bag e", "b age", "▁con cer", "▁conc er", "▁conce r", "▁b one", "▁bo ne", "▁bon e", "▁ bone", "▁Holly wood", "Cur sor", "C ursor", "▁aw arded", "▁award ed", "▁sum mary", "▁summar y", "▁ summary", "ag gio", "agg io", "aggi o", "▁st ell", "▁ste ll", "▁ stell", "▁f lesh", "▁fl esh", "▁fle sh", "P air", "▁A ge", "▁Ag e", "ing ton", "▁' .", "▁ '.", "as er", "ase r", "a ser", "ко ва", "ков а", "▁qu art", "▁q uart", "▁quar t", "ry ption", "rypt ion", "All oc", 
"Al loc", "ft en", "fte n", "f ten", "Oper and", "▁ind icated", "▁indic ated", "▁indicate d", "($ _", "( $_", "get String", "▁list ener", "▁listen er", "▁ listener", "sp ir", "spi r", ") _", "ve ns", "ven s", "v ens", "▁food s", "▁foo ds", "an za", "anz a", "te il", "DE SC", "▁n otion", "▁not ion", "▁em ployment", "▁employ ment", "▁s wing", "▁sw ing", "▁ swing", "nb sp", "▁p ounds", "▁pound s", "to ols", "tool s", "too ls", "t ools", "▁particip ate", "▁T ax", "▁Ta x", "▁ Tax", "▁с кла", "ap ol", "a pol", "▁f ost", "▁fo st", "▁fos t", "com pat", "comp at", "▁public ation", "▁rapid ly", "▁W is", "▁Wi s", "Event Listener", "▁prem ière", "▁premi ère", "us o", "u so", "ext end", "▁M ERCHANTABILITY", "UT F", "U TF", "▁exper iments", "▁experi ments", "▁experiment s", "sin gle", "sing le", "s ingle", "z k", "▁n aj", "▁na j", "}} }", "} }}", "Li n", "L in", "▁inter act", "▁inte ract", "▁c ms", "▁cm s", "▁Ro ger", "▁Rog er", "▁Р у", "> '", "com mit", "comm it", "ло сь", "▁out come", "▁h its", "▁hit s", "▁hi ts", "▁и м", "▁ им", "▁s park", "▁sp ark", "con sole", "cons ole", "▁ver w", "▁ve rw", "▁ка то", "agnost ics", "agnostic s", "▁s oci", "▁so ci", "▁soc i", "▁d ining", "▁di ning", "▁din ing", "▁t ech", "▁te ch", "▁ tech", "š t", "fo lio", "fol io", "ult ane", "ultan e", "кт ор", "кто р", "к тор", "▁B rand", "▁Br and", "▁Bra nd", "Jo in", "J oin", "▁и ю", "▁p ros", "▁pro s", "▁pr os", "▁pos it", "Pub lic", "P ublic", "AspNet Core", "▁S hop", "▁Sh op", "▁ Shop", "▁co inc", "▁coin c", "ни ем", "ние м", "▁re ferences", "▁refer ences", "▁reference s", "ab out", "name space", "names pace", "D L", "▁I R", "▁ IR", "▁c ada", "▁ca da", "▁cad a", "▁Jord an", "▁g ep", "▁ge p", "▁b ron", "▁br on", "▁bro n", "andid ate", "EX PECT", "EXP ECT", "am o", "a mo", "▁De utsch", "au c", "a uc", "▁ра йо", "▁рай о", "▁L abor", "▁La bor", "▁Lab or", "▁surround ed", "т ро", "▁n ome", "▁no me", "▁nom e", "▁under lying", "▁educ ational", "▁education al", "R IGHT", "CO UNT", "in ch", "inc h", "Ty p", "T yp", "um ph", "ump h", "fo ur", "f our", "Control s", "▁c p", "▁ cp", "co st", "cos t", "c ost", "▁mechan ism", "en ess", "ene ss", "enes s", "e ness", "é qu", "▁acqu ired", "▁acquire d", "▁f alls", "▁fall s", "▁fal ls", "▁ falls", "▁H ou", "▁Ho u", "▁L E", "▁ LE", "for Each", "▁ver tex", "▁vert ex", "▁ vertex", "▁I F", "▁ IF", "cur s", "cu rs", "c urs", "' =>", "те ри", "тер и", "▁S A", "▁ SA", "ri ers", "rie rs", "rier s", "r iers", "▁u w", "▁ uw", "▁m arks", "▁mark s", "▁mar ks", "▁ marks", "▁en erg", "▁ener g", "ho f", "h of", "ylv ania", "▁Al len", "▁All en", "um py", "ump y", "о го", "ст ву", "ств у", "vo ice", "v oice", "▁en gage", "▁eng age", "▁m ant", "▁man t", "▁ma nt", "or se", "ors e", "== =", "= ==", "▁impro vement", "▁improve ment", "Op t", "O pt", "▁arr ested", "▁arrest ed", "ти я", "▁с ле", "▁ сле", "it ched", "itch ed", "soc ket", "sock et", "s ocket", "▁c ycl", "▁cy cl", "▁ cycl", "▁S M", "▁ SM", "▁S ex", "▁Se x", "▁neut ral", "▁neutr al", "ва в", "▁J ess", "▁Je ss", "▁Jes s", "▁d ip", "▁di p", "▁op position", "▁oppos ition", "▁b orrow", "▁bor row", "с пе", "▁av ant", "ко ла", "▁t a", "▁ ta", "An im", "A nim", "▁G all", "▁Gal l", "▁Ga ll", "rg b", "r gb", "▁gu ilty", "▁guilt y", "▁bu ried", "▁bur ied", "▁g y", "▁ gy", "Init ial", "▁acc omp", "▁ac comp", "▁accom p", "▁breath ing", "▁breat hing", "ber ry", "b erry", "GR O", "G RO", "▁subsequ ent", "rou pe", "roup e", "ul pt", "ulp t", "t b", "▁ ä", "P i", "arg v", "▁M ust", "▁Mus t", "▁Mu st", "▁ Must", ": '", "sv g", "ou p", "o up", "▁prec isely", "▁precise 
ly", "▁T a", "re na", "ren a", "r ena", "▁f older", "▁fol der", "▁fold er", "▁ folder", "▁Ch annel", "▁ Channel", "▁re vol", "▁rev ol", "M iss", "ло м", "red dit", "adel ph", "▁dis crim", "▁disc rim", "▁a ve", "▁av e", "▁ ave", "pl eted", "ple ted", "plete d", "plet ed", "p leted", "▁g ently", "▁gent ly", "FF FF", "ro py", "rop y", "r opy", "▁d ial", "▁di al", "▁dia l", "Not Found", "▁\" [", "Hom e", "H ome", "on te", "ont e", "o nte", "▁re lie", "▁rel ie", "▁reli e", "▁Con text", "▁Cont ext", "▁ Context", "▁st ats", "▁stat s", "▁sta ts", "▁ stats", "▁E nergy", "oun ced", "ounce d", "▁gr ave", "▁grav e", "▁gra ve", "▁re cip", "▁rec ip", "ли н", "л ин", "bl og", "blo g", "b log", "▁na am", "▁w o", "▁ wo", "▁direct ions", "▁dire ctions", "▁direction s", "▁Lin coln", "! )", "un ci", "unc i", "ne q", "n eq", "Tag s", "T ags", "▁t um", "▁tu m", "▁s aving", "▁sa ving", "▁sav ing", "ail le", "ai lle", "a ille", "item ize", "▁F amil", "▁Fa mil", "ms m", "m sm", "ne ws", "new s", "FF ER", "F FER", "▁D ead", "▁De ad", "▁ Dead", "▁terr itory", "▁territor y", "▁territo ry", "▁K at", "▁Ka t", "oc ker", "ock er", "o cker", "in teger", "inte ger", "▁s ne", "▁sn e", "▁f ails", "▁fa ils", "▁fail s", "▁franç ais", "▁int roduction", "▁introdu ction", "▁G rant", "▁Gr ant", "▁Gran t", "▁Gra nt", "ycl e", "yc le", "y cle", "'] .", "' ].", "▁v ier", "▁vi er", "▁vie r", "▁ vier", "nat ive", "n ative", "▁K le", "▁Kl e", "qu ote", "quot e", "User s", "Us ers", "Use rs", "▁ad vis", "▁adv is", "▁g ym", "▁gy m", "▁prote in", "ا ل", "▁M ai", "▁Ma i", "▁prov iders", "▁provide rs", "▁provider s", "▁so il", "gu i", "g ui", "▁N ation", "▁Nat ion", "re ation", "reat ion", "▁T ab", "▁Ta b", "▁ Tab", "en sis", "ens is", "in as", "ina s", "i nas", "▁Scot land", "▁dis patch", "▁disp atch", "▁ dispatch", "un ion", "uni on", "▁b ere", "▁be re", "▁ber e", "▁ bere", "▁P ow", "▁Po w", "▁H ig", "▁Hi g", "▁stud ying", "▁study ing", "RE F", "R EF", "SS L", "S SL", "▁f right", "▁fr ight", "▁S ORT", "▁SO RT", "▁com pr", "▁comp r", "▁Mad rid", "row ned", "rown ed", "r owned", "op es", "ope s", "o pes", "pd ev", "p dev", "▁w ash", "▁was h", "▁wa sh", "▁' ../../", "▁'../ ../", "}} _", "} }_", "▁acc um", "rol ling", "roll ing", "▁N C", "▁ NC", "▁f iction", "▁fi ction", "▁fict ion", "ip t", "i pt", "conne cted", "connect ed", "lim its", "limit s", "▁l ap", "▁la p", "▁ lap", "▁where as", "pro m", "pr om", "p rom", "▁appoint ment", "Pro gram", "Pr ogram", "▁П ер", "▁Пе р", "na h", "n ah", "Valid ation", "ic ons", "ico ns", "icon s", "i cons", "äl l", "ä ll", "▁rad ical", "▁radi cal", "▁ex clusive", "▁excl usive", "▁exclus ive", "em ony", "emon y", "▁challeng ing", "▁m s", "▁ ms", "▁P rivate", "▁Priv ate", "▁ Private", "▁v ida", "▁vi da", "▁vid a", "▁дру ги", "▁camp us", "▁cam pus", "form s", "for ms", "д но", "pl aat", "bs t", "b st", "AT ED", "ATE D", "▁Ab stract", "▁Abs tract", "▁ Abstract", "▁int ense", "▁intens e", "▁L td", "▁contro vers", "ó g", "▁s ă", "▁land ing", "▁lan ding", "! 
=", "▁sc enes", "▁scene s", "▁scen es", "▁Ch ap", "▁Cha p", "▁sp oken", "▁spoke n", "▁spo ken", "cre d", "cr ed", "c red", "▁p ride", "▁pr ide", "▁pri de", "qu et", "que t", "▁m eter", "▁me ter", "▁met er", "▁de utsch", "uu m", "u um", "▁b less", "▁bl ess", "▁ble ss", "▁H ann", "▁Ha nn", "▁Han n", "▁input s", "▁ inputs", "▁R ow", "▁Ro w", "▁ Row", "▁with draw", "▁withd raw", "P al", "ac les", "acle s", "acl es", "a cles", "as sets", "ass ets", "asse ts", "asset s", "▁v l", "▁ vl", "ве де", "вед е", "▁G ot", "▁Go t", "▁air port", "win d", "wi nd", "w ind", "▁Columb ia", "▁ch ocolate", "▁h ö", "▁ hö", "▁al arm", "FT WARE", "▁J ay", "▁Ja y", "▁s ake", "▁sa ke", "▁reg istration", "▁registr ation", "vi d", "v id", "▁l ake", "▁la ke", "▁user name", "▁ username", "▁h ack", "▁ha ck", "index Of", "c x", "▁f estival", "▁fest ival", "▁club s", "case s", "ca ses", "cas es", "c ases", "CT RL", "]; \r", "] ;\r", "▁A ud", "▁Au d", "▁ Aud", "▁prim era", "▁prime ra", "▁primer a", "ва т", "в ат", "▁brill iant", "ut her", "uth er", "u ther", "▁difficult y", "it als", "ital s", "ita ls", "▁sc ores", "▁score s", "▁pol ít", "data base", "dat abase", "as ka", "ask a", "a ska", "▁## ####", "▁### ###", "▁#### ##", "▁##### #", "▁a cid", "▁ac id", "at on", "ato n", "a ton", "at omic", "ato mic", "atom ic", "fr eq", "fre q", "f req", "▁WARRAN TY", "▁report ing", ".) ,", ". ),", "▁n ights", "▁night s", "▁program me", ")} {", ") }{", "xi c", "x ic", "▁s po", "▁sp o", "line d", "li ned", "lin ed", "l ined", "qu arters", "er ee", "ere e", "e ree", "mer s", "me rs", "m ers", "▁s erves", "▁ser ves", "▁serv es", "▁serve s", "co w", "c ow", "ль ко", "en so", "ens o", "▁env iron", "▁ environ", "Li ke", "L ike", "an che", "anc he", "anch e", "▁cr ash", "▁K ap", "▁Ka p", "no indent", "Con n", "Co nn", "▁ав то", "▁in frastructure", "IM E", "I ME", "▁R oom", "▁Ro om", "▁ Room", "ne ed", "n eed", "or er", "ore r", "o rer", "▁D est", "▁De st", "▁Des t", "▁ Dest", "▁D omin", "▁Do min", "▁Dom in", "ather ine", "▁Syd ney", "▁g auge", "▁gau ge", "▁ga uge", "▁j et", "▁je t", "▁ jet", "b ably", "▁comm only", "▁common ly", "▁st ations", "▁stat ions", "▁station s", "ia h", "i ah", "n l", "ж у", "et en", "ete n", "e ten", "_ )", "ia c", "i ac", "am os", "amo s", "a mos", "ne ment", "nem ent", "n ement", "ko n", "k on", "Inter val", "▁cab in", "▁ca bin", "▁e g", "▁ eg", "▁sh ots", "▁shot s", "▁ shots", "▁A rea", "▁Ar ea", "▁Are a", "▁ Area", "sm ith", "param eter", "' }", "▁h em", "▁he m", "▁ hem", "▁s inging", "▁sing ing", "▁sin ging", "▁access ible", "▁P rin", "▁Pr in", "▁Pri n", "opt ional", "option al", "an cial", "anc ial", "ancia l", "sh ips", "ship s", "▁can vas", "▁ canvas", "sp e", "s pe", "▁address es", "▁x ml", "▁ xml", "▁' \"", "▁ '\"", "▁k ar", "▁ka r", "▁ kar", "ö ff", "▁a ges", "▁ag es", "▁age s", "▁ ages", "ё р", "zi ng", "zin g", "z ing", "▁ö ver", "▁C lean", "▁Cle an", "▁ Clean", "▁Sil ver", "▁о со", "▁ос о", "he alth", "Al i", "A li", "▁t s", "▁ ts", "at ern", "ate rn", "ater n", "a tern", "▁cho osing", "▁bur ned", "▁burn ed", "br id", "b rid", "ro oms", "room s", "öt t", "ö tt", "K ERN", "▁d ish", "▁dis h", "▁di sh", "S a", "De tail", "Det ail", "▁H ind", "▁Hi nd", "▁D ans", "▁Dan s", "▁Da ns", "i ę", "▁J ahren", "▁Jah ren", "▁Jahr en", "▁Jahre n", "▁Ja hren", "ext ension", "al las", "all as", "alla s", "▁B illy", "▁Bill y", "▁Bil ly", "us ammen", "it ud", "itu d", "ge on", "geo n", "Te mp", "T emp", "Le g", "L eg", "itt el", "itte l", "add le", "▁mus cle", "▁sc ared", "▁scar ed", "ss on", "s son", "▁de note", "▁den ote", 
"ie urs", "ieu rs", "ieur s", "i eurs", "▁o range", "▁or ange", "▁h ub", "▁ hub", "▁re b", "▁r eb", "▁ reb", "ed i", "e di", "▁vo ices", "▁voice s", "F older", "▁s uspend", "▁sus pend", "▁susp end", "▁ suspend", "▁He art", "▁sc rap", "▁scr ap", "▁a ggreg", "▁ag greg", "▁Gu ide", "trans action", "▁r iding", "▁ri ding", "▁rid ing", "▁v á", "▁ vá", "▁b reed", "▁br eed", "▁bre ed", "▁bree d", "▁con cert", "▁conc ert", "▁conce rt", "▁concer t", "appro x", "▁ch ances", "▁chance s", "To k", "T ok", "E q", "par ts", "part s", "p arts", "▁sch olar", "▁schol ar", "of fs", "off s", "fl ush", "flu sh", "! ”", "▁lo gin", "▁log in", "▁ login", "▁so ort", "▁M and", "▁Man d", "▁Ma nd", "▁function al", "▁B ou", "▁Bo u", "▁subject s", "my s", "m ys", "▁extra ord", "▁Build ing", "ik t", "i kt", "B ad", "ia mi", "iam i", "i ami", "Dr iver", "D river", "êt e", "ê te", "▁k v", "▁ kv", "▁t imer", "▁time r", "▁tim er", "▁ti mer", "▁ timer", "ition ally", "itional ly", "▁a thlet", "▁ath let", "▁\" );", "▁\") ;", "▁ \");", "w y", "CF G", "▁he aven", "▁heav en", "о в", "▁exper imental", "▁experiment al", "▁b ounds", "▁bound s", "▁ bounds", "IC K", "I CK", "▁ex cit", "▁exc it", "▁qu it", "▁qui t", "▁q uit", "▁univers al", "д ь", "▁S P", "▁ SP", "▁st ub", "▁ stub", "▁k le", "▁kl e", "▁ kle", "▁B art", "▁Bar t", "▁Ba rt", "▁\" @", "pe l", "p el", "▁( !(", "▁(! (", "▁se lector", "▁select or", "▁sel ector", "▁sele ctor", "▁ selector", "E B", "▁c oc", "▁co c", "et ed", "ete d", "e ted", "ют ь", "ю ть", "▁poss ess", "▁R ick", "▁Ric k", "▁unus ual", "ter min", "term in", "▁b ags", "▁bag s", "▁ba gs", "▁lo ading", "▁load ing", "▁ loading", "▁t f", "▁ tf", "▁) \r", "▁ )\r", "pro vider", "prov ider", "plet ion", "▁c ursor", "▁cur sor", "▁ cursor", "▁pa used", "▁paus ed", "▁pause d", "и м", "▁coun sel", "] <", "ze ch", "zec h", "z ech", "▁t ie", "▁ti e", "▁M oon", "▁Mo on", "▁ar med", "▁arm ed", "▁ armed", "▁ob serve", "▁observ e", "▁obs erve", "▁per met", "▁perm et", "▁J ob", "▁Jo b", "▁ Job", "fö r", "f ör", "arg ument", "▁egg s", "▁eg gs", "ás t", "á st", "▁incred ibly", "wer ken", "werk en", "iz ard", "izar d", "iza rd", "▁p ainted", "▁pain ted", "▁pa inted", "▁paint ed", "▁Viet nam", "▁vi olent", "▁viol ent", "Es t", "E st", "ier ra", "i erra", "re ader", "read er", "rea der", "we ise", "wei se", "▁J osh", "▁Jo sh", "▁Jos h", "▁H im", "▁Hi m", "as hes", "ash es", "or igin", "orig in", "ori gin", "▁sp ir", "▁ spir", "▁T ree", "▁Tr ee", "▁Tre e", "▁ Tree", "▁n iet", "▁nie t", "▁ni et", "WI N", "W IN", "mar gin", "m argin", "▁inv olves", "▁invol ves", "▁involve s", "▁organ is", "▁N acional", "bar a", "ba ra", "b ara", "▁de puis", "▁dep uis", "pi o", "p io", "fe atures", "feature s", "feat ures", "st ru", "str u", "▁Dis ney", "▁restaur ants", "▁restaurant s", "Mil l", "M ill", ")) \r", ") )\r", "с ла", "rem ote", "▁Th ird", "▁base ball", "▁al gun", "▁alg un", "] $", "▁em ployed", "▁employ ed", "po t", "p ot", "▁Un ityEngine", "▁ UnityEngine", "▁integr ation", "▁risk s", "▁ris ks", "▁st ro", "▁str o", "▁ag osto", "▁ago sto", "incl uding", "▁M ind", "▁Min d", "▁Mi nd", "▁st roke", "▁str oke", "▁stro ke", "▁ stroke", "▁de als", "▁deal s", "aj ax", "aja x", "a jax", "ё т", "▁\\ |", "▁ \\|", "ta r", "t ar", "adelph ia", "▁s ab", "▁sa b", "pu r", "p ur", "▁sc rew", "▁scr ew", "▁in ev", "▁\\ ;", "▁Don ald", "▁ Donald", "ö d", "cc a", "c ca", "es is", "esi s", "e sis", "▁separ ated", "▁separate d", "DB G", "D BG", "ag ent", "age nt", "agen t", "a gent", "▁p acked", "▁pack ed", "▁pac ked", "▁ packed", "н ня", "in tern", "int ern", 
"inter n", "inte rn", "▁M onte", "▁Mon te", "▁Mont e", "▁Mo nte", "▁prov ince", "▁provinc e", "▁provin ce", "▁exp anded", "▁expand ed", "▁appro ached", "▁approach ed", "▁E p", "CL K", "▁o re", "▁or e", "▁ ore", "B atch", "▁impress ive", "R M", "▁L ocation", "▁Loc ation", "▁ Location", "▁sh ame", "▁sha me", "wrap per", "w rapper", "un wrap", "pe er", "Bit s", "Bi ts", "B its", "▁S N", "▁ SN", "sc ar", "s car", "Com e", "Co me", "C ome", "▁coun cil", "▁shout ed", "ma king", "m aking", "▁M aur", "▁Ma ur", "▁Mau r", "▁w is", "LE TE", "LET E", "▁f s", "▁ fs", "▁d z", "▁ dz", "un que", "ue go", "u ego", "R andom", "H tml", "ze m", "z em", "▁D utch", "▁Gold en", "▁Gol den", "▁T ar", "▁Ta r", "▁H erm", "▁He rm", "▁Her m", "▁str etch", "▁stret ch", "var d", "va rd", "v ard", "▁t ries", "▁tr ies", "▁tri es", "W I", "▁disappe ared", "▁disappear ed", "▁cr usher", "▁crush er", "▁K an", "▁Ka n", "Ma g", "M ag", "ø r", "▁Cam bridge", "▁Camb ridge", "▁do po", "▁dop o", "at ura", "atur a", "atu ra", "he art", "▁Sp iel", "/* *\r", "/** \r", "Dir ection", "Direct ion", "Di rection", "D irection", "at ting", "att ing", "atti ng", "wi g", "w ig", "▁c odes", "▁co des", "▁code s", "▁cod es", "▁ codes", "▁pow der", "al ert", "ale rt", "aler t", "sem bl", "semb l", "▁y e", "▁ ye", "St ar", "S tar", "▁ro ots", "▁root s", "▁H oll", "▁Hol l", "▁Ho ll", "Re le", "Rel e", "R ele", "▁const itu", "n c", "“ .", "re ference", "refer ence", "if icial", "ific ial", "ifi cial", "clos ure", "▁fig ured", "▁figure d", "▁assum ption", "getElement ById", "▁A G", "▁ AG", "os es", "ose s", "o ses", "▁_ \"", "ep per", "ob re", "o bre", "en umerate", "о графи", "▁less ons", "▁lesson s", "▁qual ified", "Per son", "Pers on", "P erson", "an se", "ans e", "▁M ort", "▁Mor t", "▁Mo rt", "s ylvania", "▁c ré", "▁cr é", "Bind ing", "Bin ding", "B inding", "і с", "▁V ari", "▁Var i", "▁Va ri", "▁ Vari", "▁re minded", "▁remind ed", "▁members hip", "▁member ship", "ip er", "ipe r", "i per", "zt e", "z te", "▁c ref", "▁cre f", "▁cr ef", "▁ cref", "▁P A", "▁ PA", "plaat st", "▁Env ironment", "▁ Environment", "bo y", "b oy", "▁ph rase", "▁phr ase", "▁ phrase", "riv ial", "ra g", "r ag", "во ди", "вод и", "▁p se", "▁ps e", "▁ pse", "▁tour nament", ")} ,", ") },", "▁S ound", "▁So und", "▁Sou nd", "▁ Sound", "▁V el", "▁Ve l", "▁ Vel", "▁B erg", "▁Be rg", "▁Ber g", "el son", "els on", "▁ref uge", "▁else where", "qu ality", "qual ity", "▁abandon ed", "▁F lo", "▁Fl o", "ib il", "i bil", "UA L", "U AL", "▁Pl atz", "▁d elta", "▁del ta", "▁ delta", "▁B uy", "▁Bu y", "ri ère", "r ière", "▁fl our", "▁flo ur", "▁laugh ing", "▁laug hing", "▁Look ing", "▁Lo oking", "Ag ent", "A gent", "▁w x", "▁ wx", "▁W ales", "▁Wal es", "▁Wa les", "C tx", "▁c ake", "▁ca ke", "▁c rate", "▁cr ate", "▁ crate", "▁к ла", "▁ кла", "an ga", "ang a", "Z ero", "▁amount s", "Tr a", "T ra", "om etric", "omet ric", "o metric", "▁con straints", "▁constr aints", "▁constraint s", "▁tem ple", "▁templ e", "▁temp le", "▁install ation", "st roke", "str oke", "▁N eder", "▁Ne der", "▁Ned er", "ț i", "▁I bid", "▁o bs", "▁ob s", "▁ obs", "ent ries", "entr ies", "▁j usqu", "OR M", "O RM", "▁S ky", "▁Sk y", "ik es", "ike s", "i kes", "na k", "n ak", "▁m odes", "▁mod es", "▁mo des", "▁mode s", "▁Hit ler", "▁b elt", "▁be lt", "▁bel t", "▁point ing", "▁B an", "▁Ba n", "ign ore", "▁per su", "▁pers u", "▁Bes ides", "yn om", "y nom", "▁leg is", "▁C PU", "▁CP U", "▁ CPU", "an ded", "and ed", "ande d", "ui s", "u is", "bs ite", "b site", "▁E uro", "▁Eu ro", "▁ut ter", "▁ utter", "e clipse", "▁ir re", "▁irr e", 
"▁D ocument", "▁Doc ument", "▁ Document", "▁Mean while", "▁famil ie", "ver ify", "▁J ason", "▁Ja son", "▁O rt", "▁Or t", "▁ci udad", "▁techn ologies", "▁ча сти", "▁част и", "▁час ти", "ni ca", "nic a", "n ica", "can cel", "c ancel", "V irtual", "▁ev ident", "am an", "ama n", "a man", "▁Sup reme", "at oes", "ato es", "▁ste ady", "▁stead y", "▁month ly", "▁SO FTWARE", "Di e", "D ie", "▁app lying", "▁apply ing", "▁appl ying", "Di g", "D ig", "vi er", "v ier", "▁го ро", "▁W H", "▁ WH", "▁min ds", "▁mind s", "▁k am", "▁ka m", "▁expert ise", "▁not ification", "▁ notification", ". -", "▁del iber", "▁H E", "▁ HE", "▁res ist", "ou tes", "out es", "oute s", "o utes", "▁How ard", "▁Ho ward", "spec ial", "spe cial", "▁p resentation", "▁present ation", "▁You Tube", "mi r", "m ir", "▁r ust", "▁ru st", "▁rus t", "▁ rust", "▁n ations", "▁nat ions", "▁nation s", "▁G ets", "▁Ge ts", "▁Get s", "▁respon ses", "▁response s", "▁respons es", "ar ded", "ard ed", "arde d", "im mer", "imm er", "▁reve al", "▁M eg", "▁Me g", "▁tod os", "▁todo s", "▁a de", "▁ad e", "▁ ade", "ateg ories", "ategor ies", "▁pay ments", "▁payment s", "ô t", "En umer", "Enum er", "E numer", "▁platform s", "▁plat forms", "▁life time", "▁lif etime", "Com plete", "Comp lete", "Qu est", "Que st", "Q uest", "en ders", "end ers", "ender s", "ende rs", "▁c um", "▁cu m", "pl er", "ple r", "p ler", "▁app l", "▁ap pl", "äh rend", "ähr end", "з ь", "en ez", "ene z", "e nez", "over ty", "yn chron", "▁arg ued", "▁argue d", "▁K ath", "▁Kat h", "▁Ka th", "▁s ynchron", "▁syn chron", "▁B uilder", "▁Build er", "▁ Builder", "B order", "Pl an", "P lan", "ri eb", "rie b", "r ieb", "n m", "FOR MAT", "FORM AT", "us k", "u sk", "▁j umped", "▁jump ed", "ch arg", "char g", "cha rg", "▁cont ribute", "▁contribut e", "Me sh", "M esh", "Un ivers", "re ll", "rel l", "r ell", "▁p olar", "▁pol ar", "▁po lar", "▁tr ois", "▁tro is", "ic io", "ici o", "i cio", "Group s", "G roups", "▁( %", "▁ (%", "Lo op", "L oop", "▁g az", "▁ga z", "db g", "d bg", "LA Y", "L AY", "Jo hn", "J ohn", "bl ocks", "block s", "blo cks", "▁l ung", "▁lu ng", "▁lun g", "▁ lung", "▁k ön", "▁kö n", "th rough", "▁fif th", "lish er", "l isher", "▁inv olving", "▁invol ving", "▁De ep", "▁ Deep", "▁обла сти", "▁s ull", "▁su ll", "▁sul l", "Ex port", "Exp ort", "▁K ate", "▁Kat e", "▁Ka te", "per iod", "ch arge", "char ge", "charg e", "G T", "\"> \r", "\" >\r", "ти н", "т ин", "▁O tt", "▁Ot t", "▁inter actions", "▁interaction s", "▁interact ions", "▁Tor onto", "TR ACE", "TRA CE", "▁d ifer", "▁di fer", "▁dif er", "▁lib eral", "▁liber al", "▁p article", "▁part icle", "▁partic le", "▁parti cle", "▁sur ve", "▁surv e", "al ous", "alo us", "re ason", "rea son", "▁de pression", "▁dep ression", "▁depress ion", "а л", "▁f lower", "▁fl ower", "▁flo wer", "▁flow er", "▁wa ar", "▁h ade", "▁had e", "▁ha de", "▁cent uries", "ut y", "u ty", "par ty", "part y", "▁appro val", "gener ate", "▁B arn", "▁Bar n", "▁Ba rn", "▁m arg", "▁mar g", "▁ma rg", "▁m onde", "▁mon de", "▁mo nde", "▁mond e", "▁o ok", "▁ ook", "▁Cl ark", "▁Clar k", "▁the oret", "vious ly", "vi ously", "v iously", "? 
)", "▁R ud", "▁Ru d", "st mt", "in ction", "inct ion", "▁t un", "▁tu n", "▁ro ads", "▁road s", "▁rot ation", "▁ rotation", "pp en", "ppe n", "p pen", "sen sor", "s ensor", "▁K ol", "▁Ko l", "id elines", "ide lines", "idel ines", "▁ є", "▁com posed", "▁comp osed", "▁compos ed", "▁v irus", "▁vi rus", "▁vir us", "' $", "S N", "▁V on", "▁Vo n", "mon t", "mo nt", "m ont", "la r", "l ar", "▁opin ions", "▁opinion s", "uct ion", "u ction", "ru pal", "rup al", "under line", "▁hor ror", "Mus t", "Mu st", "M ust", "ot to", "ott o", "o tto", "Sh ould", "▁stat ist", "▁g em", "▁ge m", "▁ gem", "▁se cre", "▁sec re", "▁st rip", "▁str ip", "▁stri p", "▁ strip", "▁d irt", "▁di rt", "▁dir t", "ama zon", "amaz on", "▁R ound", "▁Ro und", "▁Rou nd", "▁ Round", "▁dis covery", "▁disc overy", "▁discover y", "▁disco very", "▁G O", "▁ GO", "▁substant ial", "ib t", "i bt", "▁dem ands", "▁demand s", "▁every day", "▁b esch", "▁be sch", "▁bes ch", "▁B ridge", "▁Br idge", "▁H D", "▁ HD", "▁D ol", "▁Do l", "▁t rès", "▁tr ès", "an ni", "ann i", "ro it", "() );\r", "()); \r", "()) ;\r", "( ));\r", "fa r", "f ar", "tim estamp", "▁bul k", "Bl ack", "▁g an", "▁ga n", "▁ gan", "set ting", "ret val", "ва не", "ван е", "nu ng", "n ung", "▁talk s", "▁tal ks", "▁scient ists", "▁scientist s", "▁v ig", "▁vi g", "▁quant ity", "▁G ard", "▁Gar d", "▁Ga rd", "▁mov ements", "▁move ments", "▁movement s", "äh r", "ä hr", "ling s", "lin gs", "l ings", "▁Т е", "te am", "ri to", "rit o", "r ito", "▁as sembly", "▁ assembly", "il st", "ils t", "i lst", "▁happ iness", "▁le af", "▁ leaf", "▁ass essment", "▁assess ment", "Co ord", "ir s", "i rs", "sa m", "s am", "▁att orney", "▁g eme", "▁ge me", "▁gem e", "▁ geme", "ID E", "I DE", "▁V ere", "▁Ver e", "▁Ve re", "▁Anth ony", "am iento", "ami ento", "▁A st", "▁As t", "▁cir cul", "▁circ ul", "▁Fr ances", "▁Franc es", "▁France s", "▁Fran ces", "▁p ent", "▁pe nt", "▁pen t", "▁ pent", "▁m ate", "▁mat e", "▁ma te", "▁ mate", "▁Trans port", "▁ Transport", "ow o", "o wo", "ч у", "is tes", "ist es", "iste s", "TR AN", "TRA N", "T RAN", "IM PORT", "IMP ORT", "▁B reak", "▁Bre ak", "▁ Break", "▁s ons", "▁so ns", "▁son s", "▁invest ors", "▁Phil ipp", "▁Philip p", "TH OD", "▁pan ic", "▁pa nic", "▁ panic", "▁: )", "▁d etection", "▁det ection", "▁detect ion", "▁sim ultane", "nt e", "n te", "▁list ened", "▁listen ed", "к ре", "▁B rig", "▁Br ig", "Option al", "Opt ional", "▁a bund", "▁ab und", "▁c riteria", "▁crit eria", "▁c hip", "▁ch ip", "▁chi p", "▁ chip", "▁ок ру", "▁Con stant", "▁Const ant", "▁ Constant", "▁m ining", "▁min ing", "▁mi ning", "▁mini ng", "та л", "т ал", "ma tes", "mat es", "mate s", "m ates", "▁w orship", "▁wor ship", "ro uter", "rou ter", "route r", "r outer", "C N", "▁M atch", "▁Mat ch", "▁ Match", "▁C ole", "▁Col e", "▁Co le", "▁down t", "▁dow nt", "▁h oles", "▁hol es", "▁ho les", "▁hole s", "▁gr ateful", "RES ULT", "▁Europ a", "▁Euro pa", "▁con sent", "▁cons ent", "▁conse nt", "l ä", "op ter", "opt er", "▁colle agues", "or ous", "oro us", "o rous", "▁enem ies", "ha ng", "han g", "h ang", "act ual", "Object s", "▁я к", "▁fl uid", "▁flu id", "fix ed", "f ixed", "▁G raph", "▁Gr aph", "▁Gra ph", "▁ Graph", "▁scr atch", "ce rs", "cer s", "c ers", "ri bu", "rib u", "▁valid ation", "▁ validation", "▁com pletion", "▁complet ion", "▁B egin", "▁Be gin", "▁Beg in", "▁ Begin", "end point", "ri ent", "rie nt", "rien t", "r ient", "C M", "▁S ite", "▁Si te", "▁ Site", "▁expl ains", "▁explain s", "tr es", "tre s", "t res", "▁any body", "fo reach", "fore ach", "for each", "lo n", "l on", "Ch ain", "▁B uff", 
"▁Bu ff", "▁ Buff", "oc al", "oca l", "o cal", "▁M organ", "▁Mor gan", "▁s ang", "▁sa ng", "▁san g", "▁pass es", "▁pas ses", "@ @", "ij d", "i jd", "W ord", "▁H ung", "▁Hun g", "▁Hu ng", "▁F er", "▁Fe r", "▁v ý", "ba st", "bas t", "b ast", "▁enter tainment", "▁entertain ment", "hi n", "h in", "▁g rat", "▁gr at", "▁gra t", "▁M ember", "▁Me mber", "▁Mem ber", "▁ Member", "▁M inn", "▁Min n", "▁Mi nn", "▁pr inted", "▁print ed", "▁prin ted", "▁Frank lin", "▁I mp", "▁Im p", "▁ Imp", "M achine", "column s", "▁de leted", "▁del eted", "▁delete d", "▁delet ed", "▁ deleted", "▁manufact uring", "▁re ly", "▁r ely", "▁rel y", "▁con se", "▁cons e", "▁f ishing", "▁fish ing", "▁fis hing", "bl o", "b lo", "- $", "▁. \"", "▁ .\"", "▁clin ical", "▁clinic al", "▁Stud ies", "▁Б у", "def inition", "▁evalu ation", "▁eval uation", "▁att acked", "▁attack ed", "▁fro zen", "ze nt", "zen t", "z ent", "▁ú lt", "▁r ational", "▁rat ional", "▁ratio nal", "ot he", "oth e", "o the", "Can cel", "C ancel", "hi story", "hist ory", "set Text", "▁a lc", "▁al c", "▁h ydro", "▁hy dro", "▁hyd ro", "▁The atre", "▁M aterial", "▁Mat erial", "▁ Material", "IO Exception", "**** **/", "****** /", "sp l", "s pl", "NO DE", "att rs", "attr s", "▁m ie", "▁mi e", "▁off ices", "▁offic es", "▁office s", "r ó", "▁j am", "▁ja m", "▁Id ent", "▁Ide nt", "▁ Ident", "v é", "Set ting", "▁Sever al", "▁Sev eral", "▁dec ay", "And roid", "▁S ave", "▁Sa ve", "▁Sav e", "▁ Save", "un ted", "unt ed", "unte d", "▁Mount ain", "us c", "u sc", "▁mar zo", "▁a sleep", "▁as leep", "▁sold ier", "▁D ouble", "▁Dou ble", "▁ Double", "P K", "▁cont rad", "▁contr ad", "▁contra d", "▁w ins", "▁win s", "ce iver", "ceive r", "▁se asons", "▁season s", "▁seas ons", "▁C hall", "▁Ch all", "▁Cha ll", "▁health care", "ła d", "ł ad", "о т", "▁F ive", "▁Fi ve", "▁H ell", "▁He ll", "▁Hel l", "▁world wide", "▁' ,", "▁ ',", "я н", "ma de", "mad e", "m ade", "▁respon ded", "▁respond ed", "▁a y", "▁ ay", "▁proced ures", "▁procedure s", "те ра", "тер а", "▁cle ared", "▁clear ed", "\"] .", "\" ].", "▁T arget", "▁Tar get", "▁ Target", "▁S ide", "▁Si de", "▁Sid e", "▁ Side", "om in", "omi n", "o min", "▁de ploy", "▁T ell", "▁Te ll", "▁Tel l", "▁ Tell", "▁on going", "fl oor", "f loor", "▁b ones", "▁bo nes", "▁bon es", "▁bone s", "▁De lete", "▁Del ete", "▁ Delete", "▁shru gged", "O ur", "De r", "D er", "▁init ialize", "▁initial ize", "▁ initialize", "▁T ed", "▁Te d", "MA GE", "MAG E", "M AGE", "▁h ire", "▁hi re", "▁ hire", "▁tr acking", "▁track ing", "▁a sh", "▁as h", "▁ ash", "▁ce iling", "ка х", "et ti", "ett i", "e tti", "▁cour age", "▁cou rage", "ensch app", "ют ся", "ю тся", "Mo re", "M ore", "▁fol g", "▁fo lg", "▁ folg", "▁Gr ace", "▁Gra ce", "▁K elly", "▁Kel ly", "▁re ven", "▁r even", "▁rev en", "▁reve n", "▁A li", "▁Al i", "▁ Ali", "▁d isp", "▁dis p", "▁di sp", "▁ disp", "▁de feat", "▁defe at", "▁cre ature", "▁creat ure", "▁Kenn edy", "▁D iego", "▁Die go", "▁Di ego", "EM P", "E MP", "▁s team", "▁ste am", "end ance", "ri g", "r ig", "▁ign or", "▁ig nor", "em en", "eme n", "e men", "▁G ru", "▁Gr u", "▁pro posal", "▁propos al", "▁we iter", "▁weit er", "▁ лі", "ib les", "ible s", "i bles", "▁consider ation", "▁belie ves", "▁believe s", "▁S oph", "▁So ph", "“ ,", "▁Matt hew", "▁circ uit", "▁s inger", "▁sing er", "▁sin ger", "▁S quare", "ç o", "Ed ge", "▁a str", "▁as tr", "▁ast r", "▁ astr", "▁represent ative", "▁comprehens ive", "li ga", "lig a", "l iga", "▁m ere", "▁me re", "▁mer e", "tb l", "t bl", "▁contin uing", "▁continu ing", "ograph er", "ograp her", "LE D", "L ED", "▁/* **/", "▁/** 
*/", "▁s ear", "▁se ar", "▁sea r", "▁enorm ous", "iz i", "i zi", "Di t", "D it", "th ere", "ther e", "the re", "t here", "і н", "си те", "▁gu erra", "▁end point", "▁ endpoint", "▁le sson", "▁les son", "▁less on", "zo n", "z on", "var iable", "vari able", "и с", "▁research ers", "▁attempt ed", "▁e nf", "▁en f", "ту ра", "тур а", "▁de fin", "▁def in", "ве ст", "▁aw ful", "▁lo west", "▁low est", "ru les", "rule s", "r ules", "▁un like", "inter val", "▁produ cing", "▁K am", "▁Ka m", "▁I MP", "▁IM P", "▁ IMP", "Gener al", "Gen eral", "▁f aire", "▁fa ire", "▁fair e", "▁max im", "▁ma xim", "as semb", "ass emb", "asse mb", "assem b", "ac ent", "ace nt", "a cent", "? >", "pl ica", "plic a", "p lica", "▁r am", "▁ra m", "▁ ram", "ma te", "mat e", "m ate", "ц у", "m n", "▁H i", "▁ Hi", "▁st ages", "▁stage s", "▁stag es", "▁sta ges", "▁Ed itor", "▁Edit or", "▁ Editor", "▁t ang", "▁tan g", "▁ta ng", "R D", "▁i ch", "▁ic h", "▁ ich", "▁depend ent", "▁dep endent", "▁ dependent", "li fer", "life r", "lif er", "l ifer", "as cript", "asc ript", "a script", "▁expos ure", "ре з", "▁m art", "▁mar t", "▁ma rt", "▁ mart", "▁Bar cel", "xs pace", "x space", "SE SSION", "▁p rest", "▁pre st", "▁pr est", "▁pres t", "UR CE", "- .", "▁се ло", "ha ve", "hav e", "h ave", "▁observ ation", "▁obs ervation", "▁comm ands", "▁command s", "▁ commands", "▁e ager", "▁out door", "▁DE BUG", "▁ DEBUG", "▁h r", "▁ hr", "A X", "▁p uzz", "▁pu zz", "bl ank", "бу р", "б ур", "▁k ennis", "▁reg arded", "▁regard ed", "▁} ),", "▁}) ,", "▁ }),", "vol ume", "▁про из", "▁Tr aining", "▁Tra ining", "▁Train ing", "a ñ", "▁f ois", "▁foi s", "▁fo is", "▁т ри", "▁ три", "в ня", "▁opt imal", "▁optim al", "▁sub scription", "▁subs cription", "▁ subscription", "br idge", "brid ge", "b ridge", "im ental", "iment al", "imen tal", "▁Th ink", "▁\" ;", "▁ \";", "▁leg isl", "▁legis l", "▁H op", "▁Ho p", "▁br anches", "▁branch es", "▁V eg", "▁Ve g", "▁s print", "▁spr int", "▁fl ux", "▁flu x", "▁Fr eder", "▁Fre der", "▁Fred er", "si s", "s is", "not ify", "▁Ф ран", "so m", "s om", "ny m", "n ym", "▁R é", "le tt", "let t", "l ett", "ing ham", "▁F arm", "▁Far m", "▁Fa rm", "DO M", "D OM", "▁sh ield", "He re", "Her e", "H ere", "▁T reat", "▁Tre at", "▁Lu ke", "▁un safe", "an ton", "ant on", "anto n", "▁Im per", "▁Imp er", "▁tele phone", "▁un lock", "▁ unlock", "Own er", "col lection", "coll ection", "collect ion", "▁s nd", "▁sn d", "▁ snd", "▁su iv", "▁ent ering", "▁enter ing", "ше н", "ш ен", "▁L abel", "▁La bel", "▁Lab el", "▁ Label", "se lector", "sel ector", "select or", "▁G ET", "▁ GET", "▁qu ando", "▁quand o", "▁f ed", "▁fe d", "▁ fed", "j Query", "Or igin", "▁A lan", "▁Al an", "math scr", "▁pregn ant", "▁preg nant", "Ex pect", "Exp ect", "re sources", "res ources", "resource s", "▁er sten", "▁erst en", "▁ers ten", "▁erste n", "al ia", "ali a", "a lia", "▁ret ired", "▁retire d", "û t", "Cr ed", "C red", "▁m éd", "▁mé d", "▁e rh", "▁er h", "Frame work", "Sl ot", "S lot", "d uration", "sa l", "s al", "▁com position", "▁compos ition", "art icle", "gp u", "g pu", "▁per mitted", "▁perm itted", "▁permit ted", "▁F ont", "▁Fo nt", "▁ Font", "▁M uch", "▁Mu ch", "▁p ending", "▁pen ding", "▁ pending", "▁ag encies", "Column s", "▁k lik", "▁kl ik", "▁r ating", "▁rat ing", "▁ra ting", "▁ rating", "min d", "mi nd", "m ind", "▁Penn sylvania", "J ava", "ab stract", "abs tract", "▁d umb", "▁du mb", "▁V I", "▁ VI", "us a", "u sa", "Rem ote", "▁YO U", "▁C reek", "▁Cre ek", "ма ти", "мат и", "Bot tom", "B ottom", "▁roll ing", "▁ rolling", "▁b undle", "▁bund le", "▁ bundle", "▁g 
olf", "▁gol f", "gp io", "g pio", "▁Ch air", "▁Cha ir", "▁c ls", "▁cl s", "▁ cls", "$ }", "▁Par liament", "f ühr", "Man y", "Ma ny", "M any", "▁S ep", "▁Se p", "▁ Sep", "▁bad ly", "ig i", "i gi", "▁Geme inde", "Il l", "I ll", "▁А н", "ua rt", "uar t", "u art", "it empty", "item pty", "▁N iger", "▁Ni ger", "▁im migr", "▁imm igr", "Su per", "Sup er", "S uper", "v á", "ist ribute", "istribut e", "Hel pers", "Helper s", "Help ers", "▁w aters", "▁water s", "▁wat ers", "▁wa ters", "▁join ing", "▁jo ining", "om itempty", "▁Other wise", "▁H ost", "▁Ho st", "▁ Host", "▁re dd", "▁red d", "▁d y", "▁ dy", "▁con verted", "▁convert ed", "▁conver ted", "▁pr ayer", "▁pray er", "▁pra yer", "▁У краї", "▁Укра ї", "▁e lections", "▁elect ions", "▁ele ctions", "▁election s", "re b", "r eb", "er ie", "eri e", "e rie", "▁с вя", "Ab s", "A bs", "ie mbre", "iem bre", "i embre", "hol ders", "hold ers", "holder s", "▁R ol", "▁Ro l", "ut schen", "uts chen", "utsch en", "utsche n", "▁G h", "ter y", "te ry", "t ery", "ан г", "а нг", "▁narr ative", "min us", "m inus", "▁I ron", "▁Ir on", "=\" #", "▁w and", "▁wa nd", "▁ wand", "▁w ished", "▁wish ed", "▁wis hed", "ic ode", "ico de", "i code", "or r", "o rr", "[ [", "▁detect ed", "▁municip al", "▁P our", "▁Po ur", "▁S erv", "▁Se rv", "▁Ser v", "▁ Serv", "cite t", "cit et", "c itet", "▁g rey", "▁gr ey", "▁gre y", "▁R ap", "▁Ra p", "▁v oy", "▁vo y", "▁l leg", "▁ll eg", "▁cur rency", "▁curr ency", "▁ currency", "▁S cript", "▁Sc ript", "▁ Script", "str ument", "stru ment", "▁expect ing", "▁t ickets", "▁tick ets", "▁ticket s", "▁b ucket", "▁buck et", "▁ bucket", "eg r", "e gr", "▁j acket", "▁jack et", "dr v", "d rv", "▁lo ans", "▁loan s", "▁k ann", "▁kan n", "▁ka nn", "▁integr al", "▁character istics", "▁characteristic s", "(\" .", "( \".", "▁man ual", "▁d ynamics", "▁dynam ics", "▁dynamic s", ": *", "sh a", "s ha", "re ens", "ree ns", "reen s", "on ical", "oni cal", "onic al", "▁to ile", "añ a", "a ña", "▁d istant", "▁di stant", "▁dist ant", "▁hand led", "▁handle d", "Bo ol", "B ool", "▁pe nal", "▁pen al", "▁Th ings", "▁prom inent", "▁ex ped", "▁exp ed", "▁He lp", "▁Hel p", "▁ Help", "▁a sp", "▁as p", "▁ asp", "la p", "l ap", "▁A uth", "▁Aut h", "▁Au th", "▁ Auth", "Bas ic", "ach uset", "▁B ild", "▁Bi ld", "▁Bil d", "▁ent itled", "▁j ag", "▁ja g", "▁reject ed", "▁m emor", "▁me mor", "▁mem or", "▁memo r", "or ts", "ort s", "▁ap plies", "▁appl ies", "▁L anguage", "▁ Language", "spec ific", "achuset ts", "HA ND", "H AND", "▁R oute", "▁Ro ute", "▁Rou te", "▁ Route", "mark et", "mar ket", "▁K y", "▁p ose", "▁pos e", "▁po se", "▁ pose", "AC HE", "ACH E", "po ll", "pol l", "p oll", "▁r ocks", "▁ro cks", "▁rock s", "bo ne", "bon e", "b one", "▁D IS", "▁DI S", "▁ DIS", "W atch", "▁sm iling", "ри о", "Mon th", "Mont h", "▁e fter", "con struct", "const ruct", "▁b ands", "▁band s", "▁ban ds", "▁ bands", "▁collabor ation", "ни ми", "ним и", "gl as", "g las", "▁v y", "▁ vy", "▁eng agement", "▁engage ment", "__ )", "_ _)", "▁w ings", "▁win gs", "▁wing s", "ки м", "к им", "net je", "at iva", "ati va", "ativ a", "▁Du ke", "ле е", "▁With in", "▁d ove", "▁do ve", "▁c b", "▁ cb", "ye rs", "yer s", "y ers", "po w", "p ow", "[ (", "▁evalu ate", "▁eval uate", "Point s", "▁р і", "▁ рі", "od igd", "odi gd", "on omy", "ono my", "onom y", "▁Ill inois", "▁T yp", "▁Ty p", "▁ Typ", "▁coord inates", "▁coordinate s", "pis ode", "uck ed", "uc ked", "▁f lav", "▁fl av", "▁br ands", "▁brand s", "▁cal endar", "▁ calendar", "Li b", "L ib", "▁uit gen", "▁t ale", "▁tal e", "▁ta le", "▁brief ly", "▁m ic", "▁mi c", "▁ 
mic", "RE SS", "RES S", "▁sp äter", "▁integr ated", "▁integrate d", "▁cook ies", "▁cookie s", "▁uitgen odigd", "▁P riv", "▁Pr iv", "▁Pri v", "▁ Priv", "▁phen omen", "▁vo egen", "Su pp", "Sup p", "S upp", "▁re fers", "▁ref ers", "▁refer s", "па д", "▁Cl inton", "▁Clin ton", "▁ass ignment", "▁assign ment", "in als", "ina ls", "inal s", "i nals", "▁a sym", "▁as ym", "cy cle", "cycl e", "c ycle", "▁And erson", "▁Anders on", "▁b inding", "▁bin ding", "▁bind ing", "▁ binding", "ri que", "r ique", "hi nd", "hin d", "h ind", "▁be half", "▁beh alf", "▁F le", "▁Fl e", "▁break s", "▁bre aks", "▁so ap", "▁ soap", "ва р", "в ар", "▁v ä", "▁ vä", "▁c rying", "▁cr ying", "▁cry ing", "▁ →", "▁m sm", "▁ms m", "▁ msm", "▁bo ots", "▁boot s", "ow ing", "owi ng", "o wing", "▁b ell", "▁be ll", "▁bel l", "▁ bell", "su ite", "suit e", "▁Bund es", "▁Bun des", "Y ear", "nd ef", "nde f", "n def", "O ther", "▁go ogle", "▁goog le", "▁ google", "EN CE", "ENC E", "WE R", "W ER", "Le s", "L es", "Sh ared", "Share d", "▁E D", "▁ ED", "IF T", "I FT", "▁flo ating", "▁float ing", "ý m", "{} ,", "{ },", "Bin ary", "B inary", "▁ro ce", "ra j", "r aj", "▁be werken", "B F", "▁H ur", "▁Hu r", "ce n", "c en", "▁e re", "▁er e", "▁ ere", "▁c amb", "▁cam b", "▁ca mb", "▁Pak istan", "▁great ly", "▁log ging", "▁ logging", "/ .", "Ten sor", "T ensor", "▁op ens", "▁open s", "▁ opens", "▁R io", "▁klik ken", "▁sc ulpt", "ap ore", "apor e", "w x", "▁N ich", "▁Nic h", "▁Ni ch", "na n", "n an", "▁inj ured", "com pare", "comp are", "compar e", "th a", "t ha", "Sam ple", "S ample", "Sh ell", "She ll", "S hell", "▁comm ander", "▁command er", "▁re ceiver", "▁rece iver", "▁receive r", "▁h opes", "▁hope s", "▁hop es", "▁ho pes", "▁b yl", "▁by l", "▁pro xy", "▁pr oxy", "▁ proxy", "▁g all", "▁gal l", "▁ga ll", "get Id", "▁B ab", "▁Ba b", "fe ld", "fel d", "f eld", "▁\" _", "▁H ab", "▁Ha b", "sim ple", "▁execut ed", "▁execute d", "▁a te", "▁at e", "▁ ate", "▁an imation", "▁anim ation", "▁ animation", "▁in hab", "▁бо ль", "▁r outer", "▁ro uter", "▁rout er", "▁route r", "▁rou ter", "▁ router", "▁gl ob", "▁glo b", "▁ glob", "Ge plaatst", "▁begin netje", "▁K ur", "▁Ku r", "▁Х а", "al igned", "align ed", "▁cert ificate", "▁ Å", ".) .", ". 
).", "▁s oll", "▁so ll", "▁sol l", "▁Im port", "▁Imp ort", "▁ Import", "ре ди", "ред и", "р еди", "▁pand emic", "▁n ic", "▁ni c", "▁ nic", "v ä", "▁G ree", "▁Gr ee", "▁Gre e", "▁S ay", "▁Sa y", "▁д і", "▁ ді", "▁N um", "▁Nu m", "▁ Num", "▁rough ly", "▁des pués", "▁ ​", "▁spec ify", "Map per", "lic ht", "li cht", "lich t", "l icht", "▁th umb", "▁ thumb", "wi e", "w ie", "▁un likely", "▁unlike ly", "▁ unlikely", "▁E dd", "▁Ed d", "He y", "H ey", "▁O pt", "▁Op t", "▁ Opt", "B LOCK", "во р", "в ор", "▁ ×", "▁b a", "▁ ba", "▁period s", "▁title s", "▁tit les", "Me d", "M ed", "▁f on", "▁fo n", "▁ fon", "▁b ast", "▁bas t", "▁ba st", "▁ bast", "▁F orest", "▁For est", "▁Fore st", "▁Fo rest", "▁ №", "on ds", "ond s", "▁f al", "▁fa l", "▁g esch", "▁ge sch", "▁ges ch", "▁ gesch", "dir ection", "di rection", "direct ion", "dire ction", "d irection", "IF Y", "▁L A", "▁ LA", "▁( ((", "▁(( (", "▁ (((", "GT H", "G TH", "it udes", "itude s", "itu des", "itud es", "▁dest ruction", "▁destruct ion", "▁J a", "▁s take", "▁st ake", "▁sta ke", "iffer ent", "iffe rent", "▁ident ical", "▁f og", "▁fo g", "▁R eb", "▁Re b", "▁ Reb", "ски е", "сту п", "ja x", "j ax", "▁M ars", "▁Mar s", "▁Ma rs", "▁hist oric", "▁histor ic", "▁V o", "▁ Vo", "▁entre pre", "▁t ension", "▁tens ion", "▁W HERE", "▁WH ERE", "▁WHE RE", "▁Phil adelphia", "Count er", "Co unter", "C ounter", "▁fr ames", "▁frame s", "▁fra mes", "▁fram es", "▁ frames", "▁m uy", "▁mu y", "e j", "ö t", "e u", "▁че лове", "PRO C", "PR OC", "▁res olved", "▁resolve d", "▁ resolved", "▁t ape", "▁tap e", "▁ta pe", "ци он", "▁sing ular", "▁person nel", "▁M un", "▁Mu n", "▁O cc", "▁ Occ", "▁scal ar", "▁ scalar", "de ss", "des s", "d ess", "▁c able", "▁cab le", "▁ca ble", "be ing", "b eing", "▁J enn", "▁Je nn", "▁Jen n", "▁er st", "▁ers t", "▁ erst", "Action s", "Act ions", "A ctions", "Env ironment", "vi a", "v ia", "▁strugg ling", "▁D VD", "wh e", "w he", "▁throw ing", "▁thr owing", "▁thro wing", "Bound s", "B ounds", "▁M D", "▁ MD", "▁\" ../", "▁\". 
./", "▁satisf y", "▁Color ado", "▁Act ive", "▁Activ e", "▁ Active", "Task s", "<>( );", "<> ();", "< >();", "▁sl ipped", "▁slip ped", "▁po ison", "▁poi son", "z b", "Dis patch", "war ning", "warn ing", "w arning", "▁ult imate", "p icture", "ex pression", "exp ression", "expr ession", "express ion", "▁T alk", "▁Tal k", "▁f lick", "▁fl ick", "▁rais ing", "▁ra ising", "▁ raising", "▁trans actions", "▁transaction s", "▁gl ance", "▁g ri", "▁gr i", "▁п рез", "▁пре з", "se lection", "sel ection", "select ion", "s election", "њ а", "en dl", "end l", "▁A bb", "▁Ab b", "▁b old", "▁bo ld", "▁bol d", "▁ bold", "▁maint ained", "▁maintain ed", "Ex ists", "▁encour aged", "▁encourage d", "Qu al", "Q ual", "▁ess ere", "▁h ired", "▁hi red", "▁hire d", "let ter", "lett er", "lette r", "it ches", "itch es", "oth ers", "other s", "othe rs", "o thers", "▁w oj", "▁wo j", "▁inj uries", "▁d il", "▁di l", "ex ecut", "exec ut", "▁Ste el", "▁G arden", "▁Gar den", "▁Gard en", "з я", "\\, \\", "\\ ,\\", "▁An gel", "▁Ang el", "pr im", "p rim", ">: ]<", "g b", "pe at", "in te", "int e", "i nte", "▁ap olog", "▁reg ulations", "▁regul ations", "▁regulation s", "S rc", "k h", "Up load", "U pload", "ma pping", "map ping", "m apping", "▁p resents", "▁pres ents", "▁present s", "▁po etry", "▁poet ry", "▁st ops", "▁stop s", "▁sto ps", "▁T ol", "▁To l", "▁t ower", "▁to wer", "▁tow er", "▁O UT", "▁ OUT", "Th ank", "Than k", "▁organ ic", "▁d rei", "▁dr ei", "▁dre i", "▁p ound", "▁po und", "▁pou nd", "cent ury", "▁mod ules", "▁module s", "▁ modules", "▁д ере", "▁де ре", "▁w orn", "▁wor n", "▁wo rn", "▁par ad", "▁para d", "▁pa rad", "▁C os", "▁Co s", "fi c", "f ic", "▁бе з", "▁Jim my", "▁l ands", "▁land s", "▁lan ds", "▁ lands", "▁min ist", "▁mini st", "vs pace", "v space", "▁light ing", "▁n aked", "▁na ked", "▁design er", "▁St ream", "▁Stre am", "▁ Stream", "TM P", "T MP", "Cent er", "C enter", "resent ation", "ON T", "O NT", "▁e rs", "▁er s", "▁ ers", "▁measure ment", "▁mus cles", "▁muscle s", "▁I gn", "▁ Ign", "▁C OM", "▁CO M", "▁ COM", "▁f ru", "▁fr u", "▁gen re", "▁al pha", "▁ alpha", "▁ret irement", "▁retire ment", "▁G on", "▁Go n", "ő l", "cont ents", "content s", "▁he aling", "▁heal ing", "▁s ido", "▁si do", "▁sid o", "incip al", "Per mission", "Perm ission", "ра к", "▁G ordon", "▁Gor don", "▁R ank", "▁Ran k", "▁ Rank", "▁Aut om", "▁Au tom", "▁Auto m", "▁ Autom", "Con structor", "Construct or", "wi ki", "wik i", "w iki", "▁concern ing", "▁concer ning", "riz ona", "▁var iant", "▁vari ant", "▁ variant", "▁arr anged", "▁arrang ed", "▁arrange d", "▁S pr", "▁Sp r", "BP ACK", "B PACK", "Tim estamp", "re store", "rest ore", "aw are", "awa re", "a ware", "▁Ob serv", "▁ Observ", "▁S V", "▁ SV", "ip p", "i pp", "▁Execut ive", "▁col leg", "▁coll eg", "▁colle g", "▁explicit ly", "wr itten", "writ ten", "▁K ön", "▁Kö n", "ir us", "i rus", "▁H old", "▁Hol d", "▁Ho ld", "▁P ract", "▁Pr act", "▁Pra ct", "Char acter", "▁red istribute", "uer to", "▁Stud ent", "▁ Student", "▁el der", "▁D op", "▁Do p", "v p", "▁H ub", "▁Hu b", "▁ Hub", "▁gr ounds", "▁ground s", "▁R y", "▁sign als", "▁sig nals", "▁signal s", "▁g ifts", "▁gift s", "▁streng then", "▁strength en", "▁L yn", "▁Ly n", "com mun", "comm un", "▁на й", "▁fin ance", "▁financ e", "no c", "n oc", "he lm", "hel m", "h elm", "▁c uts", "▁cut s", "▁cu ts", "▁advent ure", "▁R ic", "▁intellect ual", "▁Out put", "▁ Output", "▁aw k", "▁ awk", "▁concentr ation", "▁guid ance", "Buf f", "Bu ff", "B uff", "▁f illing", "▁fil ling", "▁fill ing", "▁reg ul", "▁del icious", "([ ]", "( []", "ши х", "▁t ons", 
"▁to ns", "▁ton s", "▁ tons", "act ivity", "activ ity", "G P", "LO B", "L OB", "st adt", "sta dt", "stad t", "ta l", "t al", "▁im g", "▁i mg", "▁ img", "▁r ush", "▁ru sh", "▁rus h", "att ice", "atti ce", "▁p ok", "▁po k", "st eps", "ste ps", "step s", "▁l id", "▁li d", "▁D NA", "B rowser", "▁lad ies", "▁an nées", "▁ann ées", "▁resc ue", "av ity", "avi ty", "ro ck", "roc k", "r ock", "▁glass es", "▁B ey", "▁Be y", ")} $", ") }$", "de tail", "det ail", "▁d és", "▁dé s", "ta x", "t ax", "▁favour ite", "▁prec ision", "▁con oc", "▁co noc", "M s", "▁N ative", "▁Nat ive", "▁ Native", "▁P il", "▁Pi l", "Input Stream", "or p", "o rp", "▁P ap", "▁Pa p", "▁p icking", "▁pick ing", "▁pic king", "ip h", "i ph", "Load ing", "Lo ading", "▁pr iest", "▁pri est", "H ook", "▁p ist", "▁pi st", "▁U ne", "▁Un e", "▁ Une", "% ,", "▁b il", "▁bi l", "▁ bil", "▁conserv ative", "ev al", "eva l", "e val", "ik ing", "iki ng", "i king", "'} ,", "' },", "▁sa uce", "▁sau ce", "▁D ue", "▁Du e", "as sen", "ass en", "asse n", "▁occasion ally", "▁occasional ly", "▁Д ж", "un known", "unk nown", "DE D", "D ED", "▁d rum", "▁dr um", "▁dru m", "▁d ub", "▁du b", "AT URE", "us age", "usa ge", "get Type", "re ply", "rep ly", "▁strateg ic", "▁k ap", "▁ka p", "▁ kap", "de sign", "des ign", "date time", "dat etime", "▁P rim", "▁Pr im", "▁Pri m", "▁ Prim", "Ma ster", "M aster", "▁Cor ps", "▁consider able", "▁T u", "▁ ла", "▁t ous", "▁to us", "▁tou s", "▁c lar", "▁cl ar", "▁po em", "al bum", "] *", "lo aded", "load ed", "▁travel ing", "▁trav eling", "вы е", "▁F err", "▁Fe rr", "▁Fer r", "▁p harm", "▁ph arm", "ab i", "a bi", "▁} \\", "▁ }\\", "col lect", "coll ect", "▁B our", "▁Bo ur", "▁Bou r", "O C", "▁measure ments", "▁measurement s", "▁Profess ional", "▁s ensor", "▁sens or", "▁sen sor", "▁ sensor", "ut sche", "uts che", "utsch e", "▁dem anded", "▁demand ed", "▁accompan ied", "▁p rend", "▁pre nd", "▁pr end", "▁enc oding", "▁ encoding", "▁Gesch ichte", "▁m ig", "▁mi g", "▁G ib", "▁Gi b", "▁Re ich", "▁m yster", "▁my ster", "▁myst er", "▁M ock", "▁Mo ck", "▁ Mock", "▁phys ically", "▁physical ly", "▁B au", "▁Ba u", "▁S ingle", "▁Sing le", "▁Sin gle", "▁ Single", "▁man aging", "▁K il", "▁Ki l", "▁Tem ple", "▁Temp le", "▁l ev", "▁le v", "▁ lev", "▁l í", "CP U", "C PU", "▁Prem ier", "▁G ive", "▁Gi ve", "ir i", "i ri", "N V", "▁A I", "▁ AI", "▁f p", "▁ fp", "лекс анд", "▁t ant", "▁tan t", "▁ta nt", "▁f ot", "▁fo t", "Null able", "▁gu ards", "▁guard s", "On ce", "▁ch amber", "▁cha mber", "fil m", "fi lm", "▁b ias", "▁bi as", "▁ bias", "▁T ai", "▁Ta i", "ins ic", "insi c", "▁m l", "▁ ml", "▁K a", "ва л", "в ал", "▁S ER", "▁SE R", "▁ SER", "▁Some one", "}} _{", "}}_ {", "} }_{", "Fix ed", "F ixed", "▁b ent", "▁be nt", "▁ben t", "▁pro hib", "▁b id", "▁bi d", "▁ bid", "▁fe wer", "▁few er", "к ры", "▁l ugar", "▁lug ar", "▁lu gar", "▁de serve", "▁des erve", "ss l", "s sl", "▁c fg", "▁cf g", "▁ cfg", "re ck", "rec k", "▁st ability", "▁stabil ity", "▁stab ility", "re size", "res ize", "▁assert That", "Tr igger", "▁ста нов", "▁стан ов", "▁ станов", "pl ugins", "plugin s", "plug ins", "▁l ets", "▁le ts", "▁let s", "▁ lets", "хі д", "х ід", "▁La ura", "▁Lau ra", "не р", "н ер", "▁br ut", "▁bru t", "▁F I", "▁ FI", "is ons", "ison s", "iso ns", "▁d yn", "▁dy n", "▁ dyn", "ic her", "ich er", "iche r", "i cher", "ray ed", "▁frequ ent", "▁jed och", "▁Mar ine", "st rings", "str ings", "string s", "▁U til", "▁Ut il", "▁ Util", "▁b os", "▁bo s", "Mu s", "M us", "▁Portug al", "Str ategy", "▁по се", "▁пос е", "▁sl ice", "▁slic e", "▁ slice", "▁ins ight", "▁w idget", 
"▁wid get", "▁ widget", "▁gén éral", "message s", "m essages", "▁H u", "▁requ irement", "▁require ment", "Si de", "S ide", "empl ates", "emplate s", "▁cer emony", "▁ceremon y", "▁phys ics", "▁grad uate", "▁gradu ate", "▁ graduate", "par a", "pa ra", "p ara", "▁pre serv", "▁pres erv", "▁sh ops", "▁shop s", "▁ shops", "ze k", "z ek", "▁u b", "▁ ub", "pre pare", "▁O il", "▁f ib", "▁fi b", "▁run time", "▁ runtime", "▁h ogy", "▁ho gy", "Warn ing", "War ning", "W arning", "▁Con vert", "▁ Convert", "bour ne", "▁emer ged", "▁emerge d", "▁Д и", "ight h", "igh th", "gu ard", "ka l", "k al", "valid ation", "ên cia", "ê ncia", "▁dr inks", "▁drink s", "the orem", "H R", "ie v", "i ev", "ploy ee", "Us age", "▁с пе", "▁сп е", "▁ спе", "dis patch", "disp atch", "▁inst antly", "▁instant ly", "ob i", "o bi", "▁just ify", "▁N ev", "▁Ne v", "▁я вля", "ag ra", "agr a", "a gra", "▁trans mission", "▁transm ission", "fl y", "f ly", "; ' ;", "> ';", "▁cou sin", "▁cous in", "create Element", "Co uld", "C ould", "▁cap ac", "▁p ause", "▁pa use", "▁paus e", "▁ pause", "Array List", "kt e", "k te", "ord ered", "order ed", "▁sh aking", "▁sha king", "label s", "lab els", "▁redu cing", "вы х", "US ED", "USE D", "U SED", "▁v oting", "▁vo ting", "▁vot ing", "▁Min istry", "▁M ig", "▁Mi g", "▁C hen", "▁Ch en", "▁Che n", "▁ac company", "▁accompan y", "▁accomp any", "ul le", "ull e", "u lle", "▁g a", "▁ ga", "▁equ ipped", "▁equip ped", "▁n un", "▁nu n", "Be t", "B et", "▁lic ensed", "▁license d", "AR CH", "F N", "▁eng ines", "▁engine s", "▁s ter", "▁st er", "▁ste r", "▁ ster", "▁loc ale", "▁local e", "▁ locale", "▁в ъ", "lin ks", "link s", "l inks", "▁Cap ital", "▁al ien", "▁ali en", "W r", "р ъ", "Car t", "C art", "▁Mark eting", "▁Market ing", "▁R T", "▁ RT", "File Name", "▁t i", "▁ ti", "ij i", "i ji", "▁vers us", "li ve", "liv e", "l ive", "Sy m", "S ym", "ko r", "k or", "▁e mission", "▁em ission", "um m", "u mm", "yc z", "y cz", "▁clim bed", "▁climb ed", "▁plus ieurs", "к ри", "ya r", "y ar", "os ten", "ost en", "o sten", "▁u sb", "▁us b", "▁ usb", "▁cross ing", "▁pol ynom", "▁poly nom", "▁rem oval", "▁Ad ams", "▁Adam s", "▁i hre", "▁ih re", "▁ihr e", "an den", "and en", "ande n", "▁Ben j", "▁P hill", "▁Ph ill", "▁Phil l", "▁wound ed", "▁Cast le", "▁Cas tle", "bi ld", "bil d", "b ild", "An notation", "Process or", "▁t in", "▁ti n", "fo lg", "fol g", "▁Stud ents", "▁Student s", "▁Mex ican", "▁administr ative", "IL ED", "ILE D", "I LED", "▁con qu", "▁che er", "▁C es", "▁Ce s", "B ecause", "▁J uni", "▁Jun i", "▁Ju ni", "▁en contr", "av i", "a vi", "V I", "ak u", "a ku", "▁T on", "▁To n", "▁sm oking", "▁b ay", "▁ba y", "work s", "wor ks", "а т", "at tered", "att ered", "atter ed", "atte red", "▁Bo olean", "▁ Boolean", "▁B alt", "▁Ba lt", "▁Bal t", "de fer", "def er", "path y", "pat hy", "pa thy", "A h", "▁a kt", "▁ak t", "▁ akt", "▁gover nor", "▁govern or", "P ad", "▁si sters", "▁sister s", "▁sist ers", "La t", "L at", "▁re vel", "▁r evel", "▁rev el", "▁reve l", "▁S Y", "▁ SY", "it os", "ito s", "i tos", "▁fil ters", "▁filter s", "▁ filters", "Ch unk", "con sum", "cons um", "▁rem oving", "▁H err", "▁He rr", "▁Her r", "▁gener ator", "▁ generator", "▁C ra", "▁Cr a", "▁far mers", "▁farm ers", "▁farmer s", "▁Mem bers", "▁Member s", "▁ Members", "▁over come", "▁C in", "▁Ci n", "ig keit", "cri ptions", "cription s", "cript ions", "Test s", "Te sts", "T ests", "▁к лу", "▁sh ake", "▁sha ke", "▁ shake", "▁y y", "▁ yy", "pl acement", "place ment", "plac ement", "▁a wards", "▁aw ards", "▁award s", "▁epis odes", "▁episode s", "▁Bl ood", 
"▁Blo od", "▁bul let", "▁bull et", "▁ bullet", "▁v iene", "▁vi ene", "▁vie ne", "▁Fin ancial", "F uture", "▁r ou", "▁ro u", "▁ rou", "▁bi ologie", "▁use State", "ia ni", "ian i", "i ani", "pie ce", "p iece", "▁spe aker", "▁speak er", "▁re fr", "▁ref r", "AR K", "▁M IT", "▁MI T", "▁ MIT", "▁T an", "▁Ta n", "▁B ased", "▁Bas ed", "▁Base d", "▁Ba sed", "▁ Based", "▁cult iv", "▁hung ry", "▁A y", "▁H ey", "▁He y", "▁ Hey", "▁excit ement", "ibr aries", "Hi t", "H it", "▁E nde", "▁En de", "▁End e", "N G", "FI L", "F IL", ".\" )", ". \")", "F amily", "in ery", "ine ry", "iner y", "ne cess", "ve lope", "vel ope", "velop e", "▁B ot", "▁Bo t", "▁ Bot", "port er", "por ter", "porte r", "p orter", "▁cl imb", "▁clim b", "▁E li", "▁El i", "ur ent", "ure nt", "uren t", "u rent", "▁mist akes", "▁mistake s", "áb an", "á ban", "mark s", "mar ks", "m arks", "pk t", "p kt", "L ibrary", "st ed", "ste d", "s ted", "ublic e", "ubl ice", "▁Administr ation", "▁Admin istration", "▁sh apes", "▁shape s", "▁sha pes", "пу бли", "Go d", "G od", "in nen", "inn en", "ко ло", "к оло", "<< <<", "ib e", "i be", "ê s", "▁С ША", "▁Fore ign", "▁ Foreign", "▁Marg aret", "▁g ene", "▁gen e", "▁ge ne", "▁dist urb", "▁т ер", "▁те р", "▁ тер", "▁on Click", "▁Engine ering", "▁stop ping", "▁sto pping", "▁restr ictions", "▁restrict ions", "▁restriction s", ", *", "BU F", "▁sh adows", "▁shadow s", "hc i", "h ci", "▁Christ ians", "▁Christian s", "▁f ence", "▁fen ce", "▁lux ury", "ak h", "a kh", "co ord", "▁invest igate", "▁investig ate", "▁convent ional", "▁convention al", "\" —", "▁vis its", "▁visit s", "is é", "▁S ac", "▁Sa c", "class Name", "▁Psy ch", "▁ref lected", "▁reflect ed", "▁п ло", "▁ пло", "▁V ice", "▁Vi ce", "▁Vic e", "ła w", "ł aw", "________ ________", "▁W olf", "▁Wol f", "re nte", "ren te", "rent e", "r ente", "▁Ch ampion", "▁sim ulation", "es ota", "eso ta", "▁S oon", "▁So on", "▁C el", "▁Ce l", "▁the ories", "▁S TR", "▁ST R", "▁ STR", "▁collect ive", "▁coord inate", "query Selector", "em ed", "eme d", "e med", "B reak", "▁g ef", "▁ge f", "▁electric ity", "▁gather ing", "at ers", "ate rs", "ater s", "a ters", "ex per", "exp er", "▁R oma", "▁Rom a", "▁Ro ma", "▁Co oper", "SY MBOL", "v d", "ivers ary", "ain es", "ai nes", "aine s", "a ines", "▁G rad", "▁Gr ad", "▁Gra d", "▁ Grad", "▁independ ence", "wo h", "w oh", "▁con sequence", "▁consequ ence", "▁convers ations", "▁conversation s", "▁R ou", "▁Ro u", "▁and ere", "▁ander e", "▁System s", "га р", "г ар", "▁mo ist", "▁mois t", "fl u", "f lu", "ці я", "ни ш", "▁r ode", "▁ro de", "▁rod e", "▁p erd", "▁per d", "▁pe rd", "▁s zer", "▁sz er", "▁fl ood", "▁flo od", "▁in tim", "▁int im", "std err", "▁ref lection", "▁reflect ion", "Sc an", "S can", "▁dis aster", "ake spe", "akes pe", "▁In valid", "▁ Invalid", "▁hum or", "▁Fried rich", "▁suggest ions", "▁suggestion s", "uv ud", "De lay", "Del ay", "br ief", "b rief", "▁и с", "▁ ис", "gl ied", "fa s", "f as", "▁S mart", "▁Sm art", "▁m edi", "▁me di", "▁med i", "▁ medi", "sd k", "s dk", "▁se us", "▁seu s", "▁A rizona", "▁innoc ent", "War n", "W arn", "ac ious", "aci ous", "acio us", "▁Mos cow", "▁c aps", "▁cap s", "▁ca ps", "▁ caps", "Dele gate", "▁dram atic", "bo oks", "book s", "▁sh ore", "▁ shore", "uk i", "u ki", "▁Russ ell", "▁cor relation", "▁corre lation", "▁correl ation", "He lp", "Hel p", "▁pub blic", "zy m", "z ym", "com b", "co mb", "c omb", "E Y", "LEN GTH", "▁M ün", "▁_ .", "▁ _.", "▁f erm", "▁fe rm", "▁fer m", "▁I an", "▁St udio", "▁Stud io", "▁aff airs", "▁affair s", "lo s", "l os", "Rule s", "R ules", "run ning", "r 
unning", "▁Post ed", "▁Po sted", "▁Pos ted", "P ixel", "▁d ancing", "▁dan cing", "▁agree ments", "▁agre ements", "▁agreement s", "▁P ic", "▁Pi c", "an cia", "anc ia", "a ncia", "▁m á", "ation Token", "des criptor", "▁C arter", "▁Car ter", "▁Cart er", "Re lease", "Rele ase", "**** ********", "******** ****", "****** ******", "▁out standing", "ch anges", "change s", "chan ges", "AR RAY", "▁Bar bara", "▁Barb ara", "▁nur se", "▁nurs e", "( \r", "▁Dou glas", "▁Doug las", "▁nu cle", "▁nuc le", "ou ri", "our i", "o uri", "▁St yle", "▁ Style", "av o", "a vo", "▁pain ful", "▁s lic", "▁sl ic", "▁sein em", "▁seine m", "▁sei nem", "SUP PORT", "og ene", "ogen e", "oge ne", "▁sat ell", "ta gon", "tag on", "t agon", "▁coll apse", "▁ collapse", "ve lle", "vel le", "v elle", "MO N", "M ON", "augh ters", "aught ers", "aughter s", "▁threat ened", "▁Il legal", "▁desper ate", "st rict", "str ict", "stri ct", "ru s", "r us", "сти ту", "\\\" :", "\\ \":", "▁conf lic", "down load", "at os", "ato s", "a tos", "▁Pos ition", "▁ Position", ".* ;", ". *;", "▁the ater", "▁ple asant", "▁C ette", "▁Sing apore", "he et", "▁p ir", "▁pi r", "▁ac quis", "▁acqu is", "▁на зва", "те ля", "тел я", "▁rec ru", "же ния", "ё л", "вер сите", "▁res pective", "▁respect ive", "▁t unnel", "▁tun nel", "▁tunn el", "▁De an", "D u", "▁un cle", "▁unc le", "▁off ensive", "co lo", "col o", "c olo", "▁Un like", "se ries", "ser ies", "serie s", "s eries", "▁A rn", "▁Ar n", "min ute", "▁des criptor", "▁ descriptor", "▁st ones", "▁stone s", "▁sto nes", "IC ATION", "▁P ad", "▁Pa d", "▁ Pad", "▁i Phone", "e i", "▁fant asy", "▁Kore an", "▁Korea n", "\" }", "▁or th", "▁ orth", "hal ten", "halt en", "de ep", "▁K ay", "▁Ka y", "requ ency", "▁du ties", "▁dut ies", "aw t", "a wt", "▁ne arest", "▁near est", "▁dis order", "ст ру", "▁Ch ile", "▁Chi le", "▁s eq", "▁se q", "▁ seq", "▁transport ation", "O O", "▁D ez", "▁De z", "ij u", "i ju", "▁Result s", "▁ Results", "je d", "j ed", "iv el", "ive l", "i vel", "HO ST", "H OST", "▁ €", "▁ Î", "▁c hin", "▁ch in", "▁chi n", "▁m att", "▁mat t", "▁ma tt", "▁v oted", "▁vo ted", "▁vote d", "▁vot ed", "▁ge hör", "▁s ue", "▁su e", "▁leg acy", "в ся", "SO URCE", "W ORK", "it is", "iti s", "▁$ |", "▁о бо", "▁об о", "▁n r", "▁ nr", "▁T amb", "▁Ta mb", "▁Tam b", "▁sn ap", "▁ snap", "▁im pressed", "▁imp ressed", "▁impress ed", "▁depos it", "▁d ivid", "▁di vid", "▁div id", "Seg ment", "▁к ар", "▁ка р", "▁ кар", "▁G as", "▁Ga s", "▁cr imes", "▁crim es", "▁crime s", "▁cri mes", "▁ins ult", "▁H um", "▁Hu m", "▁bound ed", "▁k icked", "▁kick ed", "▁М у", "▁| \\", "▁ |\\", "ad ded", "add ed", "Pro du", "P rodu", "▁. 
/", "▁ ./", "▁awk ward", "▁К ра", "▁ ї", "▁CON TR", "▁be im", "▁bei m", "▁place holder", "▁ placeholder", "sp i", "s pi", "▁B ei", "▁Be i", "▁P f", "ient es", "ien tes", "iente s", "i entes", "dis k", "di sk", "d isk", "bl k", "ne o", "it arian", "ita rian", "itar ian", "▁c ogn", "▁co gn", "▁s out", "▁so ut", "▁sou t", "▁tr ash", "▁tra sh", "▁tras h", "▁R ab", "▁Ra b", "▁dec line", "▁decl ine", "ta t", "t at", "▁comb ine", "▁T ot", "▁To t", "▁dr ops", "▁dro ps", "▁drop s", "Time s", "Tim es", "T imes", "ched uler", "chedul er", "▁govern ments", "▁government s", "Te x", "T ex", "▁U sed", "▁Us ed", "▁Use d", "▁ Used", "за н", "з ан", "▁p d", "▁ pd", "ме т", "м ет", "▁&= &", "▁N ag", "▁Na g", "▁до л", "▁ дол", "▁Al ways", "rt c", "r tc", "ск е", "с ке", "▁perform ances", "▁performance s", "rupt ed", "rup ted", "▁д ва", "▁man agers", "▁manager s", "▁manage rs", "▁P itt", "▁Pi tt", "▁myst ery", "▁myster y", "▁set tle", "▁sett le", "ul se", "uls e", "cr oss", "cro ss", "c ross", "quest ion", "as ha", "ash a", "a sha", "se ed", "see d", "s eed", "ur able", "ura ble", "Fin al", "Fi nal", "F inal", "++ ++", "input s", "▁back up", "▁ backup", "▁Le arning", "▁Lear ning", "▁Learn ing", "▁* ,", "▁ *,", "lo go", "log o", "l ogo", "▁se inen", "▁sein en", "▁seine n", "▁sei nen", "▁vulner able", "direct ory", "i ë", "▁friend ship", "▁friends hip", "t u", "▁V ec", "▁Ve c", "▁ Vec", "rif ice", "rific e", "▁б ра", "▁ бра", "▁inv olve", "▁invol ve", "TO N", "T ON", "▁cor rid", "se par", "sep ar", "Dest roy", "▁j ul", "▁ju l", "▁inequ ality", "▁a in", "▁ai n", "▁ ain", "he x", "h ex", "▁w ider", "▁wide r", "▁wid er", "те ли", "тел и", "▁j ack", "▁ja ck", "▁ jack", "▁qu ot", "▁ quot", "▁G len", "▁Gl en", "▁Gle n", "init ely", "ih ood", "i hood", "▁wa ist", "▁Man chester", "reg ular", "▁( &", "▁ (&", "▁mass es", "▁mas ses", "▁DE FAULT", "▁ DEFAULT", "▁ch airs", "▁chair s", "▁cha irs", "▁F ast", "▁Fa st", "▁ Fast", "▁c itt", "▁cit t", "▁ci tt", "_{ {\\", "_ {{\\", "o a", "▁$ \\{", "▁$\\ {", "▁se eds", "▁see ds", "▁seed s", "▁A ld", "▁Al d", "▁B att", "▁Ba tt", "▁Bat t", "fa b", "f ab", "▁democr acy", "DT O", "D TO", "▁H ij", "▁Hi j", "PT R", "P TR", "N a", "▁Har vard", "si d", "s id", "Pr ed", "Pre d", "P red", "fer s", "fe rs", "f ers", "▁s pare", "▁sp are", "AM P", "A MP", "▁g roupe", "▁group e", "▁s ender", "▁se nder", "▁send er", "▁sen der", "▁ sender", "▁Christ opher", "▁prison ers", "▁prisoner s", "▁K er", "▁Ke r", "▁C rist", "▁Cr ist", "▁Cris t", "▁A LL", "▁AL L", "▁ ALL", "ri ce", "ric e", "r ice", "▁an tes", "▁ant es", "▁ante s", "▁ antes", "nat ural", "▁Su san", "▁Sus an", "▁J uli", "▁Jul i", "▁Ju li", "▁di ab", "▁dia b", "ix on", "ic ator", "ica tor", "▁flex ible", "▁re serve", "▁res erve", "▁reserv e", "Cont ains", "▁H il", "▁Hi l", "▁I sa", "▁Is a", "▁town s", "▁tow ns", "G S", "▁T rad", "▁Tr ad", "▁Tra d", "▁L ock", "▁Loc k", "▁Lo ck", "▁ Lock", "▁G rund", "▁Gr und", "▁Gru nd", "▁crit icism", "▁critic ism", "н ю", "▁c ă", "▁polit ician", "st able", "sta ble", "s table", "Ac cept", "Sum mary", "▁tamb ém", "▁també m", "}^ {-", "}^{ -", "} ^{-", "▁I M", "▁ IM", "id al", "ida l", "i dal", "мо р", "м ор", "Bl ue", "GRO UP", "▁term inal", "▁termin al", "▁complex ity", "▁loc ally", "▁local ly", "DO WN", "▁N ear", "▁Ne ar", "Dep th", "▁p ole", "▁pol e", "▁po le", "▁e quality", "▁equ ality", "▁equal ity", "Si te", "S ite", "▁is instance", "Sp eed", "Spe ed", "S peed", "ip pi", "ipp i", ", &", "▁E nc", "▁En c", "▁ Enc", "ще н", "щ ен", "▁m ater", "▁mat er", "▁ma ter", "▁mate r", "▁sl aves", "▁slave s", "▁sla 
ves", "AC TION", "ACT ION", "A CTION", "usal em", "usa lem", "▁h az", "▁ha z", "▁Be at", "▁w rest", "▁wr est", "▁l lam", "▁ll am", "In s", "I ns", "ми на", "▁бу в", "▁Fr ame", "▁Fra me", "▁ Frame", "us hes", "ush es", "▁virtual ly", "▁virt ually", "▁P erm", "▁Per m", "▁Pe rm", "▁ Perm", "▁we ights", "▁weight s", "▁weigh ts", "▁ weights", "▁ll vm", "▁ llvm", "▁c ave", "▁ca ve", "▁cav e", "st ates", "state s", "stat es", "sta tes", "DM A", "D MA", "el lt", "ell t", "if act", "ifa ct", "i fact", "v endor", "▁E mma", "▁Em ma", "Loc ale", "Local e", "▁S ET", "▁SE T", "▁ SET", "▁ge ometry", "▁ geometry", "St yles", "Style s", "▁Ref eree", "▁Refer ee", "▁we it", "fi ca", "fic a", "f ica", "▁a ds", "▁ad s", "▁ ads", "gr ay", "gra y", "g ray", "▁B urg", "▁Bur g", "▁Bu rg", "ion a", "io na", "i ona", "dag ger", "d agger", "▁Jan uar", "де й", "д ей", "ister schaft", "pp o", "p po", "oid s", "oi ds", "o ids", "▁dé part", "Sh ader", "▁con straint", "▁constr aint", "▁ constraint", "Se cret", "Sec ret", "▁P eters", "▁Pe ters", "▁Peter s", "▁Pet ers", "▁Pete rs", "▁ey eb", "▁eye b", "▁m esh", "▁me sh", "▁mes h", "▁ mesh", "▁c ookie", "▁cook ie", "▁ cookie", "▁P ick", "▁Pic k", "▁Pi ck", "▁n ick", "▁ni ck", "▁nic k", "▁ nick", "by e", "b ye", "▁sav ings", "▁saving s", "Tr y", "T ry", "py thon", "▁p atri", "▁pat ri", "▁pa tri", "▁mult ip", "▁multi p", "▁mul tip", "▁ multip", "▁k inda", "▁kind a", "▁kin da", "▁' _", "▁ '_", "▁Fr anz", "▁Fran z", "▁cl oth", "▁clo th", "зу льта", "▁fle et", "▁human ity", "re sa", "res a", "r esa", "bl ob", "blo b", "▁T X", "▁ TX", "▁B uch", "▁Bu ch", "▁Buc h", "▁L ond", "▁Lo nd", "▁val ley", "▁m urm", "▁mur m", "▁mu rm", "▁T rade", "▁Tr ade", "▁Tra de", "▁Trad e", "line width", "▁e special", "▁espec ial", "up per", "upp er", "▁h osp", "▁ho sp", "▁t anto", "▁tan to", "▁tant o", "▁old est", "▁ol dest", "▁R oose", "▁Ro ose", "▁h itting", "▁hit ting", "do g", "d og", "ov i", "o vi", "}, \r", "} ,\r", "▁compat ible", "▁ compatible", "▁We bsite", "▁Web site", "po ch", "p och", "▁B ag", "▁Ba g", "▁ Bag", "▁accompl ish", "▁accomp lish", "Ch rist", "as set", "ass et", "asse t", "▁U ntil", "▁Un til", "▁ Until", "▁g eld", "▁ge ld", "▁gel d", "List en", "Li sten", "L isten", "S B", "Set up", "ic ia", "ici a", "i cia", "▁l um", "▁lu m", "▁jan vier", "PA GE", "P AGE", "▁N u", "/ \"", "▁divor ce", "Ex ecute", "Execut e", "Exec ute", "De pend", "Dep end", "▁Scott ish", "▁T s", "ru ppe", "rup pe", "▁ref use", "▁Ok tober", "ij k", "i jk", "▁A my", "▁Am y", "▁di min", "▁dim in", "▁g ross", "▁gr oss", "▁gro ss", "▁t rat", "▁tr at", "▁tra t", "is ible", "isi ble", "mix er", "m ixer", "▁aut res", "▁au tres", "▁autre s", "▁ autres", "▁ne at", "▁ot ros", "▁otro s", "Vo id", "V oid", "▁sc hol", "▁sch ol", "▁Wal ker", "▁Walk er", "▁t ube", "▁tu be", "▁tub e", "olog ists", "ologist s", "▁г руп", "▁гру п", "▁h aben", "▁hab en", "▁ha ben", "ub er", "ube r", "u ber", "ACT IVE", "▁Att endance", "▁о п", "▁bl ade", "opl us", "o plus", "▁Or iginal", "▁Origin al", "▁ Original", "▁manufact urer", "as z", "a sz", "ât e", "â te", "re r", "r er", "▁J son", "▁ Json", "▁succeed ed", "uff le", "▁b acked", "▁back ed", "es ian", "esi an", "ti ck", "t ick", "Ex ternal", "▁X IX", "▁XI X", "▁he arts", "▁heart s", "▁hear ts", "▁По сле", "ol u", "o lu", "▁ле т", "▁ лет", "VI CE", "V ICE", "ár io", "á rio", "▁fr aud", "▁fra ud", "ed u", "e du", "Pr imary", "Prim ary", "▁g aming", "▁gam ing", "▁ga ming", "▁p lt", "▁pl t", "ig ator", "iga tor", "IE S", "I ES", "Comp iler", "▁mon ument", "ag em", "age m", "a gem", "▁R ain", "▁Ra 
in", "▁mo ins", "ok u", "o ku", "os ex", "ose x", "o sex", "▁K ansas", "▁gep ublice", "▁J oy", "▁Jo y", "Sc ene", "▁king dom", "ri ces", "ric es", "rice s", "r ices", "▁ju in", "▁uncomfort able", "▁M oney", "▁Mon ey", "▁Mo ney", "ob b", "o bb", "ex pl", "exp l", "str cmp", "▁d read", "▁dr ead", "▁dre ad", "rit ion", "r ition", "▁C hi", "▁Ch i", "▁demonstr ated", "▁demonstrate d", "▁vert ices", "ч о", "▁C ulture", "▁ Culture", "F X", "D ictionary", "▁D ru", "▁Dr u", "tr m", "t rm", "▁ex amine", "▁exam ine", "▁the rap", "▁ther ap", "i ème", "ми ни", "▁produ ces", "▁produce s", "▁photograph s", "▁thread s", "▁ threads", "▁M I", "▁ MI", "▁extraord inary", "ски м", "ск им", "с ким", "▁gepublice erd", "▁Pol and", "▁Po land", "▁guarante ed", "▁guarantee d", "R G", "os c", "o sc", "ал и", "а ли", "▁те х", "err no", "sc ience", "if fs", "iff s", "▁T am", "▁Ta m", "▁B eth", "▁Be th", "▁Bet h", "▁Tr avel", "▁Tra vel", "▁trans late", "▁transl ate", "▁ translate", "ch é", "▁l ing", "▁li ng", "▁lin g", "▁ ling", "▁bel ongs", "▁belong s", "▁elect rical", "▁electric al", "en sk", "ens k", "▁Com pet", "▁Comp et", "c g", "V C", "to pic", "top ic", "t opic", "▁pre sum", "▁pres um", "ве та", "вет а", "▁approxim ation", "▁approx imation", "▁g rim", "▁gr im", "▁gri m", "▁И з", "_{ (", "_ {(", "ви н", "в ин", "ut ion", "uti on", "ow ych", "owy ch", "å g", "ster reich", "▁character istic", "om ing", "omin g", "omi ng", "o ming", "▁/* !", "▁ /*!", "▁pr ize", "▁pri ze", "▁Minn esota", "te d", "t ed", "ц ы", "▁O m", "▁ Om", "▁ind ices", "▁indic es", "▁ indices", "▁s tem", "▁st em", "▁ste m", "re gon", "reg on", "ни че", "▁Sal v", "▁Sa lv", "és e", "é se", "▁a ged", "▁ag ed", "▁age d", "▁ aged", "▁P ast", "▁Pa st", "▁Pas t", "▁intern ation", "▁V ic", "▁Vi c", "▁res ume", "▁ resume", "akespe are", "▁est ado", "▁esta do", "▁estad o", "▁ab ilities", "▁ abilities", "▁b row", "▁br ow", "▁bro w", "▁N FL", "▁tr ends", "▁trend s", "▁tren ds", "▁Aust in", "▁L IMIT", "▁LI MIT", "▁ LIMIT", "▁K or", "▁Ko r", "▁f olk", "▁fol k", "▁ folk", "▁w ard", "▁war d", "▁wa rd", "▁ ward", "▁n est", "▁ne st", "▁Jun ior", "▁Juni or", "▁maint aining", "▁maintain ing", "P ub", "OB JECT", "▁blo ody", "▁blood y", "▁s j", "▁d type", "▁dt ype", "▁ dtype", "Pan e", "P ane", "▁b acter", "▁grad ually", "▁gradu ally", "m r", "Te am", "▁ind icating", "▁indic ating", "▁decre ase", "te k", "t ek", "▁Re present", "▁Rep resent", "▁develop ers", "▁developer s", "Gu id", "Gui d", "G uid", "▁D iet", "▁Die t", "▁Di et", "▁re tr", "▁r etr", "▁ret r", "Nav igation", "es i", "e si", "▁l azy", "▁la zy", "Stand ard", "E r", "A W", "▁Ét ats", "▁ass ured", "▁assure d", "Sa n", "S an", "▁And re", "▁Andr e", "’ ,", "fa ng", "fan g", "f ang", "ér ation", "▁indust ries", "▁in con", "▁inc on", "Em it", "E mit", "▁г де", "▁ret riev", "▁retr iev", "en i", "e ni", "▁Tur key", "▁Turk ey", "iz ers", "ize rs", "izer s", "An gle", "Ang le", "▁o c", "▁ oc", "▁pal m", "▁pa lm", "▁s tan", "▁st an", "▁sta n", "▁ stan", "ль но", "▁C SS", "▁CS S", "▁ CSS", "▁fr ances", "▁franc es", "▁g rin", "▁gr in", "▁gri n", "▁tiem po", "▁P rix", "▁Pr ix", "▁Pri x", "]) .", "] ).", "▁de put", "▁dep ut", "▁P in", "▁Pi n", "▁ Pin", "▁si xt", "▁six t", "▁predict ed", "▁pred icted", "az ure", "azu re", "▁Mo tor", "▁Mot or", "▁i hm", "▁ih m", "▁man us", "ap os", "a pos", "▁instr uments", "▁instrument s", "▁co unts", "▁coun ts", "▁count s", "▁aim ed", "▁ai med", "▁ aimed", "pro fit", "prof it", "▁d ok", "▁do k", "об ра", "о бра", "▁e stud", "▁est ud", "ie sz", "ies z", "i esz", "▁p iss", "▁pi ss", "▁in 
aug", "▁vo ters", "▁vote rs", "▁vot ers", "▁pack ages", "▁package s", "▁ packages", "▁c ute", "▁cut e", "▁cu te", "▁f itness", "▁fit ness", "▁l eurs", "▁le urs", "▁leur s", "▁s orted", "▁sort ed", "▁sor ted", "ph ant", "pha nt", "phan t", "OP T", "O PT", "▁z ip", "▁ zip", "se ason", "sea son", "em i", "e mi", "enc oding", "wo n", "w on", "el ect", "ele ct", "e lect", "▁t ooth", "▁to oth", "▁too th", "▁up coming", "▁G raham", "▁Gra ham", "nu t", "n ut", "▁Ar k", "äl t", "ä lt", "▁prec ious", "ag le", "a gle", "né e", "n ée", "ни ца", "ниц а", "ar is", "ari s", "a ris", "▁p ile", "▁pi le", "▁pil e", "co le", "col e", "c ole", "▁W ITH", "▁WIT H", "▁ WITH", "rou ting", "r outing", "▁* **", "▁** *", "▁ ***", "Appe arance", "ll vm", "▁O liver", "▁Ol iver", "▁P L", "▁ PL", "if ndef", "et zt", "etz t", "sk iego", "ski ego", "▁p on", "▁po n", "▁ pon", "AR GET", "ARG ET", "k ö", "al led", "all ed", "alle d", "▁= \\", "▁ =\\", "su re", "sur e", "s ure", "mat ches", "match es", "▁temper atures", "▁temperature s", "SE L", "S EL", "▁cl one", "▁clo ne", "▁ clone", "▁el ler", "▁elle r", "▁ell er", "▁ eller", "er na", "ern a", "▁п оло", "▁по ло", "▁пол о", "Man agement", "comp any", "▁l un", "▁lu n", "▁stre aming", "▁stream ing", "▁N i", "▁s í", "Cont act", "▁C redit", "▁Cr edit", "▁Cre dit", "▁O ak", "▁пред став", "rad ius", "cl i", "c li", "IE NT", "I ENT", "▁Lu cy", "▁Luc y", "▁calcul ation", "▁calc ulation", "▁p ixel", "▁ pixel", "▁m ul", "▁mu l", "▁ mul", "▁out comes", "▁outcome s", "▁cent ers", "▁center s", "▁res idence", "▁resid ence", "Con straint", "▁pre serve", "▁pres erve", "▁preserv e", "pe on", "uf fix", "uff ix", "▁Rober ts", "▁Robert s", "▁Rob erts", "▁pro mot", "▁pr omot", "▁prom ot", "? !", "bal ance", "▁cour ts", "▁court s", "▁dis g", "▁di sg", "PR INT", "PRI NT", "▁и х", "el fare", "elf are", "▁ret reat", "▁А в", "Co st", "C ost", "al so", "als o", "▁F ür", "▁Mär z", "DI O", "D IO", "▁b ez", "▁be z", "▁ bez", "AUT H", "AU TH", "De n", "D en", "▁a tom", "▁at om", "▁ atom", "▁r oman", "▁ro man", "▁rom an", "▁P el", "▁Pe l", "▁Roose velt", "▁Pl ant", "▁Plan t", "Cont ents", "Content s", "▁Bet ween", "▁cou pling", "▁coup ling", "str ucture", "struct ure", "▁Mar shall", "▁Mars hall", "▁Marshal l", "▁Care er", "▁rail way", "▁B ureau", "▁Bur eau", "▁poss ibilities", "▁k or", "▁ko r", "▁ kor", "){ \r", ") {\r", "mer o", "me ro", "m ero", "mo v", "m ov", "анг л", "AI N", "A IN", "mu nd", "mun d", "m und", "let te", "lett e", "l ette", "▁sum mar", "▁describ ing", "▁N AS", "▁NA S", "▁E mb", "▁Em b", "▁ Emb", "Inst ruction", "li est", "lie st", "l iest", "▁S ig", "▁Si g", "▁ Sig", "Bi ll", "B ill", "▁v erd", "▁ver d", "▁ve rd", "pl ant", "plan t", "▁galax ies", "\"] )", "\" ])", "▁Py Object", "▁ PyObject", "▁G y", "▁m ě", "▁organ isation", "▁organis ation", "He r", "H er", "Se p", "S ep", "oc om", "oco m", "o com", "▁S ame", "▁Sam e", "▁Sa me", "▁ Same", "▁b ite", "▁bit e", "▁bi te", "▁Se attle", "зы ва", "Ob server", "Observ er", "’ .", "▁m orph", "▁mor ph", "ur ches", "urch es", "al ph", "re ement", "ree ment", "con sin", "cons in", "^ -", "▁d ann", "▁da nn", "▁dan n", "trans late", "transl ate", "ви х", "Re act", "▁c ats", "▁cat s", "▁ca ts", "▁b rew", "▁br ew", "▁bre w", "▁ brew", "▁d s", "▁ ds", "▁cir cles", "▁circ les", "▁circle s", "▁d rift", "▁dr ift", "▁dri ft", "ag ma", "▁Val ent", "PI N", "P IN", "AR M", "A RM", "▁sur viv", "▁surv iv", "al in", "ali n", "a lin", "Pr ef", "Pre f", "P ref", "friend ly", "▁uncertain ty", "▁f d", "▁ fd", "▁engine er", "Be n", "B en", "ic ular", "i cular", "or 
est", "ore st", "ores t", "o rest", "▁hor izontal", "▁horizon tal", "▁ horizontal", "UT C", "U TC", "text rm", "tex trm", "Li ve", "L ive", "Sc ore", "S core", "▁Germ ans", "▁German s", "▁Ger mans", "di stance", "dist ance", "d istance", "ut i", "u ti", "▁é qu", "▁ équ", "▁numer ical", "▁re ass", "Act iv", "▁c od", "▁co d", "▁ cod", "bul let", "en sing", "ens ing", "▁G em", "▁Ge m", "▁nav igation", "▁navig ation", "▁ navigation", "add Class", "▁simultane ously", "ви й", "▁йо го", "▁й ого", "▁H ö", "▁har sh", "prec ated", "p recated", "С СР", "▁Equ ip", "ad get", "▁T YPE", "▁ TYPE", "▁m g", "▁ mg", "IG H", "▁v in", "▁vi n", "▁ vin", "▁fin dings", "▁find ings", "▁finding s", "iv an", "iva n", "i van", "▁pos session", "▁poss ession", "▁possess ion", "▁т ого", "▁то го", "▁ того", "▁par sed", "▁parse d", "▁ parsed", "ri ors", "rior s", "rio rs", "r iors", "zeich net", "ни ков", "ник ов", "Work er", "▁en ables", "▁enable s", "▁( $\\", "▁($ \\", "▁C opy", "▁Co py", "▁Cop y", "▁ Copy", "▁orient ation", "ст ре", "с тре", "▁Ind ians", "▁India ns", "▁Indian s", "▁G ary", "▁Gar y", "▁Ga ry", "▁Ins urance", "is an", "isa n", "i san", "Ch at", "C hat", "▁com un", "▁co mun", "▁co ron", "▁cor on", "ографи я", "up dated", "update d", "▁И н", "The se", "Th ese", "SE C", "S EC", "▁boy friend", "Di agnostics", "Hi nt", "H int", "mu l", "m ul", "▁in ode", "▁i node", "▁ inode", "x A", "ef t", "e ft", "OP TION", "OPT ION", "un ct", "unc t", "an non", "ann on", "anno n", "EN S", "E NS", "st rip", "str ip", "stri p", "▁enthus i", "▁W hit", "▁Wh it", "▁Ф и", "au de", "aud e", "a ude", "▁disag ree", "▁sn apped", "▁snap ped", "Ph ys", "▁S yn", "▁Sy n", "▁s our", "▁so ur", "▁sou r", "▁L ux", "▁Lu x", "ug ar", "uga r", "u gar", "til e", "ti le", "t ile", "▁in fection", "▁inf ection", "▁infect ion", "▁F eb", "▁Fe b", "▁C hem", "▁Ch em", "▁Che m", "data set", "dat aset", "ch ts", "cht s", "D ynamic", "▁с ред", "▁qu een", "▁que en", "work er", "wor ker", "sw ap", "▁tim estamp", "▁ timestamp", "▁In tegr", "▁Int egr", "▁ Integr", "▁inter views", "▁interview s", "su ch", "s uch", "▁l aughter", "▁laugh ter", "pro f", "pr of", "▁B ird", "▁Bi rd", "▁Bir d", "( |", "â n", "▁g ra", "▁gr a", "▁ gra", "& =", "ze ns", "zen s", "z ens", "get Message", "▁O st", "▁Os t", "▁g ab", "▁ga b", "▁mort gage", "mult icol", "multi col", "LE VEL", "part ition", "se en", "see n", "s een", "▁dec lar", "▁decl ar", "A U", "▁o x", "▁ ox", "▁l igger", "▁lig ger", "▁C arm", "▁Car m", "▁Ca rm", "ge me", "gem e", "g eme", "▁Ve gas", "▁Veg as", "▁E ug", "▁Eu g", "or us", "o rus", "▁b rick", "▁br ick", "▁as í", "▁Mag azine", "HasColumn Type", "V R", "lic her", "li cher", "lich er", "liche r", "l icher", "▁F uture", "▁Fut ure", "▁ Future", "▁J ug", "▁Ju g", "at tan", "att an", "atta n", "con structor", "construct or", "V P", "▁т ур", "▁ту р", "▁ тур", "чи на", "чин а", "Comp arator", "Compar ator", "▁aut hentic", "▁mon ster", "▁trans formed", "▁transform ed", "▁firm s", "▁fir ms", "F W", "▁c atalog", "▁catal og", "▁ catalog", "bo ards", "board s", "▁dise ases", "▁disease s", "▁Benj amin", "▁hor izon", "▁Av ailable", "▁ Available", "M vc", "St ud", "▁l ord", "▁lo rd", "▁ lord", "gen eral", "gener al", "па р", "п ар", "▁cab inet", "▁cabin et", "▁Bas ic", "▁ Basic", "Test Case", "an sk", "ans k", "▁S now", "▁Sn ow", "ier ten", "iert en", "ierte n", "i erten", "▁v ocal", "▁vo cal", "▁voc al", "Pad ding", "P adding", "ha lt", "hal t", "h alt", "▁Alex and", "▁Col omb", "iv amente", "iva mente", "▁art ificial", "▁Atl anta", "▁m entre", "▁men tre", "▁ment re", 
"▁est aba", "▁estab a", "▁esta ba", "je kt", "jek t", "j ekt", "▁sle pt", "▁end less", "▁endl ess", "ér o", "é ro", "at tery", "att ery", "atter y", "atte ry", "uu r", "u ur", "▁weak ness", "▁attempt ing", "BY TE", "▁found er", "▁fo under", "▁fou nder", "▁sa lv", "▁sal v", "▁Medic ine", "ti d", "t id", "▁Sch we", "▁Schw e", "ra ction", "ract ion", "r action", "▁ ¿", "cr ate", "c rate", "SER VER", "▁comp ound", "▁con ve", "▁conv e", "▁c af", "▁ca f", "▁hand ful", "on ne", "úblic a", "▁def ensive", "▁defens ive", "Al ignment", "Align ment", "▁pr éc", "▁pré c", "▁signific ance", "él é", "é lé", "ar ta", "art a", "Da m", "D am", "▁per pet", "▁c aller", "▁call er", "▁cal ler", "ic ients", "ici ents", "icient s", "ce p", "c ep", "▁Mult i", "▁Mul ti", "▁ Multi", "▁st olen", "▁sto len", "▁stole n", "▁focus ing", "em bed", "emb ed", "▁b ree", "▁br ee", "▁bre e", "▁A B", "▁ AB", "▁occasion s", "▁occas ions", "se a", "s ea", "Pro v", "Pr ov", "P rov", "че ние", "▁C ategory", "▁ Category", "▁s q", "▁ sq", "▁Ф е", "V A", "Di ff", "D iff", "Tr i", "T ri", "iss ement", "isse ment", "▁act ress", "▁П е", "▁j ej", "▁je j", "▁tw isted", "▁twist ed", "▁N icol", "▁Nic ol", "▁Ni col", "▁jun ior", "▁junio r", "▁juni or", "So und", "S ound", "▁Bra sil", "▁Bras il", "▁ju ice", "▁> >>", "▁>> >", "▁ >>>", "▁A lb", "▁Al b", "▁soft ly", "▁Mc K", "▁G ren", "▁Gr en", "▁Gre n", "▁ital iano", "▁cre atures", "▁creat ures", "▁creature s", "▁res idential", "▁resident ial", "▁resid ential", "▁Inst agram", "uck s", "uc ks", "u cks", "▁k iller", "▁kill er", "▁kil ler", "▁John ny", "▁enter prise", "D to", "ch estra", "che stra", "ches tra", "chestr a", "▁T el", "▁Te l", "▁Act iv", "▁ Activ", "fa ctor", "fac tor", "fact or", "f actor", "ou st", "ous t", "o ust", "▁vac uum", "ра л", "р ал", "') ->", "' )->", "▁L eft", "▁Le ft", "▁ Left", "▁de fect", "▁def ect", "▁defe ct", "▁nine te", "▁nin ete", "fa re", "far e", "f are", "▁reg ret", "▁s har", "▁sh ar", "▁sha r", "ctr ine", "me sh", "mes h", "m esh", "ci ty", "cit y", "c ity", "ic it", "ici t", "i cit", "▁F em", "▁Fe m", "lim ited", "limit ed", "ok a", "o ka", "!\\ !\\", "Don ald", "з но", "▁pro vision", "▁prov ision", "▁discuss ions", "▁discussion s", "Dr ag", "D rag", "▁In cl", "▁Inc l", "Ex it", "E xit", "▁A bd", "▁Ab d", "st ory", "sto ry", "ie ve", "iev e", "i eve", "▁by ł", "ol ving", "olv ing", "woh ner", "▁gu idelines", "▁guide lines", "▁guid elines", "▁st raw", "▁str aw", "▁stra w", "ü ss", "▁бу ло", "▁bur den", "▁spat ial", "▁stret ched", "▁stretch ed", "▁I nf", "▁In f", "▁ Inf", "▁type def", "▁typed ef", "▁ro bot", "▁rob ot", "▁D oc", "▁Do c", "▁ Doc", "pl iers", "plier s", "wa l", "w al", "ca mp", "cam p", "c amp", "▁dif fé", "▁diff é", "▁Mc G", "▁t el", "▁te l", "ar ette", "aret te", "▁sub sequently", "▁subsequ ently", "▁subsequent ly", "▁h oney", "▁hon ey", "▁ho ney", "FUN C", "▁establish ment", "te sy", "tes y", "▁któ ry", "▁се ль", "▁F O", "▁ FO", "▁Is lands", "▁Island s", "▁m p", "▁ mp", "Scal ar", "▁Y an", "▁Ya n", "ck en", "cke n", "c ken", "▁var iation", "▁vari ation", "i ą", "op tim", "opt im", "az or", "tu ple", "t uple", "▁gr avity", "▁grav ity", "▁con clude", "▁concl ude", "▁col lections", "▁collection s", "▁collect ions", "▁colle ctions", "és z", "é sz", "▁L iver", "▁Li ver", "▁Live r", "▁Liv er", "▁eth nic", "comp ile", "▁p arl", "▁par l", "▁pa rl", "Sur face", "{ '", "▁par agraph", "▁para graph", "▁ paragraph", "pos ite", "po site", "ít ulo", "ob a", "o ba", "bin ary", "b inary", "ro b", "r ob", "▁Pe dro", "▁Ped ro", "▁f is", "▁fi s", "▁Gr ande", 
"▁Grand e", "▁Gran de", "▁Gra nde", "od ox", "odo x", "▁pos ting", "▁post ing", "< !--", "▁rac ial", "▁ra cial", "CO M", "C OM", "ё м", "▁A UT", "▁AU T", "▁ AUT", "▁d ishes", "▁dis hes", "▁dish es", "assert True", "▁G row", "▁Gr ow", "▁Gro w", "▁sl id", "▁ju illet", "сс о", "с со", "Run ner", "Sa l", "S al", "Sa me", "Sam e", "S ame", "▁Stud y", "▁Col onel", "▁J oin", "▁Jo in", "▁ Join", "ar ms", "arm s", "▁l y", "▁ ly", "▁co oper", "▁cur ves", "▁curve s", "▁curv es", "He alth", "▁M OD", "▁MO D", "▁ MOD", "▁pr imo", "▁prim o", "▁pri mo", "ock ets", "ocket s", "multi column", "multicol umn", "▁С ан", "▁Са н", "▁H unter", "▁Hun ter", "▁Hunt er", "Custom er", "ot hy", "oth y", "o thy", "Des ign", "De sign", "ma ss", "mas s", "m ass", "▁fam ille", "▁famil le", "▁fue ron", "▁fu eron", "▁fuer on", "ä m", "▁head quarters", "▁d ign", "▁di gn", "▁dig n", "▁Ro bin", "▁Rob in", "▁me ets", "▁meet s", "▁so it", "па да", "пад а", ")\" );", ") \");", "▁w rapper", "▁wrap per", "▁ wrapper", "▁theoret ical", "▁u d", "▁ ud", "pl icity", "plic ity", "plicit y", "▁w p", "▁ wp", "▁испо ль", "▁c amps", "▁camp s", "▁cam ps", "▁A gency", "▁Ag ency", "g c", "hu m", "h um", "AT T", "A TT", "B tn", "C ent", "▁H elen", "▁He len", "▁Hel en", "▁am plit", "▁ampl it", "▁Mem orial", "und ial", "SH IFT", "wi k", "w ik", "▁Lie utenant", "VAL ID", "▁B ath", "▁Ba th", "▁Bat h", "▁Jeff erson", "▁C ut", "▁Cu t", "▁ Cut", "▁ser vers", "▁serv ers", "▁server s", "▁serve rs", "▁ servers", "ly ph", "▁CO PY", "▁COP Y", "▁comput ers", "▁computer s", "▁compute rs", "const ruction", "construct ion", "▁P DF", "▁PD F", "▁ PDF", "▁pro tagon", "▁prot agon", "▁fore head", "custom er", "Un is", "U nis", "▁sign ing", "▁sig ning", ". ’", "F etch", "▁S core", "▁Sc ore", "▁ Score", "hu man", "hum an", "h uman", "▁down town", "▁downt own", "In tern", "Int ern", "Inter n", "▁bes ides", "▁beside s", "▁д во", "▁пра ви", "▁ прави", "▁c c", "▁ cc", "▁D ebug", "▁De bug", "▁Deb ug", "▁ Debug", "▁Cl ose", "▁ Close", "el ihood", "eli hood", "▁al gorithms", "▁algorithm s", "▁H amb", "▁Ham b", "▁Ha mb", "ч на", "▁c ust", "▁cu st", "▁mo unted", "▁mount ed", "par en", "pa ren", "pare n", "p aren", "▁isol ated", "▁A gr", "▁Ag r", "▁or bit", "▁orb it", "print k", "▁t urb", "▁tu rb", "▁tur b", "▁gru po", "ми и", "\"\" \"", "\" \"\"", "▁h ills", "▁hill s", "ря д", "▁B od", "▁Bo d", "▁об ще", "est one", "esto ne", "eston e", "e stone", "▁satisf ying", "▁satisfy ing", "▁I van", "▁Iv an", "▁associ ate", "name d", "na med", "nam ed", "n amed", "oc cup", "occ up", "GP IO", "G PIO", "hi t", "h it", "▁dis tract", "▁di stract", "▁dist ract", "▁bar rel", "▁barr el", "▁in variant", "di d", "d id", "▁l ieu", "▁li eu", "▁lie u", "sc ene", "UN K", "▁Ont ario", "▁M ission", "▁Miss ion", "zi al", "z ial", "▁comp ete", "▁compet e", "▁cou ples", "▁couple s", "▁coup les", "SH A", "S HA", "▁s ei", "▁se i", "▁m igration", "▁migr ation", "ac ked", "ack ed", "▁b arn", "▁bar n", "▁ba rn", "hal f", "h alf", "▁neigh bour", "▁neighb our", "ft e", "f te", "▁od ds", "▁odd s", "▁optim ization", "▁I C", "▁ IC", "▁H end", "▁He nd", "▁Hen d", "pay ment", "M r", "') :", "' ):", "vo ir", "v oir", "▁R ange", "▁Rang e", "▁Ran ge", "▁ Range", "▁polit icians", "▁politician s", "▁K han", "▁Kh an", "▁shel ter", "▁tim ing", "▁ti ming", "Create d", "Creat ed", "C reated", "▁sept embre", "li t", "l it", "▁S hel", "▁She l", "▁Sh el", "▁c ouch", "▁co uch", "▁cou ch", "▁d är", "ult ur", "▁G iov", "▁Gi ov", "ô le", "RE AM", "▁O cean", "▁M B", "▁ MB", "▁lie gt", "▁o v", "▁ ov", "▁car pet", "та р", "т ар", "▁го 
дина", "▁годи на", "▁S ão", "▁о тно", "▁от но", "ab ling", "abl ing", "a bling", "in th", "int h", "▁purs ue", "▁Const itution", "an j", "▁F BI", "▁ar row", "▁arr ow", "▁ arrow", "ph ones", "phone s", "▁kn ocked", "▁knock ed", "▁de com", "▁dec om", "ie k", "i ek", "ь е", "St rip", "Str ip", "▁V enez", "▁Ven ez", "▁Ve nez", "▁p upp", "▁pu pp", "▁pup p", "bi an", "bia n", "b ian", "▁cot ton", "h p", "▁the atre", "▁accept able", "cuss ion", "▁r ounds", "▁round s", "▁act ively", "▁activ ely", "▁active ly", "▁among st", "▁a bc", "▁ab c", "▁ abc", "F M", "Pop up", "▁divers ity", "us z", "u sz", "▁employ er", "spec ially", "special ly", "▁sus pected", "▁suspect ed", "▁c rypt", "▁cry pt", "▁O scar", "▁Os car", "no r", "n or", "▁bab ies", "во м", "▁m undo", "▁li bert", "▁lib ert", "▁liber t", "S G", "ah ren", "ahr en", "a hren", "▁magn itude", "T M", "' +", "▁об ъ", "▁G ust", "▁Gu st", "▁gr ain", "▁gra in", "мен т", "м ент", "to Equal", "▁m os", "▁mo s", "▁ mos", "▁consist ently", "▁consistent ly", "х у", "▁domin ant", "Con verter", "Convert er", "at able", "ata ble", "a table", "▁J ag", "▁Ja g", "scri ptions", "script ions", "scription s", "s criptions", "x B", "▁ ©", "fol der", "fold er", "f older", "▁sub stance", "▁subst ance", "▁по с", "L o", "BU S", "B US", "bas ic", "us sen", "uss en", "▁co ins", "▁coin s", ": -", "▁N elson", "▁Nel son", "In ner", "ograf ía", "▁ex empl", "▁exem pl", "ch g", "▁sy nd", "▁syn d", "dyn amic", "d ynamic", "ver ted", "vert ed", "▁EV ENT", "▁ EVENT", "se ek", "see k", "av ier", "avi er", "a vier", "▁p rot", "▁pro t", "▁pr ot", "▁ prot", "-- ----", "---- --", "--- ---", "----- -", "- -----", "▁con vention", "▁conv ention", "▁convent ion", "▁станов ника", "gl ing", "g ling", "hor a", "ho ra", "h ora", "ши й", "▁wh ilst", "ser ialize", "serial ize", "s erialize", "▁R ing", "([ '", "( ['", "▁c her", "▁ch er", "▁che r", "▁ cher", "сь кі", "▁D anny", "▁Dan ny", "▁re aches", "▁reach es", "▁el igible", "▁P arent", "▁Par ent", "▁Pa rent", "▁ Parent", "▁came ras", "▁cam eras", "▁camera s", "▁discipl ine", "▁s illy", "▁sil ly", "re ts", "ret s", "r ets", "yt ics", "▁Reg ional", "▁Region al", "▁B aby", "▁Ba by", "▁Bab y", "te le", "t ele", "WAR NING", "WARN ING", "su pp", "sup p", "s upp", "▁refer ring", "▁mer ch", "▁merc h", "ol ves", "olve s", "olv es", "em et", "eme t", "e met", "ck e", "c ke", "▁M unicip", "▁Mun icip", "Wh ite", "▁ Ś", "ri os", "rio s", "r ios", "log ging", "▁d x", "▁ dx", "▁su sp", "▁sus p", "ex ternal", "▁Liber al", "▁Lib eral", "▁Init ialize", "▁Initial ize", "▁ Initialize", "▁exhib ition", "▁exhibit ion", "▁ext ensions", "▁extension s", "▁extens ions", "▁ extensions", "ke eper", "keep er", "kee per", "SY S", "▁J ake", "▁Ja ke", "▁Jak e", "fo oter", "foot er", "foo ter", "▁ph ones", "▁phone s", "▁ phones", "▁real m", "▁contribut ed", "▁contribute d", "ME SS", "▁For mat", "▁Form at", "▁ Format", "Per iod", "▁h id", "▁hi d", "▁ hid", "▁me tres", "▁met res", "▁D im", "▁Di m", "▁ Dim", "ache lor", "achel or", "▁T ak", "▁Ta k", "▁ве ли", "▁g ram", "▁gr am", "▁gra m", "▁ gram", "▁M Y", "▁ MY", "on ders", "ond ers", "onder s", "onde rs", "'; \r", "' ;\r", "▁F ro", "▁Fr o", "▁advant ages", "▁advantage s", "io v", "i ov", "▁she ets", "▁sheet s", "ce mbre", "c embre", "ž e", "] \r", "▁D J", "subset eq", "UP DATE", "▁b locked", "▁bl ocked", "▁block ed", "▁pan els", "▁pa nels", "▁panel s", "E A", "nd e", "n de", "ê t", "Bu l", "B ul", "▁m eters", "▁me ters", "▁met ers", "▁meter s", "jo ur", "j our", "▁rap port", "▁rapp ort", "▁J ak", "▁Ja k", "▁V AL", "▁VA L", "▁ 
VAL", "▁p up", "▁pu p", "▁k a", "▁ ka", "for ced", "force d", "▁ав гу", "ener gy", "e nergy", "▁V a", "not es", "no tes", "note s", "n otes", "▁relax ed", "C r", "id ding", "idd ing", "▁def ines", "▁define s", "▁defin es", "▁kiss ed", "▁inv asion", "▁invas ion", "▁sc reens", "▁screen s", "C trl", "▁pass engers", "▁passenger s", "▁Х о", "ation ship", "ations hip", "per cent", "\\ }", "▁be ating", "▁beat ing", "life ray", "lifer ay", "▁V M", "▁ VM", "▁Gab riel", "▁g allery", "▁gall ery", "▁Л о", "iv ot", "ivo t", "▁r ental", "▁ren tal", "▁rent al", "▁sh ocked", "▁shock ed", "▁Ste in", "▁B h", "▁ ло", "Un e", "U ne", "ге н", "г ен", "▁kom mun", "an ka", "ank a", "▁C ape", "▁Cap e", "▁Ca pe", "Re ady", "Read y", "▁к ри", "▁ кри", "tr ag", "tra g", "t rag", "Al ign", "Ali gn", "▁host ed", "▁ho sted", "▁\\ (", "▁S ession", "▁ Session", "ys k", "y sk", "Pen ding", "P ending", "ellig ence", "elli gence", "▁Never theless", "bit ro", "bitr o", "ho lm", "hol m", "quir y", "▁mechan ical", "▁D é", "an eous", "ane ous", "▁psych ological", "▁a broad", "▁ab road", "▁a voir", "▁av oir", "▁separ ation", "▁sep aration", "▁Haw ai", "iej sc", "▁N ether", "▁Ne ther", "▁Net her", "▁sub tle", "bi rd", "b ird", "▁mark er", "▁mar ker", "▁ marker", "▁со зда", "ва ла", "вал а", "▁Work ing", "▁Wor king", "▁h over", "▁ho ver", "▁ hover", "%%%% %%%%", "▁м ат", "▁ма т", "▁ мат", "▁s oup", "▁so up", "▁sou p", "Al ert", "ch r", "c hr", "▁P CI", "▁PC I", "▁ PCI", "▁m ús", "ient ras", "ien tras", "▁St orage", "▁Sto rage", "▁ Storage", "▁av ailability", "▁op era", "▁oper a", "▁P roduction", "▁Produ ction", "▁Product ion", "ia ne", "ian e", "i ane", "▁Bet ter", "▁B utton", "▁But ton", "▁ Button", "▁Pe ace", "▁Mor ris", "▁s ib", "▁si b", "▁f iber", "▁fi ber", "▁fib er", "Int ent", "▁D esc", "▁De sc", "▁Des c", "▁ Desc", "ning en", "n ingen", "ze j", "z ej", "av an", "ava n", "a van", "cover ed", "cov ered", "▁s yst", "▁sy st", "▁sys t", "_ +", "▁орга ни", "▁Re lig", "▁Rel ig", "ци аль", "▁s pite", "▁sp ite", "▁re prés", "▁~ ~", "▁ ~~", "▁to xic", "▁a pro", "▁ap ro", "▁apr o", "X Y", "▁tr ips", "▁tri ps", "▁trip s", "▁pl aats", "▁con vey", "▁conv ey", "▁conve y", "Pr im", "P rim", "▁о ста", "▁ос та", "▁ост а", "ok o", "o ko", "▁l obby", "▁lob by", "▁recommend ations", "▁recommendation s", "SP ACE", "▁overwhel ming", "ennes see", "▁ac quire", "▁acqu ire", "w m", "LOB AL", "▁D EF", "▁DE F", "▁ DEF", "je r", "j er", "▁re cur", "▁rec ur", "om men", "omm en", "▁j og", "▁jo g", "▁n ast", "▁na st", "▁nas t", "▁L P", "▁ LP", "jo n", "j on", "▁w ishes", "▁wish es", "▁wis hes", "▁N ancy", "▁support ers", "▁supp orters", "^{ -\\", "^{- \\", "▁T rib", "▁Tr ib", "▁Tri b", "▁ Ä", "▁disappoint ed", "▁у ни", "x D", "li nt", "lin t", "l int", "I p", "▁Islam ic", "än de", "änd e", "ä nde", "end ment", "dt ype", "d type", "▁di gest", "▁dig est", "▁Set tings", "▁Setting s", "▁ Settings", "ér a", "é ra", "▁aggress ive", "▁intellig ent", "eder börd", "ster dam", "pc i", "p ci", "▁over flow", "▁ overflow", "im b", "i mb", "re ach", "rea ch", "r each", "cept or", "cep tor", "▁yield s", "▁Se bast", "▁ut ility", "▁util ity", "▁р и", "▁ ри", "▁fac ulty", "▁In ternal", "▁Intern al", "▁Inter nal", "▁ Internal", "▁attract ed", "▁attra cted", "рі в", "р ів", "▁mix ing", "▁R uth", "▁Ru th", "▁esc aped", "▁escape d", "▁E asy", "▁dr ain", "▁r ings", "▁ring s", "▁ rings", "qu ire", "quir e", "Av ailable", "▁ц и", "▁ ци", "▁conv ince", "▁convin ce", "or sch", "ors ch", "ут бо", "CP P", "C PP", "ra ge", "rag e", "r age", "ч і", "▁p rod", "▁pro d", "▁pr od", "▁ 
prod", "▁p ig", "▁pi g", "▁C atal", "▁Cat al", "▁Ca tal", "▁al ias", "▁ali as", "▁ alias", "▁че мпи", "▁чем пи", "Pl ace", "P lace", "▁g orge", "▁depend ence", "▁cr uel", "▁cru el", "▁ther mal", "▁therm al", "ut down", "ref resh", "▁re sort", "▁res ort", "▁S HA", "▁SH A", "▁ SHA", "ти й", "fo od", "foo d", "f ood", "▁N ad", "▁Na d", "▁pregn ancy", "▁pro jection", "▁project ion", "▁pa ís", "▁полу чи", "▁the mes", "▁them es", "▁theme s", "▁fun eral", "▁cas o", "▁ca so", "ле кт", "лек т", "Ex tra", "Ext ra", "▁t issue", "▁dr agon", "▁drag on", "▁l ig", "▁li g", "▁ lig", "▁n ei", "▁ne i", "▁com edy", "▁come dy", "▁comed y", "те м", "т ем", "сла в", "с лав", "▁pass enger", "Cl one", "i ção", "yg on", "y gon", "▁H alf", "▁Hal f", "▁la bour", "▁lab our", "▁vill ages", "▁village s", "▁ві й", "▁О т", "▁L isa", "▁Li sa", "▁Lis a", "_ [", "ba g", "b ag", "▁d iver", "▁di ver", "▁div er", "▁dive r", "▁M L", "▁ ML", "▁transl ated", "▁translate d", "▁per ò", "ab ama", "aba ma", "▁cas tle", "▁cast le", "▁ castle", "* \\", "▁reg ia", "!! !!", "!!! !", "! !!!", "*> (", "* >(", "▁Work s", "▁Wor ks", "▁N ature", "▁Nat ure", "▁Natur e", "NE L", "N EL", "▁P om", "▁Po m", "tt a", "t ta", "▁Jam ie", "▁p unch", "▁pun ch", "tain ment", "▁K rieg", "▁Kr ieg", "▁restr icted", "▁restrict ed", "mob ile", "m obile", "▁grand mother", "Arg uments", "Argument s", "▁s inc", "▁si nc", "▁sin c", "▁Mon th", "▁Mont h", "▁ Month", "esc ape", "e scape", "▁opt ical", "▁L ane", "▁La ne", "▁Lan e", "▁Deutsch land", "▁S aison", "▁Sa ison", "▁V irtual", "▁ Virtual", "pe z", "p ez", "In line", "ow any", "owa ny", "rad io", "r adio", "ö ß", "▁O thers", "▁Other s", "MA IN", "M AIN", "sc al", "s cal", "▁D allas", "▁Dal las", "▁an chor", "▁anc hor", "▁anch or", "▁ anchor", "en cias", "enc ias", "encia s", "enci as", "▁re porter", "▁rep orter", "▁report er", "▁veget ables", "▁enforce ment", "▁Wis consin", "▁con dem", "▁cond em", "▁e b", "▁ eb", "▁s its", "▁sit s", "▁si ts", "▁calcul ations", "▁calculation s", "▁calc ulations", "▁\" --", "▁\"- -", "ue lle", "uel le", "u elle", "▁tip o", "▁ti po", "▁P AR", "▁PA R", "▁ PAR", "co rd", "cor d", "c ord", "▁ро ків", "ph an", "pha n", "p han", "▁kon nte", "▁z ap", "▁za p", "wr iting", "writ ing", "en gu", "eng u", "▁pert urb", "Fac e", "F ace", "ag og", "ago g", "▁De cl", "▁Dec l", "▁ Decl", "est ly", "▁War ren", "▁H ills", "▁Hill s", "▁Hil ls", "▁ref resh", "▁refr esh", "▁refres h", "▁ refresh", "▁fl ip", "io p", "i op", "▁key board", "is to", "ist o", "i sto", "▁prom oted", "▁promote d", "▁promot ed", "back s", "ba cks", "b acks", "Enc oding", "▁ ال", "▁g min", "ро б", "р об", "▁follow ers", "▁p epper", "um ble", "umb le", "▁sp ray", "▁spr ay", "▁dr ives", "▁dri ves", "▁driv es", "▁drive s", "P ush", "cook ie", "c ookie", "▁gel dig", "▁geld ig", "ig ung", "igu ng", "vis it", "▁at omic", "▁atom ic", "▁ atomic", "▁A thlet", "▁Ath let", "▁Or igin", "▁Ori gin", "▁ Origin", "▁H appy", "▁G ra", "▁Gr a", "▁att ribut", "▁п ов", "▁по в", "▁ пов", "▁n ost", "▁no st", "▁nos t", "▁ nost", "ur u", "u ru", "▁Ne ither", "▁ma ar", "ject ions", "je ctions", "jection s", "▁re nov", "▁ren ov", "fin ity", "f inity", "gener ic", "init ialize", "initial ize", "pgf set", "▁hyp othes", "▁ma cro", "▁mac ro", "ma ps", "map s", "m aps", "▁f are", "▁far e", "▁fa re", "▁ fare", "Be st", "B est", "uch t", "uc ht", "u cht", "co d", "c od", "▁h orm", "▁hor m", "▁ho rm", "▁P oll", "▁Pol l", "▁Po ll", "▁host ing", "▁Re ading", "▁Read ing", "Cert ificate", "▁и ма", "▁им а", "▁C ov", "▁Co v", "▁P red", "▁Pr ed", "▁Pre d", "▁ Pred", 
"re direct", "red irect", "▁l attice", "▁port folio", "▁o ven", "▁ov en", "▁ oven", "ie len", "iel en", "iele n", "i elen", "sub scribe", "foot note", "но ю", "▁mom ento", "▁moment o", "▁d ich", "▁di ch", "▁dic h", "▁ent ert", "▁enter t", "▁g é", "▁connect ing", "▁n acional", "▁o tt", "▁ot t", "▁ ott", "ні в", "н ів", "▁rac ist", "▁penal ty", "ül t", "ü lt", "▁Israel i", "▁( †", "▁desc end", "▁ос іб", "▁b elly", "▁bel ly", "▁bell y", "ні сть", "▁encounter ed", "T ip", "▁gu ilt", "▁d amp", "▁da mp", "▁dam p", "ze ug", "▁Mem ory", "▁ Memory", "Check ed", "▁Sh akespeare", "hi ll", "h ill", "▁w oke", "▁wo ke", "▁sal ary", "eth eless", "ethe less", "e theless", "▁Т и", "er de", "erd e", "▁He in", "▁g it", "▁gi t", "▁ git", "=\" \"", "= \"\"", "ül l", "ü ll", "ge ben", "geb en", "g eben", "Pr es", "Pre s", "P res", "ie val", "iev al", "i eval", "mark er", "mar ker", "▁д ан", "▁да н", "▁ дан", "▁oct obre", "RO L", "R OL", "▁jan u", "▁ja nu", "▁) :", "▁ ):", "br anch", "▁J erry", "▁Jer ry", "ke hr", "▁contr acts", "▁contract s", "▁aff air", "▁Росси и", "ja ck", "j ack", "AN G", "A NG", "▁dro pping", "▁drop ping", "▁d ic", "▁di c", "sch ool", "▁Fin land", "▁d ort", "▁do rt", "▁K ings", "▁King s", "▁Kin gs", "▁Arg ument", "▁ Argument", "▁Sim ilarly", "▁Similar ly", "▁V erm", "▁Ver m", "▁Ve rm", "▁pret end", "! _", "łu g", "ł ug", "же ння", "жен ня", "da ting", "dat ing", "d ating", "cs v", "c sv", "▁dialog ue", "▁dial ogue", "STR U", "▁public ly", "wed ge", "w edge", "▁H och", "▁Ho ch", "▁spe aks", "▁speak s", "▁compens ation", "an ca", "anc a", "text tt", "▁Fil ter", "▁ Filter", "▁part ly", "▁us eless", "▁use less", "▁г у", "▁ гу", "▁d eter", "▁de ter", "▁det er", "IE W", "▁con secut", "▁cons ecut", "▁conse cut", "▁h oly", "▁hol y", "▁ho ly", "▁grad uated", "▁gradu ated", "▁graduate d", "an dal", "and al", "anda l", "ți e", "ț ie", "▁W ant", "▁Wa nt", "▁Aust ria", "or den", "ord en", "fr ag", "f rag", "▁f oo", "▁fo o", "▁ foo", "cl aimed", "claim ed", "во е", "▁not able", "▁no table", "▁journal ist", "▁M ail", "▁Ma il", "▁Mai l", "▁ Mail", "!( \"", "! 
(\"", "ps e", "p se", "▁C lay", "▁Cl ay", "iv i", "i vi", "▁sc ales", "▁scale s", "▁scal es", "▁er ste", "▁erst e", "▁ers te", "Data Type", "▁D iam", "▁Di am", "í r", "loc ale", "local e", "▁rel uct", "ien st", "iens t", "ast ro", "astr o", "act ly", "я х", "▁Vill age", "▁Villa ge", "▁Vil lage", "▁d aughters", "▁daughter s", "▁manufact urers", "▁manufacturer s", "▁print ing", "▁prin ting", "ч ка", "Nd Ex", "Ch anges", "Change s", "▁/ ******/", "ver tex", "vert ex", "▁b rows", "▁br ows", "▁bro ws", "▁brow s", "▁K ö", "not ations", "notation s", "▁i ls", "▁il s", "▁ ils", "at el", "ate l", "C ir", "▁meaning ful", "q a", "▁C old", "▁Col d", "▁Co ld", "ue to", "u eto", "you r", "yo ur", "y our", "m f", "мо в", "м ов", "▁Ü ber", "▁fam ilia", "▁famil ia", "▁ste ep", "▁pres idential", "▁president ial", "▁presid ential", "▁z á", "▁ zá", "▁w ars", "▁war s", "▁wa rs", "▁C re", "▁Cr e", "▁after wards", "▁afterward s", "ha lb", "hal b", "▁strugg led", "▁struggle d", "Ch art", "Char t", "User Id", "ac ular", "a cular", "iv ia", "ivi a", "i via", "▁u gly", "▁K unst", "E s", "▁Q String", "▁C ow", "▁Co w", "Rad ius", "▁Gr iff", "▁V as", "▁Va s", "HA L", "H AL", "Mod ified", "ra le", "ral e", "r ale", "mem cpy", "▁в клю", "▁r s", "▁ rs", "▁h alt", "▁ha lt", "▁hal t", "▁ halt", "▁Miss iss", "▁h uvud", "ec a", "e ca", "▁Jahrhund ert", "E urope", "Sign ature", "▁grand father", "▁O regon", "gu e", "g ue", "xy gen", "fr ames", "frame s", "▁hab its", "▁ha bits", "▁habit s", "Support ed", "Supp orted", "▁low ered", "▁lower ed", "▁rad iation", "▁radi ation", "ab en", "abe n", "a ben", "▁Pro gress", "▁ Progress", "▁C osta", "▁Co sta", "▁Cost a", "▁Cos ta", "▁dev oted", "▁gest ure", "▁Dez ember", "▁qu oted", "▁quote d", "▁quot ed", "▁difficult ies", "т ре", "▁sustain able", "▁d ense", "▁den se", "▁dens e", "▁ih rer", "▁ihr er", "▁ihre r", "▁firm ly", "â t", "om ent", "ome nt", "omen t", "o ment", "▁c out", "▁co ut", "▁cou t", "▁ cout", "▁p oi", "▁po i", "d jango", "▁pro found", "▁prof ound", "▁Wil helm", "▁fl ush", "▁flu sh", "▁ flush", "▁av ril", "LA B", "L AB", "▁B row", "▁Br ow", "▁Bro w", "▁pro pose", "▁prop ose", "▁propos e", "▁r anks", "▁ran ks", "▁rank s", "WI D", "W ID", "▁mut ual", "▁text s", "▁tex ts", "▁S ale", "▁Sal e", "▁Sa le", "▁qu asi", "▁n og", "▁no g", "▁ nog", "▁nouve au", "▁c v", "▁ cv", "▁no ble", "▁nob le", "▁dé cembre", "▁déc embre", "▁cl ever", "▁cle ver", "▁P ir", "▁Pi r", "▁graph ics", "▁graphic s", "▁ graphics", "▁G R", "▁ GR", "че ской", "▁s ag", "▁sa g", "ict ions", "iction s", "i ctions", "na nt", "nan t", "n ant", "▁th é", "C G", "▁Jac ques", "W M", "▁F inn", "▁Fin n", "▁Fi nn", "▁dev ast", "зо м", "хо в", "х ов", "▁En tre", "▁Ent re", ". 
;", "▁fl uct", "▁flu ct", "▁Sc iences", "▁Sci ences", "▁Science s", "▁т у", "▁ ту", "path s", "pat hs", "▁sh orter", "▁short er", "▁suggest ion", "ER Y", "▁D ire", "▁Di re", "▁Dir e", "at eurs", "ate urs", "ateur s", "▁round ed", "▁t art", "▁tar t", "▁ta rt", "ю ще", "up er", "u per", "▁secret s", "▁sec rets", "▁secre ts", "▁compan ion", "▁K EY", "▁ KEY", "T ile", "▁B ibli", "x s", "▁ang ular", "▁ angular", "pa g", "p ag", "er ness", "ern ess", "erne ss", "▁S orry", "▁Sor ry", "▁ Sorry", "▁pre diction", "▁predict ion", "▁pred iction", "▁M aking", "▁Ma king", "▁Mak ing", "на род", "ol are", "ola re", "olar e", "rp c", "r pc", "▁t ens", "▁te ns", "▁ten s", "en as", "ena s", "e nas", "▁Re ally", "▁Real ly", "H I", "port al", "por tal", "▁for me", "▁form e", "gan g", "ga ng", "g ang", "▁l ane", "▁la ne", "▁lan e", "▁ lane", "▁s tag", "▁st ag", "▁sta g", "▁Mar x", "▁Ma rx", "▁L LC", "▁LL C", "▁d are", "▁da re", "▁dar e", "▁Olymp ic", "▁p ant", "▁pan t", "▁pa nt", "build ing", "; ;", "▁c ops", "▁co ps", "▁cop s", "▁r ushed", "▁rush ed", "▁rus hed", "▁L ot", "▁Lo t", "▁init iative", "▁initi ative", "▁inv ite", "▁Saf ety", "▁Safe ty", "FA ILED", "FAIL ED", "▁habit ants", "en sen", "ens en", "ense n", "▁l ég", "▁W elcome", "▁Wel come", "Valid ate", "▁qu atre", "▁G ray", "▁Gr ay", "▁Gra y", "▁E ve", "▁Ev e", "▁C omb", "▁Com b", "▁Co mb", "▁ Comb", "▁p endant", "a qu", "con figure", "config ure", "▁A dm", "▁Ad m", "▁rif le", "▁Exper ience", "Decl aration", "▁å r", "▁ år", "ill ery", "ille ry", "iller y", "os pel", "osp el", "▁A rena", "▁Ar ena", "▁Are na", "▁bo ards", "▁board s", "▁ boards", "▁pur ple", "▁p ills", "▁pil ls", "▁pill s", "ueto oth", "li que", "l ique", "▁pop ulations", "▁population s", "▁popul ations", "▁acc ent", "▁ac cent", "▁r anges", "▁range s", "▁ran ges", "▁rang es", "▁Anal ysis", "▁ Analysis", "▁d ictionary", "▁Dr agon", "▁Drag on", "re ction", "rect ion", "r ection", "▁vis itor", "▁visit or", "seg ment", "▁д р", "▁F uck", "▁Fu ck", "д ж", "▁ident ification", "Class Name", "boot strap", "▁sur faces", "▁surface s", "▁surf aces", "▁scream ing", "кт у", "к ту", "pl ain", "sh adow", "incl udes", "include s", "▁j azz", "▁ja zz", "▁á l", "▁ ál", "ri ka", "rik a", "r ika", "ho p", "h op", "▁i on", "▁io n", "▁ ion", "vr e", "v re", "▁newsp apers", "▁newspaper s", "▁i hn", "▁ih n", "▁P arse", "▁Par se", "▁Pars e", "▁ Parse", "П о", "▁strict ly", "▁re covered", "▁recover ed", "▁U na", "▁Un a", "▁err e", "▁er re", "▁ erre", "iss ues", "issue s", "▁exp ense", "че ния", "▁do nc", "▁don c", "Bi n", "B in", "▁Com ment", "▁Comm ent", "▁ Comment", "▁sac rifice", "▁sacrific e", "T uple", "() [", "( )[", "▁tra vers", "▁trav ers", "Im p", "I mp", "J e", "▁Lin ux", "▁е ё", "▁P i", "▁ Pi", "▁cur ios", "▁cu rios", "▁r age", "▁rag e", "▁ra ge", "▁ rage", "▁e scal", "▁es cal", "▁esc al", "▁al ignment", "▁align ment", "▁pent ru", "▁cur r", "▁cu rr", "▁ curr", "▁b este", "▁be ste", "▁best e", "▁bes te", "[] ,", "[ ],", "▁// !", "H ub", "Vis ibility", "▁A sk", "▁As k", "ab ul", "a bul", "co lon", "col on", "colo n", "▁D ays", "▁Day s", "▁Da ys", "Aut hentication", "ві т", "▁l od", "▁lo d", "xF C", "x FC", "Look up", "js ce", "Al pha", "▁harm ony", "▁harmon y", "▁W ard", "▁War d", "▁Wa rd", "trans fer", "▁H orn", "▁Hor n", "▁Ho rn", "▁s d", "▁ sd", "so ap", "▁z ich", "▁Con sole", "▁Cons ole", "▁ Console", "▁ко ли", "▁Ph one", "▁ Phone", "pa per", "p aper", "й н", "▁z m", "▁ zm", "Do ne", "Don e", "D one", "ph ase", "pha se", "phas e", "▁Jul ia", "▁Ju lia", "▁Juli a", "▁ed ited", "▁edit ed", "af fe", "aff 
e", "Sy ntax", "yl l", "y ll", "▁Lu cas", "▁Luc as", "▁and eren", "▁andere n", "▁ander en", "[ <", "▁Data base", "▁Dat abase", "▁ Database", "▁spect ral", "▁spectra l", "ass ador", "ска та", "с ката", "▁import ante", "▁important e", "▁х а", "▁ ха", "t z", "▁s tere", "▁st ere", "▁ste re", "▁ster e", "▁m elt", "▁me lt", "▁mel t", "▁C row", "▁Cr ow", "▁Cro w", "ш ка", "it utes", "itut es", "itute s", "itu tes", "▁satisf ies", "▁L iga", "▁Li ga", "▁t omb", "▁to mb", "▁tom b", "▁f ühr", "▁ führ", "▁sol ely", "▁sole ly", "▁E ither", "▁t ennis", "▁ten nis", "▁s igh", "▁si gh", "▁sig h", "ser de", "s erde", "ub a", "u ba", "ę d", "le z", "l ez", "Fac t", "F act", "▁sque ez", "▁Thom pson", "▁N L", "▁ NL", "▁P ara", "▁Par a", "▁Pa ra", "▁? ?", "▁ ??", "▁fin ishing", "▁finish ing", "She et", "S heet", "LIN K", "L INK", "▁б ро", "▁ бро", "▁l over", "▁lo ver", "▁love r", "▁lov er", "m achine", "▁L esser", "▁Les ser", "▁Less er", "pon d", "po nd", "p ond", "▁pain tings", "▁paint ings", "▁painting s", "▁assum ptions", "▁assumption s", "▁mod ification", "fr e", "f re", "▁U lt", "▁Ul t", "▁A F", "▁ AF", "R V", "bin ding", "bind ing", "b inding", "▁toile t", "ra r", "r ar", "▁an ge", "▁ang e", "▁ ange", "▁she ep", "PRO TO", "act ic", "a ctic", "▁S peed", "▁Sp eed", "▁Spe ed", "▁ Speed", "▁I ce", "gn u", "g nu", "ow ned", "own ed", "Sub scription", "yr ics", "y rics", "▁back ward", ">\" .", "> \".", "pi t", "p it", "▁real istic", "öff ent", "az i", "a zi", "DE R", "D ER", "b ucket", "én y", "é ny", "xF E", "x FE", "▁f ancy", "▁fan cy", "ex cept", "▁S ul", "▁Su l", "▁l aser", "▁la ser", "▁las er", "Mon itor", "▁c omic", "▁com ic", "▁co mic", "▁Arch itect", "▁ex pr", "▁exp r", "▁ expr", "ount ers", "oun ters", "ounter s", "▁Mel bourne", "com plex", "comp lex", "'. $", "' .$", "om ot", "omo t", "o mot", "▁M enu", "▁Me nu", "▁Men u", "▁ Menu", "astic search", "▁ed iting", "▁edit ing", "Pre sent", "Pres ent", "P resent", "op les", "ople s", "opl es", "o ples", "è ncia", "▁в то", "gl ise", "she et", "s heet", "▁he lic", "▁hel ic", "▁str anger", "▁strange r", "▁strang er", "▁ex ec", "▁ exec", "FE R", "F ER", "in ian", "ini an", "SET TING", "▁M ix", "▁Mi x", "▁ Mix", "▁com plain", "▁compl ain", "▁in crement", "▁incre ment", "CS S", "C SS", "mm a", "m ma", "sl ide", "▁про тив", "▁проти в", "▁Lim ited", "Con sole", "Cons ole", "▁eng aging", "ul er", "ule r", "u ler", "▁O ptions", "▁Option s", "▁Opt ions", "▁ Options", "▁l ens", "▁le ns", "▁len s", "Ma il", "M ail", "▁bar rier", "▁barr ier", "trans port", "▁c ups", "▁cu ps", "▁cup s", "it err", "ite rr", "iter r", "▁const ants", "▁constant s", "▁ constants", "▁T ech", "▁Te ch", "iz io", "izi o", "сту па", "ступ а", "▁Sw eden", "at hon", "ath on", "a thon", "▁M agn", "▁Mag n", "▁Ma gn", "trans ition", "де ла", "es k", "e sk", "So ft", "S oft", "fun ctions", "function s", "ne a", "n ea", "Im plement", "Impl ement", "Imp lement", "ev ery", "ever y", "eve ry", "e very", "▁Man ufact", "▁improve ments", "▁improvement s", "▁Ind iana", "▁India na", "▁Indian a", "▁host s", "▁ho sts", "C V", "We st", "W est", "to wn", "t own", "can vas", "▁ш ко", "▁Col umn", "▁ Column", "▁Par ker", "▁Park er", "▁es pa", "▁esp a", "▁Pub lish", "▁которы й", "av is", "avi s", "a vis", "▁Z w", "▁emphas is", "ol v", "o lv", "▁re curs", "▁rec urs", "▁recur s", "it aire", "ita ire", "▁B ishop", "▁Bi shop", "▁Bis hop", "ne ro", "ner o", "n ero", "▁d eny", "▁de ny", "▁den y", "▁do ub", "▁dou b", "peon ato", "▁C ourse", "▁Cour se", "▁Que ens", "▁Queen s", "▁bl ur", "el ed", "ele d", "e led", "iz o", "i 
zo", "▁dé but", "▁Mod ule", "▁Mo dule", "▁ Module", "▁anx ious", "▁st are", "▁star e", "▁sta re", "▁Pro position", "▁K u", "▁i c", "▁ ic", "Per cent", "Qu ant", "▁И сто", "▁h ex", "▁he x", "▁ hex", "ass oci", "asso ci", "▁arrang ement", "▁arrange ment", "▁bo ats", "▁boat s", "Un d", "U nd", "▁sl ots", "▁slot s", "се н", "с ен", "necess ary", "▁app earing", "▁appe aring", "▁appear ing", "▁R ule", "▁Ru le", "▁ Rule", "▁G T", "▁ GT", "For ce", "et to", "ett o", "e tto", "ze nia", "zen ia", "▁o uts", "▁out s", "▁ou ts", "▁ outs", "▁vari ations", "▁variation s", "▁wh ites", "▁white s", "▁g lo", "▁gl o", "▁B R", "▁ BR", "ic ky", "ick y", "▁j ury", "▁ju ry", "▁jur y", "▁treat ments", "▁treatment s", "▁The ater", "kn ow", "k now", "▁pro files", "▁prof iles", "▁profile s", "▁con spir", "▁class room", "▁B ass", "▁Bas s", "▁Ba ss", "▁law yers", "▁lawyer s", "v ue", "▁A rc", "▁Ar c", "▁ Arc", "▁s la", "▁sl a", "▁att ending", "▁attend ing", "n x", "m x", "TO P", "T OP", "▁b ored", "▁bo red", "▁bore d", "▁bor ed", "pre vious", "prev ious", "r w", "pt ic", "љ у", "▁app ar", "▁ap par", "▁P ont", "▁Po nt", ": _", "ii i", "i ii", "▁j erk", "▁jer k", "hed ral", "сс а", "с са", "▁Pr ize", "▁Pri ze", "▁Р и", "б ре", "▁hand les", "▁handle s", "▁j ak", "▁ja k", "▁Afghan istan", "▁b oring", "▁bo ring", "▁bor ing", "if ik", "ifi k", "▁sh ade", "▁sha de", "air o", "ai ro", "a iro", "od ay", "oda y", "o day", "▁pl ates", "▁plate s", "▁plat es", "▁Championship s", "▁Champion ships", "▁che eks", "▁cheek s", "ri ke", "rik e", "r ike", "▁kön nen", "▁app le", "▁ap ple", "▁appl e", "▁ apple", "▁Ed die", "▁Edd ie", "▁s od", "▁so d", "▁tr ains", "▁tra ins", "▁train s", "pan ic", "pa nic", "▁Ad vent", "▁Adv ent", "ub re", "u bre", "▁d å", "▁S ymbol", "▁Sym bol", "▁ Symbol", "▁с те", "▁ст е", "▁ сте", "Sa m", "S am", "inher it", "cam era", "▁c ours", "▁co urs", "▁cour s", "▁cou rs", "▁make up", "re gex", "reg ex", "▁U E", "▁ UE", "▁Det roit", "▁W eight", "▁We ight", "▁ Weight", "▁P iet", "▁Pi et", "▁a ria", "▁ar ia", "▁ aria", "DI RECT", "DIR ECT", "ace ae", "▁In fo", "▁Inf o", "▁ Info", "an ya", "any a", "back end", "▁T ennessee", "pi cker", "pic ker", "pick er", "p icker", "▁Le o", "▁P oss", "▁Po ss", "▁Pos s", "pr ises", "prise s", "▁m ature", "▁mat ure", "сь ких", "▁F ant", "▁Fa nt", "Re ason", "▁m oy", "▁mo y", "▁B aker", "▁Ba ker", "▁Bak er", "▁sub set", "▁subs et", "▁ subset", "▁Stan ley", "▁el even", "▁ele ven", "▁elev en", "ol ate", "ola te", "o late", "▁fort une", "Status Code", "▁ent ities", "▁ entities", "▁Ok ay", "▁ Okay", "ц о", "an os", "ano s", "a nos", "rel ative", "▁order ing", "▁ord ering", "▁No body", "▁Nob ody", "▁str len", "▁ strlen", "▁r ope", "▁ro pe", "▁cig arette", "hol ds", "hold s", "h olds", "ir able", "ira ble", "value Of", "St ub", "▁phot ography", "▁photograph y", "es tra", "est ra", "estr a", "e stra", "▁cult ures", "▁culture s", "▁decl aration", "▁declar ation", "mer cial", "LI ED", "au te", "aut e", "a ute", "al ter", "alt er", "Sub mit", "▁Mag ic", "▁r hythm", "Pay ment", "ni h", "n ih", "▁inter section", "l é", "EN TRY", "/ )", "▁m og", "▁mo g", "ru st", "rus t", "r ust", "▁threat s", "▁Mil itary", "ap or", "a por", "▁s igu", "▁si gu", "▁sig u", "set minus", "▁I ng", "▁In g", "st ation", "stat ion", "T ake", "▁s hed", "▁sh ed", "▁she d", "▁Fr ancia", "▁Franc ia", "▁Fra ncia", "▁Fran cia", "pos ts", "po sts", "post s", "Mar ker", "Mark er", "Lower Case", "▁be find", "▁bef ind", "▁C zech", "▁Cz ech", "ícul a", "▁Per formance", "▁W es", "▁We s", "▁L arry", "▁Lar ry", "▁o st", "▁os t", "▁ 
ost", "▁em ails", "▁email s", "▁Re lease", "▁ Release", "▁ad apter", "▁adapt er", "▁ adapter", "▁pad re", "ac io", "aci o", "a cio", "▁з ем", "▁gen etic", "▁ge netic", "▁U nd", "▁Un d", "▁ Und", "▁accept ance", "да н", "д ан", "▁Girl s", "▁Gir ls", "comp iler", "compile r", "su n", "s un", "▁whe els", "▁wheel s", "▁thorough ly", "gr und", "gru nd", "g rund", "un ction", "unct ion", "▁e lla", "▁el la", "▁ell a", "▁ ella", "X FF", "ug s", "u gs", "ient os", "ien tos", "iento s", "▁D M", "▁ DM", "▁polit ique", "▁campaign s", "▁Tok yo", "▁album s", "KERN EL", "pd ata", "p data", "▁lap top", "▁lapt op", "▁v ál", "▁vá l", "▁f ou", "▁fo u", "or b", "o rb", "▁T ower", "▁To wer", "▁Tow er", "▁Get ting", "▁cor ners", "▁corner s", "▁corn ers", "pl ess", "ple ss", "ples s", "p less", "▁special ist", "▁i v", "▁ iv", "Ui nt", "U int", "▁name ly", "▁nam ely", "▁sc aling", "▁scal ing", "Ext ensions", "Extension s", "▁cent ro", "omorph ism", "▁dé f", "), \\", ") ,\\", "▁contr ary", "▁contra ry", "▁str iking", "▁stri king", "▁B ere", "▁Be re", "▁Ber e", "▁fore cast", "▁z ones", "▁zone s", "▁zo nes", "sm art", "s mart", "as hi", "ash i", "ri n", "r in", "NE W", "▁sim ulations", "▁simulation s", "▁R ather", "▁Ra ther", "▁Rat her", "▁Writ ing", "▁Wr iting", "▁$ [", "▁as sh", "▁ass h", "▁f ailing", "▁fa iling", "▁fail ing", "▁man if", "▁B og", "▁Bo g", "▁D ir", "▁Di r", "▁ Dir", "▁influ enced", "▁influence d", "conf irm", "▁we igh", "▁in ventory", "▁invent ory", "▁a pare", "▁ap are", "▁e u", "▁ eu", "char acter", "io m", "i om", "▁o rb", "▁or b", "▁ orb", "dev ices", "device s", "▁L ED", "▁LE D", "▁ LED", "▁prop ortion", "▁proport ion", "▁Hon or", "▁Ho nor", "▁appro aching", "▁approach ing", "de leg", "del eg", "▁B B", "▁ BB", "hel pers", "help ers", "helper s", "re pository", "rep ository", "▁б ере", "▁бе ре", "▁inhab it", "▁s ão", "▁travel ed", "▁trav eled", "ne x", "n ex", "▁C lin", "▁Cl in", "CE PT", "▁off ense", "▁in cent", "▁inc ent", "ID S", "I DS", "▁coeff icients", "▁coefficient s", "▁l p", "▁ lp", "чно го", "ч ного", "▁c d", "▁ cd", "mu st", "mus t", "m ust", "▁soon er", "ez e", "e ze", "C at", "ma ker", "make r", "m aker", "▁r anked", "▁ran ked", "▁rank ed", "ful ness", "▁part ially", "▁partial ly", "▁parti ally", "Pro m", "Pr om", "P rom", "▁ф он", "▁фо н", "▁Pro bably", "▁c ached", "▁cache d", "▁ca ched", "▁ cached", "▁bal anced", "▁balance d", "ah oma", "aho ma", "▁Mur ray", "▁a li", "▁al i", "▁ ali", "iv os", "ivo s", "▁b ark", "▁bar k", "IT EM", "ITE M", "▁Kir che", "▁alloc ated", "▁allocate d", "Al t", "A lt", "▁am éric", "íl ia", "í lia", "▁c ens", "▁ce ns", "▁lic enses", "▁license s", "▁ licenses", "ac z", "a cz", "▁G ate", "▁Ga te", "▁ Gate", "▁B L", "▁ BL", "▁re public", "▁rep ublic", "RO W", "▁состав ля", "▁соста вля", "▁Fil ip", "▁Ind ivid", "▁tr ials", "▁tri als", "▁trial s", "/* !", "▁G P", "▁ GP", "ni ka", "nik a", "n ika", "▁ex em", "▁ad vers", "▁adv ers", "um ped", "ump ed", "▁Dev ice", "▁ Device", "wa ke", "w ake", "Ex ec", "ar ding", "ard ing", "ardi ng", "▁pobl ación", "▁k een", "▁ke en", "▁b itch", "▁bit ch", "▁embed ded", "▁B ond", "▁Bo nd", "▁Bon d", "ri des", "ride s", "rid es", "r ides", "▁W oman", "▁Wo man", ". 
[", "ér é", "é ré", "▁Hash Map", "▁ HashMap", "▁co unting", "▁coun ting", "▁count ing", "▁Init ial", "▁ Initial", "▁ver se", "▁vers e", "▁ verse", "▁Vere in", ">\" ,", "> \",", "▁an th", "▁ant h", "▁ anth", "ci d", "c id", "▁h unt", "▁hun t", "на л", "н ал", "ci es", "cie s", "c ies", "Pi n", "P in", "▁# !", "ва я", "sn d", "s nd", "▁u k", "▁ uk", "▁sw ift", "▁tempor ada", "▁environment s", "▁environ ments", "claim er", "eme tery", "emet ery", "j är", "▁ча ст", "▁час т", "Trans port", "▁A rr", "▁Ar r", "▁ Arr", "▁P aper", "▁Pa per", "▁Pap er", "▁b ew", "▁be w", "▁ bew", "▁har vest", "▁- ----", "▁-- ---", "▁--- --", "▁ -----", "product s", "ле т", "л ет", "ident ifier", "RO OT", "▁M ak", "▁Ma k", "▁App ro", "▁Ap pro", "▁ Appro", "ie ri", "ier i", "i eri", "▁F ly", "▁Fl y", "▁is set", "▁iss et", "▁ isset", "▁determ ination", "▁determin ation", "Ge ometry", "▁emer ging", "sub scription", "ol y", "o ly", "▁R ace", "▁Ra ce", "▁B ah", "▁Ba h", "▁Config uration", "▁ Configuration", "▁Inter est", "ско в", "ск ов", "с ков", "ist rz", "istr z", "▁S han", "▁Sh an", "▁Sha n", "▁P ain", "▁Pa in", "CON NE", "ma jor", "m ajor", "▁St ay", "▁Sta y", "▁bron ze", "▁f itting", "▁fit ting", "▁J ar", "▁Ja r", "mg r", "m gr", "▁S har", "▁Sh ar", "▁Sha r", "FL O", "F LO", "ut er", "ute r", "u ter", "с ы", "▁cont acts", "▁contact s", "▁f iring", "▁fi ring", "▁fir ing", "на н", "н ан", "▁prof es", "sk é", "s ké", "▁rule d", "▁ru led", "▁rul ed", "=\" /", "an dro", "and ro", "▁ens uring", "iz en", "ize n", "i zen", "▁че рез", "ise cond", "i second", "ob il", "obi l", "o bil", "▁re ck", "▁rec k", "▁ reck", ")} (", ") }(", "bit map", "▁B run", "▁Br un", "▁Bru n", "▁Jer usalem", "▁W o", "▁Republic ans", "▁Republican s", "mat ic", "m atic", "▁E arl", "▁d ock", "▁do ck", "▁doc k", "▁M all", "▁Mal l", "▁Ma ll", "k k", "▁ Й", "▁C OL", "▁CO L", "▁ COL", "▁lat ach", "UI nt", "U Int", "ци ональ", "цион аль", "циона ль", "▁seg ments", "▁segment s", "▁re fund", "▁ref und", "fa c", "f ac", "▁Art icle", "▁B orn", "▁Bo rn", "▁Bor n", "² .", "br and", "bra nd", "b rand", "{$ \\", "{ $\\", "▁s s", "▁ ss", "▁Re sources", "▁Res ources", "▁Resource s", "▁ Resources", "▁re cycl", "▁rec ycl", "▁$ $\\", "▁$$ \\", "▁Conne ction", "▁Connect ion", "▁ Connection", "▁imp erial", "▁imper ial", "▁pract ically", "▁practical ly", "▁– ,", "▁Dis play", "▁ Display", "ier no", "mo uth", "m outh", "ed es", "ede s", "e des", "ba hn", "b ahn", "▁C atherine", "▁high way", "un ting", "unt ing", "▁Any way", "Sp ell", "Spe ll", "▁L iste", "▁List e", "▁Li ste", "▁Lis te", "▁ret rieve", "▁retr ieve", "▁retriev e", "▁z d", "▁ zd", "stra ße", "▁dom inated", "▁domin ated", "to uch", "t ouch", "▁m b", "▁ mb", "LO NG", "L ONG", "as ures", "asure s", "TL S", "T LS", "▁accompl ished", "▁accomp lished", "▁accomplish ed", "▁f ears", "▁fe ars", "▁fear s", "▁seem ingly", "▁d ag", "▁da g", "▁ dag", "▁b ureau", "▁bur eau", "▁Gro ß", "▁accord ance", ". 
]", "ou x", "o ux", "▁col onial", "▁colon ial", "▁compass ion", "th umb", "▁s wo", "▁sw o", "on line", "▁J i", "▁work shop", "▁works hop", "▁l ub", "▁lu b", "év rier", "ш і", ">\" ;", "> \";", "▁gener ous", "▁gene rous", "ro us", "rou s", "r ous", "av id", "avi d", "a vid", "igen ous", "▁R aw", "▁Ra w", "▁ Raw", "▁sw ap", "▁ swap", "h c", "java script", "jav ascript", "Fact or", "Fac tor", "F actor", "▁gar bage", "▁M icro", "▁Mic ro", "▁Mi cro", "co u", "c ou", "ü ber", "▁f atal", "▁fa tal", "▁fat al", "▁trans parent", "▁b earing", "▁be aring", "▁bear ing", "▁celebr ated", "▁celebrate d", "VI S", "V IS", "▁B M", "▁ BM", "▁pr ince", "▁prin ce", "to l", "t ol", "▁' ", "\\ \">", "▁du rant", "▁dur ant", "▁vent ure", "▁F itz", "▁Fi tz", "▁C BD", "▁CB D", "▁b acking", "▁back ing", "▁w are", "▁war e", "▁wa re", "▁ ware", "ev e", "e ve", "O G", "ed ish", "edi sh", "▁Giov anni", "▁Sh are", "▁Shar e", "▁Sha re", "▁ Share", "▁rec ipes", "▁recipe s", "▁recip es", "big g", "bi gg", "b igg", "▁minor ity", "▁n ar", "▁na r", "▁ nar", "oll ary", "ollar y", "▁F E", "▁ FE", "sh irt", "▁redu ces", "▁reduce s", "Ch e", "C he", "▁NOT E", "▁NO TE", "j query", "▁F low", "▁Fl ow", "▁Flo w", "▁ Flow", "task s", "pr event", "pre vent", "prev ent", "▁со вет", "▁сов ет", "it as", "ita s", "▁exam ined", "▁examine d", "ho n", "h on", "▁M ine", "▁Min e", "▁Mi ne", "▁grad ient", "▁V ien", "▁Vi en", "▁b eds", "▁be ds", "▁bed s", "ET H", "E TH", "fl at", "f lat", "an son", "ans on", "▁in tu", "▁int u", "▁fl ows", "▁flo ws", "▁flow s", "но к", "▁E ine", "▁Ein e", "ро ди", "род и", "▁ко р", "▁к ор", "▁ кор", "▁aff ection", "▁af fection", "▁affect ion", "▁p orts", "▁por ts", "▁port s", "▁ ports", "__ .", "_ _.", "re po", "rep o", "ail and", "ai land", "▁по да", "▁под а", "int age", "inta ge", "▁Prote ction", "î t", "▁[ {", "▁l amp", "▁la mp", "▁benef icial", "ка де", "▁Станов ништво", "▁l ined", "▁li ned", "▁line d", "▁lin ed", "▁ lined", "▁Ex change", "▁f itted", "▁fit ted", "▁v erk", "▁ver k", "▁focus es", "vo d", "v od", "▁Car lo", "▁Carl o", "▁ра спо", "▁рас по", "ain ted", "aint ed", "ainte d", "a inted", "▁r ape", "▁rap e", "▁ra pe", "▁t ogg", "▁to gg", "ac ker", "ack er", "a cker", "T w", "ra h", "r ah", "trans l", "▁je alous", "▁re pository", "▁rep ository", "▁ repository", "re marks", "rem arks", "remark s", "▁i e", "▁ ie", "í d", "▁sk ull", "ra c", "r ac", "() ]", "( )]", "ri en", "rie n", "r ien", "? 
(", "▁K ids", "▁Ki ds", "▁Kid s", "▁sw itched", "▁switch ed", "▁G ew", "▁Ge w", "▁be ef", "▁appear ances", "▁appearance s", "▁Coll ins", "▁V illa", "▁Vill a", "▁Vi lla", "▁Vil la", "▁z ona", "▁zo na", "▁n eu", "▁ne u", "те льно", "тель но", "▁х удо", "▁oper ational", "▁operation al", "ON LY", "▁h ockey", "▁ś wi", "ö k", "Sl ice", "Ref resh", "▁n uts", "▁nu ts", "▁nut s", "sa y", "s ay", "▁ста нови", "▁станов и", "х е", "▁le aning", "▁lean ing", "▁H aus", "▁Ha us", "▁o ral", "▁or al", "▁ oral", "▁ Ž", "▁Sup pose", "▁Supp ose", "▁ess ence", "EN TER", "ENT ER", "B ucket", "▁C ant", "▁Can t", "▁Ca nt", "▁N azi", "▁Na zi", "▁Naz i", "ш ти", "▁Vol ume", "▁ Volume", "▁wor thy", "▁worth y", "▁ worthy", "B u", "Ent ries", "on ie", "oni e", "o nie", "▁h ood", "▁ho od", "▁ hood", "▁emp ire", "▁dé velop", "▁p robe", "▁pro be", "▁pr obe", "▁prob e", "▁ probe", "▁K night", "▁Kn ight", "▁peace ful", "hu b", "h ub", "▁ál bum", "su it", "s uit", "▁sil k", "+ =", "▁p ione", "▁pi one", "' \"", "ка ми", "▁N ull", "▁Nu ll", "▁ Null", "Label s", "au tres", "aut res", "autre s", "to LowerCase", "▁b uzz", "▁bu zz", "▁w ashed", "▁was hed", "▁wash ed", "' *", "itzer land", "▁r amp", "▁ra mp", "▁ram p", "▁к ни", "▁k un", "col ors", "color s", "colo rs", "▁vacc ine", "an imation", "anim ation", "▁Just in", "mem set", "▁c ensus", "▁cens us", "in fl", "inf l", "▁statist ical", "▁trop ical", "Dis abled", "Disable d", "\r \r", "▁Cra ig", "Page s", "Pag es", "P ages", "▁mag az", "▁comput ing", "▁flo ors", "▁floor s", "oin e", "oi ne", "o ine", "▁tit olo", "▁an ci", "▁anc i", "▁Indust ry", "▁г лав", "▁гла в", "Bo ot", "B oot", "Cl ip", "▁d v", "▁ dv", "▁met all", "▁metal l", "▁meta ll", "▁Is abel", "▁Isa bel", "▁look up", "▁ lookup", "▁ц ер", "▁це р", "▁ цер", "▁car ries", "f u", "tp l", "t pl", "pe rp", "per p", "▁St orm", "▁Sto rm", "eh icle", "▁S even", "▁Se ven", "▁Sev en", "љ а", "▁l ut", "▁lu t", "th reshold", "▁d ull", "▁du ll", "▁E ND", "▁EN D", "▁ END", "▁O tto", "▁Ot to", "▁Ott o", "▁there by", "TE MP", "T EMP", "▁S cal", "▁Sc al", "▁ Scal", "Com put", "Comp ut", "ip v", "i pv", "▁ins ane", "▁myster ious", "▁M is", "▁Mi s", "uch ar", "uc har", "u char", "as ma", "asm a", "au ch", "auc h", "a uch", "ne tt", "net t", "n ett", "El em", "E lem", "de rive", "der ive", "▁murder ed", "ak ten", "akt en", "akte n", "ро ван", "ров ан", "рова н", "▁a nos", "▁an os", "▁ano s", "▁ anos", "}} ^", "} }^", "▁F uß", "▁Fu ß", "▁S ister", "▁Si ster", "▁volunte er", ":: _", ": :_", "er ta", "ert a", "▁бо лее", "og rá", "▁Im Gui", "sa me", "sam e", "s ame", "Sh adow", "▁re actions", "▁reaction s", "▁react ions", "▁purch asing", "PRE FIX", "▁emb od", "со м", "▁alt ogether", "▁prom oting", "▁promot ing", "U V", "▁ind uced", "▁indu ced", "▁eer ste", "▁eerst e", "Li fe", "Lif e", "L ife", "hd d", "h dd", "ní ch", "▁c hill", "▁ch ill", "▁chi ll", "RG B", "R GB", "red uce", "redu ce", "FR OM", "F ROM", "dir name", "▁t une", "▁tu ne", "▁tun e", "▁r ay", "▁ra y", "▁ ray", "T D", "▁к ъ", "▁Febru ar", "▁suspend ed", "▁susp ended", "▁u pp", "▁up p", "▁ upp", "er i", "e ri", "pr eter", "pre ter", "pret er", "▁E R", "▁ ER", "то н", "т он", "▁c atal", "▁cat al", "▁ca tal", "▁h iring", "▁hi ring", "▁п ів", "▁пі в", "▁Olymp ics", "▁Olympic s", "da le", "dal e", "d ale", ":: {", ": :{", "▁expl oring", "▁explo ring", "▁с тал", "▁ста л", "▁ст ал", "▁univers ities", "Class es", "▁ча с", "▁C ool", "▁Co ol", "▁S ony", "▁So ny", "▁Son y", "th al", "tha l", "t hal", "▁es crit", "▁esc rit", "▁cor ruption", "▁corrupt ion", "az ar", "aza r", "▁N eb", "▁Ne b", 
"▁Py thon", "▁c him", "▁ch im", "▁chi m", "▁cap ability", "cy cl", "c ycl", "▁re try", "▁r etry", "▁ret ry", "▁retr y", "▁ retry", "++ ]", "▁t oy", "▁to y", "▁T erry", "▁Ter ry", "▁Terr y", "View ById", "▁v ine", "▁vi ne", "▁vin e", "▁Kit chen", "▁B iden", "▁Bi den", "Back end", "gl ich", "g lich", "re lation", "rel ation", "▁rat ings", "▁ra tings", "▁rating s", "Execut or", "ibr ation", ">( )", "> ()", "▁he al", "if iable", "ifi able", "par k", "p ark", "▁P ete", "▁Pe te", "▁Pet e", "▁tr aged", "▁tra ged", "▁trag ed", "▁ch uck", "▁wire less", "▁wir eless", "Re place", "Rep lace", "IR Q", "▁се зо", "i ß", "▁j unto", "▁jun to", "Lo w", "L ow", "▁s id", "▁si d", "▁ sid", "Tag Helpers", "TagHelper s", "▁comp aring", "▁compar ing", "▁c elle", "▁cell e", "▁ce lle", "▁cel le", "▁obt aining", "▁obtain ing", "▁qu ar", "▁q uar", "Br o", "B ro", "▁E C", "▁ EC", "in ea", "ine a", "i nea", "▁F ue", "▁Fu e", "▁Prince ss", "▁Prin cess", "ij o", "i jo", "ge ns", "gen s", "g ens", "PO L", "P OL", "è tres", "▁h ind", "▁hi nd", "▁ hind", "Var iant", "Vari ant", "▁rece ives", "▁receive s", "go d", "g od", "ik en", "ike n", "i ken", "na il", "n ail", "▁amer ican", "▁ american", "br as", "bra s", "b ras", "(' \\", "( '\\", "ie ce", "if ference", "iffer ence", "iffe rence", "▁b ubble", "▁bub ble", "▁B ear", "▁Be ar", "un ivers", "uni vers", "▁demand ing", "sa ved", "save d", "s aved", "▁cred entials", "MS M", "M SM", "▁struct ural", "Con s", "Co ns", "C ons", "▁Way ne", "▁blank et", "▁re pet", "▁rep et", "▁repe t", "Ne g", "N eg", "▁exclusive ly", "▁exclus ively", "IF I", "I FI", "бур г", "▁arg uing", "▁Re pub", "▁Rep ub", "▁f rowned", "▁fr owned", "Met ric", "M etric", "sk im", "ski m", "s kim", "▁П ет", "▁Пе т", "▁rele ases", "▁release s", "▁t ast", "▁ta st", "▁p reference", "▁pre ference", "▁prefer ence", "▁S üd", "▁Sü d", "oc c", "o cc", "▁r x", "▁ rx", "activ ate", "cl am", "c lam", "▁фи ль", "▁Sud denly", "▁cr ushing", "▁crush ing", "▁L ower", "▁Lo wer", "▁Low er", "▁ Lower", "ei ng", "e ing", "wa lt", "wal t", "w alt", "▁Г ер", "▁Ге р", "▁m ö", "ри сто", "la gen", "lag en", "lage n", "l agen", "▁co aching", "▁coach ing", "ight ers", "igh ters", "ighter s", "▁bas ement", "▁base ment", "▁F IX", "▁FI X", "▁ FIX", "Te le", "T ele", "With out", "▁Com mons", "▁Comm ons", "▁Common s", "ul ly", "ull y", "h box", "fl ash", "▁por tal", "▁port al", "▁ portal", "ot ype", "o type", "▁S or", "▁So r", "▁trou bles", "▁trouble s", "ar si", "ars i", "▁с тан", "▁ста н", "▁ст ан", "▁ стан", "CA M", "C AM", "▁de notes", "▁den otes", "▁denote s", "LA NG", "LAN G", "L ANG", "▁Be yond", "▁Bey ond", "▁Bo wl", "▁Bow l", "▁import antly", "▁important ly", "▁W R", "▁ WR", "▁rel ating", "▁a nder", "▁and er", "▁an der", "▁ ander", "▁gr inned", "▁grin ned", "▁D ak", "▁Da k", "▁Brook lyn", "▁d p", "▁ dp", "▁P oly", "▁Pol y", "▁Po ly", "▁ Poly", "▁Sch ul", "▁B uffer", "▁Buff er", "▁ Buffer", "▁h older", "▁hold er", "▁hol der", "▁ holder", "IC AL", "I CAL", "▁tra iler", "▁trail er", "er ek", "ere k", "e rek", "▁n ě", "▁ ně", "sh aped", "shape d", "sha ped", ": `", "▁de code", "▁dec ode", "▁ decode", "▁co unted", "▁coun ted", "▁count ed", "▁v amp", "▁va mp", "▁re late", "▁rel ate", "▁M ason", "▁Ma son", "▁Mas on", "▁t itled", "▁title d", "▁tit led", "▁Kent ucky", "▁particip ated", "▁participate d", "▁Jenn ifer", "▁mat rices", "Cal endar", "st s", "s ts", "Ass oci", "▁f orum", "▁for um", "▁fo rum", "▁s phere", "▁sp here", "▁spher e", "▁S EO", "▁SE O", "pop up", "▁Current ly", "CL E", "C LE", "▁vol unt", "▁stell ar", "for all", "Is s", "I 
ss", "im et", "ime t", "i met", "q p", "la test", "lat est", "late st", "▁config ured", "▁configure d", "ab ol", "a bol", "ig ent", "igen t", "ige nt", "i gent", "▁delay ed", "ff ic", "f fic", "▁g ing", "▁gi ng", "▁ ging", "▁s cent", "▁sc ent", "▁scen t", "▁disg ust", "▁disgu st", "he sis", "hes is", "h esis", "im en", "ime n", "i men", "▁re ign", "▁П и", "ul as", "ula s", "u las", "um ing", "umin g", "umi ng", "u ming", "in nings", "inn ings", "Re nd", "R end", "id ity", "idi ty", "▁do zens", "▁dozen s", "wa rf", "war f", "▁Del hi", "▁bi ological", "▁corrid or", "Vis ual", "▁I z", "▁s uits", "▁su its", "▁suit s", "Py Object", "ia go", "i ago", "▁div ide", "▁divid e", "pe nt", "pen t", "p ent", "hel lo", "hell o", "h ello", "▁b eta", "▁be ta", "▁bet a", "▁ beta", "▁ex terior", "▁fin est", "▁fine st", "▁B ir", "▁Bi r", "▁f reed", "▁fr eed", "▁free d", "▁fre ed", "▁K el", "▁Ke l", "Se m", "S em", "▁fr uits", "▁fruit s", "▁fru its", "▁serv ants", "▁servant s", "▁pub lisher", "▁publish er", "▁cop per", "ol ation", "o lation", "se p", "s ep", "▁chair man", "ti k", "t ik", "▁m others", "▁mother s", "▁mo thers", "A ug", "▁je ans", "[] )", "[ ])", "▁D ATA", "▁DA TA", "▁ DATA", "▁reve als", "▁reveal s", "▁un conscious", "▁h acer", "▁ha cer", "▁hace r", "ric ulum", "▁T ogether", "▁ш та", "▁ шта", "or sz", "ors z", "▁c anal", "▁can al", "▁ca nal", "ös t", "ö st", "▁equ als", "▁equal s", "▁eq uals", "▁ equals", "▁по мо", "▁al location", "▁all ocation", "▁alloc ation", "st änd", "▁ч ер", "▁че р", "ac king", "ack ing", "▁motiv ation", "со н", "с он", "▁R ole", "▁Ro le", "▁Rol e", "▁ Role", "App ly", "Ap ply", "ig es", "ige s", "i ges", "* {", "▁f ires", "▁fire s", "▁fi res", "▁fir es", "Us ed", "Use d", "U sed", "▁he ute", "sk iej", "ski ej", "▁Or leans", "yl an", "y lan", "▁warm th", "▁w elfare", "▁wel fare", "je m", "j em", "▁си сте", "be z", "b ez", "ř e", "ke e", "k ee", "▁segu ito", "un ge", "ung e", "▁y oga", "▁yo ga", "▁d ug", "▁du g", "▁rest ored", "▁restore d", "Dr oid", "D roid", "▁P ent", "▁Pe nt", "▁Pen t", "▁ran king", "▁rank ing", "mo r", "m or", ".~ (\\", "ograph ical", "ographic al", "▁p ian", "▁pi an", "▁g ates", "▁gate s", "▁ga tes", "▁с ти", "▁ст и", "▁ сти", "s quare", "▁im plicit", "▁impl icit", "▁G ram", "▁Gr am", "▁Gra m", "▁Apr ès", "▁Ap rès", "▁Ass istant", "▁p ac", "▁pa c", "▁P ope", "▁Po pe", "▁Pop e", "г ре", "▁sc attering", "▁scatter ing", "стра тив", "▁all ocate", "▁alloc ate", "▁Man hattan", "▁а нг", "▁ан г", "▁ анг", "▁inter rupted", "▁interrupt ed", "ér ieur", "éri eur", "érie ur", "数 据", "Sign al", "Sig nal", "▁Con tract", "▁Cont ract", "▁ Contract", "ór ia", "ó ria", "WI TH", "W ITH", "хо дя", "ход я", "Ag greg", "A ggreg", "cul es", "cu les", "cule s", "c ules", "J an", "▁s to", "▁st o", "▁ sto", "▁G PIO", "▁GP IO", "▁ GPIO", "▁ident ifying", "▁identify ing", "▁p id", "▁pi d", "▁ pid", "ę p", "▁di git", "▁dig it", "el ia", "eli a", "e lia", "inv oke", "▁Fl oren", "▁Flor en", "▁Flo ren", "▁sh allow", "▁shall ow", "get Class", "getC lass", "▁advert is", "ем ы", "е мы", "▁H R", "▁ HR", "ym an", "y man", "▁C E", "▁ CE", "▁sec ured", "▁secure d", "▁secur ed", "▁rel atives", "▁relative s", "▁relativ es", "▁s ob", "▁so b", "▁s tab", "▁st ab", "▁sta b", "Trans ition", "▁w en", "▁we n", "▁ wen", "sh ops", "shop s", "▁k ont", "▁kon t", "▁ko nt", "▁h acia", "▁ha cia", "H y", "в ри", "sh ell", "she ll", "s hell", "▁ant ib", "▁anti b", "env ironment", "environ ment", "um bs", "umb s", "Tr acker", "Track er", "Tra cker", "en tr", "ent r", "▁Polit ical", "ex tract", "ext ract", 
"extra ct", "extr act", "=\" {{", "▁m erc", "▁me rc", "▁mer c", "▁p oc", "▁po c", "▁Re set", "▁Res et", "▁ Reset", "▁pur ely", "▁pure ly", "▁M ul", "▁Mu l", "▁gorge ous", "▁Î n", "ri ven", "riv en", "rive n", "r iven", "▁rom ance", "▁roman ce", "▁d av", "▁da v", "че ского", "ér ica", "éri ca", "éric a", "▁tra ject", "▁a rise", "▁ar ise", "▁sw ung", "▁p ockets", "▁pocket s", "▁trad itions", "▁tradition s", "▁re ver", "▁r ever", "▁rev er", "▁reve r", ">> >", "> >>", "▁n d", "▁ nd", "▁di vis", "▁div is", "▁bel oved", "▁quant ities", "▁é d", "▁ éd", "ien do", "i endo", "▁tal ented", "▁talent ed", "▁C ad", "▁Ca d", "▁В ла", "▁imm igration", "▁immigr ation", "▁ju ris", "▁jur is", "▁a er", "▁e aten", "▁eat en", "▁m iejsc", "▁sum mon", "pe ople", "▁g ains", "▁gain s", "▁ga ins", "▁пра во", "▁restr iction", "▁restrict ion", "st ub", "▁b out", "▁bo ut", "▁bou t", "▁slave ry", "▁sla very", "▁comput ation", "▁ar mor", "▁arm or", "▁e k", "▁ ek", "▁Muslim s", "▁co operation", "▁cooper ation", "▁enh anced", "▁enhance d", "os lav", "▁ab rupt", "▁pod cast", "▁hospital s", "▁hosp itals", "нь о", "▁hot els", "▁hotel s", "▁Wik ipedia", "▁ж ен", "▁же н", "▁ жен", "G LOBAL", "▁Commun ist", "an gles", "ang les", "angle s", "▁t high", "▁th igh", "▁K irk", "▁Kir k", "▁t ends", "▁ten ds", "▁tend s", "▁M ode", "▁Mod e", "▁Mo de", "▁ Mode", "▁N atur", "▁Nat ur", "▁de let", "▁del et", "▁po pul", "▁pop ul", "▁Ch amber", "▁Cha mber", "▁Conserv ative", "kr ieg", "k rieg", "▁Class ic", "▁die sem", "▁dies em", "▁diese m", "▁em power", "▁emp ower", "▁M es", "▁Me s", "▁de alt", "▁deal t", "▁e stad", "▁est ad", "▁esta d", "▁Se it", "▁cred its", "▁credit s", "sub subsection", "Inv oke", "▁phys ician", "це в", "ц ев", "ás a", "á sa", "▁g ob", "▁go b", "▁R ug", "▁Ru g", "▁м іс", "▁мі с", "sh aller", "shal ler", "shall er", "▁k ol", "▁ko l", "▁ kol", "▁c ared", "▁car ed", "▁care d", "▁ca red", "▁of icial", "no s", "n os", "▁j el", "▁je l", "▁ jel", "null able", "GU I", "G UI", "▁r app", "▁rap p", "▁ra pp", "▁An nie", "▁Ann ie", "▁st ocks", "▁stock s", "▁sto cks", "▁develop er", "▁pl acement", "▁place ment", "▁plac ement", "▁ placement", "(\" <", "▁l avor", "▁la vor", "▁lav or", "▁acc us", "Mar t", "Ma rt", "M art", "amer ikan", "▁sk etch", "▁sent iment", "▁а мерикан", "An chor", "Mer ge", "Pe ople", "▁rend ered", "▁render ed", "▁la und", "▁n ons", "▁no ns", "▁non s", "▁bl ew", "▁ble w", "k b", "ate gor", "ateg or", "▁franç aise", "▁français e", "KE N", "K EN", "method s", "▁Part icip", "nost i", "nos ti", "n osti", "▁com merce", "▁commer ce", "▁ commerce", "▁до ма", "▁d re", "▁dr e", "▁t win", "▁tw in", "▁ded ic", "▁U TC", "▁ UTC", "We ek", "▁differ ential", "▁different ial", "л ё", "▁Ch oose", "▁Cho ose", "▁\" (", "▁то м", "▁ том", "▁про фе", "em ark", "e mark", "▁fe ared", "▁fear ed", "sk o", "s ko", "Br anch", "▁in vention", "▁inv ention", "▁invent ion", "er mine", "erm ine", "▁car act", "▁ca ract", "ро го", "р ого", "lo yd", "▁ку ль", "▁ куль", "▁del icate", "Or gan", "▁Im pro", "▁Imp ro", "▁r ip", "▁ri p", "▁ rip", "Up dated", "Update d", "ul ent", "ule nt", "▁o bra", "▁ob ra", "s uspend", "Line s", "Lin es", "Li nes", "L ines", "▁b anda", "▁band a", "▁ban da", "ot ta", "ott a", "o tta", "▁k ole", "▁ko le", "▁kol e", "il io", "ili o", "i lio", "▁output s", "▁ outputs", "est ro", "estr o", "AAAA AAAA", "R UN", "ne nt", "nen t", "n ent", "▁d ated", "▁da ted", "▁dat ed", "▁date d", "▁ dated", "▁s py", "▁sp y", "▁c rap", "▁cr ap", "▁in coming", "▁inc oming", "▁ф ев", "▁фе в", "PH Y", "P HY", "▁O range", "▁Or ange", "▁ob server", 
"▁observ er", "▁observe r", "▁up stairs", "ion ed", "io ned", "ione d", "i oned", "▁a tr", "▁at r", "▁ atr", "igh bor", "▁expect ation", "Hi s", "H is", "im edia", "i media", "com put", "comp ut", "▁arg v", "▁ argv", "▁ear liest", "та ли", "тал и", "т али", "мо н", "м он", "ol len", "oll en", "ra ke", "r ake", "▁pat ience", "ходи т", "ход ит", "▁де ка", "▁bu yers", "▁buy ers", "▁buyer s", "▁Conne ct", "▁ Connect", "▁Univers al", "▁adjust ed", "▁adj usted", "im eq", "ime q", "el lers", "ell ers", "elle rs", "eller s", "▁ru in", "▁Cr usher", "▁Freder ick", "ott age", "otta ge", "▁com prom", "▁comp rom", "▁compr om", "ia sm", "ias m", "i asm", "wa ve", "w ave", "▁encour aging", "▁be ans", "▁bean s", "▁ beans", "▁per ceived", "… ]", "▁gl obe", "▁glob e", "▁glo be", "▁S F", "▁ SF", "he rent", "her ent", "here nt", "▁a like", "▁al ike", "▁ali ke", "▁hur ried", "qu el", "que l", "q uel", "▁mus icians", "▁music ians", "▁musician s", "ar z", "a rz", "по в", "п ов", "drop down", "ac l", "a cl", "pre view", "prev iew", "p review", "▁under neath", "ze ś", "▁fem ales", "▁female s", "list ener", "listen er", "▁C AN", "▁CA N", "▁ CAN", "▁T ow", "▁To w", "▁pe ers", "▁peer s", "tl s", "t ls", "at ra", "atr a", "a tra", "se nder", "send er", "sen der", "s ender", "TIME OUT", "fu rt", "fur t", "f urt", "▁Gu erra", "{} )", "{ })", "▁D urch", "▁Dur ch", "▁s ki", "▁sk i", "▁ ski", "il las", "ill as", "illa s", "▁S of", "▁So f", "▁Organ ization", "▁C leveland", "▁b utt", "▁but t", "▁bu tt", "▁sim ilarly", "▁similar ly", "▁assert True", "▁ assertTrue", "▁inev itable", "ne ll", "nel l", "n ell", "▁R af", "▁Ra f", "DIS ABLE", "am ine", "ami ne", "amin e", "a mine", "▁Com plete", "▁Comp lete", "▁ Complete", "▁be iden", "▁bei den", "▁Chall enge", "Rad io", "R adio", "▁Not ice", "He x", "H ex", "▁C uba", "▁Cub a", "▁Cu ba", "▁aug ust", "▁Philipp ines", "Mar gin", "M argin", "ja l", "j al", "gener ator", "▁t atto", "▁ta tto", "▁H em", "▁He m", "▁S alt", "▁Sal t", "▁Sa lt", "un ately", "unate ly", "▁terr ain", "▁terra in", ",\\ ,", ", \\,", "гра д", "▁c rop", "▁cr op", "▁cro p", "Name d", "Na med", "N amed", "▁W onder", "▁Wo nder", "▁Won der", "es sen", "ess en", "esse n", "▁f ist", "▁fi st", "▁fis t", "▁z oom", "▁zo om", "▁ zoom", "пе н", "п ен", "▁ru ling", "▁rul ing", "un likely", "as sy", "ass y", "or ent", "ore nt", "oren t", "o rent", "▁g ibt", "▁gi bt", "▁A w", "sim eq", "s imeq", "▁r aid", "▁ra id", "▁ raid", "▁Com par", "▁Comp ar", "▁ Compar", "▁free ly", "▁fre ely", "▁esp añ", "▁espa ñ", "▁py thon", "▁ python", "▁diagn osis", "▁ch ips", "▁chip s", "▁chi ps", "R azor", "▁V ert", "▁Ver t", "▁Ve rt", "▁ Vert", "For ward", "▁P é", "▁compar able", "▁anal ys", "▁analy s", "St d", "S td", "▁Franç ois", "▁c ó", "jo s", "j os", "▁p eg", "▁pe g", "▁ peg", "CON ST", "cl usive", "▁voy age", "▁Sch l", "▁Sc hl", "Group Layout", "oi se", "ois e", "o ise", "сс е", "с се", "▁cr ush", "▁cru sh", "▁Die se", "▁Di ese", "▁Dies e", "▁be kan", "▁bek an", "ci t", "c it", "▁Ein wohner", "▁L an", "▁La n", "▁dress ing", "▁s olved", "▁sol ved", "▁solve d", "М а", "▁C hel", "▁Ch el", "▁Che l", "par ed", "pa red", "pare d", "p ared", "▁se aled", "▁sea led", "▁seal ed", "}) )", "} ))", "anc ouver", "se h", "s eh", "ta bles", "table s", "tab les", "t ables", "▁red dit", "▁redd it", "▁ reddit", "▁m our", "▁mo ur", "▁mou r", "▁clean up", "▁ cleanup", "ov ić", "ovi ć", "▁Ur ban", "oc t", "o ct", "то ра", "тор а", "▁Le gal", "▁Leg al", "▁J ur", "▁Ju r", "▁N as", "▁Na s", "C ity", "▁un fortunately", "▁unfortunate ly", "▁P ER", "▁PE R", "▁ PER", 
"ma kers", "make rs", "maker s", "m akers", "▁sig lo", "▁k in", "▁ki n", "▁ kin", "co des", "code s", "cod es", "c odes", "ля р", "NI NG", "N ING", "▁C ec", "▁Ce c", "▁C T", "▁ CT", "▁R acing", "▁Ra cing", "da n", "d an", "▁He rz", "▁Her z", "▁gen ius", "▁e urop", "▁eu rop", "serv let", "ow ego", "owe go", "▁Im agine", "▁Imp erial", "▁Imper ial", "Re gex", "Reg ex", "c é", "HE D", "H ED", "det ect", "з ни", "io c", "i oc", "Anal ysis", "Analy sis", "▁* =", "▁f ever", "▁fe ver", "▁Ob viously", "F oot", "Line ar", "Lin ear", "▁p ró", "▁pr ó", "▁satell ite", "▁B eng", "▁Be ng", "▁Ben g", "bound s", "b ounds", "▁J azz", "▁Ja zz", "▁C urt", "▁Cur t", "▁Cu rt", "▁поли ти", "▁b ild", "▁bi ld", "▁bil d", "▁ bild", "▁\" \");", "▁\"\" );", "▁\"\") ;", "▁document ary", "▁gr asp", "▁gra sp", "▁gras p", "▁d la", "▁dl a", "TR A", "T RA", "▁read ily", "To r", "T or", "C ACHE", "▁Const ruction", "▁Construct ion", "▁d ía", "да т", "д ат", "▁G rey", "▁Gr ey", "▁Gre y", "run ner", "le ading", "▁co oked", "▁cook ed", "ro log", "rol og", "r olog", "▁annoy ing", "DE LETE", "amer ican", "▁Niger ia", "▁d ai", "▁da i", "▁ dai", "▁sac rific", "▁serv ant", "▁s kb", "▁sk b", "▁ skb", "▁b arg", "▁bar g", "▁ba rg", "pix el", "p ixel", "In ject", "ca ched", "cache d", "c ached", "▁cou pled", "▁couple d", "▁coup led", "un gle", "ung le", "pro b", "pr ob", "p rob", ">{ @", "ла го", "default s", "▁por trait", "▁port rait", "▁d ental", "▁den tal", "▁dent al", "▁d estro", "▁dest ro", "▁r ue", "▁ru e", "▁hy brid", "▁ й", "▁CO MP", "▁COM P", "▁ COMP", "▁B ent", "▁Be nt", "▁Ben t", "Com pare", "Comp are", "Compar e", "bo th", "bot h", "b oth", "kl ahoma", "ais er", "ai ser", "aise r", "a iser", "Su re", "Sur e", "S ure", "▁s olving", "▁sol ving", "▁l ista", "▁li sta", "▁list a", "▁ lista", "▁у чи", "▁Ev ans", "▁Eva ns", "▁f usion", "▁fus ion", "▁compl aint", "▁complain t", "H P", "He ap", "al ways", "M gr", "▁appro x", "▁ approx", "display style", "lo rd", "lor d", "l ord", "in sn", "ins n", "▁Fe ature", "▁ Feature", "RP C", "R PC", "▁v et", "▁ve t", "▁ vet", "К а", "▁kil omet", "▁kilom et", "▁deliver ing", "▁const itution", "sh ine", "ле к", "▁го род", "▁горо д", "▁prob able", "▁run ner", "▁ runner", "hr en", "hre n", "h ren", "▁N ep", "▁Ne p", "▁over night", "pr ead", "pre ad", "p read", "л та", "фор ма", "CL O", "C LO", "ie sa", "ies a", "i esa", "▁object ives", "▁objective s", "con tract", "cont ract", "contr act", "EX P", "▁col ours", "▁colour s", "xi co", "xic o", "x ico", "C lean", "▁light ly", "▁scen arios", "▁scenario s", "▁qu arters", "▁quarter s", "▁quart ers", "▁quar ters", "▁ quarters", "▁D ear", "▁De ar", "▁l uc", "▁lu c", "▁app et", "▁ap pet", "▁appe t", "▁de port", "▁dep ort", "Sa fe", "▁me nos", "▁men os", "▁Paul o", "▁Pa ulo", "CI AL", "C IAL", "ці в", "ц ів", "▁R oc", "▁Ro c", "▁c aring", "▁car ing", "▁ca ring", "▁elect ro", "▁de cember", "▁dec ember", "▁dece mber", "▁Phil osoph", "▁col ored", "▁color ed", "▁ colored", "it sch", "its ch", "ropol itan", "os ti", "ost i", "▁N ut", "▁Nu t", "▁consecut ive", "Pe er", "ar ness", "arn ess", "▁ż e", "▁ że", "▁A round", "▁Ar ound", "af ka", "▁d io", "▁di o", "ci p", "c ip", "▁to ys", "▁toy s", "cr o", "c ro", "▁m iser", "▁mis er", "▁mi ser", "▁mise r", "check box", "▁F isher", "▁Fish er", "▁gover ned", "▁govern ed", "▁h á", "▁En able", "▁ Enable", "▁t rivial", "▁occup ation", "ro rs", "ror s", "r ors", "▁l av", "▁la v", "▁ lav", "▁m ou", "▁mo u", "▁b ord", "▁bo rd", "▁bor d", "ли ч", "Ro om", "R oom", "') \r", "' )\r", "▁art ic", "▁m ientras", "ch air", "cha ir", 
"uation s", "u ations", "▁comm ented", "▁comment ed", "▁trigger ed", "Can not", "C annot", "▁Marc us", "▁p unct", "▁pun ct", "▁achie vement", "▁achieve ment", "е ди", "ext ensions", "extension s", "ad ers", "ade rs", "ader s", "a ders", "jo urs", "jour s", "j ours", "ir lines", "irl ines", "▁со стоя", "V IEW", "▁Nap ole", "Conf irm", "▁por que", "........ ........", "▁LI ABILITY", "Wall et", "W allet", "Sub ject", "al gorithm", "▁tr iple", "▁tri ple", "▁trip le", "ru b", "r ub", "▁se cur", "▁sec ur", "▁hand some", "▁hands ome", "▁d od", "▁do d", "r ès", "ac ja", "ch od", "cho d", "н ва", "es ar", "esa r", "an chor", "anc hor", "anch or", "▁Soph ie", "▁Украї ни", "Up per", "am ous", "amo us", "Fe atures", "Feature s", "▁б ли", "▁ бли", "Supp ress", "Sup press", "▁kil om", "▁Z u", "▁belong ed", "▁Red dit", "▁pro ces", "▁proc es", "▁с тар", "▁ста р", "▁ст ар", "▁F est", "▁Fe st", "/ %", "▁P am", "▁Pa m", "st orm", "sto rm", "W W", "P aul", "▁t ales", "▁tal es", "▁ta les", "▁tale s", "▁рай она", "▁райо на", "▁район а", "▁spread ing", "▁s ched", "▁sc hed", "▁sch ed", "▁sche d", "▁ sched", "le ased", "lease d", "Non Null", "▁High way", "▁Re serve", "▁Res erve", "▁c ater", "▁cat er", "▁ca ter", "▁t ire", "▁ti re", "▁tir e", "▁por ch", "qu ier", "US A", "U SA", "▁Sw iss", "▁ È", "▁br ave", "▁bra ve", "▁explos ion", "l r", "▁class ified", "Ab out", "▁P ict", "▁Pic t", "▁Pi ct", "▁Dub lin", "▁separ ately", "▁separate ly", "▁bank ing", "▁ban king", "▁Christian ity", "mi gr", "m igr", "Ro b", "R ob", "се р", "с ер", "▁el f", "▁ elf", "▁employ ers", "▁employer s", "▁S low", "▁Sl ow", "▁j uli", "▁ju li", "▁jul i", "west ern", "w estern", "▁anal yst", "▁analy st", "▁analys t", "ob serv", "obs erv", "▁N ice", "▁Nic e", "▁Ni ce", "▁G C", "▁ GC", "▁Let ter", "▁ha rass", "▁har ass", "User name", "▁A unt", "▁Au nt", "▁с ент", "Su p", "S up", "IC ES", "ICE S", "RE NT", "R ENT", "rat io", "r atio", "▁Мо ск", "▁an gles", "▁ang les", "▁angle s", "▁angl es", "▁ angles", "▁l lev", "▁ll ev", "_ *", "▁n it", "▁ni t", "▁ nit", "▁w reck", "▁pat rol", "▁loyal ty", "▁n ationale", "▁nat ionale", "▁national e", "▁nation ale", "go m", "g om", "}$ -", "} $-", "▁dis pute", "▁disput e", "▁disp ute", "▁r us", "▁ru s", "▁ rus", "▁П рез", "▁Пре з", "▁Indust rial", "▁dem ocratic", "▁democr atic", "b w", "li mp", "lim p", "l imp", "ur bed", "urb ed", "▁mie jsce", "▁miejsc e", "ру д", "▁t ex", "▁te x", "▁ tex", "▁develop ments", "▁development s", "▁B right", "▁Br ight", "▁Brig ht", "▁var ying", "▁va rying", "▁vary ing", "fa ct", "fac t", "f act", "▁Port al", "▁Por tal", "as is", "asi s", "a sis", "▁горо да", "▁город а", "▁cre ativity", "▁creat ivity", ")) ))", "))) )", ") )))", ".\" ;", ". 
\";", "ie ux", "ieu x", "▁prov isions", "▁provision s", "uv e", "u ve", "La ng", "L ang", "miss ing", "ра т", "р ат", "ph ony", "▁out line", "pa s", "p as", "el m", "e lm", "mon itor", "TC P", "T CP", "ka t", "k at", "uc ed", "uce d", "u ced", "\\\" ,", "\\ \",", "yn a", "y na", "ра бо", "раб о", "oc ate", "oca te", "▁c ares", "▁car es", "▁care s", "▁ca res", "▁f ins", "▁fin s", "▁fi ns", "▁he ap", "▁ heap", "▁small est", "äch st", "▁I X", "▁ IX", "re cv", "rec v", "key word", "▁at tra", "▁att ra", "▁attr a", "▁sel bst", "Un expected", "Une xpected", "Sm all", "▁насе ље", "▁H us", "▁Hu s", "Enc oder", "Encode r", "▁un set", "▁uns et", "▁home less", "▁hom eless", "▁Johann es", "▁U RI", "▁ URI", "ant age", "anta ge", "▁in hib", "▁appreci ated", "▁appreciate d", "ie lte", "iel te", "ielt e", "i elte", "▁st ays", "▁stay s", "▁sta ys", "▁alle ged", "▁alleg ed", "▁c oding", "▁co ding", "▁cod ing", "▁tv å", "pipe line", "p ipeline", "▁W or", "▁Wo r", "File Path", "▁accept ing", "▁Ex cell", "▁L uther", "▁Lu ther", "▁Friend s", "▁c urt", "▁cur t", "▁cu rt", "▁' $", "▁ '$", "▁tight ly", "▁cz ę", "▁un necessary", "▁F ed", "▁Fe d", "▁А нд", "▁Ан д", "▁H P", "▁ HP", "▁String Builder", "en burg", "' (", "vm a", "v ma", "▁Ab raham", "W L", "▁Re ference", "▁Refer ence", "▁ Reference", "J o", "Bl ob", "Blo b", "▁H ugh", "▁Hug h", "▁Hu gh", "▁Bul gar", "MESS AGE", "з во", "▁avoid ed", "▁po ems", "▁poem s", "▁с ы", "▁ сы", "▁O pp", "▁Op p", "av irus", "avi rus", "Pre view", "Prev iew", "P review", "▁k er", "▁ke r", "▁ ker", "ue va", "u eva", "fl ix", "▁char ging", "▁charg ing", "▁motiv ated", "▁O rd", "▁Or d", "▁ Ord", "▁av eva", "▁ave va", "x l", "▁flex ibility", "ag na", "agn a", "▁rac ism", "d h", "▁b aking", "▁ba king", "F riend", "ble r", "bl er", "b ler", "▁Log ger", "▁ Logger", "Te n", "T en", "nav igation", "▁att achment", "▁attach ment", "▁ attachment", "▁b ajo", "▁ba jo", "▁pr icing", "▁pri cing", "▁T ip", "▁Ti p", "▁ Tip", "da r", "d ar", "G G", "To ols", "Tool s", "Too ls", "T ools", "vol ution", "v olution", "am as", "ama s", "a mas", "▁b ibli", "▁adapt ed", "ox y", "o xy", "▁F reedom", "▁Free dom", "ri co", "ric o", "r ico", "▁coll apsed", "▁collapse d", "z m", "pl o", "p lo", "▁c ô", "▁r t", "▁ rt", "än ger", "äng er", "änge r", "▁D R", "▁ DR", "▁Bit coin", "go w", "g ow", "▁ch ez", "▁che z", "▁ chez", "▁ot ro", "▁te il", "▁ teil", "ла га", "▁St ars", "▁Star s", "▁Sta rs", "▁invest ing", "▁a board", "▁ab oard", "▁f lights", "▁fl ights", "▁flight s", "▁genu inely", "▁genuine ly", "▁prom ising", "Rot ation", "O cc", "▁su oi", "▁suo i", "string ify", "ac ies", "aci es", "a cies", "▁G round", "▁Gr ound", "▁Gro und", "▁sequ ences", "▁sequence s", "▁c ure", "▁cur e", "▁cu re", "out ine", "▁! 
!", "▁ !!", "▁G ay", "▁Ga y", "▁garden s", "▁gard ens", "▁G las", "▁Gl as", "▁Tai wan", "reg istry", "▁# {", "▁ #{", "▁ins pection", "▁insp ection", "▁inspect ion", "Te ll", "T ell", "▁` ${", "p matrix", "▁reg ulation", "▁regul ation", "fin ish", "▁Ed ge", "▁ Edge", "Sp rite", "S prite", "▁Conf eder", "▁immigr ants", "▁elder ly", "um ed", "ume d", "u med", "▁Quest ion", "▁ Question", "Gate way", "fo ny", "fon y", "f ony", "ît re", "î tre", "▁co sm", "▁cos m", "Ro und", "R ound", "▁ign oring", "▁ignor ing", "▁K i", "▁sens itivity", "âte au", "ât eau", "▁engine ers", "▁engineer s", "▁cor rel", "▁corre l", "ir teen", "irt een", "▁Sw itzerland", "▁inher it", "▁ inherit", "wo r", "w or", "▁mid night", "▁P un", "▁Pu n", "ak te", "akt e", "a kte", "Dis able", "▁es per", "▁esp er", "▁not ation", "▁ notation", "▁Univers idad", "so l", "s ol", "de rn", "der n", "d ern", "in ge", "ing e", "▁inv itation", ")} }", ") }}", "▁ â", "▁ess ays", "▁essay s", "ar med", "arm ed", "ch sel", "chs el", "▁не го", "▁ него", "▁confirm ation", "un ity", "unit y", "uni ty", "▁Br other", "▁Bro ther", "▁ Є", "ni ce", "nic e", "n ice", "▁S ue", "▁Su e", "▁t ray", "▁tr ay", "▁tra y", "ро и", "C ookie", "▁Feder ation", "IC T", "I CT", "▁p éri", "stud ent", "▁V ent", "▁Ven t", "▁Ve nt", "K K", "ST EM", "aw k", "▁re un", "▁pe oples", "▁people s", "io res", "ior es", "iore s", "i ores", "ou bt", "▁St age", "▁Sta ge", "▁ Stage", "▁c harm", "▁ch arm", "▁char m", "▁cha rm", "ie ur", "ieu r", "i eur", "▁util ize", "▁utiliz e", "▁d istribute", "▁dist ribute", "▁distribut e", "▁g otta", "▁go tta", "▁got ta", "▁block ing", "H ot", "br ew", "bre w", "b rew", "▁b onds", "▁bon ds", "▁bond s", "le af", "Pro te", "Pr ote", "P rote", "▁d ice", "▁di ce", "▁dic e", "▁Nor man", "▁Norm an", "▁о кт", "▁ок т", "▁in spir", "▁insp ir", "Pr iv", "P riv", "▁P uerto", "▁то ва", "RS T", "R ST", "▁s f", "▁ sf", "▁qu ale", "▁qual e", "ni ck", "nic k", "n ick", "▁sup press", "▁supp ress", "ча т", "ч ат", "▁H ello", "▁Hel lo", "▁Hell o", "▁ Hello", "▁crow ded", "▁crowd ed", "hba r", "h bar", "▁lo ads", "▁load s", "▁ loads", "▁cor rection", "▁correct ion", "▁corre ction", "ad just", "adj ust", "▁E state", "▁Est ate", "▁Esta te", "text sc", "▁cool ing", "iv eau", "ive au", "▁bet ting", "==== ========", "======== ====", "re mark", "rem ark", "r emark", "▁im plications", "▁impl ications", "▁p oz", "▁po z", "ün g", "ü ng", "▁reg ards", "▁regard s", "▁a mid", "▁am id", "▁habit antes", "G I", "▁F ou", "▁Fo u", "▁j ar", "▁ja r", "▁ jar", "▁requ iring", "▁D rupal", "▁Dru pal", "▁li ability", "cz as", "c zas", "▁l yrics", "▁ly rics", "▁N ort", "▁No rt", "▁Nor t", "si l", "s il", "▁M ey", "▁Me y", "UN IT", "ва ния", "f uture", "hi r", "h ir", "CA L", "C AL", "LAB EL", "▁S weet", "▁stat ue", "bor ne", "born e", "b orne", "Not ify", "▁her itage", "▁d orm", "▁do rm", "▁l ever", "▁le ver", "▁lev er", "▁mut tered", "} &", "▁inter mediate", "▁Wat son", "▁view ing", "▁vie wing", "kt or", "k tor", "enti eth", "xx x", "x xx", "at u", "a tu", "▁Inst all", "▁ Install", "Cont in", "▁t oute", "▁to ute", "▁tou te", "▁tout e", "▁P T", "▁ PT", "▁u ri", "▁ur i", "▁ uri", "Call ed", "Cal led", "C alled", "▁O FF", "▁OF F", "▁ OFF", "ig lia", "ic hi", "ich i", "i chi", "с ни", "V o", "▁exhib it", "▁asym pt", "▁G ulf", "л ли", "do min", "dom in", "d omin", "▁départ ement", "mi l", "m il", "▁B ez", "▁Be z", "▁l ately", "▁late ly", "▁lat ely", "▁def ining", "▁defin ing", "▁E L", "▁ EL", "omorph ic", "▁f ebru", "▁fe bru", "▁febr u", "IS TER", "IST ER", "I STER", "res olved", "resolve d", 
"те й", "т ей", "▁S pect", "▁Sp ect", "▁Spec t", "▁Spe ct", "▁sem pre", "▁Se pt", "▁Sep t", "▁cl earing", "▁cle aring", "▁clear ing", "▁diam eter", "in do", "ind o", "▁soc cer", "▁D CHECK", "▁DC HECK", "vo te", "v ote", "▁n omin", "▁no min", "▁nom in", "Type d", "Ty ped", "Typ ed", "Miss ing", "W as", "▁Cent ury", "▁direct ors", "▁dire ctors", "▁director s", "▁mode rate", "▁moder ate", "▁Ill uminate", "▁ Illuminate", "▁челове к", "▁B apt", "▁Ba pt", "▁Qu ant", "▁ Quant", "▁tre ating", "▁treat ing", "ag i", "a gi", "Si l", "S il", "ring e", "rin ge", "r inge", "ł ą", "el lan", "ell an", "ella n", "▁f ino", "▁fin o", "▁fi no", "Capt ure", "C apture", "▁S ic", "▁Si c", "▁st amp", "▁sta mp", "▁stam p", "▁B uen", "▁Bu en", "▁seg undo", "▁in verse", "▁d up", "▁du p", "▁ dup", "▁br oker", "▁bro ker", "▁broke r", "▁search ed", "▁sear ched", "be ans", "bean s", "▁A BC", "▁AB C", "is ha", "ish a", "i sha", "▁Lin ked", "▁Link ed", "▁ Linked", "▁Nich olas", "▁Sw edish", "he mal", "hem al", "▁E M", "▁ EM", "▁j ego", "▁je go", "че ский", "чески й", "lo t", "l ot", "▁dis cret", "▁disc ret", "▁discre t", "▁E g", "pi ck", "pic k", "p ick", "am on", "amo n", "a mon", "▁Rail way", "ка р", "к ар", "▁nav igate", "▁navig ate", "▁Comm ander", "▁Command er", "▁disappe ar", "▁con gress", "▁congr ess", "▁graph ic", "sp r", "s pr", "FLO AT", "▁S erial", "▁Se rial", "▁Ser ial", "▁ Serial", "▁я нва", "so cial", "soc ial", "s ocial", "bu ch", "b uch", "▁se al", "▁sea l", "▁c ement", "▁ce ment", "▁Y e", "ot ti", "ott i", "o tti", "▁The od", "remove Class", "▁Jul ie", "▁Ju lie", "▁Juli e", "▁gr öß", "ST REAM", "▁G B", "▁ GB", "▁Ben ef", "▁Mat rix", "▁ Matrix", "▁ke ine", "▁cont inent", "▁contin ent", "▁ja ar", "DA I", "D AI", "▁S equ", "▁Se qu", "▁ Sequ", "kre is", "▁c rown", "▁cr own", "▁crow n", "▁cro wn", "Init ialize", "Initial ize", "ax y", "a xy", "▁C IA", "▁int end", "▁inte nd", "▁b ub", "▁bu b", "▁mask s", "▁mas ks", "▁sit uated", "▁situ ated", "▁E du", "▁Ed u", "▁particip ating", "ше й", "ш ей", "_{ -", "_ {-", "▁Tele vision", "▁pre ferences", "▁prefer ences", "▁preference s", "▁D rop", "▁Dr op", "▁ Drop", "re view", "rev iew", "▁vi olation", "▁viol ation", "▁ch rist", "▁chr ist", "q q", "▁M yst", "▁My st", "comm ands", "command s", "▁prim itive", "ill ance", "▁r anging", "▁ran ging", "▁rang ing", "▁Adv anced", ") &", "▁О б", "▁sub str", "▁subst r", "▁subs tr", "▁ substr", "▁clos ure", "▁clo sure", "▁ closure", "tw itter", "ne z", "n ez", "▁pr zed", "▁prz ed", "▁prze d", "▁mer ged", "▁merge d", "ur os", "uro s", "u ros", "▁j er", "▁je r", "▁ jer", "▁_ (", "▁ _(", "ar an", "ara n", "a ran", "▁P atri", "▁Pat ri", "▁Pa tri", "▁T un", "▁Tu n", "U K", "il iation", "ili ation", "▁Ke ith", "Own Property", "op sis", "ops is", "Ma d", "M ad", "▁def ence", "A ir", "=$ {", "= ${", "cript ors", "criptor s", "So m", "S om", "▁ ±", "▁HA VE", "~~~~ ~~~~", "▁be aten", "▁beat en", "▁int imate", "▁intim ate", "op ic", "o pic", "▁p řed", "▁př ed", "Sh op", "S hop", "Table s", "Tab les", "T ables", "▁S I", "▁ SI", "re name", "ren ame", "rena me", "r ename", "▁product ive", "rib ly", "r ibly", "▁L uck", "▁Lu ck", "▁Luc k", "▁kl ub", "}} ^{", "}}^ {", "} }^{", "▁F ish", "▁Fi sh", "PR I", "P RI", "en ario", "ena rio", "▁pse ud", "Or d", "O rd", "▁quel ques", "▁D od", "▁Do d", "▁p unto", "▁pun to", "▁punt o", "se nal", "sen al", "▁Br others", "▁Bro thers", "▁Brother s", "▁diab etes", "P aint", "▁person as", "▁persona s", "в ър", "▁n ep", "▁ne p", "▁El len", "▁Ell en", "▁Elle n", "▁h ä", "cr tc", "c rtc", "▁frustr ation", ". 
^{[", "▁s printf", "▁sprint f", "▁ sprintf", "+ -", "En code", "Enc ode", "▁насе лення", "Draw able", "▁b ore", "▁bo re", "▁bor e", "▁E ld", "▁El d", "те т", "т ет", "T ick", "ar ator", "ara tor", "▁Fin ance", "▁agric ultural", ")^ {-", ")^{ -", ") ^{-", "may be", "Sche dule", "▁[ …]", "et ection", "ete ction", "ль ного", "льно го", "▁he els", "▁En joy", "Sy s", "S ys", "orsz ág", "CONT ROL", "cc cc", "▁D ictionary", "▁ Dictionary", "Ne ed", "N eed", "▁He aven", "▁vess els", "▁vessel s", "ec ycle", "e cycle", "ti es", "t ies", "▁e nde", "▁en de", "▁end e", "▁ ende", "SI NG", "S ING", "De scribe", "Desc ribe", "▁Pub lished", "▁Publish ed", "▁win ds", "▁wind s", "neh men", "▁D ES", "▁DE S", "Hor izontal", "▁L ost", "▁Los t", "▁Lo st", "-- -----------", "---- ---------", "-------- -----", "--- ----------", "------------ -", "----- --------", "---------- ---", "------ -------", "--------- ----", "------- ------", "----------- --", "- ------------", "▁p x", "▁ px", "}( {\\", "} ({\\", "▁Hein rich", "oms nitt", "ho s", "h os", "Ro ll", "R oll", "tor ch", "▁equ ity", "▁eq uity", "▁collect ing", "▁l ifting", "▁lif ting", "▁lift ing", "sub figure", "Ne ver", "N ever", "▁L ength", "▁Le ngth", "▁ Length", "▁w inners", "▁win ners", "▁winner s", "▁U SD", "▁US D", "▁st esso", "▁а бо", "▁al tri", "▁alt ri", "▁produ cers", "▁produce rs", "▁producer s", "mon s", "mo ns", "m ons", "▁Pop ular", "Com b", "Co mb", "C omb", "ab lo", "abl o", "a blo", "RE SET", "RES ET", "т ва", "Over lay", "▁id iot", "▁idi ot", "ex ist", "Be havior", "UB LE", "ier re", "i erre", "mine craft", "▁f os", "▁fo s", "▁encuent ra", "▁scream ed", "▁polynom ial", "▁c one", "▁con e", "▁co ne", "▁c ited", "▁cit ed", "▁ci ted", "▁president e", "▁presid ente", "▁re sign", "▁res ign", "▁y elled", "▁i k", "▁ ik", "Pl us", "▁Ми ха", "▁The me", "▁Th eme", "▁ Theme", "▁re li", "▁r eli", "▁rel i", "ne m", "n em", "▁a men", "▁am en", "▁ amen", "▁ Ј", "Th anks", "Thank s", "Than ks", "▁al umin", "▁sh elf", "▁shel f", "!\" );", "! 
\");", "append Child", "▁l ogs", "▁lo gs", "▁log s", "▁ logs", "▁re gex", "▁reg ex", "▁ regex", "▁p unk", "▁pun k", "CO RE", "▁b orders", "▁border s", "▁bord ers", "▁bor ders", "▁Requ ired", "▁ Required", "▁f law", "▁fl aw", "▁cin ema", "▁v í", "▁ ví", "▁ab ortion", "▁abort ion", "jour nal", "j ournal", "in itions", "init ions", "inition s", "state ment", "stat ement", "▁o urs", "▁our s", "▁ou rs", "▁ ours", "ó t", "▁Tur ner", "▁Turn er", "in us", "ev es", "eve s", "e ves", "▁magazine s", "▁magaz ines", "… …", "la ce", "l ace", "sl ider", "slide r", "▁l ocate", "▁loc ate", "▁des arroll", "P an", "To m", "T om", "▁Land es", "▁Lan des", "ol ia", "oli a", "o lia", "▁u nm", "▁un m", "▁Sen ator", "▁ad minister", "▁admin ister", "▁ко ји", "▁' {", "▁) {", "▁ ){", "▁G olf", "▁Gol f", "▁g ele", "▁ge le", "▁gel e", "▁d rank", "▁dr ank", "pos ing", "po sing", "p osing", "▁en semble", "he ap", "sign ature", "то й", "ци й", "scri ber", "scr iber", "scribe r", "scrib er", "▁ch amp", "▁cha mp", "ni o", "n io", "la yers", "lay ers", "layer s", "▁tr ump", "▁mod al", "▁mo dal", "▁ modal", "on ces", "once s", "че ння", "чен ня", "▁C ort", "▁Co rt", "▁Cor t", "▁sun light", "▁M use", "▁Mus e", "▁Mu se", "ém ent", "é ment", "▁curios ity", "▁v r", "▁ vr", "O ct", "yl on", "y lon", "▁rel ativ", "st y", "s ty", "] /", "az u", "a zu", "▁U SS", "▁US S", "▁person a", "▁pers ona", "Me n", "M en", "▁w ides", "▁wide s", "▁wid es", "▁K as", "▁Ka s", "ic ies", "ici es", "i cies", "▁C off", "▁Co ff", "▁con solid", "▁cons olid", "▁inter active", "▁interact ive", "op ing", "o ping", "La nd", "L and", "▁energ ies", "▁independ ently", "▁independent ly", "inner HTML", "Requ ire", "Re quire", "▁abs urd", "▁IN FO", "▁ INFO", "▁b und", "▁bu nd", "▁ bund", "anz ös", "▁G ent", "▁Ge nt", "▁Gen t", "▁scholar s", "▁schol ars", "▁C reated", "▁Create d", "▁Creat ed", "▁Cre ated", "▁ Created", "▁mar ine", "▁mari ne", ".. .'", "... '", "EN V", "E NV", "ach te", "acht e", "a chte", "am ents", "ament s", "amen ts", "a ments", "▁tr ucks", "▁truck s", "▁re wards", "▁reward s", "og s", "o gs", "Gr een", "Gre en", "G reen", "▁n ä", "▁inher ited", "▁inherit ed", "im ated", "imate d", "ima ted", "imat ed", "▁F REE", "▁FR EE", "▁ FREE", "▁ext ens", "da g", "d ag", "▁g low", "▁gl ow", "▁glo w", "ar di", "ard i", "N F", "▁evalu ated", "▁evaluate d", "▁eval uated", "▁o ps", "▁op s", "▁ ops", "▁cle aned", "▁clean ed", "▁Prov ince", "▁Provinc e", "ha bil", "hab il", "h abil", "гра фі", "▁T CP", "▁ TCP", "▁я кі", "▁як і", "▁de ce", "▁dec e", "▁cont empl", "▁acquis ition", "}) $.", "})$ .", "} )$.", "=\" -", "▁se ctors", "▁sector s", "▁sect ors", ":: <", "u ß", "▁trab aj", "th an", "tha n", "t han", "▁S ta", "▁St a", "Mem bers", "Member s", "▁r v", "▁ rv", ")^ {\\", ")^{ \\", ") ^{\\", "mit t", "mi tt", "m itt", "▁W ang", "▁Wa ng", "▁W end", "▁We nd", "▁G lass", "▁Gl ass", "▁Glas s", "▁t xt", "▁tx t", "▁ txt", "▁Cam eron", "ie ls", "iel s", "i els", "▁im mer", "▁imm er", "▁ immer", "▁насе ления", ".. . 
/", "▁ро ди", "▁ роди", "▁sophistic ated", "▁R he", "▁Rh e", "us sy", "uss y", "▁Sy ria", "▁Car oline", "▁Carol ine", "riter ion", "ér c", "é rc", "Lo ve", "L ove", "▁cy cles", "▁cycle s", "▁cycl es", "▁Ter ms", "▁Term s", "▁med ieval", "▁medi eval", "ь я", "▁m issions", "▁miss ions", "▁mission s", "Har d", "Ha rd", "H ard", "▁rég ion", "▁Ph oenix", "De ep", "▁sam pling", "▁dismiss ed", "prop ri", "p ropri", "▁jud ges", "▁judge s", "▁judg es", "ał a", "a ła", "ul os", "ulo s", "u los", "▁L ion", "▁Li on", "▁loc als", "▁local s", "neg ative", "ogen eous", "ogene ous", "▁A pi", "▁Ap i", "▁ Api", "▁d ici", "▁di ci", "▁dic i", "▁а пре", "▁author ized", "▁ authorized", "ze rw", "zer w", "▁p g", "▁ pg", "▁A WS", "▁key word", "▁ keyword", "▁entrepre neur", "▁п рое", "▁про е", "▁V ancouver", "it ating", "ita ting", "itat ing", "F ast", "▁acknowled ged", "▁acknowledge d", "▁tour ist", "▁tou rist", "▁G rid", "▁Gr id", "▁ Grid", "▁En try", "▁Ent ry", "▁ Entry", "▁g ebru", "▁ge bru", "▁geb ru", "sa t", "s at", "ber ger", "berg er", "▁T F", "▁ TF", "▁m t", "▁ mt", "▁Mar cel", "▁Marc el", "▁Tw enty", "▁ ”", "{} {", "{ }{", "hi nt", "hin t", "h int", "▁an onymous", "Cam p", "C amp", "▁** _", "By Comparator", "U C", "▁t ö", "Event Handler", "▁t ours", "▁to urs", "▁tour s", "▁tou rs", "▁lon ely", "▁Sum mary", "▁ Summary", "st ick", "s tick", "All owed", "Allow ed", "лі в", "л ів", "▁B rew", "▁Br ew", "▁Bre w", "AME TER", "▁review ed", "ir at", "ira t", "i rat", "▁n erve", "▁nerv e", "▁ner ve", "▁L inda", "▁Lin da", "▁Lind a", "▁dec is", "▁sp okes", "▁spoke s", "▁spo kes", "▁qu ed", "▁que d", "▁q ued", "▁F T", "▁ FT", "▁в ін", "▁ві н", "ou sing", "ous ing", "o using", "▁L arge", "▁Lar ge", "▁ Large", "▁op ponents", "▁oppon ents", "▁opponent s", "▁D isc", "▁Dis c", "▁Di sc", "Found ation", "EQ UAL", "og g", "o gg", "Re try", "Ret ry", "R etry", "CHAN NEL", "▁Е вро", "▁% .", "▁ %.", "▁i i", "▁ ii", "de ad", "d ead", "▁M ale", "▁Mal e", "▁Ma le", "Com pleted", "Comp leted", "Complete d", "ty p", "t yp", "▁Ty ler", "Dis k", "Di sk", "D isk", "Hi de", "H ide", "iju ana", "▁public ations", "▁publication s", "fo x", "f ox", "vis ed", "vi sed", "v ised", "Fore ign", "Write Line", "де ра", "дер а", "▁remain der", "Pi cker", "P icker", "we alth", "▁G or", "▁Go r", "sequ ently", "▁coll ision", "▁Harr ison", "▁Harris on", "▁work place", "▁N ormal", "▁Nor mal", "▁Norm al", "▁ Normal", "▁B irth", "▁Bir th", "▁cons ume", "▁consum e", "Sh ift", "▁avoid ing", "▁C ha", "▁Ch a", "▁An ti", "▁Ant i", "▁ch arts", "▁char ts", "▁chart s", "▁P av", "▁Pa v", "ст вом", "ство м", "ual mente", "an ed", "ane d", "a ned", "▁A uch", "▁Au ch", "rd ev", "r dev", "▁she er", "▁an gl", "▁ang l", "sub str", "Gener ate", "> =", "▁B ev", "▁Be v", "▁ч ем", "▁че м", "▁camp o", "▁cam po", "▁lect ure", "hy per", "▁Balt imore", "mi x", "m ix", "ke iten", "keit en", "▁ра ди", "▁l asted", "▁la sted", "▁last ed", "▁las ted", "▁discrim ination", "ig te", "igt e", "ok al", "oka l", "o kal", "Ph ase", "▁T itel", "▁Tit el", "▁Fif th", "▁di agnostic", "su ng", "sun g", "s ung", "▁giorn ata", "os ta", "ost a", "o sta", "is co", "isc o", "▁S ara", "▁Sa ra", "▁Sar a", "m v", "▁el ő", "▁R osen", "▁Ro sen", "▁Ros en", "▁Rose n", "▁E SP", "▁ES P", "ph er", "p her", "▁a j", "▁ aj", "Path s", "Pat hs", "▁R alph", "▁ž e", "▁ že", "ре в", "р ев", "▁о коло", "▁ок оло", "▁Ag reement", "▁Word Press", "an try", "ant ry", "▁p icks", "▁pick s", "▁pi cks", "▁pic ks", "▁N ur", "▁Nu r", "chedul ed", "ki e", "k ie", "▁represent ations", "▁representation s", "++ ){", "++) {", 
"ess ment", "▁count less", "Block s", "Bl ocks", "Blo cks", "ym e", "y me", "▁c lo", "▁cl o", "▁B ened", "▁Be ned", "▁Ben ed", "ch ars", "char s", "cha rs", "▁A gent", "▁Ag ent", "▁Age nt", "▁ Agent", "▁hist oria", "▁histor ia", "▁F loor", "▁Fl oor", "▁Flo or", "▁ten ía", "▁long est", "▁lon gest", "fr ica", "▁b ef", "▁be f", "▁mechan isms", "▁mechanism s", "ла зи", "▁h eter", "▁he ter", "▁het er", "▁athlet es", "▁period ic", "▁V otes", "▁Vo tes", "ри сти", "▁n á", "▁ ná", "▁m aid", "▁ma id", "▁mai d", "▁s wear", "▁sw ear", "▁swe ar", "▁wip ed", "▁graph s", "▁grap hs", "▁t hesis", "▁the sis", "▁th esis", "▁sens ation", "pers istence", "▁V il", "▁Vi l", "ac s", "a cs", "▁de el", "sc rib", "scri b", "scr ib", "ie ro", "ier o", "i ero", "▁dis cre", "▁disc re", "air y", "ai ry", "Data Source", "q t", "ic iones", "ici ones", "icio nes", "icion es", "▁res pected", "▁respect ed", "▁f ram", "▁fr am", "▁fra m", "▁spec ialized", "▁special ized", "▁prés ent", "▁pré sent", "Tur n", "T urn", "▁compl aints", "▁complain ts", "▁complaint s", "(\" ,", "( \",", "▁Rel ated", "▁Set ting", "▁ Setting", "р ю", "▁s ą", "▁P le", "▁Pl e", "▁d isse", "▁dis se", "▁diss e", "ca ps", "cap s", "c aps", "▁C ash", "▁Cas h", "▁Ca sh", "▁cons umed", "▁consum ed", "▁consume d", "▁l b", "▁ lb", "Ad just", "Ser ialize", "Serial ize", "S erialize", "is y", "i sy", "▁pat ent", "▁vis ibility", "▁S ach", "▁Sa ch", "▁Sac h", "ün st", "▁cy ber", "▁Bl ake", "▁Bl oom", "▁Blo om", "▁Sh ah", "▁Sha h", "PO WER", "▁in clusion", "▁incl usion", "se rie", "ser ie", "s erie", "▁man era", "sec onds", "second s", "is ches", "isch es", "ische s", "isc hes", "▁C andidate", "W D", "op ath", "o path", "▁про гра", "▁efficient ly", "ap ps", "app s", "tool bar", "we nd", "wen d", "w end", "▁Ne il", "▁form ats", "▁format s", "▁forma ts", "▁T emplate", "▁Temp late", "▁ Template", "▁min istry", "▁minist ry", "▁Char acter", "▁ Character", "Un iform", "▁fon ction", "не м", "н ем", "Wh ile", "к ва", "рі я", "▁D L", "▁ DL", "▁L ayout", "▁La yout", "▁Lay out", "▁ Layout", "не ние", "▁c aval", "▁ca val", "▁cav al", "▁H ob", "▁Ho b", "SP I", "S PI", "▁h ely", "▁he ly", "▁hel y", "Dest ination", "), \r", ") ,\r", "▁i OS", "▁ad mission", "▁adm ission", "▁c ss", "▁cs s", "▁ css", "user Id", "um bling", "umb ling", "▁bo oking", "▁book ing", "▁COPY RIGHT", "▁b land", "▁bl and", "output s", "▁sub mission", "▁subm ission", "ti t", "t it", "fe ctions", "fect ions", "fection s", "fr agment", "frag ment", "▁fa ç", "▁Through out", "▁distingu ished", "▁distinguish ed", "▁ar range", "▁arr ange", "▁arrang e", "ume ric", "umer ic", "xf e", "x fe", "ip age", "ipa ge", "i page", "ер жа", "▁C ars", "▁Car s", "▁Ca rs", "▁P AGE", "▁PA GE", "▁ PAGE", "▁a unque", "▁insert ed", "smith y", "AL LOC", "ALL OC", "RE C", "R EC", "▁B ak", "▁Ba k", "▁Str ong", "ac hen", "ach en", "ache n", "a chen", "▁Spec ific", "w q", "▁Д у", "MO VE", "▁mús ica", "▁C ris", "▁Cr is", "ea u", "e au", "▁F orum", "▁For um", "▁Fo rum", "li sted", "list ed", "l isted", ")\\ \\", ") \\\\", "▁X VI", "▁XV I", "▁м оло", "▁мо ло", "/ $", "Be r", "B er", "▁tact ics", "Form atter", "Format ter", "op ens", "ope ns", "open s", "▁r h", "▁ rh", "▁t ram", "▁tr am", "▁tra m", "V L", "▁Pro file", "▁Prof ile", "▁ Profile", "▁par ish", "▁Ray mond", "▁cont empor", "▁Pl anning", "▁Plan ning", "▁Ч е", "▁A RM", "▁AR M", "▁ ARM", "▁des ires", "▁desire s", "k v", "O s", "▁m iner", "▁min er", "▁mi ner", "▁mine r", "▁qual ify", "ik u", "i ku", "▁der ni", "ol óg", "▁K id", "▁Ki d", "ane an", "▁Hol land", "▁Holl and", "Aut om", "Auto 
m", "▁Hamilton ian", "St ation", "Stat ion", "js p", "j sp", "▁YO UR", "▁YOU R", "▁Th ailand", "effect ive", "п ло", "▁relie ved", "▁O klahoma", "▁Jul ian", "▁Juli an", "▁Julia n", "▁ind ent", "▁inde nt", "▁ indent", "if r", "i fr", "пре де", "▁fl ame", "on io", "oni o", "o nio", "As sign", "Ass ign", "▁sh ifts", "▁shift s", "▁car acter", "▁caract er", "if icates", "ific ates", "ificate s", "ifica tes", "X R", "▁G FP", "▁GF P", "FE ATURE", "▁M aine", "▁Ma ine", "▁Main e", "▁Mai ne", "▁f rank", "▁fr ank", "▁al igned", "▁align ed", "▁ aligned", "▁p ří", "▁př í", "Code Attribute", "▁M AC", "▁MA C", "▁ MAC", "▁R oot", "▁Ro ot", "▁ Root", "▁F M", "▁ FM", "erv ation", "с лі", "▁s hy", "▁sh y", "▁partic ul", "▁parti cul", "pl atz", "▁hypothes is", "at hol", "ath ol", "s With", "J s", "$ ^{-", "▁#! /", "▁l emon", "▁le mon", "▁a bol", "▁ab ol", "▁ abol", "▁Mil an", "▁Mi lan", "an ten", "ant en", "ante n", "a nten", "▁s ia", "▁si a", "ri as", "ria s", "r ias", "▁con sid", "▁cons id", "as so", "ass o", "ain ers", "ai ners", "ainer s", "aine rs", "▁cir ca", "▁circ a", "re try", "ret ry", "r etry", "▁nue vo", "const ants", "constant s", "▁Med iterr", "▁Turk ish", "ion en", "io nen", "ione n", "i onen", "c rypto", "▁ev olved", "▁\" ?", "▁p úblic", "▁comp rend", "▁compre nd", "▁compr end", "al lo", "all o", "zo om", "z oom", "▁dat etime", "▁date time", "▁ datetime", "▁mond iale", "ма т", "м ат", "▁M ask", "▁Ma sk", "▁Mas k", "▁ Mask", "▁p row", "▁pro w", "▁pr ow", "▁belong ing", "+ '", "OUT PUT", "▁G rab", "▁Gr ab", "▁Gra b", "M ir", "▁accommod ate", "▁$ ('#", "▁ $('#", "▁Lou ise", "▁Louis e", "▁da mit", "▁dam it", "}' ,", "} ',", "scri pts", "script s", "sn apshot", "snap shot", "▁sh itty", "▁shit ty", "▁y o", "▁ yo", "▁belie ving", "▁inhabit ants", "W P", "▁Colomb ia", "li sts", "list s", "l ists", "▁Mur phy", "Data set", "Dat aset", "▁(! 
$", "▁tremend ous", "▁se ñ", "▁S ed", "▁Se d", "▁sw allowed", "▁swallow ed", "om p", "o mp", "▁L ate", "▁La te", "▁Lat e", "▁an ys", "▁any s", "▁dead ly", "fol low", "f ollow", "▁A nc", "▁An c", "▁h w", "▁ hw", "wik ipedia", "ic ts", "ict s", "▁Al aska", "▁sc ary", "▁scar y", "▁second o", "▁sec ondo", "▁her oes", "▁hero es", "▁veter an", "▁behav iors", "▁behavior s", "▁behavi ors", "- %", "▁E z", "▁с і", "▁ сі", "tik z", "▁spect acular", "▁Ch ron", "▁( @", "▁ (@", "▁de mo", "▁dem o", "▁ demo", "▁ser ialized", "▁serial ized", "▁In depend", "▁Indep end", "BU ILD", "fail ure", "▁P ORT", "▁PO RT", "▁ PORT", "ю чи", "▁med itation", "sample s", "sam ples", "s amples", "i ão", "▁Ни кола", "▁я зы", "▁Tr uth", "▁Tru th", "▁co efficient", "▁coeff icient", "sl ug", "▁XV III", "▁XVI II", "▁XVII I", "ia o", "i ao", "de ck", "dec k", "▁раз ви", "▁ad oles", "ar ius", "ari us", "▁H az", "▁Ha z", "▁Pro test", "▁Prote st", "ra de", "rad e", "r ade", "не ния", "▁cl ause", "conne ctor", "connect or", "conn ector", "RA TE", "R ATE", "ц ю", "▁Conne cticut", "V S", "abul ary", "HO W", "▁d elen", "▁de len", "▁del en", "▁su ited", "▁suit ed", "▁suite d", "▁Sur vey", "ze c", "z ec", "ți i", "ț ii", "▁b acks", "▁back s", "▁ba cks", "▁ backs", "com merce", "▁And rea", "▁Andre a", "▁Andr ea", "▁propag anda", "iz ioni", "izi oni", "izio ni", "▁B il", "▁Bi l", "▁In nov", "▁Inn ov", "▁forg ive", "▁oper ates", "▁operate s", "▁opera tes", "ч ний", "▁l ingu", "▁lin gu", "▁ling u", "▁c ollar", "▁col lar", "▁coll ar", "до л", "сі й", "zt en", "zte n", "z ten", "im at", "ima t", "i mat", "▁sh oe", "ge nder", "gen der", "g ender", "▁leg ally", "▁legal ly", "RO P", "R OP", "▁S leep", "deleg ate", "ID s", "▁build s", "▁qu er", "▁que r", "▁q uer", "▁ quer", "uls ion", ". “", "к ло", "ri se", "ris e", "r ise", "th ink", "К о", "▁bacter ia", "▁magn ific", "▁prison er", "Cl ock", "C lock", "R B", "ú t", "▁L iz", "▁Li z", "gr a", "g ra", "▁And ré", "▁Andr é", "▁D ennis", "▁Den nis", "▁sur ge", "▁surg e", "ex isting", "exist ing", "▁W ald", "▁Wal d", "▁Wa ld", "▁S chema", "▁Sch ema", "▁Sche ma", "▁ Schema", "▁war nings", "▁warn ings", "▁warning s", "▁qu adr", "▁quad r", "at te", "att e", "▁E ins", "▁Ein s", "▁ad option", "▁adopt ion", "▁w anna", "▁de rive", "▁der ive", "▁deriv e", "▁ derive", "▁a rena", "▁are na", "▁ar ena", "▁aren a", "▁Den ver", "▁F i", "▁ Fi", "▁Jess ica", "acy j", "R atio", "▁которы е", "▁Act ivity", "▁Activ ity", "▁ Activity", "em u", "e mu", "▁St alin", "▁Sta lin", "ag gi", "agg i", "a ggi", "▁f ün", "▁f ils", "▁fil s", "▁fi ls", "aj u", "a ju", "card s", "car ds", "c ards", "▁att raction", "▁attract ion", "▁attr action", "▁attra ction", "od ot", "odo t", "o dot", "F at", "▁H aven", "▁Ha ven", "▁Have n", "▁Hav en", "▁nine teenth", "▁ninete enth", "▁* *\"", "▁** \"", "▁m aggio", "▁mag gio", "ma ny", "man y", "m any", "win ning", "▁G A", "▁ GA", "▁d ummy", "▁ dummy", "Un able", "en ci", "enc i", "ère nt", "è rent", "Im g", "I mg", "▁t ob", "▁to b", "DI P", "D IP", "S ince", "▁Sa fe", "▁Saf e", "▁ Safe", "Gu ard", "is ure", "i sure", "port e", "por te", "p orte", "▁stad ium", "in di", "ind i", "▁App arently", "ug no", "▁w olf", "▁ne ces", "▁overse as", "of s", "o fs", "ar el", "are l", "a rel", "▁F ine", "▁Fin e", "▁Fi ne", "▁cor rupt", "▁n ovember", "▁nov ember", "▁nove mber", "▁interpret ed", "ib ile", "ibil e", "▁w ages", "▁wa ges", "▁wage s", "▁Pre tty", "▁Her bert", "▁reg istr", "вы м", "an swer", "ans wer", "▁m orte", "▁mor te", "▁mort e", "▁com posite", "▁compos ite", "Tool bar", "▁iter ator", "▁ iterator", 
"ant ine", "anti ne", "▁init ialized", "▁initial ized", "▁initialize d", "▁ initialized", "▁poor ly", "Access or", "▁Han nah", "▁Hann ah", "▁то лько", "ol an", "ola n", "o lan", "▁o tto", "▁ot to", "▁ott o", "▁ otto", "▁str ikes", "▁stri kes", "▁strike s", "▁conflict s", "▁conflic ts", "▁s urg", "▁su rg", "▁sur g", "▁histor ian", "▁historia n", "wo man", "w oman", "▁l ibraries", "be w", "b ew", ")- -(", ")-- (", "ga ther", "g ather", "▁L ip", "▁Li p", "▁f ict", "▁fi ct", "FIL TER", "@ {", "▁bl essed", "▁bless ed", "et ics", "etic s", "eti cs", "▁f ork", "▁for k", "▁Me tal", "▁Met al", "▁Meta l", "po lation", "pol ation", "p olation", "▁negoti ations", "▁gen us", "▁genu s", "▁cont rolling", "▁control ling", "VER T", "VE RT", "V ERT", "▁P erry", "▁Per ry", "▁S PD", "▁SP D", "CA SE", "C ASE", "т вер", "▁C rown", "▁Cr own", "▁Cro wn", "▁Crow n", "▁ind ul", "▁indu l", "▁e hemal", "▁ampl itude", "▁amplit ude", "▁B ach", "▁Ba ch", "▁phot ographer", "▁photograph er", "n ý", "▁inv ested", "▁invest ed", "▁P arte", "▁Par te", "▁Part e", "▁pro long", "C U", "icht et", "ichte t", "res ume", "▁c arb", "▁car b", "▁ca rb", "ur st", "urs t", "▁N ixon", "▁n eur", "▁ne ur", "▁neu r", "▁ neur", "▁corpor ations", "▁corporation s", "Op s", "O ps", "u u", "l m", "ap ple", "app le", "ch te", "cht e", "▁deliber ately", "ber e", "be re", "b ere", "▁fe br", "▁provinc ia", "▁provin cia", "Over flow", "▁E ight", "▁ind ication", "▁indic ation", "▁pist ol", "▁к ре", "▁ кре", "oc ial", "oci al", "o cial", "▁r und", "▁run d", "▁ru nd", "▁ rund", "▁se hr", "ok at", "oka t", "o kat", "ül et", "ü let", "▁He at", "Н а", "▁о дин", "▁од ин", "IC S", "I CS", "ay e", "a ye", "▁eight een", "▁t ug", "▁tu g", "LO T", "L OT", "▁L ar", "▁La r", "ning s", "n ings", "▁T odd", "▁To dd", "▁Tod d", "▁organis ations", "▁organisation s", "▁g enes", "▁gen es", "▁ge nes", "▁gene s", "B ag", "Ke ep", "^{ +", "Base d", "Bas ed", "B ased", "sk in", "ski n", "s kin", "▁to das", "▁tod as", "▁toda s", "▁illustr ated", "▁c f", "▁ cf", "▁ar riving", "▁arriv ing", "▁arr iving", "▁excess ive", "▁tra its", "▁trait s", "▁s ank", "▁san k", "▁Att ribute", "▁ Attribute", "▁G D", "▁ GD", "com par", "comp ar", "▁dent ro", "br is", "b ris", "▁at oms", "▁atom s", "fr ed", "fre d", "f red", "▁E val", "▁Ev al", "▁Eva l", "▁ Eval", "▁di stances", "▁dist ances", "▁distance s", "st aw", "sta w", "краї н", "vari ables", "variable s", "l c", "на ли", "нал и", "н али", "▁чемпи она", "wi j", "w ij", "▁Sim ilar", "je k", "j ek", "Pe t", "P et", "=\" $", "ко то", "▁R ang", "▁Ra ng", "▁Ran g", "ion ato", "iona to", "▁bek annt", "▁bekan nt", "! 
*", "Li m", "L im", "▁concl usions", "▁conclusion s", "ain te", "ai nte", "aint e", "a inte", "- ,", "▁g ł", "▁pass ive", "▁Ga ussian", "▁stag ione", "ME DI", "MED I", "it ol", "ito l", "i tol", "▁Jer emy", "View s", "class List", "▁desper ately", "▁desperate ly", "▁ver l", "▁ve rl", "br ace", "bra ce", "N P", "▁c ob", "▁co b", "▁A rist", "▁Ar ist", "▁Ari st", "da p", "d ap", "Fil ters", "Filter s", "'=> '", "ul tan", "ult an", "▁F actory", "▁ Factory", "è le", "▁l asting", "▁last ing", "▁las ting", "▁element ary", "▁C M", "▁ CM", "▁Louis iana", "▁p ov", "▁po v", "PC I", "P CI", "è de", "▁P ink", "▁Pin k", "▁Br uno", "▁Bru no", "▁Brun o", "▁Y ellow", "▁ev angel", "▁lik elihood", "WID TH", "▁$ -", "▁ $-", "ni co", "nic o", "n ico", "hu i", "h ui", "ak ter", "akt er", "akte r", "ne urs", "neur s", "n eurs", "▁bre eze", "▁bree ze", "▁со ста", "▁He ader", "▁Head er", "▁ Header", "om rå", "▁D ylan", "▁Dy lan", "▁Bi ographie", "▁Univers ität", "on so", "ons o", "HAND LE", "J ournal", "ea st", "e ast", "▁sup pliers", "▁supplier s", "▁table t", "▁tab let", "LI C", "L IC", "PER TY", "ї в", "▁z aw", "▁za w", "▁su bm", "▁sub m", "▁Fern ando", "▁nou velle", "▁nouve lle", "▁Point s", "▁ Points", "▁str angers", "▁strange rs", "▁stranger s", "▁strang ers", "Component Model", "ist ro", "istr o", "au rus", "aur us", "▁san ct", "▁о дна", "▁од на", "▁В ы", "▁о на", "▁он а", "▁ она", "vert ical", "Sp ring", "▁Har old", "▁Back ground", "▁ Background", "Bal ance", "Key word", "~$ \\", "~ $\\", "mal loc", "m alloc", "ORM AL", "Sk ip", "▁Mu ham", "▁back wards", "▁backward s", "c ów", "по зи", "▁back end", "▁ backend", "▁de emed", "▁accur ately", "▁accurate ly", "▁trans c", "▁Broad way", "▁g rud", "▁gr ud", "▁gru d", "▁N amen", "▁Name n", "▁Na men", "▁Nam en", "▁sh ifting", "▁shift ing", "▁ment ally", "▁mental ly", "▁cal ories", "▁cons ensus", "Perm issions", "Permission s", "▁ob jet", "▁obj et", "▁elabor ate", "at ts", "att s", "▁sn ake", "▁ref res", "▁refr es", "ar u", "a ru", "▁reflect s", "oun ge", "o unge", "R ank", "▁K urt", "▁Kur t", "▁Ku rt", "▁p ied", "▁pie d", "▁pi ed", "▁exped ition", "V el", "▁O wen", "Le ad", "L ead", "▁utter ly", "▁Ar be", "▁bre asts", "▁breast s", "IP S", "I PS", "▁hung er", "▁hun ger", "at em", "ate m", "a tem", "▁vers chied", "▁versch ied", "▁Cam era", "▁ Camera", "▁Mün chen", "iv als", "ival s", "iva ls", "i vals", "▁sp raw", "▁spr aw", "▁S ü", "▁Was ser", "▁mechan ics", "Load ed", "Lo aded", "db c", "d bc", "▁re marks", "▁rem arks", "▁remark s", "▁ remarks", "▁} ).", "▁}) .", "▁ }).", "▁pain ter", "▁pa inter", "▁paint er", "▁h aut", "▁ha ut", "Mar shal", "IS D", "I SD", "▁ve loc", "▁vel oc", "▁In cre", "▁Inc re", "W ar", "▁ру с", "▁com pte", "▁comp te", "▁compt e", "ü g", "▁Def inition", "▁ Definition", "▁G am", "▁Ga m", "▁H ir", "▁Hi r", "▁witness ed", "▁g ren", "▁gr en", "▁gre n", "▁ gren", "▁hur ry", "ch et", "che t", "c het", "re verse", "G F", "▁Qu arter", "п ла", "▁s ar", "▁sa r", "sb urg", "sbur g", "s burg", "▁D it", "▁Di t", "▁ Dit", "▁Arn old", "j k", "▁l ambda", "▁ lambda", "è ge", "▁o z", "▁ oz", "▁h ans", "▁ha ns", "▁han s", "▁answ ering", "▁answer ing", "▁o live", "▁ol ive", "▁sp ont", "▁spo nt", "▁inter vals", "▁interval s", "> @", "▁т ран", "▁тра н", "▁F ocus", "▁ Focus", "ч них", "▁д ви", "▁tri angle", "▁r ally", "▁P unk", "▁Pun k", "▁G and", "▁Ga nd", "se ctions", "section s", "sect ions", "сси й", "AC CESS", "A CCESS", "ha rm", "har m", "h arm", "▁Sk ip", "▁ Skip", "▁D river", "▁Dr iver", "▁Drive r", "▁ Driver", "▁Sant iago", "it ung", "itu ng", "▁B arr", 
"▁Bar r", "▁Ba rr", "process or", "▁real ised", "▁realise d", "ą z", "le ave", "▁C omo", "▁Com o", "▁Co mo", "▁Re views", "▁Review s", "▁и зда", "▁из да", "▁earn ings", "▁ear nings", "▁earning s", "▁S creen", "▁Sc reen", "▁Scre en", "▁ Screen", "gr and", "gra nd", "g rand", "▁ap ril", "▁apr il", "▁sil ently", "▁silent ly", "ed o", "e do", "ue st", "ues t", "u est", "oo oo", "▁Исто рия", "ра з", "MAGE S", "MAG ES", "▁Sing h", "▁Sin gh", "▁Per fect", "▁revolution ary", "▁н і", "▁ ні", "▁Sch ools", "▁School s", "R ich", "▁ch rom", "▁chr om", "▁an terior", "▁ante rior", "▁Indones ia", "Con straints", "Constraint s", "▁\" __", "▁\"_ _", "▁six teen", "▁sixt een", "ér e", "é re", "мен та", "мент а", "N il", "je l", "j el", "че ские", "чески е", "▁thr one", "▁thro ne", "▁aud iences", "▁audience s", "▁i hren", "▁ih ren", "▁ihr en", "▁ihre n", "ра б", "Qu ick", "in burgh", "fi co", "fic o", "f ico", "▁kid n", "▁ki dn", "irm ingham", "is le", "isl e", "iz ación", "iza ción", "▁Ch ampions", "▁Champion s", "▁вы со", "ol er", "ole r", "o ler", "▁z ak", "▁za k", "▁p lat", "▁pl at", "▁V III", "▁VI II", "▁VII I", "at ique", "ati que", "li ter", "lit er", "l iter", "▁P rest", "▁Pr est", "▁Pres t", "▁Pre st", "in is", "ini s", "i nis", "▁scient ist", "▁m ån", "▁må n", "ke ley", "kel ey", "▁h yd", "▁hy d", "grad uate", "of t", "o ft", "▁N GC", "on gs", "ong s", "▁t ier", "▁tie r", "▁ti er", "▁Sh aw", "▁Sha w", "un ächst", "▁establish ing", "▁ind icator", "▁indic ator", "▁Par ad", "▁Pa rad", "▁Para d", "▁Tr ail", "▁Tra il", "UM N", "▁sp ine", "▁spin e", "▁Vis ual", "▁ Visual", ":: $", "▁t eles", "▁te les", "▁tele s", "▁tel es", "OP ER", "O PER", "▁pack aging", "to ire", "t oire", "▁не ско", "▁product ivity", "A f", "ні ї", "▁de gener", "▁deg ener", "br it", "b rit", "U i", "▁Y am", "▁Ya m", "▁d ough", "▁do ugh", "▁dou gh", "os ph", "osp h", "▁cl ue", "▁ре ги", "▁me ille", "▁tend ency", "▁re lay", "▁rel ay", "▁design ers", "▁designer s", "▁Т у", "Sh are", "▁b icy", "▁bi cy", "▁M asters", "▁Ma sters", "▁Mas ters", "▁Master s", "▁м но", "▁altern atives", "▁alternative s", "ет о", "е то", "▁coun tr", "▁count r", "▁W ow", "▁Wo w", "LO CAL", "LOC AL", "en ue", "enu e", "▁s lim", "▁sl im", "к ви", "▁t ir", "▁ti r", "▁do it", "lic a", "li ca", "l ica", "ci pe", "cip e", "c ipe", "iz ia", "izi a", "▁A ires", "▁Air es", "▁F alls", "▁Fall s", "▁Fal ls", "▁concent rate", "▁concentr ate", "▁ne gl", "▁neg l", "▁Re in", "? 
,", "▁G ott", "▁Go tt", "▁Got t", "▁Ver ify", "▁ Verify", "▁Stud ios", "▁Studio s", "$ ('#", "ow ym", "owy m", "я в", "Prim itive", "▁tax i", "▁ta xi", "▁Com mercial", "▁Ч ер", "▁Че р", "place holder", "se au", "sea u", "s eau", "cor rect", "he imer", "heim er", "▁H of", "▁Ho f", "▁d ia", "▁di a", "▁i rr", "▁ir r", "▁ur ged", "▁urg ed", "▁urge d", "▁a nom", "▁an om", "▁ano m", "▁t arde", "▁tar de", "▁tard e", "ur m", "u rm", "▁se ized", "▁sei zed", "▁seiz ed", "DO T", "D OT", "op acity", "St rings", "String s", "Str ings", "▁dec iding", "▁decid ing", "▁listen ers", "▁listener s", "ár a", "á ra", "▁pl anted", "▁plan ted", "▁plant ed", "▁é taient", "▁ét aient", "Z oom", "st ví", "ng th", "ä ude", "▁C av", "▁Ca v", "▁v endor", "▁vend or", "▁ vendor", "▁ ż", "▁meas uring", "▁necess ity", "▁r ivers", "▁ri vers", "▁river s", "▁riv ers", "▁labor atory", "▁E ff", "▁reprodu ce", "▁S ak", "▁Sa k", "▁not ebook", "▁note book", "▁reason ably", "isecond s", "i seconds", "▁Part ial", "▁ Partial", "GUI D", "GU ID", "G UID", "▁Per iod", "▁ Period", "▁reve aling", "▁reveal ing", "▁conv iction", "▁ н", "▁бу ли", "▁altern ate", "▁alter nate", "cc iones", "▁N AT", "▁NA T", "▁can onical", "▁canon ical", "mo z", "m oz", "▁Mé xico", "M o", "▁ш а", "▁ ша", "lim inary", "f é", "чно й", "ч ной", "▁Ham burg", "▁Hamb urg", "▁influ ential", "▁b olt", "▁bo lt", "▁bol t", "az zo", "azz o", "PH P", "P HP", "▁Sa udi", "▁Saud i", "▁Sau di", "▁r m", "▁ rm", "▁cer ca", "▁decor ated", "▁st aat", "▁sta at", "Lo u", "L ou", "▁compet itors", "во ї", "▁diam ond", "▁dia mond", "▁m obil", "▁mo bil", "▁mob il", "Click Listener", "set State", "▁s üd", "; \"", "œ ur", "▁Lud wig", "▁clin ic", "▁e go", "▁eg o", "▁ ego", "Thread ing", "▁f ract", "▁fr act", "▁fra ct", "Ref lection", "oss ip", "\"] [\"", "▁L ov", "▁Lo v", "Ex press", "Exp ress", "Expr ess", "д ри", "if acts", "ifact s", "▁O ften", "▁Of ten", "▁ лу", "▁p ets", "▁pe ts", "▁pet s", "▁address ing", "▁m ens", "▁me ns", "▁men s", "▁ED IT", "▁ EDIT", "ud der", "udd er", "Vert ical", "ка та", "Cap t", "C apt", "verb ose", "▁вой ны", "UNK NOWN", "un its", "unit s", "uni ts", "per mission", "perm ission", "[ _", "▁er sch", "▁ers ch", "▁comm unes", "▁commun es", "▁commune s", "Un ityEngine", "▁com mut", "▁comm ut", "kl ass", "k lass", "▁volt age", "▁volta ge", "re zent", "rez ent", "pe rf", "per f", "DR V", "D RV", "▁f ame", "▁fam e", "▁fa me", "▁S pot", "▁Sp ot", "▁Л ю", "▁c asting", "▁cas ting", "▁cast ing", "hi m", "h im", "▁en gl", "▁eng l", "▁int ro", "▁intr o", "▁Г у", "Comp any", "some thing", "som ething", "▁cl icking", "▁click ing", "жи ва", "▁fl ames", "▁flame s", "▁random ly", "ex tr", "ext r", "Equal To", "an ners", "ann ers", "anner s", "anne rs", "▁p arks", "▁par ks", "▁park s", "▁murm ured", "ми я", "▁reason ing", "сле д", "▁n er", "▁ne r", "▁ ner", "▁é c", "▁ éc", "ow ners", "own ers", "owner s", "▁Д же", "▁Дж е", "▁me er", "▁typ ing", "▁ty ping", "▁happ ily", ".. ...", "... ..", ".... .", ". 
....", "▁Ч а", "be cca", "bec ca", "▁P apers", "▁Pa pers", "▁Pap ers", "▁Paper s", "▁Or acle", "▁equ ilibrium", "man agement", "Li te", "L ite", "▁des ktop", "▁desk top", "ă r", "▁G ill", "▁Gi ll", "▁Gil l", "do rf", "d orf", "ig g", "i gg", "▁qu esta", "▁que sta", "▁quest a", "▁ questa", "Warning s", "Warn ings", "War nings", "over flow", "▁V T", "▁ VT", "▁cons isted", "▁consist ed", "▁A bu", "▁Ab u", "v scale", "J O", "ah o", "a ho", "▁T ensor", "▁Ten sor", "▁ Tensor", "▁hes itated", "▁w enn", "▁we nn", "▁wen n", "map sto", "maps to", "▁controvers ial", "M F", "▁l ac", "▁la c", "▁an ch", "▁anc h", "▁ anch", "▁A A", "▁ AA", "it ta", "itt a", "i tta", "ul in", "uli n", "u lin", "▁c ler", "▁cl er", "▁cle r", "▁D iana", "▁Di ana", "▁Fre ud", "▁challeng ed", "▁challenge d", "лё н", "л ён", "▁se ated", "▁sea ted", "▁seat ed", "▁sm iles", "▁smile s", "▁cr acked", "▁crack ed", "▁а ктив", "ско ј", "dict ion", "di ction", "d iction", "ex press", "exp ress", "expr ess", "▁im posed", "▁imp osed", "▁pro tests", "▁prote sts", "▁protest s", "▁prot ests", "▁w ounds", "▁wound s", "C ulture", "N Y", "prevent Default", "ad io", "adi o", "▁NE W", "▁ NEW", "B attle", "▁se colo", "▁sec olo", "▁A x", "▁found ing", "(\" -", "▁ret ro", "▁retr o", "▁pot atoes", "import ant", "ie me", "iem e", "i eme", "ys ide", "y side", "d ummy", "▁t ilt", "▁til t", "▁ti lt", "▁R ules", "▁Ru les", "▁Rule s", "▁ Rules", "▁un ters", "▁unt ers", "▁unter s", "A ud", "V ENDOR", "ud ge", "un al", "una l", "u nal", "▁Ad ult", "▁im pat", "▁imp at", "▁rep airs", "▁repair s", "▁F erd", "▁Fe rd", "▁Fer d", "▁Az ure", "▁ Azure", ")) :", ") ):", "▁pag ina", "▁E pisode", "File name", "Fil ename", "▁j á", "▁oblig ation", "ig hed", "igh ed", "▁pers istent", "Mus ic", "▁C ele", "▁Ce le", "▁Cel e", "▁r y", "▁ ry", "▁cert ification", "ul d", "u ld", "▁T L", "▁ TL", "▁sk irt", "▁ski rt", "▁M ini", "▁Min i", "▁Mi ni", "▁B ring", "▁Br ing", ">< ?", "> %", "▁P and", "▁Pan d", "▁Pa nd", "▁S UB", "▁SU B", "▁ SUB", "▁compan ions", "▁companion s", "▁RE AD", "▁ READ", "▁S olutions", "▁Solution s", "▁acc essed", "▁access ed", "▁p osto", "▁pos to", "▁po sto", "▁post o", "▁pur suit", "▁purs uit", "ow i", "o wi", "▁gro cery", "Sp e", "S pe", "haus en", "▁normal ized", "▁tra uma", "gg i", "g gi", "ie nia", "ien ia", "▁aut umn", "▁so vere", "▁sov ere", "▁Men schen", "▁Mens chen", "▁D AG", "▁DA G", "▁S ort", "▁So rt", "▁Sor t", "▁ Sort", "| ---", "▁l iver", "▁li ver", "▁live r", "▁liv er", "▁ liver", "env iron", "DE CL", "DEC L", "▁ма й", "▁N ak", "▁Na k", "bet ween", "▁gentle man", "in ging", "ing ing", "▁su bur", "▁sub ur", "ST O", "S TO", "ace ut", "\\ !", "▁Fuß ball", "na r", "n ar", "▁b og", "▁bo g", "Token s", "Tok ens", "▁cer emon", "▁cere mon", "DA Y", "D AY", "▁out fit", "▁agric ulture", "ди и", "▁N in", "▁Ni n", "▁Sp rings", "▁Spring s", "▁Spr ings", "▁Co ach", "▁d jango", "▁ django", "▁C rim", "▁Cr im", "▁te cn", "Th ree", "em os", "e mos", "▁be an", "▁ bean", "pi eler", "pie ler", "p ieler", "ri tz", "rit z", "r itz", "ta bs", "tab s", "t abs", "▁Pro blem", "in and", "ina nd", "oc on", "oco n", "o con", "њ и", "▁bu yer", "▁buy er", "us ement", "use ment", "▁b or", "▁bo r", "▁ bor", "▁sett embre", "pp e", "p pe", "▁D eg", "▁De g", "▁W a", "▁w ives", "▁fr anzös", "▁mar ca", "▁des cent", "▁desc ent", "▁S ha", "▁Sh a", "ver ts", "vert s", "v erts", "▁Sh adow", "▁ Shadow", "▁Hug o", "▁Hu go", "▁A ppe", "▁App e", "▁Ap pe", "▁ Appe", "▁L ac", "▁La c", "al len", "all en", "alle n", "os ity", "osi ty", "▁consult ation", "▁T i", "▁er ano", "▁era no", "▁eran o", 
"▁lo vers", "▁love rs", "▁lov ers", "▁lover s", "▁уни версите", "▁virt ue", "▁view ers", "M u", "c ategories", "▁о пера", "▁over look", "▁overl ook", "▁тер рито", "▁Oper ations", "▁Operation s", "▁ Operations", "è ve", "- (", "▁ Ż", "je v", "j ev", "▁c rist", "▁cr ist", "▁cris t", "▁cri st", "▁мар та", "▁pro vin", "▁prov in", "product ion", "produ ction", "prod uction", "p roduction", "▁T all", "▁Tal l", "▁Ta ll", "Requ ests", "Request s", "▁t iles", "▁til es", "▁tile s", "▁ti les", "ref lect", "▁ar gc", "▁arg c", "▁ argc", "▁t emplates", "▁templ ates", "▁template s", "▁ templates", "AR B", "A RB", "▁weiter e", "▁weit ere", ")? ;", ") ?;", "▁t oll", "▁to ll", "▁correspond ence", "$ ;", "L T", "▁t am", "▁ta m", "de cess", "dec ess", "built in", "da sh", "das h", "d ash", "ze nie", "zen ie", "▁mole cular", "▁chem icals", "▁chemical s", "▁rend ering", "▁render ing", "▁Sing les", "▁Sin gles", "▁Single s", "Init ialized", "Initial ized", "Initialize d", "▁Mar tha", "▁Mart ha", "ri ere", "rie re", "rier e", "r iere", "par agraph", "para graph", "as ters", "ast ers", "aster s", "aste rs", "a sters", "▁dec ides", "▁decide s", "▁decid es", "▁Flor ence", "▁Flo rence", "▁Floren ce", "▁And ers", "▁An ders", "мо й", "▁a pt", "▁ap t", "▁ apt", "▁affili ate", "ch el", "che l", "c hel", "▁re vision", "▁rev ision", "Pat ch", "P atch", "▁fi scal", "▁fis cal", "wi ę", "w ię", "N ational", "▁depend encies", "TRAN S", "TRA NS", "▁r ack", "▁rac k", "▁ra ck", "sel ling", "s elling", "na issance", "c atalog", "Sh ip", "S hip", "IM AGE", "I MAGE", "'] [", "' ][", "▁p rv", "▁pr v", "▁F en", "▁Fe n", "▁rad ar", "▁ra dar", "cond itions", "condition s", "▁Quest ions", "▁Question s", "▁v ivid", "▁vi vid", "▁viv id", "op f", "o pf", "FA CE", "F ACE", "ry s", "r ys", "Ex tract", "Ext ract", "Extra ct", "il ians", "ili ans", "ilia ns", "pl ug", "▁a té", "▁at é", "и л", "▁like wise", "▁L il", "▁Li l", "▁Cam peonato", "AUT O", "AU TO", "▁M eta", "▁Me ta", "▁Met a", "▁ Meta", "re no", "ren o", "r eno", "▁Trans fer", "▁ Transfer", "▁Mich elle", "▁Michel le", "▁Miche lle", "bi s", "b is", "ń st", "зо н", "з он", "▁C ultural", "com pass", "comp ass", "▁my sql", "▁ mysql", "▁cancel led", "▁cancell ed", "▁ ’", "to o", "t oo", "▁re bell", "▁reb ell", "▁rebel l", "ég e", "é ge", "os z", "o sz", "▁com poser", "▁comp oser", "▁compos er", "}\" )", "} \")", "▁des erves", "▁deserve s", "▁oh ne", "▁J ed", "▁Je d", "K ernel", "▁pract ition", "▁in door", "▁ind oor", "▁config urations", "▁configuration s", "▁m eth", "▁me th", "▁met h", "+ (", "Quest ion", "▁bl own", "▁blo wn", "▁blow n", ") '", "▁Ar gs", "▁Arg s", "▁ Args", "F ake", "▁d even", "▁de ven", "▁dev en", "istrz ost", "na io", "▁\" {", "▁L it", "▁Li t", "com ed", "co med", "come d", "c omed", "▁st am", "▁sta m", "▁pl ugins", "▁plugin s", "▁plug ins", "▁ plugins", "▁travel ling", "▁trav elling", "na ire", "n aire", "▁aut onom", "▁auto nom", "STRU CT", "n h", "né es", "née s", "n ées", "▁consider ably", "ко р", "к ор", "B G", "▁lad der", "▁h ast", "▁has t", "▁ha st", "iz ado", "iza do", "▁s ele", "▁se le", "▁sel e", "▁W ere", "▁We re", "▁Wer e", "ar don", "ard on", "ardo n", "B ank", "bund le", "b undle", "▁anticip ated", "▁C ot", "▁Co t", "▁else if", "▁ elseif", "▁Bl ues", "▁Blue s", "▁fil tered", "▁filter ed", "▁a uction", "▁au ction", "ed uc", "edu c", "e duc", "▁Ex pression", "▁Express ion", "▁Exp ression", "▁ Expression", "in x", "i nx", "▁s ucks", "▁su cks", "▁suc ks", "▁suck s", "▁ма я", "EL L", "E LL", "ющи й", "▁Hud son", "it ä", "на ми", "▁fem me", "in ho", "▁e vt", "▁ev 
t", "▁ evt", "istribut ions", "istribution s", "▁r uss", "▁ru ss", "▁rus s", "▁pet ition", "▁petit ion", "▁г ла", "▁ гла", "Si g", "S ig", "▁T ut", "▁Tu t", "Part ial", "Ent ities", "▁b ears", "▁be ars", "▁bear s", "▁h ollow", "▁hol low", "__ [\"", "▁R is", "ț ă", "dim s", "di ms", "d ims", "▁compl ained", "▁complain ed", "▁m apped", "▁map ped", "▁ma pped", "▁авгу ста", "▁initi atives", "▁initiative s", "▁own s", "ch ez", "che z", "▁dis pon", "▁disp on", "▁m ush", "▁mus h", "▁mu sh", "q s", "▁er folg", "▁Nor weg", "▁c et", "▁ce t", "im ag", "ima g", "i mag", "▁исто ри", "▁ни х", "▁ них", "Un til", "U ntil", "▁s talk", "▁st alk", "▁П ра", "uv o", "u vo", "ie rz", "ier z", "ri eben", "rie ben", "rieb en", "X T", "ic als", "ical s", "ica ls", "std out", "▁extra cted", "▁extract ed", "▁Im ages", "▁Image s", "▁ Images", "un def", "und ef", "unde f", "u ndef", "▁L é", "▁accommod ation", "▁T ouch", "▁To uch", "▁ Touch", "▁intent ions", "▁intention s", "▁concent rated", "▁concentr ated", "▁concentrate d", "▁Насе ление", "▁ut ilis", "▁util is", "▁сле д", "▁ след", "li f", "l if", "▁comp ris", "▁compr is", "▁с бор", "med ium", "medi um", "St ates", "State s", "Stat es", "▁Би ография", "▁Fa ith", "U A", "ADD RESS", "▁r ated", "▁rate d", "▁rat ed", "▁ra ted", "▁ rated", "▁R ena", "▁Re na", "▁Ren a", "▁C ache", "▁Ca che", "▁ Cache", "▁pe que", "▁un used", "▁unus ed", "▁ unused", "ni m", "n im", "ol ding", "old ing", "▁N r", "R ay", "ur ls", "url s", "▁em issions", "▁emission s", "I r", "▁m å", "be ar", "b ear", "▁L ub", "▁Lu b", "▁Out side", "min ded", "mind ed", "▁PRO VID", "▁s ó", "▁civil ian", "Find er", "Fin der", "Fi nder", "F inder", "▁achie ving", "mod ified", "la ne", "lan e", "l ane", "Se nder", "Send er", "S ender", "▁Cr ime", "▁Crim e", "REQ UI", "▁open ly", "▁Belg ium", "ic ity", "ici ty", "icit y", "i city", "▁M az", "▁Ma z", "▁st agger", "▁stag ger", "}} $,", "}}$ ,", "} }$,", "na te", "nat e", "n ate", "'' '", "' ''", "▁Ge off", "ll i", "l li", "Su ite", "▁D istribution", "▁я кий", "Com bo", "Comb o", "ho oks", "hook s", "▁F ight", "▁Fig ht", "▁Fi ght", "Set s", "Se ts", "S ets", "▁m k", "▁ mk", "▁gu ides", "▁guide s", "▁guid es", "▁princip ale", "▁principal e", "Pre ferences", "ti ny", "t iny", "ap pen", "app en", "appe n", "a ppen", "▁ru ined", "▁ruin ed", "▁sl iding", "▁slid ing", "▁Z en", "▁Ze n", "▁oct ubre", "pos er", "po ser", "pose r", "p oser", "▁F lag", "▁Fl ag", "▁ Flag", "▁b oom", "▁bo om", "▁Det ect", "▁activ ation", "▁обра зова", "▁entertain ing", "▁entert aining", "▁protect ive", "ál l", "á ll", "▁Fl ash", "▁mid st", "▁mi dst", "ствен ной", "▁Ph D", "ij ing", "iji ng", "cl ub", "get C", "▁tro uve", "▁trou ve", "am bers", "amb ers", "amber s", "▁g reed", "▁gr eed", "▁gre ed", "am arin", "ama rin", "amar in", "▁suspic ious", "▁susp icious", "▁dep uty", "▁deput y", "as per", "asp er", "▁fun ded", "▁fund ed", "al one", "alo ne", "▁t ract", "▁tr act", "▁tra ct", "▁ tract", "▁R ating", "▁Ra ting", "▁Rat ing", "ad ays", "ada ys", "a days", "▁st att", "▁stat t", "▁sta tt", "▁Priv acy", "▁_ _(", "▁__ (", "▁ __(", "▁f ights", "▁fight s", "á j", "\\ ]", "ag h", "a gh", "or na", "orn a", "▁Diam ond", "▁pro totype", "▁proto type", "▁prot otype", "▁ prototype", "▁Str ateg", "ha do", "had o", "h ado", "▁l ungs", "▁lung s", "▁lun gs", "Pro totype", "Proto type", "ließ lich", "▁d ive", "▁di ve", "▁div e", "co v", "c ov", "▁M ist", "▁Mi st", "▁Mis t", "▁T ypes", "▁Type s", "▁Ty pes", "▁Typ es", "▁ Types", "▁di agonal", "▁p review", "▁pre view", "▁prev iew", "▁ preview", "▁Cont ainer", "▁ 
Container", "DESC RIP", "▁brit ann", "▁C ord", "▁Co rd", "▁Cor d", "ak ov", "ako v", "a kov", "▁far ming", "▁farm ing", "▁p ère", "▁k ills", "▁kill s", "▁kil ls", "▁Car ib", "▁Ca rib", "ћ и", "▁А л", "? ;", "▁пи са", "▁ писа", "▁En sure", "par sed", "parse d", "pars ed", "än ge", "äng e", "▁D elta", "▁Del ta", "▁ Delta", "▁g aining", "▁gain ing", "▁ga ining", "▁n oting", "▁not ing", "▁no ting", "▁B arb", "▁Bar b", "▁Ba rb", "▁фев ра", "▁фе вра", "Em p", "E mp", "▁{ })", "▁{} )", "▁ {})", "▁sy ntax", "▁syn tax", "▁synt ax", "W alk", "▁P ere", "▁Per e", "▁Pe re", "Is Null", "▁U V", "▁ UV", "▁ret val", "▁ retval", "▁sim plicity", "▁simpl icity", "▁rein force", "Lin q", "▁diff usion", "▁dis orders", "▁disorder s", "ât re", "â tre", "ui ty", "uit y", "u ity", "▁hel pless", "▁help less", "Me asure", "▁com pression", "▁comp ression", "▁compr ession", "▁compress ion", "▁Co al", "olut ely", "olute ly", "og ue", "o gue", "▁up ward", "▁Block ly", "▁b ride", "▁br ide", "parse Int", "▁is olation", "▁isol ation", "▁regul atory", "ș ti", "ric ane", "м б", "▁с ло", "▁ сло", "▁sa lad", "▁sal ad", "we i", "w ei", "▁B asket", "▁Bas ket", "▁M ON", "▁MO N", "▁ MON", "\"> &", "\" >&", "do ors", "door s", "▁K ill", "▁Kil l", "▁Ki ll", "▁conspir acy", "▁M iles", "▁Mil es", "▁Mi les", "wa nt", "wan t", "w ant", "Mod ifier", "▁batter ies", "▁batt eries", "iv as", "iva s", "i vas", "▁att endance", "▁attend ance", "▁AUT H", "▁AU TH", "▁ AUTH", "▁с ві", ".. .,", "... ,", "▁aggreg ate", "▁de struct", "▁dest ruct", "▁four teen", "▁м ет", "▁ме т", "▁ мет", "▁both ered", "▁bother ed", "el te", "elt e", "e lte", "▁m ism", "▁mis m", "▁mi sm", "▁res ting", "▁rest ing", "▁P ars", "▁Par s", "▁Pa rs", "▁ Pars", "▁id le", "▁ idle", "▁d eren", "▁de ren", "▁der en", "▁dere n", "▁di ary", "▁dia ry", "▁v ague", "▁va gue", "▁vag ue", "▁margin al", "▁marg inal", "Wr it", "W rit", "Bo t", "B ot", "▁Met ro", "▁e arning", "▁earn ing", "▁ear ning", "hist oire", "his toire", "▁end orse", "▁be ard", "▁bear d", "▁Chair man", "ie b", "i eb", "▁neut r", "▁neu tr", "▁am bit", "▁amb it", "▁Leon ard", "ban ds", "band s", "b ands", "▁D ale", "▁Da le", "▁Dal e", "▁ver ified", "Al gorithm", "Enumer able", "op code", "cast le", "cas tle", "š e", "▁Venez uela", "▁de scriptions", "▁des criptions", "▁description s", "▁value d", "▁val ued", "▁chapter s", "▁chap ters", "▁I ls", "▁Il s", "▁cl arity", "▁clar ity", "▁tour ists", "▁tourist s", "Da n", "D an", "▁t ribe", "▁tr ibe", "▁tri be", "▁trib e", "▁г и", "▁ ги", "fol k", "f olk", "ac cur", "acc ur", "▁St ack", "▁Sta ck", "▁ Stack", "▁adv ocate", "▁advoc ate", "▁G ene", "▁Ge ne", "▁Gen e", "Im ages", "Image s", "▁rig id", "▁con greg", "▁congr eg", "▁start up", "▁dead line", "co uld", "cou ld", "c ould", "▁beg ann", "▁began n", "▁cal ci", "▁calc i", "▁Cir cle", "▁Circ le", "▁in cons", "▁inc ons", "▁incon s", "aaaa aaaa", "▁rub bed", "ape ut", "ua rio", "uar io", "u ario", "worth y", "wor thy", "wort hy", "▁уча сти", "▁участ и", "▁fam ília", "▁synchron ized", "▁unf air", "rs p", "r sp", "▁soc ieties", "▁societ ies", "bo at", "gr o", "g ro", "▁k at", "▁ka t", "▁ kat", "▁p oker", "▁po ker", "▁pok er", "▁l ocks", "▁loc ks", "▁lo cks", "▁lock s", "▁G F", "▁ GF", "▁re conc", "▁recon c", "▁Maur ice", "▁Mau rice", "__( /*!", "▁ble eding", "äs ident", "▁по след", "▁после д", "▁deriv ative", "ша я", "cc ió", "c ció", "▁cr ushed", "▁crush ed", "▁tempor arily", "▁co aches", "▁coach es", "▁Mo vement", "▁Move ment", "▁Mov ement", "}} $.", "}}$ .", "} }$.", "▁K yle", "▁Ky le", "▁S ohn", "▁So hn", "▁cre ator", "▁creat 
or", "ind ust", "▁E rik", "▁Er ik", "▁se iz", "▁sei z", "▁dim ensional", "▁dimension al", "▁ dimensional", "▁I st", "▁Is t", "▁pre val", "▁pr eval", "▁prev al", "he ads", "head s", "▁про ти", "▁determ ines", "▁determine s", "▁determin es", "eg y", "e gy", "▁U INT", "▁UI NT", "▁ UINT", "▁V olk", "▁Vol k", "pa wn", "p awn", "Ph oto", "▁C olin", "▁Col in", "▁Co lin", "ap propri", "app ropri", "ort ion", "st eller", "stell er", "É tat", "▁im ply", "▁imp ly", "▁impl y", "▁t outes", "▁to utes", "▁tou tes", "▁tout es", "▁toute s", "VO L", "V OL", "an ing", "ani ng", "a ning", "Tool tip", "ig ious", "igi ous", "▁e ternal", "▁etern al", "▁P oz", "▁Po z", "▁bank rupt", "▁fail ures", "▁failure s", "uer te", "▁вре ме", "zu ng", "z ung", "▁t cp", "▁tc p", "▁ tcp", "▁cont ainers", "▁contain ers", "▁container s", "ou sel", "ous el", "ouse l", "▁H IV", "▁con ced", "▁conc ed", "▁conce d", "▁sept iembre", "gi rl", "g irl", "▁C ho", "▁Ch o", "▁f az", "▁fa z", "▁Up per", "▁ Upper", "▁For ces", "▁Force s", "äh lt", "in ject", "Re ceived", "MA T", "M AT", "ag lia", "ów nie", "ówn ie", "/ '", "▁p ip", "▁pi p", "▁G est", "▁Ge st", "▁Ges t", "▁l ado", "▁la do", "▁lad o", "▁compat ibility", "▁m are", "▁mar e", "▁ma re", "▁ mare", "▁Cle arly", "▁Clear ly", "vers ation", "Ver s", "V ers", "▁ch ick", "▁chi ck", "▁organ ize", "▁organiz e", "▁econom ics", "▁economic s", "▁ancest ors", "ME D", "M ED", "▁sc rub", "▁scr ub", "▁label ed", "▁lab eled", "▁п р", "▁S uz", "▁Su z", "▁A str", "▁As tr", "▁Ast r", "allow een", "allo ween", "rh s", "r hs", "as ci", "asc i", "▁C ancer", "▁Can cer", "▁H unt", "▁Hun t", "▁Hu nt", "▁switch ing", "▁R idge", "Se q", "S eq", "▁gi ugno", "bus iness", "▁char ming", "▁charm ing", "▁I o", "▁ Io", "▁prés ident", "ek ing", "e king", "í l", "en h", "e nh", "pr it", "p rit", "erc ise", "án ak", "á nak", "▁х ра", "▁ хра", "▁b ugs", "▁bu gs", "▁bug s", "▁жи во", "▁light ning", "▁never theless", "▁length s", "G U", "H idden", "Act or", "Ac tor", "A ctor", "To pic", "Top ic", "T opic", "▁H orse", "▁Hor se", "ћ е", "el ines", "eline s", "eli nes", "elin es", "e lines", "▁trag edy", "▁traged y", "int endo", "▁abund ance", "▁ev ac", "it ably", "+\\ _\\", "▁rec ib", "ua ted", "uate d", "u ated", "рі ї", "▁fool ish", "▁foo lish", "▁t m", "▁ tm", "▁des pair", "▁desp air", "TO KEN", "▁comp romise", "▁comprom ise", "▁Person en", "▁Pers onen", "▁investig ated", "▁investigate d", "▁ex clude", "▁excl ude", "▁telev is", "▁tele vis", "▁pull s", "▁pul ls", "▁according ly", "▁accord ingly", "▁f ő", "▁Le ave", "▁ Leave", "oper ations", "operation s", "cri m", "cr im", "c rim", "▁r hs", "▁rh s", "▁ rhs", "▁form ally", "▁formal ly", "▁L ily", "▁Li ly", "▁Lil y", "▁Com ments", "▁Comm ents", "▁Comment s", "▁se ptember", "▁sept ember", "ie fs", "ief s", "▁tre asure", "Http Servlet", "ді в", "д ів", "▁dis claimer", "lu ss", "l uss", "▁ка о", "ro gen", "rog en", "r ogen", "▁Start ing", "▁Star ting", "▁d ém", "▁dé m", "▁select ing", "▁ ↘", "▁О н", "▁Pract ice", "▁p orte", "▁por te", "▁port e", "▁ porte", "▁as sure", "▁ass ure", "▁frustr ated", "S ink", "▁A ri", "▁Ar i", "▁esc ort", "ais es", "ai ses", "aise s", "a ises", "▁b ush", "▁bu sh", "▁bus h", "▁Se ine", "▁F ill", "▁Fil l", "▁Fi ll", "▁ Fill", "▁S ull", "▁Su ll", "▁Sul l", "Do t", "D ot", "vi l", "v il", "un ing", "uni ng", "u ning", "Render ing", "Rend ering", "sh ake", "sha ke", "пи си", "пис и", "pt e", "p te", "▁b end", "▁be nd", "▁ben d", "▁jewel ry", "▁Stock holm", "▁Hon estly", "! 
[", "▁array s", "▁arr ays", "▁War ner", "▁sh aft", "▁sha ft", "▁C ann", "▁Can n", "▁Ca nn", "▁Pitt sburgh", "ir ical", "iri cal", "i rical", "au tre", "aut re", "▁R ück", "▁gen naio", "▁Ш а", "an nte", "ann te", "annt e", "ps hire", "p shire", "но логи", "н ологи", "ét a", "é ta", "▁pr inter", "▁print er", "▁prin ter", "▁dam ages", "▁damage s", "▁Isa ac", "▁Famil ie", "Code s", "Co des", "C odes", "th rift", "no b", "n ob", "▁c av", "▁ca v", "▁techn ically", "▁technical ly", "▁I mm", "▁Im m", "▁tr icks", "▁tri cks", "▁trick s", "EA R", "E AR", "▁Sub ject", "▁ Subject", "▁ne eding", "▁need ing", "▁G ir", "▁Gi r", "Bo ard", "B oard", "▁re he", "▁rem inder", "▁remind er", "▁sh iver", "K it", "▁strugg les", "▁struggle s", "▁gen om", "▁ge nom", "im il", "imi l", "i mil", "Reg istration", "▁gl oves", "▁glo ves", "▁Z ur", "▁Zu r", "▁B eg", "▁Be g", "▁in clusive", "▁incl usive", "/ ,", "og an", "oga n", "o gan", "po que", "cont rib", "contr ib", "ши н", "ш ин", "▁M ama", "▁Ma ma", "▁Mam a", "print s", "▁re named", "▁ren amed", "ють ся", "ю ться", "net dev", "▁comp ile", "▁ compile", "▁ §", "M UL", "▁dr aws", "▁draw s", "co ck", "c ock", "▁сво и", "▁M um", "▁Mu m", "sp ieler", "spi eler", "s pieler", "▁n ail", "▁na il", "▁ nail", "▁trans it", "▁S aw", "▁Sa w", "▁com press", "▁comp ress", "▁compre ss", "▁compr ess", "▁ compress", "▁purch ases", "▁purchase s", "▁per forms", "▁perform s", "▁dem ol", "▁demo l", "▁comm ence", "▁C B", "▁ CB", "▁A ber", "▁Ab er", "▁c ush", "▁cu sh", "▁ком п", "▁ру ко", "▁Muham mad", "▁Net flix", "▁Environment al", "No rm", "N orm", "▁w ir", "null ptr", "▁refuge es", "до н", "д он", "▁B irmingham", "New s", "Ne ws", "▁В се", "Or ient", "O rient", "As sembly", "▁introdu cing", "fin der", "find er", "fi nder", "f inder", "▁scholar ship", "▁scholars hip", "▁ос нова", "▁основ а", "if a", "i fa", "Si ng", "S ing", "ib lic", "ibli c", "i blic", "istribut ed", "istribute d", "▁depart ments", "▁department s", "CR EF", "CRE F", "C REF", "▁Malays ia", "CO NF", "CON F", "▁Cl aud", "▁Bu ilt", "▁ Built", "RAN GE", "Re direct", "Red irect", "LE ASE", "-- -------", "---- -----", "-------- -", "--- ------", "----- ----", "------ ---", "------- --", "- --------", "▁П у", "▁n umpy", "▁num py", "▁project ed", "▁remind s", "▁- *-", "ib ling", "ibli ng", "i bling", "▁s lower", "▁sl ower", "▁slow er", "op p", "o pp", "ro pic", "rop ic", "r opic", "▁Mont real", "▁detect ive", "TH READ", "▁qu é", "▁R osa", "▁Ro sa", "▁Ros a", "▁seven th", "▁sevent h", "Col ors", "Color s", "de mo", "dem o", "▁E sta", "▁Est a", "▁Es ta", "ff f", "f ff", "ick ets", "icket s", "Gr e", "G re", "á b", "bo ost", "▁Go ing", "▁Su ite", "▁ Suite", "▁adapt ation", "▁j ours", "▁jour s", "▁jo urs", "▁jou rs", "▁ jours", "▁Or th", "▁Ort h", "х і", "Fig ure", "▁su pers", "▁sup ers", "▁super s", "▁access ories", "we ak", "▁dist ress", "fr ied", "f ried", "▁go og", "ка з", "▁far mer", "▁farm er", "it ational", "itation al", "itat ional", "Go ld", "G old", "▁ass hole", "▁assh ole", "▁Cont roller", "▁Control ler", "▁ Controller", "▁ар хи", "To o", "T oo", "▁mol to", "▁p ropri", "▁prop ri", "▁ propri", "▁al go", "▁alg o", "Af f", "A ff", "re sc", "res c", "r esc", "▁D y", "▁con gr", "▁T es", "▁Te s", "▁W IN", "▁ WIN", "de serialize", "des erialize", "sy n", "s yn", "▁chem istry", "m iddle", "▁an imated", "▁anim ated", "▁K um", "▁Ku m", "file Name", "Amer ica", "▁dr ums", "▁dru ms", "▁drum s", "▁program a", "▁n ej", "▁ne j", "▁ nej", "Read Only", "▁Б ра", "-- -----", "---- ---", "--- ----", "----- --", "------ -", "- ------", "Mut 
ex", "Mu tex", "un ned", "unn ed", "ynam ics", "ynamic s", "co system", "cos ystem", "▁R ect", "▁Re ct", "▁Rec t", "▁ Rect", "▁an ime", "▁anim e", "▁I BM", "▁need le", "es ser", "ess er", "esse r", "▁incl u", "▁inc lu", "Le an", "tr aining", "tra ining", "train ing", "▁b our", "▁bo ur", "▁bou r", "▁ bour", "ab ases", "abase s", "aba ses", "▁tak że", "wa rz", "war z", "w arz", "▁ste pping", "▁step ping", "▁T IME", "▁ TIME", "▁Ein stein", "▁Log in", "▁Lo gin", "▁ Login", "pon ential", "ponent ial", "De ad", "D ead", "in str", "ins tr", "inst r", "▁ne ural", "▁neu ral", "▁neur al", "▁ub ic", "▁Init ialized", "▁Initialize d", "▁Initial ized", "▁ Initialized", "▁facil itate", "G D", "}{ (", "} {(", "D ark", "▁n ag", "▁na g", "min ipage", "Size s", "Si zes", "S izes", "▁w orm", "▁wor m", "▁wo rm", "bi as", "bia s", "b ias", "Su ch", "S uch", "wick lung", "▁sp ouse", "▁spo use", "▁surviv ors", "er st", "ers t", "at ype", "aty pe", "a type", "}) $,", "})$ ,", "} )$,", "▁n l", "▁ nl", "▁cogn itive", "▁o nde", "▁on de", "▁ onde", "▁en abling", "▁soc iet", "▁soci et", "▁c lan", "▁cl an", "▁ex cluded", "▁excl uded", "▁exclude d", "▁th under", "▁ent ropy", "▁entr opy", "▁fast est", "RE EN", "REE N", "▁Vien na", "▁fl owing", "▁flo wing", "▁flow ing", "▁aff irm", "al om", "alo m", "▁h ips", "▁hi ps", "▁hip s", "▁can nab", "▁st icks", "▁stick s", "▁cur riculum", "▁ret ained", "▁retain ed", "▁ext ending", "▁extend ing", "ó z", "he aded", "head ed", "ex c", "e xc", "▁je ho", "▁for ests", "▁fore sts", "▁forest s", "ma nia", "man ia", "m ania", "▁C anal", "▁Can al", "▁Ca nal", "▁S out", "▁So ut", "▁Sou t", "▁B ahn", "▁Ba hn", "▁Bah n", "▁T EXT", "▁TE XT", "▁ TEXT", "▁др жа", "▁User s", "▁Us ers", "▁Use rs", "▁ Users", "▁G EN", "▁ GEN", "sl ash", "ben falls", "Text Field", "▁r av", "▁ra v", "▁ rav", "▁continu ously", "▁continuous ly", "IT ER", "ITE R", "I TER", "▁Jen ny", "▁Jenn y", "ch os", "cho s", "c hos", "▁am big", "▁amb ig", "▁ж ур", "Aut ow", "Auto w", "▁V eter", "▁Ve ter", "▁dest in", "H om", "au ge", "aug e", "a uge", "▁com mod", "▁comm od", "▁gar lic", "< =", "▁dram atically", "▁dramatic ally", "CA N", "C AN", "an cers", "ance rs", "anc ers", "ancer s", "() }", "( )}", "gh ai", "▁tw ee", "▁twe e", "▁сент ября", "GP U", "G PU", "▁B omb", "▁Bo mb", "▁young est", "▁c age", "▁ca ge", "ok s", "o ks", "ic hes", "ich es", "iche s", "i ches", "▁T ests", "▁Te sts", "▁Test s", "▁Tes ts", "▁ Tests", "sk ý", "cur y", "cu ry", "c ury", "na ls", "nal s", "n als", "ț a", "▁V oice", "▁Vo ice", "Depend ency", "v f", "e ous", "▁Z a", "▁am ateur", "▁G host", "▁Gh ost", "▁dis ability", "▁Вла ди", "▁rev enge", "▁reven ge", "Trans lation", "▁cour tesy", "ски я", "▁bl ob", "▁blo b", "▁ blob", "ä ß", "ó j", "▁print s", "▁prin ts", "▁ prints", "▁pro ves", "▁pr oves", "▁prov es", "▁prove s", ">? 
[<", "▁ut ils", "▁util s", "▁ utils", "ty pen", "type n", "typ en", "▁t erra", "▁ter ra", "▁terr a", "▁ terra", "▁min eral", "▁mine ral", "▁miner al", "▁war rior", "▁ме ст", "▁D S", "▁ DS", "Em b", "E mb", "get Data", "ли чи", "лич и", "▁sa fer", "▁saf er", "▁safe r", "▁com une", "▁comun e", "▁hier archy", "Cred entials", "res se", "ress e", "r esse", "gr av", "gra v", "g rav", "lo gs", "log s", "l ogs", "br os", "bro s", "b ros", "BUT TON", "lit eral", "liter al", "l iteral", "▁S r", "an tal", "ant al", "anta l", "▁mer cy", "▁merc y", "DA P", "D AP", "▁Mag gie", "▁sust ained", "▁sustain ed", "N M", "Re view", "Rev iew", "▁Buen os", "▁de aler", "▁deal er", "en es", "ene s", "e nes", "▁file Name", "▁ fileName", "bb ra", "b bra", "ро ма", "ром а", "Inst all", "▁Mor ning", "LE T", "L ET", "ip a", "i pa", "G a", "го в", "г ов", "▁Sche dule", "▁ Schedule", "▁rep orters", "▁report ers", "▁reporter s", "▁pecul iar", "▁sup plier", ")$ -", ") $-", "ë l", "▁roll s", "▁né cess", "▁p reg", "▁pre g", "▁pr eg", "▁Re yn", "▁sur render", "▁contribut ing", ")+ \\", ") +\\", "PRO P", "PR OP", "P ROP", "▁dec imal", "▁Town ship", "gr p", "g rp", "▁terror ist", "pt o", "p to", "on en", "one n", "o nen", "▁Polit ics", "▁Pe arl", "▁Pear l", "▁pil low", "▁pill ow", "▁gr ades", "▁grad es", "▁grade s", "▁gra des", "▁ grades", "TH E", "T HE", "▁num ero", "▁numer o", "▁nu mero", "i NdEx", "M igration", "PE ND", "P END", "ph oto", "▁cent ered", "▁center ed", "▁r het", "▁rh et", "egr ünd", "▁laund ry", "get Node", "▁est imation", "▁estim ation", "▁I v", "▁wh oles", "▁who les", "▁whole s", "ше ния", "▁const itutional", "▁constitution al", "am ination", "amin ation", "▁Municip al", "ad t", "a dt", "th y", "t hy", "▁pub li", "▁di cembre", "▁dic embre", "▁dice mbre", "` )", "▁Ch rome", "ef e", "e fe", "con g", "co ng", "c ong", "bre aking", "break ing", "at ched", "atch ed", "es tr", "est r", "e str", "▁i di", "▁id i", "▁ idi", "VER Y", "V ERY", "▁app el", "▁ap pel", "▁appe l", "▁Techn ical", "tc x", "t cx", "DO UBLE", "se k", "s ek", "hu ng", "h ung", "▁A ur", "▁Au r", "coll apse", "▁adv ise", "▁advis e", "▁Pr imary", "▁Pri mary", "▁Prim ary", "▁ Primary", "ia z", "i az", "▁a nten", "▁an ten", "▁ant en", "▁ante n", "▁ anten", "▁bro ader", "▁broad er", "▁ju nio", "▁jun io", "▁juni o", "▁w ool", "▁wo ol", "▁hat red", "▁ex agger", "Con v", "Co nv", "kt ur", "▁em peror", "▁Pack age", "▁ Package", "TD M", "T DM", "\\{ \\", "\\ {\\", "whe el", "▁fe as", "▁js ou", "", "< ?>", "INST ANCE", "▁ch ant", "▁cha nt", "▁ chant", "▁Re fer", "▁Ref er", "▁S hir", "▁Sh ir", "▁ве ка", "▁Me eting", "▁Meet ing", "▁n v", "▁ nv", "▁associ ations", "▁association s", "it ations", "itation s", "itat ions", "or um", "o rum", "▁t ires", "▁ti res", "▁tire s", "▁tir es", "▁d ash", "▁da sh", "▁das h", "▁ dash", "▁} ));", "▁}) );", "ar to", "art o", "▁Ed inburgh", "W T", "▁inv ented", "▁invent ed", "ve h", "v eh", "▁Hind u", "▁Насе лення", "▁ur gent", "▁urg ent", "▁urge nt", "text color", "we rp", "wer p", "▁det ector", "▁detect or", "▁al tered", "▁alt ered", "▁alter ed", "▁t b", "▁ tb", "▁N aval", "▁Na val", "▁Nav al", "▁mem br", "style sheet", "styles heet", "un ts", "unt s", "▁nut rition", "▁S ylv", "▁Sy lv", "▁e numer", "▁en umer", "▁enum er", "▁m ines", "▁min es", "▁mi nes", "▁mine s", "▁l itter", "▁lit ter", "▁litt er", "ž í", "con current", "▁sw allow", "Si r", "S ir", "tal k", "t alk", "▁de utschen", "▁deutsch en", "re peat", "▁dom ains", "▁domain s", "▁Mc Donald", "▁cand le", "▁pl ural", "▁sharp ly", "▁shar ply", "orig ine", "origin e", "▁c 
andy", "▁can dy", "▁cand y", "▁kilomet res", "▁power ed", "▁pow ered", "▁ powered", "▁s ep", "▁se p", "▁ sep", "▁S oci", "▁So ci", "▁Soc i", "▁Ber nie", "▁Bern ie", "GE NER", "GEN ER", "Ex per", "Exp er", "▁Al low", "▁All ow", "▁ Allow", "▁Ern st", "▁Re becca", "▁Cont ribut", "ro utes", "rou tes", "route s", "r outes", "▁s uffix", "▁suff ix", "▁ju lio", "▁jul io", "▁juli o", "▁provinc ial", "▁provincia l", "▁provin cial", "▁appreci ation", "Us ing", "U sing", "abs olute", "▁cr icket", "W ould", "▁Equip ment", "▁tort ure", "на х", "ut ton", "utt on", "че ство", "▁out break", "▁prevent ing", "▁mad re", "▁ret ire", "end region", "▁f ais", "▁fa is", "▁remember ing", "▁Al ban", "▁Alb an", "▁a rist", "▁ar ist", "▁work out", "▁u z", "▁ uz", "as to", "ast o", "a sto", "fort unate", "fortun ate", "▁p aste", "▁past e", "▁pas te", "▁pa ste", "▁M R", "▁ MR", "▁o tra", "▁ot ra", "S v", "an gen", "ang en", "ange n", "▁S ierra", "▁Si erra", "▁n au", "▁na u", "▁s era", "▁se ra", "▁ser a", "$ ~", "▁cos ì", ")( (", ") ((", "▁propos als", "▁proposal s", "it te", "itt e", "▁P ero", "▁Per o", "▁Pe ro", "▁te nant", "▁ten ant", "▁ tenant", "Y P", "▁Param eter", "▁ Parameter", "sp ell", "spe ll", "▁e merge", "▁emer ge", "▁g ek", "▁ge k", "ol ence", "olen ce", "ot os", "oto s", "o tos", "▁witness es", "▁watch es", "▁wat ches", "▁A ch", "▁Ac h", "Cr oss", "C ross", "▁янва ря", "; }", "▁O NE", "▁ON E", "▁ ONE", "▁care ers", "▁career s", "▁faith ful", "▁J our", "▁Jo ur", "▁Gener ate", "▁Gene rate", "▁ Generate", "▁ию ля", "▁recommend ation", "w b", "sk ich", "ski ch", "bold math", "▁orig ins", "▁origin s", "▁spin ning", "▁// \r", "▁bomb s", "▁bom bs", "min ister", "I o", "öl ker", "Autow ired", "um per", "ump er", "ich ael", "▁contribut ors", "▁contributor s", "▁n asty", "▁na sty", "▁nas ty", "▁nast y", "▁d rap", "▁dr ap", "▁Bud apest", "ur ious", "uri ous", "hi d", "h id", "▁wel comed", "▁welcome d", "▁w agon", "▁wa gon", "▁Ва си", "▁embarrass ed", "▁Har vey", "Lo s", "L os", "▁S ter", "▁St er", "▁Ste r", "▁enjoy able", "ör t", "ö rt", "Mill is", "-- )", "- -)", "▁d ashed", "▁das hed", "▁dash ed", "\"> < ?", "\" >' ,", "> ',", "▁all iance", "ic ism", "ici sm", "▁NAS A", "▁NA SA", "▁p ode", "▁po de", "▁pod e", "č ní", "▁respon ding", "▁respond ing", "▁bl owing", "▁blo wing", "▁blow ing", "ic ké", "ick é", "ва но", "ван о", "▁H off", "▁Ho ff", "▁Hof f", "MB ER", "M BER", "▁civil ization", "ar ía", "a ría", "Un lock", "ge ts", "get s", "g ets", "no d", "n od", "▁S TE", "▁ST E", "▁con science", "PE G", "ch anging", "chan ging", "▁Rich mond", "ling ton", "l ington", "ocr atic", "▁trav és", "▁ф ран", "▁up dating", "process ing", "Al ex", "A lex", "▁mil itar", "▁milit ar", "▁pse udo", "▁pseud o", "str len", "▁be have", "▁beh ave", "▁behav e", "▁distinct ive", "▁E c", "▁c x", "▁ cx", "▁journal ists", "▁journalist s", "vo lt", "vol t", "v olt", "▁sp un", "▁d urable", "▁dur able", "▁pro position", "▁propos ition", "▁ proposition", "thread s", "▁tw entieth", "▁ф і", "▁ фі", "en son", "ens on", "enso n", "▁self ish", "▁sel fish", "ar ium", "ari um", "a rium", "▁de cid", "▁dec id", "▁ха рак", "▁psy chiat", "▁psych iat", "g d", "Z Z", "ug u", "u gu", "▁i ds", "▁id s", "▁ ids", "Man aged", "▁Leg isl", "ancell ationToken", "▁gr ants", "▁gran ts", "▁grant s", "▁lie utenant", "▁lieu tenant", "▁Fle et", "** /", "* */", "▁T ig", "▁Ti g", "▁accept s", "▁system atic", ", {\\", "▁У кра", "▁aus ge", "▁dial ect", "▁dia lect", "st ri", "str i", "s tri", "er me", "erm e", "▁B esch", "▁Be sch", "▁Bes ch", "lo ve", "lov e", "l ove", "S ensor", 
"▁B IT", "▁ BIT", "▁т ру", "▁mist aken", "▁mistake n", "p v", "▁u tf", "▁ut f", "▁ utf", "▁[ \\", "▁ [\\", "▁Geb iet", "▁Mann schaft", "PAR AMETER", "▁u rb", "▁ur b", "▁ urb", "▁R eed", "▁Re ed", "▁c ough", "▁co ugh", "▁cou gh", "wa ld", "wal d", "w ald", "▁L amb", "▁La mb", "▁Lam b", "▁surv iving", "▁surviv ing", "▁s way", "▁sw ay", "▁с ве", "WI SE", "ä ger", "f y", "sk e", "s ke", "▁s og", "▁so g", "▁Im plement", "▁Imp lement", "▁ Implement", "获 取", "▁T ools", "▁To ols", "▁Tool s", "▁Too ls", "▁ Tools", "▁ne wer", "▁new er", "▁exempl e", "▁exem ple", "▁l itt", "▁li tt", "▁lit t", "▁вы пу", "▁у прав", "Em itter", "Emit ter", "IS ING", "I SING", "▁органи за", "▁М і", "▁Ex amples", "▁Example s", "▁I con", "▁ Icon", "Get ter", "▁L ay", "▁La y", "▁Col lect", "▁Coll ect", "▁ Collect", "Sa int", "S aint", "or able", "ora ble", "▁f ick", "▁fi ck", "ik h", "i kh", "sl ave", "▁c lay", "▁cl ay", "▁W A", "▁ WA", "Re po", "Rep o", "▁Java Script", "it r", "i tr", "pa id", "p aid", "▁home work", "M iddleware", "▁r éal", "▁ré al", "▁при зна", "ê m", "ès e", "è se", "▁W ells", "▁Well s", "▁Wel ls", "▁e nero", "▁en ero", "▁ener o", "emperature n", "▁N aj", "▁Na j", "▁Re agan", "▁comp elling", "▁tri bes", "▁trib es", "▁tribe s", "▁to String", "▁ toString", "pace s", "pa ces", "p aces", "▁harm ful", "▁Con se", "▁Cons e", "od io", "odi o", "▁m im", "▁mi m", "get Item", "▁script s", "▁ scripts", "ra is", "rai s", "r ais", "▁Ph ase", "▁ Phase", "▁An swer", "▁$ |\\", "▁$| \\", "▁as sembled", "el in", "eli n", "e lin", "ph abet", "pha bet", "▁to ast", "▁tut ti", "▁tu tti", "▁be zeichnet", "Gre at", "G reat", "et tes", "ett es", "ette s", "e ttes", "▁дека бря", "F ULL", "▁re gener", "▁reg ener", "▁któ re", "го р", "г ор", "is ce", "isc e", "▁t oda", "▁to da", "▁tod a", "▁eth ical", "i q", "P t", "ar in", "ari n", "a rin", "ig os", "igo s", "i gos", "▁work shops", "▁workshop s", "▁R oche", "▁Ro che", "▁Roc he", "Get String", "мини стратив", "m ême", "▁D aw", "▁Da w", "ar ians", "ari ans", "aria ns", "arian s", "▁imp acts", "▁impact s", "▁por table", "▁port able", ")- \\", ") -\\", "sh ots", "shot s", "▁re lev", "▁rel ev", "▁rele v", "PR IV", "PRI V", "▁бу ла", "ard less", "ul ously", "ulous ly", "-- >", "- ->", "ol ent", "ole nt", "olen t", "▁э того", "▁это го", "▁Gener ic", "▁Gene ric", "▁ Generic", "▁* /,", "▁*/ ,", "▁comb inations", "▁combination s", "▁re jo", "с публи", "cap acity", "▁tr aces", "▁tra ces", "▁trace s", "▁op acity", "▁ opacity", "▁Off icial", "ic ion", "ici on", "icio n", "▁emotional ly", "▁emotion ally", "▁Jo el", "▁Joe l", "сько му", "▁legend ary", "▁p am", "▁pa m", "▁Tamb ién", ". 
<", "ib a", "i ba", "mi dt", "mid t", "бо м", "▁en suite", "Author ization", "P ag", "▁hel met", "▁ter rito", "▁terr ito", "second ary", "▁seg unda", "▁W ire", "▁Wi re", "rec ated", "▁inv oked", "▁invoke d", "▁Value Error", "▁ф о", "▁ фо", "AL IGN", "CUR RENT", "\\ +\\_\\", "▁comp ilation", "æ r", "▁Pal mar", "▁Palm ar", "▁influ ences", "▁influence s", "/ :", "M ix", "NO P", "N OP", "ec onom", "e conom", "▁t ucked", "▁} );\r", "▁}); \r", "▁}) ;\r", "▁ });\r", "AN K", "re ject", "▁p ension", "▁pens ion", "▁gener ates", "▁generate s", "ч ё", "▁in cap", "▁inc ap", "▁cl icked", "▁click ed", "▁f us", "▁fu s", "our ses", "ours es", "ourse s", "▁E aster", "▁East er", "% ;", "zi n", "z in", "▁oblig ations", "▁obligation s", "▁T ips", "▁Tip s", "▁Ti ps", "}; \r", "} ;\r", ".\" _", "▁B SD", "▁BS D", "át ica", "▁ex pose", "▁exp ose", "▁expos e", "Par s", "P ars", "▁Am anda", "ку п", "▁gu essed", "▁guess ed", "ds i", "d si", "▁Le ip", "Br oad", "Bro ad", "B road", "▁Hug hes", "▁Hugh es", "i é", "▁W ahl", "▁Wa hl", "▁former ly", "Rel ative", "▁Y u", "▁Mount ains", "▁Mountain s", "▁E num", "▁En um", "▁ Enum", "▁str ang", "▁stra ng", "_ -", "re cht", "rec ht", "vi v", "v iv", "pa use", "p ause", "▁Lond res", "▁el bow", "▁Hawai i", "▁Cas ino", "Th reshold", "Un its", "Unit s", "In clude", "ит о", "и то", "as ury", "▁ste ht", "▁dam ned", "▁damn ed", "▁pack ets", "▁packet s", "▁W erk", "▁Wer k", "▁elev ator", "ied ad", "go vern", "gov ern", "g overn", "▁CONTR ACT", "ma ls", "mal s", "m als", "▁re mem", "▁rem em", "▁ent onces", "▁v as", "▁va s", "▁ vas", "▁sym pathy", "▁befind et", "in cing", "inc ing", "Data Set", "▁add itionally", "▁addition ally", "▁additional ly", "▁mus ician", "▁music ian", "ше го", "▁li stop", "▁list op", ">\" )", "> \")", "Print f", "▁Fel ix", "▁car ved", "▁nice ly", "▁nic ely", "го м", "ch ap", "cha p", "▁N ieder", "▁Ni eder", "▁Nie der", "▁L av", "▁La v", "▁mod ifications", "▁modification s", "mo ment", "m oment", "▁bal con", "▁depend ency", "CK ET", "▁van ished", "▁f ighters", "▁fight ers", "▁fighter s", "▁z unächst", "io ctl", "ioc tl", "▁def ens", "▁defe ns", "▁N em", "▁Ne m", "Util ity", "Ut ility", "▁cur v", "▁cu rv", "▁DA MAGES", "▁Ro gers", "▁Rog ers", "▁Roger s", "▁grat itude", "▁Den mark", "ра я", "gr pc", "grp c", "g rpc", "▁j uni", "▁ju ni", "▁jun i", "▁окт ября", "▁imm ense", "▁prevent ed", "▁prev ented", "▁fo am", "▁Ex tra", "▁Ext ra", "▁ Extra", "ai med", "aim ed", "▁C riteria", "▁Crit eria", "▁ Criteria", "▁Sim ply", "box es", "▁Leg end", "▁P layers", "▁Play ers", "▁Player s", "▁Mer cedes", "▁Merc edes", "▁Br anch", "▁ Branch", "TER N", "T ERN", "om ena", "ome na", "omen a", "▁incorpor ate", "con de", "co nde", "cond e", "c onde", "▁Est ado", "▁Esta do", "▁w asted", "▁was ted", "▁wa sted", "▁waste d", "▁compl aining", "▁complain ing", "▁war riors", "▁warrior s", "ot er", "ote r", "o ter", "▁э том", "▁это м", "▁con ten", "▁cont en", "▁co nten", "▁machine ry", "▁mach inery", "▁techn ological", "▁T D", "▁ TD", "▁g ras", "▁gr as", "▁gra s", "▁minim ize", "▁D oor", "▁Do or", "▁b zw", "▁p rac", "▁pr ac", "▁pra c", "TR EE", "T REE", "▁W ing", "▁Win g", "▁Wi ng", "▁Trans action", "▁ Transaction", "▁M VT", "▁Kle in", "com mons", "comm ons", "common s", "▁} {", "▁ }{", "▁Her itage", "▁f ade", "▁fa de", "ро к", "set Value", "▁Wal lace", "▁Wall ace", "M X", "▁A CT", "▁AC T", "▁ ACT", "▁foot age", "▁ent stand", "ar ga", "arg a", "▁n ails", "▁na ils", "▁nail s", "▁capital ism", "▁G arc", "▁Gar c", "▁Ga rc", "▁susp ension", "il is", "ili s", "▁M ov", "▁Mo v", "uff led", "uffle d", 
"Ar c", "A rc", "▁Beaut iful", "WA Y", "W AY", "Par allel", "XX XX", "di ag", "▁D T", "▁ DT", "m q", "Text View", "ML E", "M LE", "en nen", "enn en", "enne n", "▁infect ed", "▁therap ist", "IN GS", "ING S", "▁c idade", "ъ н", "▁p df", "▁pd f", "▁ pdf", "▁b ump", "▁bu mp", "CT X", "C TX", "▁IN CLUDING", "▁ INCLUDING", "▁G ef", "▁Ge f", "ENT IAL", "▁h andy", "▁hand y", "▁han dy", "▁tempor al", "▁temp oral", "▁tempo ral", "At A", "IS H", "I SH", "▁Pat tern", "▁ Pattern", "▁l an", "▁la n", "▁ lan", "ep endant", "▁sh ining", "id y", "i dy", "▁N T", "▁ NT", "▁F ran", "▁Fr an", "▁Fra n", "▁nur ses", "▁nurs es", "▁nurse s", "▁bet ray", "▁sens ible", "▁апре ля", "▁' [", "▁th irteen", ")} _{", ") }_{", "▁No ah", "INS ERT", "ist ically", "istic ally", "▁Append ix", "▁re cher", "▁rec her", "Re ceiver", "▁der nier", "▁derni er", "л ла", "ли за", "▁Part ido", "▁max imal", "▁maxim al", "sn ap", "▁ча сть", "▁част ь", "▁час ть", "ST OP", "STO P", "S TOP", "▁ult ra", "▁ul tra", "▁dévelop p", "▁t egen", "▁te gen", "▁Ч и", "LI B", "L IB", "▁bas eline", "▁base line", "re load", "rel oad", "▁Ar bitro", "▁k all", "▁ka ll", "c apture", "Ar m", "A rm", "qu in", "im pse", "imp se", "za s", "z as", "▁C and", "▁Can d", "▁Ca nd", "▁br ains", "▁brain s", "▁bra ins", "▁host ile", "▁mar ble", "oo ns", "oon s", "o ons", "▁L oss", "▁Los s", "▁Lo ss", "Meta Data", "▁Rep ública", "▁and ra", "▁ andra", "od en", "ode n", "o den", "▁document ed", "▁M oses", "▁Mo ses", "▁Mos es", "od d", "o dd", "▁w ax", "▁wa x", "us ch", "usc h", "u sch", "▁diagn osed", "in kle", "ink le", "▁X box", "▁seven ty", "▁sevent y", "ci as", "cia s", "c ias", "▁nov iembre", "Com pute", "Comp ute", "Comput e", "}) ;\r", "}); \r", "} );\r", "▁Philip pe", "▁Philipp e", "▁F ör", "Le ave", "▁s age", "▁sa ge", "▁sag e", "▁un pre", "▁Fort unately", "▁a post", "▁ap ost", "ent ities", "enti ties", "▁el los", "▁ell os", "author ized", "GB T", "G BT", "▁ins ist", "▁insp ire", "▁inspir e", "Ma ss", "M ass", "▁r ôle", "fe e", "f ee", "ip art", "ipa rt", "i part", "це р", "ц ер", "un ate", "una te", "u nate", "▁C NN", ": }", "▁unh appy", "▁import ed", "▁imp orted", "H IGH", "ring s", "rin gs", "r ings", "▁In stance", "▁Inst ance", "▁ Instance", "B ay", "ag les", "agle s", "a gles", "me e", "m ee", "ber y", "be ry", "b ery", "▁St ories", "▁Sto ries", "▁Ch ase", "▁Cha se", "▁car riage", "▁mis under", "▁imag in", "p w", "▁M eter", "▁Me ter", "▁Met er", "▁crow ds", "▁crowd s", "▁F ame", "▁Fa me", "sk ill", "ski ll", "s kill", "▁c omed", "▁com ed", "▁co med", "▁come d", "▁ comed", "▁r anch", "▁ran ch", "▁l acking", "▁lack ing", "▁lac king", "▁sub mar", "▁subm ar", "ia nte", "ian te", "iant e", "i ante", "▁l anz", "▁lan z", "▁слу ж", "-- ---------", "---- -------", "-------- ---", "--- --------", "----- ------", "---------- -", "------ -----", "--------- --", "------- ----", "- ----------", "▁ob ten", "▁obt en", "▁down stairs", "Y N", "rot ation", "▁J esse", "▁Jes se", "▁Jess e", "$ (\"#", "▁p uls", "▁pu ls", "▁pul s", "ir ling", "irl ing", "▁Sch aus", "▁Sc haus", "▁de ployed", "▁deploy ed", "▁{ }\",", "▁{} \",", "▁Mar vel", "EN UM", "E NUM", "▁Mat hemat", "▁Math emat", "▁n n", "▁ nn", "com pet", "comp et", "k ów", "bi l", "b il", "Wh ich", "is ine", "isi ne", "▁r ude", "▁ru de", "▁n iveau", "▁á rea", "▁p rès", "▁pr ès", "at is", "ati s", "▁[... 
]", "fu r", "f ur", "om m", "o mm", "pack ed", "p acked", "ме не", "мен е", "м ене", "script style", "▁A th", "▁At h", "▁d esp", "▁de sp", "▁des p", "elt emperaturen", "▁tal ents", "▁talent s", "oc y", "o cy", "▁r aises", "▁rais es", "▁raise s", "▁ra ises", "LI MIT", "L IMIT", "▁editor ial", "▁edit orial", "▁An imal", "▁Anim al", "dr ive", "d rive", "▁рабо та", "bs s", "b ss", "▁S ev", "▁Se v", "ep och", "e poch", "▁R C", "▁ RC", "UN USED", "▁mand atory", "( ?:", "▁B in", "▁Bi n", "▁ Bin", "▁synt hetic", "▁g own", "▁go wn", "▁D ob", "▁Do b", "ka p", "k ap", "▁har mon", "▁harm on", "▁liber ty", "▁libert y", "▁R ice", "▁Ric e", "▁pray ers", "▁pra yers", "▁prayer s", "▁m ise", "▁mis e", "▁mi se", "▁conf using", "▁le ap", "▁arriv es", "▁arr ives", "▁arrive s", "ka mp", "k amp", "▁th ats", "▁that s", "AC C", "A CC", "▁Param eters", "▁Parameter s", "▁ Parameters", "▁о дно", "▁од но", "▁B io", "▁Bi o", "d ensity", "▁gl impse", "FO RE", "FOR E", "▁L isten", "▁List en", "▁Li sten", "▁Liste n", "▁Lis ten", "▁ Listen", "Pr ev", "Pre v", "P rev", "}\\ ,\\", "}\\, \\", "} \\,\\", "ку ль", "▁S EC", "▁SE C", "▁ SEC", "▁expl ored", "▁explore d", "▁explo red", "▁mean time", "▁meant ime", "AI L", "A IL", "▁W P", "▁ WP", "▁r aison", "▁rais on", "▁ra ison", "▁ex iste", "▁exist e", "▁l esser", "▁les ser", "▁less er", "▁Valid ate", "▁ Validate", "▁ca ution", "▁caut ion", "us ta", "ust a", "u sta", "he ading", "head ing", "EF F", "E FF", ".' \"", ". '\"", "▁Gil bert", "▁lim itation", "▁limit ation", "▁ret our", "▁Common wealth", "▁gew ann", "▁miser able", "▁net working", "▁network ing", "▁ott obre", "▁otto bre", "▁D ise", "▁Dis e", "▁Di se", "ed ges", "edge s", "▁s ede", "▁se de", "▁sed e", "ви ча", "вич а", "un iform", "uni form", "▁дея тель", "ir os", "iro s", "i ros", "▁d esen", "▁de sen", "▁des en", "▁p arc", "▁par c", "▁pa rc", "▁R ico", "▁Ric o", "N s", "gu id", "gui d", "g uid", "or io", "ori o", "o rio", "ave length", "avel ength", "▁G le", "▁Gl e", "ince ton", "inc eton", "Am az", "A maz", "Con struct", "Const ruct", "▁m x", "▁ mx", "▁V ern", "▁Ver n", "▁Ve rn", "▁Gener ation", "▁ Generation", "J ack", "ro mag", "rom ag", "▁vi agra", "▁via gra", "▁P eg", "▁Pe g", "▁Up dated", "▁Update d", "▁ Updated", "▁over lap", "▁overl ap", "Event Args", "к ро", "▁* «", "▁quest ioned", "▁question ed", "So uth", "S outh", "not ice", "▁perman ently", "▁permanent ly", "ls t", "l st", "fi cie", "fic ie", "▁qu ella", "▁que lla", "▁quel la", "▁college s", "▁colle ges", "▁colleg es", "▁disappoint ment", "▁Lu ft", "img ur", "▁trans itions", "▁transition s", "▁transit ions", "▁s eller", "▁sell er", "▁sel ler", "▁ию ня", "▁O g", "▁A DD", "▁AD D", "▁ ADD", "▁P ays", "▁Pa ys", "▁Pay s", "COMM AND", "gr ades", "grad es", "grade s", "gra des", "▁fe bbra", "▁C yr", "▁Cy r", "▁febbra io", "et i", "e ti", "▁a rom", "▁ar om", "▁Cl aude", "▁Claud e", "▁UE FA", "▁жи ве", "▁Victor ian", "▁Victoria n", "ke eping", "keep ing", "kee ping", "ê n", "▁FIX ME", "it ime", "iti me", "i time", "ch estr", "che str", "ches tr", "▁Sam sung", "▁do ctrine", "▁p ear", "▁pe ar", "▁Mediterr anean", "▁Y a", "▁v ault", "▁va ult", "▁Hist oric", "▁Histor ic", "▁se dan", "▁sed an", "▁he ated", "▁heat ed", "▁polít ica", "Pro of", ": {", "fe m", "f em", "▁Frank furt", "pect ives", "pective s", "M G", "▁E ye", "da i", "d ai", "▁res erves", "▁reserv es", "▁reserve s", "NE R", "N ER", "▁tob acco", "▁frag ments", "▁fragment s", "ic c", "i cc", "▁b ooth", "▁bo oth", "▁boot h", "▁cru ise", "▁Test ament", "co la", "col a", "c ola", "▁Le op", "▁Leo p", "▁n oon", "▁no 
on", "▁ noon", "▁terr ified", "v b", "int el", "inte l", "al ie", "ali e", "a lie", "▁ver ification", "yst er", "ys ter", "y ster", "AD ER", "A DER", "ch ied", "chie d", "chi ed", "▁data sets", "▁dat asets", "▁dataset s", "▁з і", "▁ зі", "▁m iem", "▁mi em", "▁mie m", "ul ates", "ula tes", "ulate s", "▁u uid", "▁ uuid", "▁Pict ures", "▁Picture s", "▁B rend", "▁Br end", "▁Bre nd", "▁Bren d", "Bill board", "▁s tern", "▁st ern", "▁ste rn", "▁ster n", "▁de nom", "▁den om", "▁acc idents", "▁accident s", "с ня", "▁p acking", "▁pack ing", "▁pac king", "ци ја", "ibli cal", "iblic al", "▁Та к", "▁wh isk", "▁whis k", "▁l uego", "▁lu ego", "▁rect angle", "▁ho oks", "▁hook s", "▁ hooks", "▁neg lect", "▁negl ect", "▁s ober", "▁so ber", "▁sob er", "pro position", "Mult iple", "Multi ple", ":\" ,", ": \",", "▁b apt", "▁ba pt", "Par ts", "Part s", "P arts", "▁S election", "▁Se lection", "▁Sel ection", "▁Select ion", "▁ Selection", "▁Al pha", "▁ Alpha", "we ights", "weight s", "ha ll", "hal l", "h all", "со б", "с об", "▁l ur", "▁lu r", "▁ép oca", "▁re sted", "▁r ested", "▁res ted", "▁rest ed", "▁reste d", "amb igu", "▁taste s", "▁tast es", "amazon aws", "▁conf ess", "▁dic iembre", "▁dici embre", "im plement", "impl ement", "imp lement", "▁absor ption", "Ha l", "H al", "LE AN", "▁Z ach", "▁Za ch", "▁free ze", "▁fre eze", "L BL", "ST M", "S TM", "▁cal c", "▁ca lc", "▁ calc", "={ ()", "= */", "▁b t", "▁ bt", "Re b", "R eb", "▁W ien", "▁Wi en", "an ska", "ans ka", "ansk a", "▁s urn", "▁su rn", "▁sur n", "iat ive", "i ative", "▁inv ån", "C Y", "▁l à", "am ba", "amb a", "le en", "lee n", "l een", "wa hl", "w ahl", "▁function ing", "ți a", "ț ia", "get Context", "ga rt", "gar t", "g art", "▁о бе", "▁об е", "Pe n", "P en", "vi k", "v ik", "Sl ider", "▁Ac cept", "▁ Accept", "Ga p", "G ap", "▁J orge", "SI G", "S IG", "▁во с", "▁го ло", "▁г оло", "▁period o", "ш та", "▁pat ches", "▁patch es", "ко ї", "är e", "ä re", "eng ono", "li sta", "list a", "l ista", "hor n", "ho rn", "h orn", "▁Com plex", "▁Comp lex", "▁ Complex", "Se nt", "S ent", "tr fs", "▁conv ex", "▁conve x", "Gener ation", "▁міс це", "com press", "comp ress", "▁S ax", "▁Sa x", "▁u id", "▁ui d", "▁ uid", "▁Leb ens", "▁Leben s", "Com pletion", "\\| _{", "\\ |_{", "in sky", "ins ky", "▁sc hon", "▁sch on", "▁m asters", "▁ma sters", "▁master s", "▁mas ters", "▁mast ers", "in depend", "inde pend", "ne ys", "ney s", "▁l ied", "▁li ed", "▁lie d", "▁a spir", "▁asp ir", "ч ні", "▁break down", "▁H arm", "▁Har m", "▁Ha rm", "▁design ing", "h f", "▁Ang ela", "▁Angel a", "▁con fer", "▁conf er", "▁part ido", "▁parti do", "▁inter ference", "ma o", "m ao", "▁absor bed", "▁absorb ed", "▁V all", "▁Val l", "▁Va ll", "Error Code", "▁Publish ing", "va no", "van o", "v ano", "BIT S", "BI TS", "B ITS", "▁de er", "▁Camp aign", "▁g raz", "▁gr az", "▁gra z", "CHAN GE", "▁f eder", "▁fe der", "▁fed er", "if fe", "iff e", "hand ed", "han ded", "h anded", "c q", "um bing", "umb ing", "▁un re", "▁s iendo", "▁si endo", "▁sim pler", "▁simple r", "▁simpl er", "wh y", "w hy", "ar ettes", "are ttes", "aret tes", "arette s", "an st", "ans t", "▁h ass", "▁has s", "▁ha ss", "▁Enter prise", "▁m ois", "▁mo is", "▁F o", "▁уча ст", "ff en", "f fen", "▁MOD ULE", "▁ MODULE", "▁activ ated", "▁activate d", "▁intern acional", "▁M ittel", "deg ree", "▁от кры", "▁& (", "get Property", "is z", "i sz", "ced ure", "▁en ters", "▁ent ers", "▁enter s", "▁S ally", "▁Sal ly", "▁Tr ain", "▁Tra in", "▁lo gged", "▁log ged", "▁R av", "▁Ra v", "▁A void", "▁Av oid", "▁K aiser", "▁Ka iser", "▁ex pend", "▁exp end", "ap 
hor", "aph or", "▁b rass", "▁br ass", "▁bra ss", "▁bras s", "▁mel od", "▁att itudes", "▁attitude s", "* \"", "W all", "▁o we", "▁ owe", "▁b amb", "▁ba mb", "sh ader", "sha der", "ce ster", "ces ter", "c ester", "▁P P", "▁ PP", "▁migr ations", "▁migration s", "ent ric", "entr ic", "▁Set up", "▁ Setup", "▁Art ist", "hr e", "h re", "▁pol ite", "▁polit e", "ah an", "aha n", "a han", "▁lug lio", "▁pre decess", "▁S IG", "▁SI G", "▁ SIG", "ті в", "т ів", "▁R F", "▁ RF", "▁D ry", "▁Dr y", "▁m aker", "▁make r", "▁ma ker", "▁ maker", "ши м", "ш им", "▁S ounds", "▁Sound s", "▁implement ing", "▁a h", "▁ ah", "▁g ev", "▁ge v", "▁du plicate", "▁L ogan", "▁Log an", "▁Lo gan", "▁G rade", "▁Gr ade", "▁Grad e", "▁Gra de", "DU CT", "ís es", "í ses", "ér t", "é rt", "▁nons ense", "back up", "Att achment", "▁e cc", "▁ec c", "▁Squad ron", "le arn", "lear n", "de precated", "dep recated", "▁A ub", "▁Au b", "▁G ol", "▁Go l", "▁over l", "SER VICE", "▁beautiful ly", "RE L", "R EL", "▁G ian", "▁Gi an", "▁P apa", "▁Pa pa", "▁Pap a", "res pond", "respon d", "resp ond", "▁Carib bean", "r n", "▁худо ж", "C fg", "ra i", "r ai", "▁sn iff", "tt o", "t to", "оло ги", "о логи", "▁r b", "▁ rb", "▁inc idents", "▁incident s", "▁d uck", "▁du ck", "▁PROVID ED", "Source s", "S ources", "▁Chel sea", "▁t ek", "▁te k", "▁ tek", "▁на лази", "▁pil ots", "▁pilot s", "т ки", "▁tr aded", "▁trad ed", "▁tra ded", "▁trade d", "▁Be ijing", "▁Greg ory", "scal ar", "▁incl ined", "▁inc lined", "▁K amp", "▁Kam p", "▁Ka mp", "▁M arian", "▁Mar ian", "▁Ma rian", "▁Maria n", "▁fier ce", "▁the ft", "▁th eft", "ющи х", "▁In to", "▁Int o", "▁ Into", "con straint", "parent Node", "id ental", "ident al", "iden tal", "▁gouver nement", "▁S ND", "▁SN D", "▁Rub y", "▁Ru by", "▁mon aster", "Rec ords", "Record s", "▁K ab", "▁Ka b", "▁Un iverse", "▁Univers e", "▁approxim ate", "▁approx imate", "W ater", "▁Phys ical", "ap pers", "app ers", "appe rs", "oubt edly", "ло жен", "ложе н", "▁tow el", "▁sib lings", "ep h", "e ph", "ic ios", "ici os", "icio s", "ра ми", "▁out rage", "▁tamb é", "SR C", "S RC", "те лем", "тел ем", "V i", ".' );", ". 
');", "L M", "▁m itt", "▁mit t", "▁mi tt", "▁ mitt", "▁w eed", "▁we ed", "▁cr ops", "▁cro ps", "▁crop s", "im an", "ima n", "i man", "Cl aim", "ins ula", "▁( “", "▁Ch anges", "▁Change s", "▁ Changes", "▁invån are", "ag ain", "aga in", "a gain", "▁c nt", "▁ cnt", "▁G az", "▁Ga z", "▁a ustral", "over lay", "▁Me chan", "▁sl ammed", "▁tr ailing", "▁tra iling", "▁trail ing", "▁Bi ography", "▁appe aling", "▁appeal ing", "IV ER", "IVE R", "I VER", "▁A ve", "▁Av e", "▁P lot", "▁Pl ot", "vo j", "v oj", "▁s ung", "▁su ng", "▁sun g", "▁ sung", "▁u nos", "▁un os", "▁uno s", "Effect s", "v v", "co ok", "c ook", "But tons", "Button s", "▁trans m", "ier to", "iert o", "CON TEXT", "CONT EXT", "▁dign ity", "air ed", "ai red", "aire d", "a ired", "java x", "jav ax", "j avax", "▁Albert o", "▁Alber to", "▁Rec ently", "▁Recent ly", "▁fac ial", "▁fa cial", "math op", "mat hop", "ał o", "a ło", "ви д", "co tt", "c ott", "Vari ables", "Variable s", "▁R an", "▁Ra n", "▁b unk", "am iliar", "amil iar", "CA ST", "C AST", "▁fr ü", "VE D", "V ED", "▁NOT ICE", "▁turn o", "▁tur no", "valid ator", "▁Portug uese", "▁question ing", "}} )", "} })", "▁l ear", "▁le ar", "▁ lear", "X amarin", "▁dis adv", "enc oded", "encode d", "▁K ot", "▁Ko t", "ra ted", "rat ed", "rate d", "r ated", "▁The ory", "ci us", "c ius", "▁Dar win", "ђ е", "▁dé cl", "▁déc l", "▁обла сть", "ро вич", "▁mob ility", "▁mobil ity", "V F", "▁х и", "▁ хи", "un til", "unt il", "u ntil", "▁bar riers", "▁barrier s", "▁barr iers", "gi f", "g if", "▁R oh", "▁Ro h", "▁a ging", "▁ag ing", "▁ aging", "▁W idget", "▁ Widget", "ol k", "▁f arms", "▁far ms", "▁farm s", "Check er", "Che cker", "Int roduction", "с мо", "▁Russ ians", "▁Russian s", "▁Russia ns", "na ments", "nam ents", "nament s", "n aments", "▁In sert", "▁Ins ert", "▁ Insert", "▁When ever", "▁Whe never", "er set", "ers et", "it ori", "itor i", "ito ri", "▁D ort", "▁Do rt", "▁Dor t", "▁cost ume", "▁mathemat ical", "▁B ast", "▁Bas t", "▁Ba st", "▁nom inated", "▁nomin ated", "▁rest oration", "pos al", "po sal", "▁un fortunate", "P s", "LI N", "L IN", "▁int act", "▁prov oc", "▁situ ée", "▁но ября", "er mo", "erm o", "▁f isher", "▁fish er", "▁fis her", "г ля", "▁con ting", "▁cont ing", "▁contin g", "▁Do ug", "▁Dou g", "\" ?", "▁E va", "▁Ev a", "▁t ops", "▁to ps", "▁top s", "▁Rem ote", "▁ Remote", "▁art work", "▁art illery", "qu ick", "▁Arab ia", "▁SD Value", "▁Dak ota", "ia ted", "iat ed", "iate d", "i ated", "▁Op tim", "▁Opt im", "but tons", "button s", "▁c ottage", "▁where in", "▁tut orial", "▁S cre", "▁Sc re", "▁swe ep", "▁Coff ee", "}) }", "} )}", "▁му зы", "host name", "▁T emp", "▁Te mp", "▁Tem p", "▁ Temp", "▁F ut", "▁Fu t", "re spect", "res pect", "resp ect", "oc z", "o cz", "▁pre domin", "▁pred omin", "Ind icator", "en cial", "enc ial", "encia l", "enci al", "UM ENT", "U MENT", "▁SH ALL", "▁SHA LL", "▁comm anded", "▁command ed", "▁withdraw al", "io ur", "i our", "REG ION", "s printf", "▁в ме", "▁Pay ment", "▁ Payment", "▁A nim", "▁An im", "▁ Anim", "pub lish", "▁se eks", "▁see ks", "▁seek s", "ou w", "o uw", "▁G M", "▁ GM", "ru gu", "rug u", "r ugu", "us tain", "ust ain", "usta in", "▁) )", "▁ ))", "▁consult ing", "▁D ialog", "▁ Dialog", "▁L ars", "▁La rs", "▁Lar s", "▁crit ique", "▁circ ulation", "▁circul ation", "▁land sc", "▁lands c", "man aged", "▁C raft", "▁Cr aft", "▁Cra ft", "▁h erman", "▁her man", "af i", "a fi", "am y", "a my", "▁disc our", "▁disco ur", "<> (", "< >(", "▁St eph", "▁Ste ph", "▁Step h", "▁toler ance", "type name", "typ ename", "typen ame", "vent ions", "vention s", "zi ał", "z 
iał", "ст ов", "сто в", "с тов", "▁st icking", "▁stick ing", "AS C", "A SC", "IS O", "I SO", "▁Sp encer", "▁Di dn", "▁Did n", "gom ery", "im iter", "imit er", "imi ter", "dr u", "d ru", "Cl ause", "▁sl ides", "▁slide s", "▁slid es", "## #", "# ##", "▁S ugar", "▁Su gar", "H Y", "▁э ти", "▁Ed wards", "▁Edward s", "▁c ents", "▁cent s", "oy a", "o ya", "ser ts", "sert s", "s erts", "▁H ass", "▁Ha ss", "▁Has s", "▁in gen", "▁ing en", "▁ ingen", "ст ри", "с три", "▁s addle", "sol id", "s olid", "▁ch ampions", "▁champion s", "▁champ ions", "- )", "▁S lov", "▁Sl ov", "▁sh iny", "▁* )&", "▁*) &", "▁Def ine", "č e", "▁scr ut", "on den", "ond en", "onde n", "'\" ,", "' \",", "uf fs", "uff s", "▁o lymp", "id ential", "ident ial", "wa nd", "wan d", "w and", "▁ann ually", "▁annual ly", "▁Ark ansas", "▁s aint", "▁sa int", "▁gle ich", "▁per fection", "▁perfect ion", "▁perf ection", ") >", "▁sh orts", "▁short s", "▁just ified", "pe ated", "peat ed", "pack ages", "package s", "dr iven", "drive n", "d riven", "▁Liber ty", "▁str ipped", "▁stri pped", "▁strip ped", "ше ние", "▁fün f", "▁e cosystem", "ix a", "i xa", "▁F resh", "▁Fr esh", "▁Fre sh", "var t", "va rt", "v art", "▁tre ats", "▁treat s", "▁st ance", "▁stan ce", "▁ stance", "чё т", "ч ёт", "▁p ity", "▁pi ty", "▁pit y", "ad ém", "▁о кон", "▁ок он", "▁C hand", "▁Ch and", "▁Cha nd", "ra b", "r ab", "вши й", "в ший", "in ski", "ins ki", "▁contin ually", "▁continu ally", "▁D addy", "▁Dad dy", "▁night mare", "ic ional", "ici onal", "icio nal", "icion al", "▁e fect", "ue blo", "▁l anç", "▁lan ç", "▁Col lections", "▁Collection s", "▁Collect ions", "▁ Collections", "du e", "d ue", "am pton", "amp ton", "▁mem cpy", "▁ memcpy", "▁* *(", "▁** (", "is sent", "iss ent", "isse nt", "issen t", "▁In sp", "▁Ins p", "▁Glas gow", "▁fur ono", "▁kind ness", "B i", "▁comp eted", "▁compet ed", "▁compete d", "▁o ak", "L arge", "▁dis gu", "▁disg u", "▁k ings", "▁king s", "▁kin gs", "та ми", "▁st uffed", "▁stuff ed", "▁h ilar", "▁hi lar", "pub lished", "publish ed", "▁st ressed", "▁str essed", "▁stress ed", "▁Pe ak", "▁lo ader", "▁load er", "▁ loader", "Key board", "▁re construction", "▁v od", "▁vo d", "▁ vod", "▁d un", "▁du n", "▁understand s", "te nant", "ten ant", "▁ch aque", "▁cha que", "▁pre jud", "ut at", "uta t", "u tat", "▁u so", "▁us o", "▁ uso", "▁He avy", "▁cu atro", "▁side walk", "▁B ug", "▁Bu g", "▁mån aden", "ge o", "▁un ited", "▁unit ed", "▁F iles", "▁Fil es", "▁File s", "▁Fi les", "▁ Files", "▁А ль", "▁Ал ь", "▁rug by", "▁fin ancing", "▁financ ing", "▁com ply", "▁comp ly", "▁compl y", "& #", "▁r ushing", "▁rush ing", "▁rus hing", "▁f en", "▁fe n", "▁ fen", "mon g", "mo ng", "m ong", "▁sp é", "▁present ing", "IN CLUDING", "ě l", "zeich nung", "Back up", "▁pe tit", "▁pet it", "▁all erg", "▁alle rg", "▁aller g", "ну т", "н ут", "▁wor rying", "▁worry ing", "▁m amm", "▁ma mm", "▁oper and", "▁opera nd", ":%.* ]]", "▁real ise", "Comm ands", "Command s", "▁B ew", "▁Be w", "▁ass umes", "▁assum es", "▁assume s", "▁Co vid", "▁Cov id", "▁qu and", "ty ard", "t yard", "▁M ono", "▁Mon o", "▁Mo no", "lin ked", "link ed", "M ARK", "Es p", "E sp", "▁bless ing", "▁eyeb rows", "▁N V", "▁ NV", "▁ст ру", "▁ стру", "▁mod eling", "▁model ing", "▁mode ling", "▁gre eted", "Work space", "▁pe dest", "▁ped est", "▁не за", "lem agne", "Stat istics", "▁a ument", "▁au ment", "▁spe eds", "▁speed s", "▁synd rome", "CONNE CT", "za hl", "z ahl", "ver so", "vers o", "érc ito", "▁astr onom", "▁ap rile", "▁apr ile", "▁april e", "že n", "ž en", "ве ро", "вер о", "dr aft", "d raft", "▁g ioc", "▁gi oc", 
"▁com port", "▁comp ort", "▁var iance", "▁vari ance", "▁real izing", "▁realiz ing", "ED IT", "оло ві", "▁e star", "▁est ar", "▁es tar", "▁esta r", "▁s ost", "▁so st", "N ORMAL", "▁ ó", "▁And r", "▁An dr", "ATTR IB", "▁re de", "▁r ede", "▁red e", "▁t oes", "▁to es", "▁toe s", "▁adv ances", "▁advance s", "▁Again st", "TO M", "T OM", "rs s", "r ss", "MM MM", "▁ne west", "▁new est", "▁V ER", "▁ VER", "▁phrase s", "▁phr ases", "an ter", "ant er", "ante r", "La unch", "▁c hr", "▁ch r", "▁ chr", "▁manufact ured", "$) ,", "$ ),", "roll ment", "es ton", "est on", "esto n", "e ston", "▁pe int", "” )", "en det", "end et", "ende t", "▁H air", "▁Ha ir", "ival ent", "▁up right", "gr en", "gre n", "g ren", "an ked", "ank ed", "wr ight", "w right", "▁m ast", "▁ma st", "▁mas t", "▁on Change", "▁de bris", "▁deb ris", "▁g rap", "▁gr ap", "▁gra p", "et ry", "etr y", "e try", "▁( __", "▁(_ _", "▁ (__", "▁Com merce", "BO X", "T ax", "▁о три", "▁от ри", "▁pre vention", "▁prevent ion", "▁prev ention", "▁Fe el", "▁ex otic", "▁B ark", "▁Bar k", "▁S team", "▁Ste am", "fo n", "f on", "ol in", "oli n", "o lin", "▁elim inated", "▁eliminate d", "▁b c", "▁ bc", "▁C ycl", "▁Cy cl", "▁$ (\"#", "▁ $(\"#", "▁P arl", "▁Par l", "▁Pa rl", "man uel", "os pher", "osp her", "osph er", "W F", "An aly", "Anal y", "▁nav ig", "▁re nown", "▁ren own", "R x", "▁W alt", "▁Wal t", "▁Wa lt", "uf fed", "uff ed", "▁f oster", "▁fo ster", "▁fost er", "▁fos ter", "$ :", "sh ore", "Conne ctor", "Conn ector", "Connect or", "фи ка", "▁real ization", "▁realiz ation", "L i", "ct xt", "ctx t", "c txt", "ah oo", "aho o", "▁mir acle", "▁E T", "▁ ET", "▁G PS", "▁GP S", "▁Observ able", "▁h f", "▁ hf", "▁magnific ent", "не го", "BI N", "B IN", "▁D orf", "▁Do rf", "▁Dor f", "ie ck", "ve e", "v ee", "▁C raw", "▁Cr aw", "▁Cra w", "/ #", "▁p ci", "▁pc i", "▁ pci", "ip pet", "ipp et", "▁Hill ary", "▁g ir", "▁gi r", "▁r and", "▁ran d", "▁ra nd", "▁ rand", "▁la ying", "▁lay ing", "▁D ifferent", "bo ys", "boy s", "vi rt", "vir t", "v irt", "▁enc ryption", "ás z", "á sz", "по р", "п ор", "▁sm elled", "▁smell ed", "▁sus cept", "cl uded", "clude d", "▁C arn", "▁Car n", "▁Ca rn", "ig ten", "igt en", "igte n", "▁Ch uck", "▁Prov inc", "▁per í", "▁Mar shal", "▁Mars hal", "▁ Marshal", "мо ж", "g fx", "os hi", "osh i", "▁W HE", "▁WH E", "▁relax ation", ", .", "we re", "wer e", "w ere", "▁var ieties", "▁W on", "▁Wo n", "▁g aps", "▁gap s", "▁ga ps", "▁st ole", "▁sto le", "ig ua", "igu a", "ющи е", "▁Ham pshire", "ph rase", "▁pel ícula", "Process ing", "▁initial ization", "oust ic", "▁Jose f", "▁Jos ef", "ic ating", "ica ting", "▁good ness", "TE S", "T ES", "▁c ope", "▁co pe", "▁cop e", "▁ cope", "▁ignor ance", "▁B rist", "▁Br ist", "▁par as", "▁para s", "▁pa ras", "▁accident ally", "▁t and", "▁tan d", "▁ta nd", "it test", "itt est", "itte st", "▁у ли", "▁sh ipped", "▁ship ped", "▁о ст", "▁ос т", "else if", "▁u size", "▁us ize", "hor izontal", "▁C arr", "▁Car r", "▁Ca rr", "▁pre cip", "▁prec ip", "ro z", "r oz", "path etic", "pat hetic", "ri ved", "riv ed", "rive d", "r ived", "ro k", "r ok", "▁dig ging", "мо м", "▁M ull", "▁Mu ll", "▁Mul l", "▁X III", "▁XII I", "▁XI II", "▁pe as", "▁f oul", "▁fo ul", "▁fou l", "▁travel s", "▁trav els", "▁N g", "▁состав е", "▁соста ве", "Mon t", "Mo nt", "M ont", "ar de", "ard e", "▁Ste fan", "^^ ^^", "▁K iss", "▁Ki ss", "▁E k", "▁ok tober", "▁mem orable", "▁memor able", "') ).", "')) .", "' )).", "▁V ision", "▁Vis ion", "▁N ina", "▁Ni na", "▁Nin a", "▁S olar", "▁So lar", "▁Sol ar", "▁highlight ed", "▁me mo", "▁mem o", "me isterschaft", "side 
bar", "SE E", "S EE", "▁Nev ada", "D a", "▁draw er", "ast ically", "astic ally", "el de", "eld e", "sc ribed", "scri bed", "scribe d", "scrib ed", "▁pri ests", "▁priest s", "▁hom mes", "▁homme s", "▁in structor", "▁instruct or", "кла д", "▁sp ett", "▁spe tt", "\\ -", "▁ми ра", "▁ мира", "▁Look s", "▁Lo oks", "▁sle eve", "▁strong est", "▁t ête", "▁Nic ole", "▁Ni cole", "▁Nicol e", "im per", "imp er", "на ча", "ip per", "ipp er", "▁in won", "il ers", "ile rs", "iler s", "i lers", "▁Dep uty", "og e", "o ge", "▁de pressed", "▁dep ressed", "▁depress ed", "▁ar te", "▁art e", "▁ arte", "▁comb ining", "LA ST", "L AST", "in ted", "int ed", "inte d", "▁A verage", "▁Ave rage", "▁poll ution", "▁Phill ips", "▁W M", "▁ WM", "}} }\\", "}}} \\", "} }}\\", "Add ed", "Ad ded", "▁per ipher", "Creat ion", "C reation", "▁ital ien", "▁Ch oice", "▁Cho ice", "▁ Choice", "▁EX PRESS", "▁St ruct", "▁Str uct", "▁ Struct", "ys z", "y sz", "Res ize", "Re size", "AR GS", "ARG S", "▁re po", "▁rep o", "▁ repo", "▁что бы", "▁p ref", "▁pre f", "▁pr ef", "▁ pref", "▁earth qu", "▁Ме кси", "▁F inale", "▁Fin ale", "▁Final e", "▁h echo", "▁he cho", "requ ests", "request s", "C ut", "▁des erved", "▁deserve d", "го во", "гов о", "▁Re cent", "▁Rec ent", "▁ди визи", "▁support ive", "пра ви", "прав и", "▁irre levant", "' \r", "▁c trl", "▁ ctrl", "▁De al", "iz ada", "iza da", "u o", "▁n ort", "▁no rt", "▁nor t", "ge ometry", "geo metry", "▁Individ ual", "er eg", "ere g", "e reg", "▁при ня", "cre f", "cr ef", "c ref", "═ ═", "▁com erc", "▁come rc", "= _", "bu nd", "b und", "та х", "il en", "ile n", "i len", "чи та", "▁corpor ation", "es z", "e sz", "▁= =>", "▁== >", "ab lish", "abl ish", "Ap r", "A pr", "▁r ipped", "▁ri pped", "▁rip ped", "Var s", "V ars", "st ret", "str et", "stre t", "▁Frances co", "Na N", "▁any time", "▁autom ated", "ost ream", "o stream", "▁draw ings", "▁drawing s", "▁enhance ment", "ok rat", "▁Iss ue", "в ра", "Cur rency", "▁w yn", "▁wy n", "izar re", "ét ico", "mult iple", "multi ple", "multip le", "▁R ate", "▁Ra te", "▁Rat e", "▁ Rate", "▁I ch", "▁A uss", "▁Aus s", "▁Au ss", "▁For mer", "▁Form er", "Cur ve", "▁mar vel", "att ro", "attr o", "▁с п", "BO OL", "си я", "go ld", "g old", "▁N intendo", "▁Salv ador", "▁S olution", "▁Sol ution", "AD C", "A DC", "бо ра", "бор а", "▁Ben nett", "▁F R", "▁ FR", "▁pu eden", "▁pued en", "▁puede n", "pat ient", "▁P G", "▁ PG", "▁J in", "▁Ji n", "▁cr ashed", "▁crash ed", "▁d enen", "▁de nen", "▁den en", "▁S ample", "▁Sam ple", "▁ Sample", "▁Que bec", "it ories", "itor ies", "ito ries", "itori es", "▁b linked", "▁blink ed", "▁l ion", "▁li on", "▁vo ce", "▁voc e", "▁Imp act", "▁M au", "▁Ma u", "▁N ie", "▁Ni e", "▁l ob", "▁lo b", "▁д ве", "or neys", "orney s", "orne ys", "▁coast al", "▁s ensors", "▁sens ors", "▁sensor s", "▁X II", "▁XI I", "▁ill usion", "oj i", "o ji", "▁I NC", "▁IN C", "▁Dun can", "y k", "▁affect ing", "pu l", "p ul", "▁Napole on", "▁а каде", "▁com pt", "▁comp t", "▁prof itable", "▁profit able", "lo e", "l oe", "▁deux ième", "▁W C", "▁ WC", "▁v iable", "▁vi able", "▁via ble", "▁D rug", "▁Dr ug", "▁Dru g", "Text Box", "▁lum inos", "au té", "aut é", "y c", "št ě", "▁affili ates", "▁affiliate s", "il da", "ild a", "con duct", "cond uct", "▁e benfalls", "▁A MD", "▁AM D", "▁Mon itor", "▁ Monitor", "▁Compan ies", "▁correct ed", "▁corre cted", "ä ck", "SY STEM", "other apy", "▁п еред", "▁пере д", "▁пе ред", "▁bl ues", "▁blue s", "at isf", "ati sf", "atis f", "al though", "alth ough", "ro st", "ros t", "r ost", "SC AN", "S CAN", "▁R AM", "ці ональ", "▁vend ors", "▁vendor 
s", "▁custom s", "▁cust oms", "▁activ ate", "▁ activate", "▁b logs", "▁bl ogs", "▁blo gs", "▁blog s", "▁br ace", "▁bra ce", "▁ brace", "▁st rat", "▁str at", "▁stra t", "an je", "anj e", "щ ё", "▁t ide", "▁tid e", "▁ti de", "▁Brig ade", "get Operand", "▁al iment", "▁ali ment", "▁achieve ments", "▁achievement s", "▁suspic ion", "▁susp icion", "▁touch down", "br oad", "bro ad", "b road", "io re", "ior e", "i ore", "Compar ison", "▁m um", "▁mu m", "Eng lish", "▁P icture", "▁Pict ure", "▁M ouse", "▁Mo use", "▁ Mouse", "am d", "a md", "▁[ `", "▁den omin", "▁denom in", "▁Al eks", "▁Ale ks", "▁pr events", "▁prevent s", "▁prev ents", "ó b", "fe d", "f ed", "▁P ray", "▁Pr ay", "▁Pra y", "▁sh ine", "▁ shine", "▁cl utch", "mu x", "m ux", "App ro", "Ap pro", "▁not ably", "ch io", "chi o", "na ge", "n age", "HA S", "H AS", "▁' )", "▁ ')", "▁M iche", "▁Mich e", "▁Mic he", "▁Mi che", "t g", ":: ~", "▁am ely", "▁ro dz", "▁rod z", "z s", "tr ait", "tra it", "t rait", "▁k lass", "▁kl ass", "▁ klass", "f ö", "▁dest ac", "▁Cl ara", "▁Clar a", "f requency", "▁G it", "▁Gi t", "▁по ль", "▁пол ь", "▁frequ encies", "▁febr ero", "▁st umbled", "ко ю", "▁N ames", "▁Name s", "▁Na mes", "▁Nam es", "▁ Names", "▁F light", "▁Fl ight", "▁p rey", "▁pre y", "▁pr ey", "▁med io", "▁medi o", "▁V AR", "▁VA R", "▁ VAR", "▁F loat", "▁Flo at", "▁ Float", "▁Ern est", "▁Marc atori", "op ort", "o port", "▁cancel lation", "▁cancell ation", "▁Br yan", "▁Bry an", "—— ——", "Lu c", "L uc", "▁li bre", "▁lib re", "▁t ítulo", "* >", "▁S andy", "▁San dy", "▁Sand y", "▁Mar ina", "Be en", "B een", "▁w al", "▁wa l", "▁ wal", "▁K ultur", "▁expl ode", "▁explo de", "▁lim iting", "▁limit ing", "▁presum ably", "▁p b", "▁ pb", "▁M erc", "▁Me rc", "▁Mer c", "▁ре ки", "le arning", "lear ning", "learn ing", "C atalog", "▁C ensus", "lt e", "l te", "▁N ET", "▁NE T", "▁ NET", "ra ising", "rais ing", "rai sing", "сь ке", "st aff", "sta ff", "▁Qu inn", "▁mem orial", "▁memor ial", "▁memo rial", "п ня", "▁cu enta", "▁X I", "lb l", "l bl", "▁v aries", "▁var ies", "▁vari es", "▁va ries", "▁fluct uations", "▁дол ж", "▁осо би", "▁ware house", "How ever", "▁correct ions", "▁corre ctions", "▁correction s", "dh d", "d hd", "▁f als", "▁fa ls", "▁fal s", "▁controvers y", "▁cur se", "▁t élé", "▁té lé", "ře d", "ř ed", "▁A U", "▁ AU", "▁т ор", "▁то р", "▁ тор", "▁cr ít", "id an", "ida n", "i dan", "ili ary", "iliar y", "ilia ry", "▁P anel", "▁Pan el", "▁Pa nel", "▁ Panel", "cul e", "cu le", "c ule", "▁P oor", "▁Po or", "▁B A", "▁ BA", "▁ignor ant", "ème s", "è mes", "▁aest hetic", "Link ed", "Lin ked", "get Int", "Un icode", "[ @", "▁Z ent", "▁Ze nt", "▁Zen t", "Man ifest", "▁v ars", "▁var s", "▁va rs", "▁ vars", "P B", "▁в у", "▁ ву", "▁De scribe", "▁Desc ribe", "▁ Describe", "▁Any thing", "oi rs", "oir s", "o irs", "▁s ocks", "▁so cks", "▁soc ks", "▁sock s", "▁im ped", "▁imp ed", "▁ne ue", "▁neu e", "▁dis pers", "▁disp ers", "Col lect", "Coll ect", "file r", "fil er", "fi ler", "f iler", "▁Fr au", "▁Fra u", "▁H ockey", "▁te ens", "▁teen s", "▁Rober to", "▁Robert o", "la uf", "l auf", "ва ть", "ват ь", "▁с ко", "▁ ско", "is Array", "▁teen ager", "Bu ilt", "▁loud ly", "Cap acity", "▁advent ures", "▁adventure s", "▁M olly", "▁Mol ly", "rec ogn", "bar s", "ba rs", "b ars", "▁L or", "▁Lo r", "▁pu ò", "▁m ong", "▁mon g", "▁mo ng", "▁ mong", "in ement", "ine ment", "i nement", "Ass ignment", "Assign ment", "▁d iz", "▁di z", "less ness", "▁H alloween", "▁bit map", "▁ bitmap", "Ro m", "R om", "на р", "н ар", "▁re bel", "▁reb el", "▁rad ial", "▁radi al", "me asure", "ni t", "n it", 
"▁Ass ume", "▁assign ments", "▁assignment s", "▁I sn", "▁Is n", "▁al tre", "▁alt re", "ße r", "ß er", "на ль", "нал ь", "н аль", "▁fl ies", "▁d roit", "▁dro it", "▁thick ness", "▁en jo", "▁d well", "▁dw ell", "▁hom osexual", "▁e val", "▁ev al", "▁ eval", "$_ {", "$ _{", "as ia", "asi a", "▁phil os", "get Current", "▁veter ans", "▁veteran s", "▁Ber keley", "▁wild life", "Co p", "C op", "ve rn", "ver n", "v ern", "▁ Ú", "to s", "t os", "▁L ed", "▁Le d", "▁key words", "▁keyword s", "▁med ications", "▁medic ations", "▁medication s", "ne um", "▁jam ais", "▁B uc", "▁Bu c", "▁P D", "▁ PD", "▁State ment", "▁Stat ement", "▁ Statement", "▁P I", "▁ PI", "▁Jack ie", "▁Jac kie", "▁ord in", "▁k ör", "▁kö r", "en ze", "enz e", "▁util ized", "▁utiliz ed", "▁utilize d", "á ct", "az ed", "aze d", "a zed", "▁sever ely", "▁severe ly", "▁ä ven", "▁li bro", "▁lib ro", "▁E u", "äs t", "ä st", "PAR T", "PA RT", "P ART", "▁But ler", "▁puzz le", "F all", "Count ry", "C ountry", "pf n", "p fn", "▁у країн", "▁Or chestra", "▁al to", "▁alt o", "▁anc ora", "▁decom position", "▁ م", "▁appet ite", "ad u", "a du", "▁TH AT", "▁com enz", "min a", "mi na", "m ina", "▁init iated", "▁initi ated", "▁T at", "▁Ta t", "▁some time", "▁som etime", "▁somet ime", "re k", "r ek", "br ead", "bre ad", "b read", "▁Stat istics", "▁ Statistics", "▁C ob", "▁Co b", "F ollow", "▁ge ometric", "ш ла", "▁proceed ings", "D lg", "se ven", "s even", "▁[ -", "▁ [-", "▁Buff alo", "▁bl acks", "▁black s", "▁s ov", "▁so v", "▁cust ody", "▁r as", "▁ra s", "▁ ras", "▁tatto o", "öffent licht", "Bl o", "B lo", "A ustral", "▁rec uper", "ле в", "л ев", "▁b em", "▁be m", "▁t hou", "▁th ou", "ori ented", "orient ed", "vi r", "v ir", "▁col ony", "▁colon y", "▁Stan ford", "Abs olute", "ad rat", "adr at", "▁S itu", "▁Si tu", "▁sou vent", "EX EC", "▁m ű", "▁apart ments", "▁apartment s", "▁слу ча", "▁a no", "▁an o", "▁ ano", "WIN DO", "ac ci", "acc i", "▁L au", "▁La u", "co urt", "cou rt", "c ourt", "▁manif old", "▁coal ition", "▁X IV", "▁XI V", "Att rib", "Attr ib", "asc ade", "▁whe at", "▁strength s", "FR EE", "F REE", "EMP TY", "▁h ey", "▁he y", "as cular", "asc ular", "▁pl asma", "▁b ob", "▁bo b", "Sep arator", "=\" ${", "=\"$ {", "▁Z ag", "▁Za g", "▁pro jet", "▁smooth ly", "SE QU", "an aly", "ana ly", "anal y", "att achment", "attach ment", "▁E S", "▁ ES", "▁po pped", "▁pop ped", "ő s", "to m", "t om", "▁s ón", "▁só n", "▁r ott", "▁ro tt", "▁rot t", "▁ rott", "Util ities", "Ut ilities", "had oop", "hado op", "▁s otto", "▁so tto", "au tor", "aut or", "auto r", "▁George s", "▁Georg es", "▁kter ý", "▁gru ppo", "▁ко гда", "▁ме да", "▁instrument al", "▁W riter", "▁Write r", "▁Writ er", "▁Wr iter", "▁ Writer", "▁set Timeout", "ik k", "i kk", "▁Do po", "▁Dop o", "]) ;\r", "]); \r", "] );\r", "▁pract icing", "▁Ron ald", "▁у би", "▁ag rees", "▁agree s", "▁agre es", "▁den oted", "▁denote d", "is miss", "ism iss", "▁interview ed", "template s", "t emplates", "ř i", "ad ministr", "admin istr", "▁B utter", "▁But ter", "▁XV II", "▁XVI I", "▁position ed", "▁posit ioned", "▁Four th", "▁overwhel med", "▁Reg ular", "▁rep rezent", "коно ми", "▁expect s", "Ind ices", "▁mar ijuana", "▁z aj", "▁za j", "▁B ren", "▁Br en", "▁Bre n", "▁be gg", "▁beg g", "▁na hm", "▁nah m", "▁inter rog", "ти е", "▁B un", "▁Bu n", "▁с еред", "▁се ред", "▁shel ves", "▁которы х", "▁Fra uen", "▁Frau en", "▁Serge ant", "▁у спе", "mat ched", "match ed", "m atched", "▁d onne", "▁don ne", "▁touch es", "▁tou ches", "ab ort", "abor t", "▁v ale", "▁val e", "▁va le", "▁inst itutional", "▁institut ional", 
"▁institution al", "▁M ons", "▁Mon s", "▁Mo ns", "▁ambit ious", "▁non etheless", "▁none theless", "j d", "пе й", "п ей", "▁back pack", "da o", "d ao", "ви я", "▁surround ings", "▁surrounding s", "| _{", "▁g egründ", "dis p", "di sp", "d isp", "▁moist ure", "▁w yd", "▁wy d", "▁tr aders", "▁trad ers", "▁tra ders", "▁trade rs", "▁Er st", "▁Gal axy", "▁в оло", "▁во ло", "▁Per u", "▁Pe ru", "▁prior ities", "▁pron ounced", "▁C BS", "▁CB S", "▁Pal m", "▁Pa lm", "▁exp ans", "▁ener get", "▁energ et", "▁Cond ition", "▁ Condition", "▁S ver", "▁Sv er", "ne sted", "nes ted", "n ested", "▁февра ля", "he ro", "her o", "h ero", "▁ко ло", "▁к оло", "▁ коло", "▁Fil ms", "▁Film s", "Bo n", "B on", "é al", "ploy ed", "tr ained", "tra ined", "train ed", "▁els ő", "▁l ust", "▁lu st", "ati num", "atin um", "oy le", "o yle", "▁J et", "▁Je t", "жде ния", "▁survey s", "▁surve ys", "be e", "b ee", "work ers", "worker s", "wor kers", "rec ords", "record s", "cal endar", "bb ing", "b bing", "reg ation", "dash board", "d ashboard", "K ing", "▁v ista", "▁vis ta", "▁vi sta", "▁dep icted", "▁occur ring", "▁о фи", "▁sand wich", "rc u", "r cu", "ke rn", "ker n", "k ern", "▁min ut", "▁mi nut", "▁с мер", "▁t d", "▁ td", "so lete", "sole te", "sol ete", "Com plex", "Comp lex", "▁t unn", "▁tu nn", "▁tun n", "▁sc arc", "▁scar c", "st ead", "ste ad", "▁F ail", "▁Fa il", "▁ Fail", "▁R s", "▁tr ails", "▁tra ils", "▁trail s", "ke m", "k em", "▁Rom ans", "▁Ro mans", "▁Roman s", "▁Roma ns", "at ivity", "ativ ity", "Pre vious", "Prev ious", "▁de press", "▁dep ress", "▁re signed", "▁res igned", "▁resign ed", "get Default", "▁Tib et", "▁Ti bet", "▁Fr anco", "▁Franc o", "▁Fran co", "\") ));", "\")) );", "\" )));", "▁in jection", "▁inj ection", "▁inject ion", "rem oved", "remove d", "▁pra ised", "▁praise d", "▁A sc", "▁As c", "er ase", "era se", "eras e", "e rase", "▁commission ed", "MA IL", "M AIL", "▁B oh", "▁Bo h", "Pol y", "Po ly", "P oly", "▁cin q", "▁Ab ove", "▁Josh ua", "ZE RO", "Z ERO", "▁sum mit", "▁U rs", "▁Ur s", "▁c url", "▁cur l", "▁cu rl", "▁v isa", "▁vis a", "▁vi sa", "▁re sur", "▁res ur", "={ '", "= {'", "fe at", "▁abs orb", "▁absor b", "▁plan ets", "▁plane ts", "▁planet s", "▁prin cess", "▁prince ss", "▁Jahrhund erts", "▁Jahrhundert s", "x p", "▁N BC", "▁ко ми", "▁ком и", "▁F UN", "▁ FUN", "▁ne uen", "▁neu en", "▁neue n", "▁dé jà", "▁O z", "bb en", "b ben", "VID EO", "▁ej empl", "▁cons iders", "▁consider s", "▁consid ers", "at ri", "atr i", "a tri", "▁ar rog", "▁arr og", "io so", "ios o", "i oso", "▁h ace", "▁ha ce", "▁contact ed", "▁un ple", "▁spons ored", "▁tr ainer", "▁tra iner", "▁train er", "sb i", "s bi", "▁за нима", "C riterion", "но то", "sch eme", "sche me", "enn ial", "per form", "perf orm", "▁fix ing", "▁по стро", "▁пос тро", "ar b", "a rb", "EX IT", "▁ca fé", "▁caf é", "itut ed", "itute d", "itu ted", "ri ages", "ria ges", "riage s", "T ur", "▁hab er", "▁ha ber", "el asticsearch", "▁а л", "▁ ал", "r h", "▁v oll", "▁vo ll", "▁vol l", "CL U", "M il", "▁mem bres", "▁membr es", "▁membre s", "▁remark ed", "ва на", "ван а", "в ана", "=\" _", "Le ss", "Les s", "L ess", "(\" \");", "▁Y ale", "▁Ya le", "ber ries", "▁rele asing", "▁im ports", "▁import s", "▁imp orts", "id ea", "ide a", "▁( +", "▁ar qu", "ific ación", "ifica ción", "▁па ра", "▁пар а", "▁R angers", "▁Range rs", "▁Rang ers", "▁Ran gers", "M ic", "▁n ederbörd", "▁imag inary", "▁imagin ary", "▁special ists", "▁specialist s", "▁ho of", "Mod ules", "Module s", "▁sad ly", "ce il", "Tab Index", "at ionale", "ation ale", "ational e", "▁Part ner", "tb ody", "t 
body", "▁le verage", "▁lever age", "D N", "▁P rec", "▁Pr ec", "▁Pre c", "▁S é", "▁M am", "▁Ma m", "▁a fin", "▁af in", "is Valid", "Ps e", "P se", "▁сто ро", "▁cho pped", "▁chop ped", "▁Min or", "▁Mi nor", "▁d abei", "Da vid", "D avid", "uss ia", "▁дере вня", "▁Id entity", "▁Ident ity", "▁ Identity", "▁L GBT", "ци је", "▁Or ts", "▁Ort s", "▁part i", "▁par ti", "▁B achelor", "ug a", "u ga", "▁O PT", "▁OP T", "▁ OPT", "▁S eth", "▁Se th", "▁Set h", "▁LI ABLE", "▁inaug ur", "▁Shan ghai", "▁relax ing", "ци она", "цион а", "\" %", "▁o bey", "▁ob ey", "▁A irlines", "▁Air lines", "Link s", "Lin ks", "L inks", "▁C elt", "▁Ce lt", "▁Cel t", "▁Ad min", "▁Adm in", "▁ Admin", "ag ation", "▁wor ries", "IN TE", "INT E", "ar ith", "ari th", "Fat alf", "]] )", "] ])", "co lm", "col m", "▁arch ae", "▁br ushed", "▁brush ed", "▁t ät", "▁struct ured", "▁structure d", "ти и", "▁home m", "▁hom em", "▁ho mem", "[: ,", "▁n avy", "▁na vy", "▁nav y", "get Key", "power ed", "pow ered", "▁s ucked", "▁suc ked", "▁suck ed", "▁z omb", "▁zo mb", "iss ant", "issa nt", "▁M ight", "▁Mi ght", "▁Mig ht", "▁P ull", "▁Pu ll", "▁Pul l", "ri r", "r ir", "▁п і", "▁ пі", "▁se as", "▁sea s", "▁W rest", "▁Wr est", "▁t ense", "▁ten se", "▁tens e", "▁a tm", "▁at m", "▁have t", "▁ha vet", "▁hav et", "▁pier ws", "▁trag ic", "▁D iff", "▁Di ff", "▁ Diff", "▁conf idential", "▁confident ial", "success ful", "ę ż", "▁Ch ain", "▁Cha in", "▁ Chain", "▁Ken ya", "Ch oice", "oc ur", "o cur", "an iu", "ani u", "▁consult ant", "▁Ad vis", "▁Adv is", "Li f", "L if", "▁L ors", "▁Lo rs", "▁Lor s", "avor ite", "avo rite", "▁util izing", "▁utiliz ing", "▁v intage", "Mat cher", "Match er", "▁m embre", "▁me mbre", "▁mem bre", "▁membr e", "▁Ex pect", "▁Exp ect", "▁ Expect", "▁tr acing", "▁tra cing", "no g", "n og", "▁d ej", "▁de j", "▁у че", "▁lo ops", "▁loop s", "▁on click", "▁G PU", "▁GP U", "▁ GPU", "▁Album s", "▁Alb ums", "▁Arch ives", "ва та", "ват а", "▁st ove", "▁sto ve", "ш ли", "an cies", "anc ies", "▁geme ente", "mo b", "m ob", "PD F", "P DF", "es o", "e so", "▁v ég", "▁vé g", "Res olve", "▁te aches", "▁teach es", "▁tea ches", "ло же", "▁с тво", "▁ст во", "▁ ство", "▁О дна", "▁f id", "▁fi d", "Some thing", "Som ething", "▁ne bo", "▁Valent ine", "row ning", "rown ing", "▁а ле", "▁ал е", "aw i", "a wi", "is hi", "ish i", "▁S PI", "▁SP I", "▁ SPI", "▁s pel", "▁sp el", "▁spe l", "▁б іль", "▁бі ль", "▁particip ant", "▁N ed", "▁Ne d", "▁G ast", "▁Ga st", "▁Gas t", "▁bl ond", "▁blo nd", "▁s aves", "▁sa ves", "▁save s", "▁sav es", "col ored", "color ed", "colo red", "▁A CTION", "▁AC TION", "▁ACT ION", "▁ ACTION", "▁Polit iker", "}$ )", "} $)", "▁D um", "▁Du m", "den try", "d entry", "Stud ent", "▁~ =", "lo ads", "load s", "▁F oster", "▁Fo ster", "一 个", "▁P K", "▁ PK", "▁S B", "▁ SB", "▁H ern", "▁He rn", "▁Her n", "▁Ex hib", "Listener s", "Listen ers", "Su n", "S un", "pl ac", "▁B ever", "▁Be ver", "▁Bev er", "▁incl uy", "▁inclu y", "▁d c", "▁ dc", "ar gc", "arg c", "▁g ed", "▁ge d", "▁ ged", "с па", "▁Form ula", "▁с ем", "▁се м", "▁em pt", "▁emp t", "▁ empt", "un register", "▁Queens land", "ánd ez", "ot ive", "oti ve", "▁al ley", "▁all ey", "▁alle y", "▁Democr at", "▁trav ail", "▁$ ,", "▁ $,", "R P", "ро е", "pers onal", "person al", "▁péri ode", "HO ME", "om es", "ome s", "o mes", "▁recogn ised", "he ng", "hen g", "h eng", "▁J ung", "▁Jun g", "▁Ju ng", "▁Ro land", "▁Rol and", "▁conv icted", "Loc ked", "Lock ed", "L ocked", "▁m ari", "▁mar i", "▁ma ri", "▁Lux em", "refer to", "De leted", "Dele ted", "Delete d", "Del eted", "int ent", "inte nt", "▁St aats", 
"▁Sta ats", "▁обла сті", "и т", "▁са ве", "▁Pro tocol", "▁ Protocol", "ają c", "ch k", "Type Info", "▁p kt", "▁ pkt", "▁sc andal", "▁scan dal", "▁individ ually", "▁individual ly", "FM T", "F MT", "▁n j", "ab ile", "abil e", "abi le", "▁R ivers", "▁River s", "PRO PERTY", "V B", "wo rt", "wor t", "w ort", "▁split ting", "▁spl itting", "ach ten", "acht en", "achte n", "a chten", "▁AR ISING", "▁s ip", "▁si p", "▁f res", "▁fr es", "▁fre s", "▁g room", "▁gr oom", "▁gro om", "H ol", "▁c anon", "▁can on", "▁ca non", "▁abrupt ly", "▁after ward", "▁R unning", "▁Run ning", "▁ Running", "▁j i", "▁ ji", "▁% ,", "▁ %,", "▁Palest inian", "R W", "pgf scope", "▁country side", "▁countr yside", "▁fort unate", "▁ fortunate", "▁c él", "▁Po inter", "▁Point er", "▁ Pointer", "ens ors", "ensor s", "enso rs", "ra ting", "rat ing", "r ating", "▁buff ers", "▁buffer s", "▁buf fers", "▁re mot", "▁rem ot", "▁Prop Types", "▁N ah", "▁Na h", "al tern", "alt ern", "alter n", "▁eas iest", "▁in vas", "▁inv as", "▁cl k", "▁ clk", "copy right", "c opyright", "▁bl anc", "SA MP", "S AMP", "▁Co hen", "▁S hell", "▁She ll", "▁Sh ell", "▁Shel l", "▁ Shell", "▁destroy ing", "▁destro ying", "▁Z el", "▁Ze l", "date r", "da ter", "dat er", "d ater", "če n", "č en", "▁f iling", "▁fil ing", "▁fi ling", "▁integr ate", "xi t", "x it", "▁R ET", "▁RE T", "▁ RET", "le ne", "len e", "l ene", "cal ls", "call s", "c alls", "▁sl aughter", "init ialized", "initial ized", "initialize d", "un ches", "unch es", "unc hes", "▁Tr ace", "▁Tra ce", "▁ Trace", "eff icient", "▁Wood s", "▁long itud", "G N", "▁K ont", "▁Kon t", "▁Ko nt", "▁chunk s", "á ch", "▁unem ployment", "ac om", "aco m", "a com", "▁sl owed", "▁slow ed", "▁out lined", "▁outline d", "xff ff", "xf fff", "x ffff", "▁ik ke", "▁work space", "▁works pace", "M c", "▁k icking", "▁kick ing", "▁embed ding", "ch nitt", "chn itt", "er ten", "ert en", "▁In terior", "▁Inter ior", "▁S ongs", "▁Son gs", "▁Song s", "mm c", "m mc", "▁analy zed", "▁analyze d", "▁Cou pe", "▁favor ites", "▁favorite s", "▁t t", "▁ tt", "▁то й", "▁ той", "R outing", "▁Sil va", "▁andere m", "▁ander em", "▁h onom", "▁hon om", "▁ho nom", "▁исполь зова", ".\" ]", ". 
\"]", "▁W u", "le gt", "leg t", "▁s poon", "▁sp oon", "▁spo on", "▁j ap", "▁ja p", "▁Ext ension", "▁ Extension", "er ne", "ern e", "▁v agy", "▁va gy", "▁vag y", "▁се ла", "▁ф унк", "▁anal ytics", "▁analyt ics", "▁s ug", "▁su g", "▁A sync", "▁As ync", "▁ Async", "▁pe aks", "▁peak s", "▁G ym", "▁Gy m", "▁law suit", "▁laws uit", "< >", "ial is", "i alis", "et ric", "etr ic", "face d", "fa ced", "fac ed", "f aced", "▁dis rupt", "▁f å", "Input s", "`) ;", "` );", "▁M end", "▁Me nd", "▁Men d", "go n", "g on", "▁\" ,\"", "▁\", \"", "▁ \",\"", "▁n erves", "▁nerv es", "▁nerve s", "▁ner ves", "▁doubt s", "▁doub ts", "sa p", "s ap", "▁s ow", "▁so w", ",\\ ,\\", ",\\, \\", ", \\,\\", "▁B S", "▁ BS", "▁G lad", "▁Gl ad", "▁a ster", "▁as ter", "▁ast er", "▁ aster", "œuv re", "▁Bang l", "▁Ban gl", "▁i Pad", "use ppe", "▁conduct ing", "▁( {\\", "▁({ \\", "▁ ({\\", "▁Har bor", "ps z", "p sz", "▁FI FA", "_* *", "_ **", "em or", "e mor", "▁ ▁", "▁▁ ▁▁", "▁▁▁ ▁", "▁ ▁▁▁", "▁▁ ▁▁▁▁▁▁", "▁▁▁▁ ▁▁▁▁", "▁▁▁▁▁ ▁▁▁", "▁▁▁▁▁▁ ▁▁", "▁▁▁ ▁▁▁▁▁", "▁▁▁▁▁▁▁ ▁", "▁ ▁▁▁▁▁▁▁", "▁▁ ▁▁▁", "▁▁▁▁ ▁", "▁▁▁ ▁▁", "▁ ▁▁▁▁", "▁▁ ▁▁▁▁▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁ ▁▁▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁ ▁▁▁▁▁▁▁▁", "▁▁▁▁▁ ▁▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁ ▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁▁ ▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁▁▁ ▁▁▁", "▁▁▁▁▁▁▁▁▁▁ ▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁▁▁▁ ▁▁", "▁▁▁ ▁▁▁▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁ ▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁ ▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁ ▁▁▁▁▁", "▁▁ ▁▁▁▁", "▁▁▁▁ ▁▁", "▁▁▁▁▁ ▁", "▁▁▁ ▁▁▁", "▁ ▁▁▁▁▁", "▁▁ ▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁ ▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁ ▁▁▁▁", "▁▁▁▁▁ ▁▁▁▁▁▁▁", "▁▁▁▁▁▁ ▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁ ▁▁", "▁▁▁ ▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁ ▁▁▁", "▁▁▁▁▁▁▁ ▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁ ▁", "▁ ▁▁▁▁▁▁▁▁▁▁▁", "▁▁ ▁▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁ ▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁ ▁▁▁▁▁", "▁▁▁▁▁ ▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁ ▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁▁ ▁", "▁▁▁▁▁▁▁▁▁▁ ▁▁▁", "▁▁▁ ▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁ ▁▁▁▁", "▁▁▁▁▁▁▁ ▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁ ▁▁", "▁ ▁▁▁▁▁▁▁▁▁▁▁▁", "▁▁ ▁▁▁▁▁▁▁▁", "▁▁▁▁ ▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁ ▁▁", "▁▁▁▁▁ ▁▁▁▁▁", "▁▁▁▁▁▁ ▁▁▁▁", "▁▁▁ ▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁ ▁", "▁▁▁▁▁▁▁ ▁▁▁", "▁ ▁▁▁▁▁▁▁▁▁", "▁▁ ▁▁▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁ ▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁ ▁▁▁▁▁▁", "▁▁▁▁▁ ▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁ ▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁▁ ▁▁", "▁▁▁▁▁▁▁▁▁▁▁▁▁ ▁", "▁▁▁▁▁▁▁▁▁▁ ▁▁▁▁", "▁▁▁ ▁▁▁▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁ ▁▁▁▁▁", "▁▁▁▁▁▁▁ ▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁▁ ▁▁▁", "▁ ▁▁▁▁▁▁▁▁▁▁▁▁▁", "▁▁ ▁", "▁ ▁▁", "▁▁ ▁▁▁▁▁▁▁", "▁▁▁▁ ▁▁▁▁▁", "▁▁▁▁▁▁▁▁ ▁", "▁▁▁▁▁ ▁▁▁▁", "▁▁▁▁▁▁ ▁▁▁", "▁▁▁ ▁▁▁▁▁▁", "▁▁▁▁▁▁▁ ▁▁", "▁ ▁▁▁▁▁▁▁▁", "▁▁ ▁▁▁▁▁", "▁▁▁▁ ▁▁▁", "▁▁▁▁▁ ▁▁", "▁▁▁▁▁▁ ▁", "▁▁▁ ▁▁▁▁", "▁ ▁▁▁▁▁▁", "▁▁ ▁▁▁▁▁▁▁▁▁", "▁▁▁▁ ▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁ ▁▁▁", "▁▁▁▁▁ ▁▁▁▁▁▁", "▁▁▁▁▁▁ ▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁▁ ▁", "▁▁▁ ▁▁▁▁▁▁▁▁", "▁▁▁▁▁▁▁▁▁ ▁▁", "▁▁▁▁▁▁▁ ▁▁▁▁", "▁ ▁▁▁▁▁▁▁▁▁▁" ] } } ================================================ FILE: Tokenizer/tokenizer_config.json ================================================ { "add_bos_token": true, "add_eos_token": false, "added_tokens_decoder": { "0": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "1": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "2": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true } }, "additional_special_tokens": [], "bos_token": "", "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ 
message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}", "clean_up_tokenization_spaces": false, "eos_token": "", "legacy": true, "model_max_length": 1000000000000000019884624838656, "pad_token": null, "sp_model_kwargs": {}, "spaces_between_special_tokens": false, "tokenizer_class": "LlamaTokenizer", "unk_token": "", "use_default_system_prompt": true }

================================================ FILE: __main__.py ================================================

import faulthandler
faulthandler.enable(all_threads=True)

import multiprocessing
multiprocessing.set_start_method('spawn', force=True)

from core.utilities import set_cuda_paths
set_cuda_paths()

from gui.main_window import main
main()

================================================ FILE: charts/__init__.py ================================================

================================================ FILE: charts/all_gpus.py ================================================

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap

from charts.gpu_info import GPUS


def create_gpu_comparison_plot(min_vram_threshold=6, max_vram_threshold=8):
    filtered = {name: info for name, info in GPUS.items() if min_vram_threshold <= info["memory_size_gb"] <= max_vram_threshold}
    sorted_nvidia = sorted(filtered.items(), key=lambda item: item[1]["cuda_cores"], reverse=True)
    names = [name for name, _ in sorted_nvidia]
    compute_units = [info["cuda_cores"] for _, info in sorted_nvidia]
    sizes = [info["memory_size_gb"] for _, info in sorted_nvidia]

    gradient_nvidia = LinearSegmentedColormap.from_list("", ["#003328", "#00CC66"])
    plt.rcParams["figure.autolayout"] = True
    fig = plt.figure(figsize=(19.2, 10.8), dpi=100, facecolor="#4A4A4A")
    ax1 = fig.add_subplot(111)
    ax1.set_facecolor("#4A4A4A")
    bars = ax1.barh(names, compute_units, color=gradient_nvidia(np.linspace(0, 1, len(compute_units))), label="NVIDIA CUDA Cores")

    max_units = max(compute_units) if compute_units else 0
    for i, bar in enumerate(bars):
        pct = (compute_units[i] / max_units) * 100 if max_units else 0
        ax1.text(150, bar.get_y() + bar.get_height() / 2, f"{compute_units[i]:,} - {pct:.2f}%", va="center", ha="left", color="white", fontsize=10)

    ax1.set_xlabel("CUDA Cores", color="white")
    ax1.set_ylabel("Graphics Cards", color="white", labelpad=15)
    ax1.set_title(f"Graphics Cards: CUDA Cores and VRAM Comparison ({min_vram_threshold}GB <= VRAM <= {max_vram_threshold}GB)", color="white", pad=20)
    ax1.tick_params(axis="both", colors="white")

    ax2 = ax1.twiny()
    ax2.plot(sizes, names, "o-", color="orange", label="VRAM (GB)")
    ax2.set_xlabel("VRAM (GB)", color="white")
    ax2.xaxis.set_label_position("bottom")
    ax2.xaxis.tick_bottom()
    ax2.tick_params(axis="x", colors="white")
    ax1.xaxis.set_label_position("top")
    ax1.xaxis.tick_top()

    legend_elements = [
        plt.Rectangle((0,0),1,1, facecolor=gradient_nvidia(0.5), edgecolor="none", label="NVIDIA CUDA Cores"),
        plt.Line2D([0], [0], color="orange", marker="o", linestyle="-", label="VRAM (GB)")
    ]
    ax2.legend(handles=legend_elements, loc="upper right", facecolor="#4A4A4A", edgecolor="white", labelcolor="white")

    for spine in ax1.spines.values():
        spine.set_edgecolor("white")
    for spine in ax2.spines.values():
        spine.set_edgecolor("white")

    vram_lines = [2, 4, 6, 8, 10, 11, 12, 16, 20, 24, 32]
    for vram_value in vram_lines:
        if vram_value in sizes:
            ax2.axvline(x=vram_value, color="#A8A8A8", linestyle="--", linewidth=0.5)
    ax2.set_xticks(vram_lines)
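    # The remaining statements pin the VRAM axis to a fixed 0-33 GB range, set the subplot
    # margins (the wide left margin leaves room for the card-name labels), and return the figure.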
    ax2.set_xlim(0, 33)
    plt.subplots_adjust(left=0.25, right=0.9, top=0.9, bottom=0.1)
    return fig


if __name__ == "__main__":
    fig = create_gpu_comparison_plot(12, 24)
    plt.show()

================================================ FILE: charts/gpu_info.py ================================================

# Auto-generated GPU info module
from typing import TypedDict, Dict
from datetime import date


class GPUInfo(TypedDict):
    gpu_name: str
    generation: str
    architecture: str
    release_date: date
    bus_interface: str
    memory_size_gb: int
    memory_type: str
    cuda_cores: int
    streaming_multiprocessors: int
    tensor_cores: int
    cuda_major_version: int
    cuda_minor_version: int
    half_float_performance_gflop_s: int
    single_float_performance_gflop_s: int
    tpu_url: str


GPUS: Dict[str, GPUInfo] = { 'Quadro RTX 5000': { 'gpu_name': 'TU104', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-08-13'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 22300, 'single_float_performance_gflop_s': 11150, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-5000.c3308', }, 'Quadro RTX 6000': { 'gpu_name': 'TU102', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-08-13'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 72, 'tensor_cores': 576, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 32619, 'single_float_performance_gflop_s': 16309, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-6000.c3307', }, 'Quadro RTX 6000 Passive': { 'gpu_name': 'TU102', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-08-13'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 72, 'tensor_cores': 576, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 28750, 'single_float_performance_gflop_s': 14380, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-6000-passive.c3469', }, 'Quadro RTX 8000': { 'gpu_name': 'TU102', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-08-13'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 72, 'tensor_cores': 576, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 32619, 'single_float_performance_gflop_s': 16309, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-8000.c3306', }, 'Quadro RTX 8000 Passive': { 'gpu_name': 'TU102', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-08-13'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 72, 'tensor_cores': 576, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 29860, 'single_float_performance_gflop_s': 14930, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-8000-passive.c3470', }, 'Tesla T4': { 'gpu_name': 'TU104', 'generation': 'Tesla Turing(Txx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-09-13'),
'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 40, 'tensor_cores': 320, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 65129, 'single_float_performance_gflop_s': 8141, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/tesla-t4.c3316', }, 'Tesla T4G': { 'gpu_name': 'TU104', 'generation': 'Tesla Turing(Txx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-09-13'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 40, 'tensor_cores': 320, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 65129, 'single_float_performance_gflop_s': 8141, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/tesla-t4g.c4134', }, 'GeForce RTX 2080': { 'gpu_name': 'TU104', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-09-20'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2944, 'streaming_multiprocessors': 46, 'tensor_cores': 368, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 20140, 'single_float_performance_gflop_s': 10070, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080.c3224', }, 'GeForce RTX 2080 Ti': { 'gpu_name': 'TU102', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-09-20'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 11, 'memory_type': 'GDDR6', 'cuda_cores': 4352, 'streaming_multiprocessors': 68, 'tensor_cores': 544, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 26900, 'single_float_performance_gflop_s': 13450, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080-ti.c3305', }, 'GeForce RTX 2070': { 'gpu_name': 'TU106', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-10-17'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 36, 'tensor_cores': 288, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 14930, 'single_float_performance_gflop_s': 7465, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2070.c3252', }, 'Quadro RTX 4000': { 'gpu_name': 'TU104', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-11-13'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 36, 'tensor_cores': 288, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 14240, 'single_float_performance_gflop_s': 7119, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-4000.c3336', }, 'TITAN RTX': { 'gpu_name': 'TU102', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2018-12-18'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 72, 'tensor_cores': 576, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 32619, 'single_float_performance_gflop_s': 16309, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/titan-rtx.c3311', }, 'GeForce RTX 2060': { 'gpu_name': 'TU106', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': 
date.fromisoformat('2019-01-07'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 12900, 'single_float_performance_gflop_s': 6451, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060.c3310', }, 'GeForce RTX 2060 Max-Q Refresh': { 'gpu_name': 'TU106B', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-01-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 9216, 'single_float_performance_gflop_s': 4608, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060-max-q-refresh.c3628', }, 'GeForce RTX 2060 Mobile': { 'gpu_name': 'TU106', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-01-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 9216, 'single_float_performance_gflop_s': 4608, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060-mobile.c3348', }, 'GeForce RTX 2060 Mobile Refresh': { 'gpu_name': 'TU106B', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-01-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 11980, 'single_float_performance_gflop_s': 5990, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060-mobile-refresh.c3567', }, 'GeForce RTX 2070 Max-Q': { 'gpu_name': 'TU106', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-01-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 36, 'tensor_cores': 288, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10920, 'single_float_performance_gflop_s': 5460, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2070-max-q.c3392', }, 'GeForce RTX 2070 Mobile': { 'gpu_name': 'TU106', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-01-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 36, 'tensor_cores': 288, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 13270, 'single_float_performance_gflop_s': 6636, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2070-mobile.c3349', }, 'GeForce RTX 2080 Max-Q': { 'gpu_name': 'TU104', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-01-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2944, 'streaming_multiprocessors': 46, 'tensor_cores': 368, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 12890, 'single_float_performance_gflop_s': 6447, 'tpu_url': 
'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080-max-q.c3363', }, 'GeForce RTX 2080 Mobile': { 'gpu_name': 'TU104', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-01-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2944, 'streaming_multiprocessors': 46, 'tensor_cores': 368, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 18720, 'single_float_performance_gflop_s': 9362, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080-mobile.c3312', }, 'GeForce GTX 1660 Ti': { 'gpu_name': 'TU116', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-02-22'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1536, 'streaming_multiprocessors': 24, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10870, 'single_float_performance_gflop_s': 5437, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1660-ti.c3364', }, 'GeForce GTX 1660': { 'gpu_name': 'TU116', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-03-14'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR5', 'cuda_cores': 1408, 'streaming_multiprocessors': 22, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10050, 'single_float_performance_gflop_s': 5027, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1660.c3365', }, 'GeForce GTX 1650': { 'gpu_name': 'TU117', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-04-23'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR5', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5967, 'single_float_performance_gflop_s': 2984, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650.c3366', }, 'GeForce GTX 1650 Max-Q': { 'gpu_name': 'TU117', 'generation': 'GeForce 16 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-15'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 4608, 'single_float_performance_gflop_s': 2304, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650-max-q.c3620', }, 'GeForce GTX 1650 Mobile': { 'gpu_name': 'TU117', 'generation': 'GeForce 16 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-15'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 6205, 'single_float_performance_gflop_s': 3103, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650-mobile.c3633', }, 'GeForce GTX 1660 Ti Max-Q': { 'gpu_name': 'TU116', 'generation': 'GeForce 16 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-04-23'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1536, 'streaming_multiprocessors': 24, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 
'half_float_performance_gflop_s': 8202, 'single_float_performance_gflop_s': 4101, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1660-ti-max-q.c3382', }, 'GeForce GTX 1660 Ti Mobile': { 'gpu_name': 'TU116', 'generation': 'GeForce 16 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-04-23'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1536, 'streaming_multiprocessors': 24, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 9769, 'single_float_performance_gflop_s': 4884, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1660-ti-mobile.c3369', }, 'Quadro RTX 3000 Max-Q': { 'gpu_name': 'TU106', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 9331, 'single_float_performance_gflop_s': 4666, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-3000-max-q.c3429', }, 'Quadro RTX 3000 Mobile': { 'gpu_name': 'TU106', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10600, 'single_float_performance_gflop_s': 5299, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-3000-mobile.c3428', }, 'Quadro RTX 3000 Mobile Refresh': { 'gpu_name': 'TU106', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10600, 'single_float_performance_gflop_s': 5299, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-3000-mobile-refresh.c3697', }, 'Quadro RTX 3000 X2 Mobile': { 'gpu_name': 'TU106', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10600, 'single_float_performance_gflop_s': 5299, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-3000-x2-mobile.c4120', }, 'Quadro RTX 4000 Max-Q': { 'gpu_name': 'TU104', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 40, 'tensor_cores': 320, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 14130, 'single_float_performance_gflop_s': 7066, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-4000-max-q.c3427', }, 'Quadro RTX 4000 Mobile': { 'gpu_name': 'TU104', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 
'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 40, 'tensor_cores': 320, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 15970, 'single_float_performance_gflop_s': 7987, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-4000-mobile.c3430', }, 'Quadro RTX 5000 Max-Q': { 'gpu_name': 'TU104', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 16590, 'single_float_performance_gflop_s': 8294, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-5000-max-q.c3432', }, 'Quadro RTX 5000 Mobile': { 'gpu_name': 'TU104', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 18800, 'single_float_performance_gflop_s': 9400, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-5000-mobile.c3431', }, 'Quadro T1000 Max-Q': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR5', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 4838, 'single_float_performance_gflop_s': 2419, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-t1000-max-q.c3807', }, 'Quadro T1000 Mobile': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR5', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5215, 'single_float_performance_gflop_s': 2607, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-t1000-mobile.c3435', }, 'Quadro T2000 Max-Q': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR5', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5714, 'single_float_performance_gflop_s': 2857, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-t2000-max-q.c3436', }, 'Quadro T2000 Mobile': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-05-27'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR5', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 7311, 'single_float_performance_gflop_s': 3656, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-t2000-mobile.c3434', }, 'GeForce RTX 2060 
SUPER': { 'gpu_name': 'TU106', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-07-09'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2176, 'streaming_multiprocessors': 34, 'tensor_cores': 272, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 14360, 'single_float_performance_gflop_s': 7181, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060-super.c3441', }, 'GeForce RTX 2060 SUPER Mobile': { 'gpu_name': 'TU106', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-07-09'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2176, 'streaming_multiprocessors': 34, 'tensor_cores': 272, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 13320, 'single_float_performance_gflop_s': 6659, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060-super-mobile.c4150', }, 'GeForce RTX 2070 SUPER': { 'gpu_name': 'TU104', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-07-09'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 40, 'tensor_cores': 320, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 18120, 'single_float_performance_gflop_s': 9062, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2070-super.c3440', }, 'GeForce RTX 2080 SUPER': { 'gpu_name': 'TU104', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-07-23'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 22300, 'single_float_performance_gflop_s': 11150, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080-super.c3439', }, 'Quadro RTX 6000 Mobile': { 'gpu_name': 'TU102', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-09-04'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 72, 'tensor_cores': 576, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 26820, 'single_float_performance_gflop_s': 13410, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-6000-mobile.c3497', }, 'GeForce GTX 1660 SUPER': { 'gpu_name': 'TU116', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-10-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1408, 'streaming_multiprocessors': 22, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10050, 'single_float_performance_gflop_s': 5027, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1660-super.c3458', }, 'GeForce GTX 1650 SUPER': { 'gpu_name': 'TU116', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2019-11-22'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1280, 'streaming_multiprocessors': 20, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 8832, 
'single_float_performance_gflop_s': 4416, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650-super.c3411', }, 'GRID RTX T10-2': { 'gpu_name': 'TU102', 'generation': 'GRID(Tx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-01-01'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 2, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 448, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 20000, 'single_float_performance_gflop_s': 9999, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/grid-rtx-t10-2.c3815', }, 'GRID RTX T10-4': { 'gpu_name': 'TU102', 'generation': 'GRID(Tx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-01-01'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 448, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 20000, 'single_float_performance_gflop_s': 9999, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/grid-rtx-t10-4.c3500', }, 'Tesla T10 16 GB': { 'gpu_name': 'TU102', 'generation': 'Tesla Turing(Txx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-01-01'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 448, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 20000, 'single_float_performance_gflop_s': 9999, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/tesla-t10-16-gb.c4036', }, 'GeForce RTX 2060 TU104': { 'gpu_name': 'TU104', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-01-10'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 12900, 'single_float_performance_gflop_s': 6451, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060-tu104.c3495', }, 'GeForce RTX 2060 Max-Q': { 'gpu_name': 'TU106', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-01-29'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1920, 'streaming_multiprocessors': 30, 'tensor_cores': 240, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 9101, 'single_float_performance_gflop_s': 4550, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060-max-q.c3533', }, 'GeForce RTX 2070 Max-Q Refresh': { 'gpu_name': 'TU106B', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-03-04'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 36, 'tensor_cores': 288, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10370, 'single_float_performance_gflop_s': 5184, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2070-max-q-refresh.c3574', }, 'GeForce RTX 2070 Mobile Refresh': { 'gpu_name': 'TU106B', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-03-04'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 36, 
'tensor_cores': 288, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 13410, 'single_float_performance_gflop_s': 6705, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2070-mobile-refresh.c3573', }, 'GeForce GTX 1650 GDDR6': { 'gpu_name': 'TU117', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-01'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5699, 'single_float_performance_gflop_s': 2849, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650-gddr6.c3541', }, 'GeForce GTX 1650 Ti Max-Q': { 'gpu_name': 'TU117', 'generation': 'GeForce 16 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-02'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 4915, 'single_float_performance_gflop_s': 2458, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650-ti-max-q.c3619', }, 'GeForce GTX 1650 Ti Mobile': { 'gpu_name': 'TU116', 'generation': 'GeForce 16 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-23'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 6083, 'single_float_performance_gflop_s': 3041, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650-ti-mobile.c3517', }, 'GeForce RTX 2070 SUPER Max-Q': { 'gpu_name': 'TU104', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-02'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 40, 'tensor_cores': 320, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 11830, 'single_float_performance_gflop_s': 5914, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2070-super-max-q.c3563', }, 'GeForce RTX 2070 SUPER Mobile': { 'gpu_name': 'TU104', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-02'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 40, 'tensor_cores': 320, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 14130, 'single_float_performance_gflop_s': 7066, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2070-super-mobile.c3514', }, 'GeForce RTX 2080 SUPER Max-Q': { 'gpu_name': 'TU104', 'generation': 'GeForce 20 Mobile', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-02'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 11980, 'single_float_performance_gflop_s': 5990, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080-super-max-q.c3566', }, 'GeForce RTX 2080 SUPER Mobile': { 'gpu_name': 'TU104', 'generation': 'GeForce 20 Mobile', 
'architecture': 'Turing', 'release_date': date.fromisoformat('2020-04-02'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 19170, 'single_float_performance_gflop_s': 9585, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080-super-mobile.c3513', }, 'Quadro RTX 5000 Mobile Refresh': { 'gpu_name': 'TU104B', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-06-08'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 18800, 'single_float_performance_gflop_s': 9400, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-5000-mobile-refresh.c3625', }, 'Quadro RTX 5000 X2 Mobile': { 'gpu_name': 'TU104', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-06-08'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 18800, 'single_float_performance_gflop_s': 9400, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-rtx-5000-x2-mobile.c4121', }, 'Quadro T1000 Mobile GDDR6': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-06-08'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5914, 'single_float_performance_gflop_s': 2957, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-t1000-mobile-gddr6.c3624', }, 'GeForce GTX 1650 TU106': { 'gpu_name': 'TU106', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-06-18'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5699, 'single_float_performance_gflop_s': 2849, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650-tu106.c3585', }, 'GeForce GTX 1650 TU116': { 'gpu_name': 'TU116', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-07-07'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5699, 'single_float_performance_gflop_s': 2849, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1650-tu116.c3586', }, 'GeForce MX450 12W': { 'gpu_name': 'TU117S', 'generation': 'GeForce MX(4xx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-08-15'), 'bus_interface': 'PCIe 4.0 x4', 'memory_size_gb': 2, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 3333, 'single_float_performance_gflop_s': 1667, 
'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-mx450-12w.c3657', }, 'GeForce MX450 30.5W 10Gbps': { 'gpu_name': 'TU117S', 'generation': 'GeForce MX(4xx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-08-25'), 'bus_interface': 'PCIe 4.0 x4', 'memory_size_gb': 2, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5645, 'single_float_performance_gflop_s': 2822, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-mx450-30-5w-10gbps.c3641', }, 'GeForce MX450 30.5W 8Gbps': { 'gpu_name': 'TU117S', 'generation': 'GeForce MX(4xx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-08-25'), 'bus_interface': 'PCIe 4.0 x4', 'memory_size_gb': 2, 'memory_type': 'GDDR5', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 4570, 'single_float_performance_gflop_s': 2285, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-mx450-30-5w-8gbps.c3717', }, 'T500 Max-Q': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-12-02'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5107, 'single_float_performance_gflop_s': 2554, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t500-max-q.c4103', }, 'T500 Mobile': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2020-12-02'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5591, 'single_float_performance_gflop_s': 2796, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t500-mobile.c3747', }, 'CMP 30HX': { 'gpu_name': 'TU116', 'generation': 'Mining GPUs', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-02-25'), 'bus_interface': 'PCIe 1.0 x4', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 1408, 'streaming_multiprocessors': 22, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 10050, 'single_float_performance_gflop_s': 5027, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/cmp-30hx.c3780', }, 'CMP 40HX': { 'gpu_name': 'TU106', 'generation': 'Mining GPUs', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-02-25'), 'bus_interface': 'PCIe 1.0 x4', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 36, 'tensor_cores': 288, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 15210, 'single_float_performance_gflop_s': 7603, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/cmp-40hx.c3781', }, 'Quadro T1200 Max-Q': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5837, 
'single_float_performance_gflop_s': 2918, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-t1200-max-q.c4006', }, 'Quadro T1200 Mobile': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 7311, 'single_float_performance_gflop_s': 3656, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/quadro-t1200-mobile.c3803', }, 'T600': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 640, 'streaming_multiprocessors': 10, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 3418, 'single_float_performance_gflop_s': 1709, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t600.c3796', }, 'T600 Max-Q': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5000, 'single_float_performance_gflop_s': 2500, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t600-max-q.c3817', }, 'T600 Mobile': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5053, 'single_float_performance_gflop_s': 2527, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t600-mobile.c4151', }, 'T1000': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-05-06'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5000, 'single_float_performance_gflop_s': 2500, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t1000.c3797', }, 'T1000 8 GB': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-05-06'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 896, 'streaming_multiprocessors': 14, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 5000, 'single_float_performance_gflop_s': 2500, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t1000-8-gb.c3842', }, 'T400': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-05-06'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 2, 'memory_type': 'GDDR6', 'cuda_cores': 384, 'streaming_multiprocessors': 6, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 2189, 
'single_float_performance_gflop_s': 1094, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t400.c3808', }, 'T400 4 GB': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-05-06'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 384, 'streaming_multiprocessors': 6, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 2189, 'single_float_performance_gflop_s': 1094, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t400-4-gb.c3843', }, 'CMP 50HX': { 'gpu_name': 'TU102', 'generation': 'Mining GPUs', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-06-24'), 'bus_interface': 'PCIe 1.0 x4', 'memory_size_gb': 10, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 448, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 22150, 'single_float_performance_gflop_s': 11070, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/cmp-50hx.c3782', }, 'GeForce RTX 2060 12 GB': { 'gpu_name': 'TU106', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-12-07'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 2176, 'streaming_multiprocessors': 34, 'tensor_cores': 272, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 14360, 'single_float_performance_gflop_s': 7181, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2060-12-gb.c3836', }, 'GeForce MX550': { 'gpu_name': 'TU117SB', 'generation': 'GeForce MX(5xx)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2021-12-17'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 2, 'memory_type': 'GDDR6', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 2703, 'single_float_performance_gflop_s': 2703, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-mx550.c3860', }, 'T550 Mobile': { 'gpu_name': 'TU117', 'generation': 'Quadro Turing-M(Tx000)', 'architecture': 'Turing', 'release_date': date.fromisoformat('2022-05-01'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1024, 'streaming_multiprocessors': 16, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 6820, 'single_float_performance_gflop_s': 3410, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/t550-mobile.c3918', }, 'GeForce GTX 1630': { 'gpu_name': 'TU117', 'generation': 'GeForce 16', 'architecture': 'Turing', 'release_date': date.fromisoformat('2022-06-28'), 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 512, 'streaming_multiprocessors': 8, 'tensor_cores': 0, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 3656, 'single_float_performance_gflop_s': 1828, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-gtx-1630.c3916', }, 'GeForce RTX 2080 Engineering Sample': { 'gpu_name': 'TU104', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': None, 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 48, 'tensor_cores': 384, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 21010, 
'single_float_performance_gflop_s': 10510, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080-engineering-sample.c3976', }, 'GeForce RTX 2080 Ti 12 GB': { 'gpu_name': 'TU102', 'generation': 'GeForce 20', 'architecture': 'Turing', 'release_date': None, 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 72, 'tensor_cores': 576, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 30140, 'single_float_performance_gflop_s': 15070, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2080-ti-12-gb.c3938', }, 'GRID RTX T10-16': { 'gpu_name': 'TU102', 'generation': 'GRID(Tx)', 'architecture': 'Turing', 'release_date': None, 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 448, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 20000, 'single_float_performance_gflop_s': 9999, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/grid-rtx-t10-16.c3502', }, 'GRID RTX T10-8': { 'gpu_name': 'TU102', 'generation': 'GRID(Tx)', 'architecture': 'Turing', 'release_date': None, 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 448, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 20000, 'single_float_performance_gflop_s': 9999, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/grid-rtx-t10-8.c3501', }, 'Tesla T40 24 GB': { 'gpu_name': 'TU102', 'generation': 'Tesla Turing(Txx)', 'architecture': 'Turing', 'release_date': None, 'bus_interface': 'PCIe 3.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 72, 'tensor_cores': 576, 'cuda_major_version': 7, 'cuda_minor_version': 5, 'half_float_performance_gflop_s': 28750, 'single_float_performance_gflop_s': 14380, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/tesla-t40-24-gb.c3942', }, 'A100 SXM4 40 GB': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-05-14'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 40, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 77970, 'single_float_performance_gflop_s': 19490, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a100-sxm4-40-gb.c3506', }, 'DRIVE A100 PROD': { 'gpu_name': 'GA100', 'generation': 'DRIVE(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-05-14'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 32, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 77970, 'single_float_performance_gflop_s': 19490, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/drive-a100-prod.c3967', }, 'GRID A100A': { 'gpu_name': 'GA100', 'generation': 'GRID(Ax)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-05-14'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 32, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 55570, 'single_float_performance_gflop_s': 13890, 'tpu_url': 
'https://www.techpowerup.com/gpu-specs/grid-a100a.c3579', }, 'GRID A100B': { 'gpu_name': 'GA100', 'generation': 'GRID(Ax)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-05-14'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 55570, 'single_float_performance_gflop_s': 13890, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/grid-a100b.c3578', }, 'A100 PCIe 40 GB': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-06-22'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 40, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 77970, 'single_float_performance_gflop_s': 19490, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a100-pcie-40-gb.c3623', }, 'A100 SXM4 80 GB': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-11-16'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 77970, 'single_float_performance_gflop_s': 19490, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a100-sxm4-80-gb.c3746', }, 'A30 PCIe': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'HBM2e', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 10320, 'single_float_performance_gflop_s': 10320, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a30-pcie.c3792', }, 'A30X': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 24, 'memory_type': 'HBM2e', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 10320, 'single_float_performance_gflop_s': 10320, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a30x.c3968', }, 'PG506-207': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'HBM2', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 10320, 'single_float_performance_gflop_s': 10320, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/pg506-207.c3962', }, 'PG506-217': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'HBM2', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 10320, 'single_float_performance_gflop_s': 10320, 'tpu_url': 
'https://www.techpowerup.com/gpu-specs/pg506-217.c3963', }, 'PG506-232': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'HBM2', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 10320, 'single_float_performance_gflop_s': 10320, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/pg506-232.c3799', }, 'PG506-242': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'HBM2', 'cuda_cores': 3584, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 10320, 'single_float_performance_gflop_s': 10320, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/pg506-242.c3823', }, 'A100 PCIe 80 GB': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-06-28'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 77970, 'single_float_performance_gflop_s': 19490, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a100-pcie-80-gb.c3821', }, 'A100X': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-06-28'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 80, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 79630, 'single_float_performance_gflop_s': 19910, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a100x.c3958', }, 'CMP 170HX 10 GB': { 'gpu_name': 'GA100', 'generation': 'Mining GPUs', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-09-01'), 'bus_interface': 'PCIe 1.0 x4', 'memory_size_gb': 10, 'memory_type': 'HBM2e', 'cuda_cores': 4480, 'streaming_multiprocessors': 70, 'tensor_cores': 280, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 50530, 'single_float_performance_gflop_s': 12630, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/cmp-170hx-10-gb.c3957', }, 'CMP 170HX 8 GB': { 'gpu_name': 'GA100', 'generation': 'Mining GPUs', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-09-01'), 'bus_interface': 'PCIe 1.0 x4', 'memory_size_gb': 8, 'memory_type': 'HBM2e', 'cuda_cores': 4480, 'streaming_multiprocessors': 70, 'tensor_cores': 280, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 50530, 'single_float_performance_gflop_s': 12630, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/cmp-170hx-8-gb.c3830', }, 'A800 SXM4 80 GB': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-08-11'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 77970, 'single_float_performance_gflop_s': 19490, 
'tpu_url': 'https://www.techpowerup.com/gpu-specs/a800-sxm4-80-gb.c3966', }, 'A800 PCIe 40 GB': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-11-08'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 40, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 77970, 'single_float_performance_gflop_s': 19490, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a800-pcie-40-gb.c3964', }, 'A800 PCIe 80 GB': { 'gpu_name': 'GA100', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-11-08'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM2e', 'cuda_cores': 6912, 'streaming_multiprocessors': 108, 'tensor_cores': 432, 'cuda_major_version': 8, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 77970, 'single_float_performance_gflop_s': 19490, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a800-pcie-80-gb.c3965', }, 'GeForce RTX 3070': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-09-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 20310, 'single_float_performance_gflop_s': 20310, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070.c3674', }, 'GeForce RTX 3080': { 'gpu_name': 'GA102', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-09-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 10, 'memory_type': 'GDDR6X', 'cuda_cores': 8704, 'streaming_multiprocessors': 68, 'tensor_cores': 272, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 29770, 'single_float_performance_gflop_s': 29770, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3080.c3621', }, 'GeForce RTX 3090': { 'gpu_name': 'GA102', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-09-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6X', 'cuda_cores': 10496, 'streaming_multiprocessors': 82, 'tensor_cores': 328, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 35580, 'single_float_performance_gflop_s': 35580, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3090.c3622', }, 'A40 PCIe': { 'gpu_name': 'GA102', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-10-05'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 10752, 'streaming_multiprocessors': 84, 'tensor_cores': 336, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 37420, 'single_float_performance_gflop_s': 37420, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a40-pcie.c3700', }, 'RTX A6000': { 'gpu_name': 'GA102', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-10-05'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 10752, 'streaming_multiprocessors': 84, 'tensor_cores': 336, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 
38710, 'single_float_performance_gflop_s': 38710, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a6000.c3686', }, 'GeForce RTX 3060 Ti': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2020-12-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 4864, 'streaming_multiprocessors': 38, 'tensor_cores': 152, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 16200, 'single_float_performance_gflop_s': 16200, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-ti.c3681', }, 'CMP 70HX': { 'gpu_name': 'GA104', 'generation': 'Mining GPUs', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-01'), 'bus_interface': 'PCIe 1.0 x4', 'memory_size_gb': 8, 'memory_type': 'GDDR6X', 'cuda_cores': 3840, 'streaming_multiprocessors': 30, 'tensor_cores': 120, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 10710, 'single_float_performance_gflop_s': 10710, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/cmp-70hx.c3822', }, 'GeForce RTX 3060 3840SP': { 'gpu_name': 'GA106', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 3840, 'streaming_multiprocessors': 30, 'tensor_cores': 120, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 14220, 'single_float_performance_gflop_s': 14220, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-3840sp.c4080', }, 'GeForce RTX 3060 12 GB': { 'gpu_name': 'GA106', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 28, 'tensor_cores': 112, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 12740, 'single_float_performance_gflop_s': 12740, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-12-gb.c3682', }, 'GeForce RTX 3060 Max-Q': { 'gpu_name': 'GA106', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 3840, 'streaming_multiprocessors': 30, 'tensor_cores': 120, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 9846, 'single_float_performance_gflop_s': 9846, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-max-q.c3752', }, 'GeForce RTX 3060 Mobile': { 'gpu_name': 'GA106', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 3840, 'streaming_multiprocessors': 30, 'tensor_cores': 120, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 10940, 'single_float_performance_gflop_s': 10940, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-mobile.c3757', }, 'GeForce RTX 3070 Max-Q': { 'gpu_name': 'GA104', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 5120, 'streaming_multiprocessors': 40, 'tensor_cores': 160, 
'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 13210, 'single_float_performance_gflop_s': 13210, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-max-q.c3685', }, 'GeForce RTX 3070 Mobile': { 'gpu_name': 'GA104', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 5120, 'streaming_multiprocessors': 40, 'tensor_cores': 160, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 15970, 'single_float_performance_gflop_s': 15970, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-mobile.c3712', }, 'GeForce RTX 3080 Max-Q': { 'gpu_name': 'GA104', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 15300, 'single_float_performance_gflop_s': 15300, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3080-max-q.c3753', }, 'GeForce RTX 3080 Mobile': { 'gpu_name': 'GA104', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-01-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 18980, 'single_float_performance_gflop_s': 18980, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3080-mobile.c3684', }, 'A10 PCIe': { 'gpu_name': 'GA102', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 9216, 'streaming_multiprocessors': 72, 'tensor_cores': 288, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 31240, 'single_float_performance_gflop_s': 31240, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a10-pcie.c3793', }, 'A10G': { 'gpu_name': 'GA102', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 9216, 'streaming_multiprocessors': 72, 'tensor_cores': 288, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 31520, 'single_float_performance_gflop_s': 31520, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a10g.c3798', }, 'A16 PCIe': { 'gpu_name': 'GA107', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 1280, 'streaming_multiprocessors': 10, 'tensor_cores': 40, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4493, 'single_float_performance_gflop_s': 4493, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a16-pcie.c3794', }, 'RTX A2000 Max-Q': { 'gpu_name': 'GA107', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 
'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 6180, 'single_float_performance_gflop_s': 6180, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a2000-max-q.c4007', }, 'RTX A2000 Max-Q 8 GB': { 'gpu_name': 'GA107', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 6026, 'single_float_performance_gflop_s': 6026, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a2000-max-q-8-gb.c4288', }, 'RTX A2000 Mobile': { 'gpu_name': 'GA107', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 8637, 'single_float_performance_gflop_s': 8637, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a2000-mobile.c3827', }, 'RTX A2000 Mobile 8 GB': { 'gpu_name': 'GA107', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 8253, 'single_float_performance_gflop_s': 8253, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a2000-mobile-8-gb.c4287', }, 'RTX A3000 Mobile': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 4096, 'streaming_multiprocessors': 32, 'tensor_cores': 128, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 10080, 'single_float_performance_gflop_s': 10080, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a3000-mobile.c3806', }, 'RTX A3000 Mobile 12 GB': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-22'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 4096, 'streaming_multiprocessors': 32, 'tensor_cores': 128, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 11800, 'single_float_performance_gflop_s': 11800, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a3000-mobile-12-gb.c3903', }, 'RTX A4 Mobile': { 'gpu_name': 'GA107', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 7250, 'single_float_performance_gflop_s': 7250, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4-mobile.c3789', }, 'RTX A4000': { 'gpu_name': 'GA104', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 
'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 19170, 'single_float_performance_gflop_s': 19170, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4000.c3756', }, 'RTX A4000 Max-Q': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 5120, 'streaming_multiprocessors': 40, 'tensor_cores': 160, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 14280, 'single_float_performance_gflop_s': 14280, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4000-max-q.c4008', }, 'RTX A4000 Mobile': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 5120, 'streaming_multiprocessors': 40, 'tensor_cores': 160, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 17200, 'single_float_performance_gflop_s': 17200, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4000-mobile.c3804', }, 'RTX A4000H': { 'gpu_name': 'GA104', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 19170, 'single_float_performance_gflop_s': 19170, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4000h.c3969', }, 'RTX A5000': { 'gpu_name': 'GA102', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 8192, 'streaming_multiprocessors': 64, 'tensor_cores': 256, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 27770, 'single_float_performance_gflop_s': 27770, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a5000.c3748', }, 'RTX A5000 Max-Q': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 16590, 'single_float_performance_gflop_s': 16590, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a5000-max-q.c4010', }, 'RTX A5000 Mobile': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 19350, 'single_float_performance_gflop_s': 19350, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a5000-mobile.c3805', }, 'RTX A5000-12Q': { 'gpu_name': 'GA102', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 
'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 8192, 'streaming_multiprocessors': 64, 'tensor_cores': 256, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 27770, 'single_float_performance_gflop_s': 27770, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a5000-12q.c3991', }, 'RTX A5000-8Q': { 'gpu_name': 'GA102', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 8192, 'streaming_multiprocessors': 64, 'tensor_cores': 256, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 27770, 'single_float_performance_gflop_s': 27770, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a5000-8q.c3990', }, 'GeForce RTX 3050 Max-Q': { 'gpu_name': 'GA107', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-05-11'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4328, 'single_float_performance_gflop_s': 4328, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-max-q.c3972', }, 'GeForce RTX 3050 Mobile': { 'gpu_name': 'GA107', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-05-11'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 5501, 'single_float_performance_gflop_s': 5501, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-mobile.c3788', }, 'GeForce RTX 3050 Ti Max-Q': { 'gpu_name': 'GA107', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-05-11'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 5299, 'single_float_performance_gflop_s': 5299, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-ti-max-q.c4149', }, 'GeForce RTX 3050 Ti Mobile': { 'gpu_name': 'GA106', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-05-11'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 5299, 'single_float_performance_gflop_s': 5299, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-ti-mobile.c3812', }, 'GeForce RTX 3070 Ti': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-05-31'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6X', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 21750, 'single_float_performance_gflop_s': 21750, 'tpu_url': 
'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-ti.c3675', }, 'GeForce RTX 3080 Ti': { 'gpu_name': 'GA102', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-05-31'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6X', 'cuda_cores': 10240, 'streaming_multiprocessors': 80, 'tensor_cores': 320, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 34100, 'single_float_performance_gflop_s': 34100, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3080-ti.c3735', }, 'CMP 90HX': { 'gpu_name': 'GA102', 'generation': 'Mining GPUs', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-07-28'), 'bus_interface': 'PCIe 1.0 x4', 'memory_size_gb': 10, 'memory_type': 'GDDR6X', 'cuda_cores': 6400, 'streaming_multiprocessors': 50, 'tensor_cores': 200, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 21890, 'single_float_performance_gflop_s': 21890, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/cmp-90hx.c3783', }, 'RTX A2000': { 'gpu_name': 'GA106', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-08-10'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 3328, 'streaming_multiprocessors': 26, 'tensor_cores': 104, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 7987, 'single_float_performance_gflop_s': 7987, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a2000.c3820', }, 'GeForce RTX 3060 12 GB GA104': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-09-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 28, 'tensor_cores': 112, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 12740, 'single_float_performance_gflop_s': 12740, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-12-gb-ga104.c3832', }, 'A2': { 'gpu_name': 'GA107', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-11-10'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 1280, 'streaming_multiprocessors': 10, 'tensor_cores': 40, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4531, 'single_float_performance_gflop_s': 4531, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a2.c3848', }, 'A2 PCIe': { 'gpu_name': 'GA107', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-11-10'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 1280, 'streaming_multiprocessors': 10, 'tensor_cores': 40, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4531, 'single_float_performance_gflop_s': 4531, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a2-pcie.c4112', }, 'RTX A500': { 'gpu_name': 'GA107', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-11-10'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 7250, 
'single_float_performance_gflop_s': 7250, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a500.c3989', }, 'RTX A2000 12 GB': { 'gpu_name': 'GA106', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-11-23'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 3328, 'streaming_multiprocessors': 26, 'tensor_cores': 104, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 7987, 'single_float_performance_gflop_s': 7987, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a2000-12-gb.c3853', }, 'RTX A4500': { 'gpu_name': 'GA102', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-11-23'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 20, 'memory_type': 'GDDR6', 'cuda_cores': 7168, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 23650, 'single_float_performance_gflop_s': 23650, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4500.c3849', }, 'GeForce MX570': { 'gpu_name': 'GA107S', 'generation': 'GeForce MX(5xx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-12-17'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 2, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4731, 'single_float_performance_gflop_s': 4731, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-mx570.c3919', }, 'GeForce MX570 A': { 'gpu_name': 'GA107SB', 'generation': 'GeForce MX(5xx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-12-17'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 2, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4731, 'single_float_performance_gflop_s': 4731, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-mx570-a.c3943', }, 'GeForce RTX 2050 Max-Q': { 'gpu_name': 'GA107', 'generation': 'GeForce 20 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-12-17'), 'bus_interface': 'PCIe 3.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 9462, 'single_float_performance_gflop_s': 4731, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2050-max-q.c4012', }, 'GeForce RTX 2050 Mobile': { 'gpu_name': 'GA107', 'generation': 'GeForce 20 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2021-12-17'), 'bus_interface': 'PCIe 3.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 10200, 'single_float_performance_gflop_s': 5100, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-2050-mobile.c3859', }, 'GeForce RTX 3080 Ti 20 GB': { 'gpu_name': 'GA102', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 20, 'memory_type': 'GDDR6X', 'cuda_cores': 10240, 'streaming_multiprocessors': 80, 'tensor_cores': 320, 'cuda_major_version': 
8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 34100, 'single_float_performance_gflop_s': 34100, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3080-ti-20-gb.c3831', },
'GeForce RTX 3050 8 GB': { 'gpu_name': 'GA106', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-04'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 9098, 'single_float_performance_gflop_s': 9098, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-8-gb.c3858', },
'GeForce RTX 3050 OEM': { 'gpu_name': 'GA106', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-04'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 18, 'tensor_cores': 72, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 8087, 'single_float_performance_gflop_s': 8087, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-oem.c3915', },
'GeForce RTX 3070 Ti Max-Q': { 'gpu_name': 'GA104', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-04'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 12190, 'single_float_performance_gflop_s': 12190, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-ti-max-q.c3923', },
'GeForce RTX 3070 Ti Mobile': { 'gpu_name': 'GA104', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-04'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 16600, 'single_float_performance_gflop_s': 16600, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-ti-mobile.c3852', },
'GeForce RTX 3080 12 GB': { 'gpu_name': 'GA102', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-11'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6X', 'cuda_cores': 8960, 'streaming_multiprocessors': 70, 'tensor_cores': 280, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 30640, 'single_float_performance_gflop_s': 30640, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3080-12-gb.c3834', },
'GeForce RTX 3080 Ti Max-Q': { 'gpu_name': 'GA103', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-25'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 7424, 'streaming_multiprocessors': 58, 'tensor_cores': 232, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 16700, 'single_float_performance_gflop_s': 16700, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3080-ti-max-q.c3841', },
'GeForce RTX 3080 Ti Mobile': { 'gpu_name': 'GA103', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-25'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 7424, 'streaming_multiprocessors': 58, 'tensor_cores': 232, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 18710, 'single_float_performance_gflop_s': 18710, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3080-ti-mobile.c3840', },
'GeForce RTX 3050 4 GB': { 'gpu_name': 'GA107', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-27'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 7127, 'single_float_performance_gflop_s': 7127, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-4-gb.c3744', },
'GeForce RTX 3090 Ti': { 'gpu_name': 'GA102', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-01-27'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6X', 'cuda_cores': 10752, 'streaming_multiprocessors': 84, 'tensor_cores': 336, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 40000, 'single_float_performance_gflop_s': 40000, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3090-ti.c3829', },
'A10M': { 'gpu_name': 'GA102', 'generation': 'Server Ampere(Axx)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-02-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 7168, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 23440, 'single_float_performance_gflop_s': 23440, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/a10m.c4200', },
'GeForce RTX 3060 Ti GA103': { 'gpu_name': 'GA103', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-02-23'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 4864, 'streaming_multiprocessors': 38, 'tensor_cores': 152, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 16200, 'single_float_performance_gflop_s': 16200, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-ti-ga103.c3872', },
'RTX A4500 Max-Q': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-22'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 14310, 'single_float_performance_gflop_s': 14310, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4500-max-q.c4009', },
'RTX A4500 Mobile': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-22'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 17660, 'single_float_performance_gflop_s': 17660, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4500-mobile.c3851', },
'RTX A500 Mobile': { 'gpu_name': 'GA107S', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere',
'release_date': date.fromisoformat('2022-03-22'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 6296, 'single_float_performance_gflop_s': 6296, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a500-mobile.c3939', }, 'RTX A5500': { 'gpu_name': 'GA102', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-22'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 10240, 'streaming_multiprocessors': 80, 'tensor_cores': 320, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 34100, 'single_float_performance_gflop_s': 34100, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a5500.c3901', }, 'RTX A5500 Max-Q': { 'gpu_name': 'GA103', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-22'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 7424, 'streaming_multiprocessors': 58, 'tensor_cores': 232, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 18710, 'single_float_performance_gflop_s': 18710, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a5500-max-q.c4011', }, 'RTX A5500 Mobile': { 'gpu_name': 'GA103', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-22'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 7424, 'streaming_multiprocessors': 58, 'tensor_cores': 232, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 22270, 'single_float_performance_gflop_s': 22270, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a5500-mobile.c3902', }, 'RTX A1000 Embedded': { 'gpu_name': 'GA107S', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-30'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4669, 'single_float_performance_gflop_s': 4669, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a1000-embedded.c3895', }, 'RTX A1000 Mobile': { 'gpu_name': 'GA107', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-30'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4669, 'single_float_performance_gflop_s': 4669, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a1000-mobile.c3920', }, 'RTX A1000 Mobile 6 GB': { 'gpu_name': 'GA107', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-30'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 5837, 'single_float_performance_gflop_s': 5837, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a1000-mobile-6-gb.c4137', }, 'RTX A2000 Embedded': { 'gpu_name': 
'GA107', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-30'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 6026, 'single_float_performance_gflop_s': 6026, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a2000-embedded.c3861', }, 'RTX A4500 Embedded': { 'gpu_name': 'GA104', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-30'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 14310, 'single_float_performance_gflop_s': 14310, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a4500-embedded.c3893', }, 'RTX A500 Embedded': { 'gpu_name': 'GA107S', 'generation': 'Ampere-MW(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-03-30'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 5468, 'single_float_performance_gflop_s': 5468, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a500-embedded.c3896', }, 'GeForce RTX 3050 Max-Q Refresh 4 GB': { 'gpu_name': 'GA107', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-07-06'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4608, 'single_float_performance_gflop_s': 4608, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-max-q-refresh-4-gb.c4092', }, 'GeForce RTX 3050 Max-Q Refresh 6 GB': { 'gpu_name': 'GA107', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-07-06'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 5069, 'single_float_performance_gflop_s': 5069, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-max-q-refresh-6-gb.c3970', }, 'GeForce RTX 3050 Mobile Refresh 4 GB': { 'gpu_name': 'GA107', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-07-06'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 6180, 'single_float_performance_gflop_s': 6180, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-mobile-refresh-4-gb.c4156', }, 'GeForce RTX 3050 Mobile Refresh 6 GB': { 'gpu_name': 'GA107', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-07-06'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 
'half_float_performance_gflop_s': 7639, 'single_float_performance_gflop_s': 7639, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-mobile-refresh-6-gb.c3971', }, 'GeForce RTX 3060 8 GB GA104': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-10-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 28, 'tensor_cores': 112, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 12740, 'single_float_performance_gflop_s': 12740, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-8-gb-ga104.c4132', }, 'GeForce RTX 3060 8 GB': { 'gpu_name': 'GA106', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-10-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3584, 'streaming_multiprocessors': 28, 'tensor_cores': 112, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 12740, 'single_float_performance_gflop_s': 12740, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-8-gb.c3937', }, 'GeForce RTX 3060 Ti GDDR6X': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-10-19'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6X', 'cuda_cores': 4864, 'streaming_multiprocessors': 38, 'tensor_cores': 152, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 16200, 'single_float_performance_gflop_s': 16200, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3060-ti-gddr6x.c3935', }, 'GeForce RTX 3070 Ti 8 GB GA102': { 'gpu_name': 'GA102', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-10-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6X', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 21750, 'single_float_performance_gflop_s': 21750, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-ti-8-gb-ga102.c3936', }, 'GeForce RTX 3070 TiM': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-11-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 16600, 'single_float_performance_gflop_s': 16600, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-tim.c3951', }, 'GeForce RTX 3050 8 GB GA107': { 'gpu_name': 'GA107', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2022-12-16'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 9098, 'single_float_performance_gflop_s': 9098, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-8-gb-ga107.c3880', }, 'Jetson AGX Orin 32 GB': { 'gpu_name': 'GA10B', 'generation': 'Tegra(Ampere)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2023-02-01'), 'bus_interface': 'PCIe 4.0 x4', 
'memory_size_gb': 32, 'memory_type': 'LPDDR5', 'cuda_cores': 1792, 'streaming_multiprocessors': 14, 'tensor_cores': 56, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 6666, 'single_float_performance_gflop_s': 3333, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/jetson-agx-orin-32-gb.c4084', }, 'Jetson Orin NX 16 GB': { 'gpu_name': 'GA10B', 'generation': 'Tegra(Ampere)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2023-02-01'), 'bus_interface': 'PCIe 4.0 x4', 'memory_size_gb': 16, 'memory_type': 'LPDDR5', 'cuda_cores': 1024, 'streaming_multiprocessors': 8, 'tensor_cores': 32, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 3760, 'single_float_performance_gflop_s': 1880, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/jetson-orin-nx-16-gb.c4086', }, 'Jetson AGX Orin 64 GB': { 'gpu_name': 'GA10B', 'generation': 'Tegra(Ampere)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2023-03-01'), 'bus_interface': 'PCIe 4.0 x4', 'memory_size_gb': 64, 'memory_type': 'LPDDR5', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 10650, 'single_float_performance_gflop_s': 5325, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/jetson-agx-orin-64-gb.c4085', }, 'Jetson Orin NX 8 GB': { 'gpu_name': 'GA10B', 'generation': 'Tegra(Ampere)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2023-03-01'), 'bus_interface': 'PCIe 4.0 x4', 'memory_size_gb': 8, 'memory_type': 'LPDDR5', 'cuda_cores': 1024, 'streaming_multiprocessors': 8, 'tensor_cores': 32, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 3133, 'single_float_performance_gflop_s': 1567, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/jetson-orin-nx-8-gb.c4081', }, 'Jetson Orin Nano 4 GB': { 'gpu_name': 'GA10B', 'generation': 'Tegra(Ampere)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2023-03-01'), 'bus_interface': 'PCIe 4.0 x4', 'memory_size_gb': 4, 'memory_type': 'LPDDR5', 'cuda_cores': 512, 'streaming_multiprocessors': 4, 'tensor_cores': 16, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 1280, 'single_float_performance_gflop_s': 640, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/jetson-orin-nano-4-gb.c4083', }, 'Jetson Orin Nano 8 GB': { 'gpu_name': 'GA10B', 'generation': 'Tegra(Ampere)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2023-03-01'), 'bus_interface': 'PCIe 4.0 x4', 'memory_size_gb': 8, 'memory_type': 'LPDDR5', 'cuda_cores': 1024, 'streaming_multiprocessors': 8, 'tensor_cores': 32, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 2560, 'single_float_performance_gflop_s': 1280, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/jetson-orin-nano-8-gb.c4082', }, 'GeForce RTX 3050 A Mobile': { 'gpu_name': 'GA106', 'generation': 'GeForce 30 Mobile', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2024-01-01'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 1792, 'streaming_multiprocessors': 14, 'tensor_cores': 56, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 4813, 'single_float_performance_gflop_s': 4813, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-a-mobile.c4227', }, 'GeForce RTX 3050 6 GB': { 'gpu_name': 'GA107', 'generation': 'GeForce 30', 'architecture': 
'Ampere', 'release_date': date.fromisoformat('2024-02-02'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 18, 'tensor_cores': 72, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 6774, 'single_float_performance_gflop_s': 6774, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3050-6-gb.c4188', },
'GeForce RTX 4010': { 'gpu_name': 'GA107', 'generation': 'GeForce 40', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2024-04-16'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 768, 'streaming_multiprocessors': 6, 'tensor_cores': 24, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 2706, 'single_float_performance_gflop_s': 2706, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4010.c4247', },
'RTX A1000': { 'gpu_name': 'GA107', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2024-04-16'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2304, 'streaming_multiprocessors': 18, 'tensor_cores': 72, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 6737, 'single_float_performance_gflop_s': 6737, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a1000.c4211', },
'RTX A400': { 'gpu_name': 'GA107', 'generation': 'Workstation Ampere(Ax000)', 'architecture': 'Ampere', 'release_date': date.fromisoformat('2024-04-16'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 768, 'streaming_multiprocessors': 6, 'tensor_cores': 24, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 2706, 'single_float_performance_gflop_s': 2706, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-a400.c4212', },
'GeForce RTX 3070 6144SP': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': None, 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 21750, 'single_float_performance_gflop_s': 21750, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-6144sp.c4242', },
'GeForce RTX 3070 Ti 16 GB': { 'gpu_name': 'GA104', 'generation': 'GeForce 30', 'architecture': 'Ampere', 'release_date': None, 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6X', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 6, 'half_float_performance_gflop_s': 21750, 'single_float_performance_gflop_s': 21750, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-3070-ti-16-gb.c3835', },
'GeForce RTX 4080': { 'gpu_name': 'AD103', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2022-09-20'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6X', 'cuda_cores': 9728, 'streaming_multiprocessors': 76, 'tensor_cores': 304, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 48740, 'single_float_performance_gflop_s': 48740, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4080.c3888', },
'GeForce RTX 4090': { 'gpu_name': 'AD102', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2022-09-20'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6X', 'cuda_cores': 16384, 'streaming_multiprocessors': 128, 'tensor_cores': 512, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 82580, 'single_float_performance_gflop_s': 82580, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4090.c3889', },
'L40': { 'gpu_name': 'AD102', 'generation': 'Server Ada(Lxx)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2022-10-13'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 18176, 'streaming_multiprocessors': 142, 'tensor_cores': 568, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 90520, 'single_float_performance_gflop_s': 90520, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/l40.c3959', },
'L40 CNX': { 'gpu_name': 'AD102', 'generation': 'Server Ada(Lxx)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2022-10-13'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 18176, 'streaming_multiprocessors': 142, 'tensor_cores': 568, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 89970, 'single_float_performance_gflop_s': 89970, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/l40-cnx.c3961', },
'L40G': { 'gpu_name': 'AD102', 'generation': 'Server Ada(Lxx)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2022-10-13'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 18176, 'streaming_multiprocessors': 142, 'tensor_cores': 568, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 89970, 'single_float_performance_gflop_s': 89970, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/l40g.c3960', },
'L40S': { 'gpu_name': 'AD102', 'generation': 'Server Ada(Lxx)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2022-10-13'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 18176, 'streaming_multiprocessors': 142, 'tensor_cores': 568, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 91610, 'single_float_performance_gflop_s': 91610, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/l40s.c4173', },
'RTX 6000 Ada Generation': { 'gpu_name': 'AD102', 'generation': 'Workstation Ada(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2022-12-03'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 18176, 'streaming_multiprocessors': 142, 'tensor_cores': 568, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 91060, 'single_float_performance_gflop_s': 91060, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-6000-ada-generation.c3933', },
'GeForce RTX 4050 Max-Q': { 'gpu_name': 'AD107', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 8218, 'single_float_performance_gflop_s': 8218, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4050-max-q.c3987', },
'GeForce RTX 4050 Mobile': {
'gpu_name': 'AD107', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 8986, 'single_float_performance_gflop_s': 8986, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4050-mobile.c3953', }, 'GeForce RTX 4060 Max-Q': { 'gpu_name': 'AD107', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 24, 'tensor_cores': 96, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 9032, 'single_float_performance_gflop_s': 9032, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4060-max-q.c3986', }, 'GeForce RTX 4060 Mobile': { 'gpu_name': 'AD107', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 24, 'tensor_cores': 96, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 11610, 'single_float_performance_gflop_s': 11610, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4060-mobile.c3946', }, 'GeForce RTX 4070 Max-Q': { 'gpu_name': 'AD106', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 36, 'tensor_cores': 144, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 11340, 'single_float_performance_gflop_s': 11340, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-max-q.c3954', }, 'GeForce RTX 4070 Mobile': { 'gpu_name': 'AD106', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 36, 'tensor_cores': 144, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 15620, 'single_float_performance_gflop_s': 15620, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-mobile.c3944', }, 'GeForce RTX 4070 Ti': { 'gpu_name': 'AD104', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6X', 'cuda_cores': 7680, 'streaming_multiprocessors': 60, 'tensor_cores': 240, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 40090, 'single_float_performance_gflop_s': 40090, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-ti.c3950', }, 'GeForce RTX 4080 Max-Q': { 'gpu_name': 'AD104', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 7424, 'streaming_multiprocessors': 58, 'tensor_cores': 232, 'cuda_major_version': 8, 'cuda_minor_version': 9, 
'half_float_performance_gflop_s': 20040, 'single_float_performance_gflop_s': 20040, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4080-max-q.c3948', }, 'GeForce RTX 4080 Mobile': { 'gpu_name': 'AD104', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 7424, 'streaming_multiprocessors': 58, 'tensor_cores': 232, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 24720, 'single_float_performance_gflop_s': 24720, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4080-mobile.c3947', }, 'GeForce RTX 4090 Max-Q': { 'gpu_name': 'AD103', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 9728, 'streaming_multiprocessors': 76, 'tensor_cores': 304, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 28310, 'single_float_performance_gflop_s': 28310, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4090-max-q.c3988', }, 'GeForce RTX 4090 Mobile': { 'gpu_name': 'AD103', 'generation': 'GeForce 40 Mobile', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-01-03'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 9728, 'streaming_multiprocessors': 76, 'tensor_cores': 304, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 32979, 'single_float_performance_gflop_s': 32979, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4090-mobile.c3949', }, 'L4': { 'gpu_name': 'AD104', 'generation': 'Server Ada(Lxx)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 7424, 'streaming_multiprocessors': 60, 'tensor_cores': 240, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 30290, 'single_float_performance_gflop_s': 30290, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/l4.c4091', }, 'RTX 2000 Embedded Ada Generation': { 'gpu_name': 'AD107', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 24, 'tensor_cores': 96, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 12350, 'single_float_performance_gflop_s': 12350, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-2000-embedded-ada-generation.c4177', }, 'RTX 2000 Max-Q Ada Generation': { 'gpu_name': 'AD107', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 24, 'tensor_cores': 96, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 8940, 'single_float_performance_gflop_s': 8940, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-2000-max-q-ada-generation.c4094', }, 'RTX 2000 Mobile Ada Generation': { 'gpu_name': 'AD107', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 
'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 24, 'tensor_cores': 96, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 12990, 'single_float_performance_gflop_s': 12990, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-2000-mobile-ada-generation.c4093', }, 'RTX 3000 Mobile Ada Generation': { 'gpu_name': 'AD106', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 4608, 'streaming_multiprocessors': 36, 'tensor_cores': 144, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 15620, 'single_float_performance_gflop_s': 15620, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-3000-mobile-ada-generation.c4095', }, 'RTX 3500 Embedded Ada Generation': { 'gpu_name': 'AD104', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 5120, 'streaming_multiprocessors': 40, 'tensor_cores': 160, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 23040, 'single_float_performance_gflop_s': 23040, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-3500-embedded-ada-generation.c4201', }, 'RTX 3500 Mobile Ada Generation': { 'gpu_name': 'AD104', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 5120, 'streaming_multiprocessors': 40, 'tensor_cores': 160, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 15820, 'single_float_performance_gflop_s': 15820, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-3500-mobile-ada-generation.c4098', }, 'RTX 4000 Mobile Ada Generation': { 'gpu_name': 'AD104', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 7424, 'streaming_multiprocessors': 58, 'tensor_cores': 232, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 24720, 'single_float_performance_gflop_s': 24720, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-4000-mobile-ada-generation.c4096', }, 'RTX 4000 SFF Ada Generation': { 'gpu_name': 'AD104', 'generation': 'Workstation Ada(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 20, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 19170, 'single_float_performance_gflop_s': 19170, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-4000-sff-ada-generation.c4139', }, 'RTX 5000 Embedded Ada Generation': { 'gpu_name': 'AD103', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 9728, 'streaming_multiprocessors': 76, 'tensor_cores': 304, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 32689, 
'single_float_performance_gflop_s': 32689, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-5000-embedded-ada-generation.c4176', }, 'RTX 5000 Embedded Ada Generation X2': { 'gpu_name': 'AD103', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 9728, 'streaming_multiprocessors': 76, 'tensor_cores': 304, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 32689, 'single_float_performance_gflop_s': 32689, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-5000-embedded-ada-generation-x2.c4256', }, 'RTX 5000 Max-Q Ada Generation': { 'gpu_name': 'AD103', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 9728, 'streaming_multiprocessors': 76, 'tensor_cores': 304, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 32689, 'single_float_performance_gflop_s': 32689, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-5000-max-q-ada-generation.c4154', }, 'RTX 5000 Mobile Ada Generation': { 'gpu_name': 'AD103', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 9728, 'streaming_multiprocessors': 76, 'tensor_cores': 304, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 41150, 'single_float_performance_gflop_s': 41150, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-5000-mobile-ada-generation.c4097', }, 'GeForce RTX 4070': { 'gpu_name': 'AD104', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-04-12'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6X', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 29150, 'single_float_performance_gflop_s': 29150, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070.c3924', }, 'GeForce RTX 4060': { 'gpu_name': 'AD107', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-05-18'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 24, 'tensor_cores': 96, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 15110, 'single_float_performance_gflop_s': 15110, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4060.c4107', }, 'GeForce RTX 4060 Ti 16 GB': { 'gpu_name': 'AD106', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-05-18'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 4352, 'streaming_multiprocessors': 34, 'tensor_cores': 136, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 22060, 'single_float_performance_gflop_s': 22060, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4060-ti-16-gb.c4155', }, 'GeForce RTX 4060 Ti 8 GB': { 'gpu_name': 'AD106', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-05-18'), 'bus_interface': 'PCIe 4.0 x8', 
'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 4352, 'streaming_multiprocessors': 34, 'tensor_cores': 136, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 22060, 'single_float_performance_gflop_s': 22060, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4060-ti-8-gb.c3890', }, 'RTX 4000 Ada Generation': { 'gpu_name': 'AD104', 'generation': 'Workstation Ada(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-08-09'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 20, 'memory_type': 'GDDR6', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 26730, 'single_float_performance_gflop_s': 26730, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-4000-ada-generation.c4171', }, 'RTX 4500 Ada Generation': { 'gpu_name': 'AD103', 'generation': 'Workstation Ada(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-08-09'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6', 'cuda_cores': 7680, 'streaming_multiprocessors': 60, 'tensor_cores': 240, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 39630, 'single_float_performance_gflop_s': 39630, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-4500-ada-generation.c4172', }, 'RTX 5000 Ada Generation': { 'gpu_name': 'AD102', 'generation': 'Workstation Ada(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-08-09'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 32, 'memory_type': 'GDDR6', 'cuda_cores': 12800, 'streaming_multiprocessors': 100, 'tensor_cores': 400, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 65280, 'single_float_performance_gflop_s': 65280, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-5000-ada-generation.c4152', }, 'L20': { 'gpu_name': 'AD102', 'generation': 'Server Ada(Lxx)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-11-16'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 11776, 'streaming_multiprocessors': 92, 'tensor_cores': 368, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 59350, 'single_float_performance_gflop_s': 59350, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/l20.c4206', }, 'GeForce RTX 4090 D': { 'gpu_name': 'AD102', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2023-12-28'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6X', 'cuda_cores': 14592, 'streaming_multiprocessors': 114, 'tensor_cores': 456, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 73540, 'single_float_performance_gflop_s': 73540, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4090-d.c4189', }, 'RTX 5880 Ada Generation': { 'gpu_name': 'AD102', 'generation': 'Workstation Ada(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-01-05'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6', 'cuda_cores': 14080, 'streaming_multiprocessors': 110, 'tensor_cores': 440, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 69269, 'single_float_performance_gflop_s': 69269, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-5880-ada-generation.c4191', }, 'GeForce RTX 
4070 SUPER': { 'gpu_name': 'AD104', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-01-08'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6X', 'cuda_cores': 7168, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 35480, 'single_float_performance_gflop_s': 35480, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-super.c4186', }, 'GeForce RTX 4070 Ti SUPER': { 'gpu_name': 'AD103', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-01-08'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6X', 'cuda_cores': 8448, 'streaming_multiprocessors': 66, 'tensor_cores': 264, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 44100, 'single_float_performance_gflop_s': 44100, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-ti-super.c4187', }, 'GeForce RTX 4080 SUPER': { 'gpu_name': 'AD103', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-01-08'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6X', 'cuda_cores': 10240, 'streaming_multiprocessors': 80, 'tensor_cores': 320, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 52220, 'single_float_performance_gflop_s': 52220, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4080-super.c4182', }, 'RTX 2000 Ada Generation': { 'gpu_name': 'AD107', 'generation': 'Workstation Ada(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-02-12'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 16, 'memory_type': 'GDDR6', 'cuda_cores': 2816, 'streaming_multiprocessors': 22, 'tensor_cores': 88, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 12000, 'single_float_performance_gflop_s': 12000, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-2000-ada-generation.c4199', }, 'RTX 1000 Mobile Ada Generation': { 'gpu_name': 'AD107', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-02-26'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 10370, 'single_float_performance_gflop_s': 10370, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-1000-mobile-ada-generation.c4208', }, 'RTX 500 Mobile Ada Generation': { 'gpu_name': 'AD107', 'generation': 'Ada-MW(x000A)', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-02-26'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 4, 'memory_type': 'GDDR6', 'cuda_cores': 2048, 'streaming_multiprocessors': 16, 'tensor_cores': 64, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 8294, 'single_float_performance_gflop_s': 8294, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-500-mobile-ada-generation.c4207', }, 'GeForce RTX 4070 AD103': { 'gpu_name': 'AD103', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-03-01'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6X', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 
'cuda_minor_version': 9, 'half_float_performance_gflop_s': 29150, 'single_float_performance_gflop_s': 29150, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-ad103.c4205', }, 'GeForce RTX 4060 AD106': { 'gpu_name': 'AD106', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-04-01'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 3072, 'streaming_multiprocessors': 24, 'tensor_cores': 96, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 15110, 'single_float_performance_gflop_s': 15110, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4060-ad106.c3891', }, 'GeForce RTX 4060 Ti AD104': { 'gpu_name': 'AD104', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-04-01'), 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 4352, 'streaming_multiprocessors': 34, 'tensor_cores': 136, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 22060, 'single_float_performance_gflop_s': 22060, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4060-ti-ad104.c4204', }, 'GeForce RTX 4070 Ti SUPER AD102': { 'gpu_name': 'AD102', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-06-10'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6X', 'cuda_cores': 8448, 'streaming_multiprocessors': 66, 'tensor_cores': 264, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 44100, 'single_float_performance_gflop_s': 44100, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-ti-super-ad102.c4215', }, 'GeForce RTX 4070 GDDR6': { 'gpu_name': 'AD104', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': date.fromisoformat('2024-08-20'), 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 29150, 'single_float_performance_gflop_s': 29150, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-gddr6.c4228', }, 'GeForce RTX 4080 12 GB': { 'gpu_name': 'AD104', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': None, 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR6X', 'cuda_cores': 7680, 'streaming_multiprocessors': 60, 'tensor_cores': 240, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 40090, 'single_float_performance_gflop_s': 40090, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4080-12-gb.c3932', }, 'GeForce RTX 4050': { 'gpu_name': 'AD107', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': None, 'bus_interface': 'PCIe 4.0 x8', 'memory_size_gb': 6, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 18, 'tensor_cores': 120, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 13520, 'single_float_performance_gflop_s': 13520, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4050.c3892', }, 'GeForce RTX 4080 Ti': { 'gpu_name': 'AD102', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': None, 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR6X', 'cuda_cores': 14080, 
'streaming_multiprocessors': 110, 'tensor_cores': 440, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 73500, 'single_float_performance_gflop_s': 73500, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4080-ti.c3887', }, 'GeForce RTX 4090 Ti': { 'gpu_name': 'AD102', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': None, 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR6X', 'cuda_cores': 18176, 'streaming_multiprocessors': 142, 'tensor_cores': 568, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 93240, 'single_float_performance_gflop_s': 93240, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4090-ti.c3917', }, 'TITAN Ada': { 'gpu_name': 'AD102', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': None, 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR6X', 'cuda_cores': 18432, 'streaming_multiprocessors': 144, 'tensor_cores': 576, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 92900, 'single_float_performance_gflop_s': 92900, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/titan-ada.c3985', }, 'GeForce RTX 4070 10 GB': { 'gpu_name': 'AD104', 'generation': 'GeForce 40', 'architecture': 'Ada Lovelace', 'release_date': None, 'bus_interface': 'PCIe 4.0 x16', 'memory_size_gb': 10, 'memory_type': 'GDDR6X', 'cuda_cores': 7168, 'streaming_multiprocessors': 56, 'tensor_cores': 224, 'cuda_major_version': 8, 'cuda_minor_version': 9, 'half_float_performance_gflop_s': 36130, 'single_float_performance_gflop_s': 36130, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-4070-10-gb.c4226', }, 'H100 PCIe 80 GB': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2022-10-01'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM2e', 'cuda_cores': 14592, 'streaming_multiprocessors': 114, 'tensor_cores': 456, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 204900, 'single_float_performance_gflop_s': 51220, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h100-pcie-80-gb.c3899', }, 'H100 SXM5 80 GB': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2022-10-01'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM3', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 267600, 'single_float_performance_gflop_s': 66910, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h100-sxm5-80-gb.c3900', }, 'H100 CNX': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM2e', 'cuda_cores': 14592, 'streaming_multiprocessors': 114, 'tensor_cores': 456, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 215400, 'single_float_performance_gflop_s': 53840, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h100-cnx.c4131', }, 'H100 PCIe 96 GB': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 96, 'memory_type': 'HBM3', 'cuda_cores': 16896, 
'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 248300, 'single_float_performance_gflop_s': 62080, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h100-pcie-96-gb.c4164', }, 'H100 SXM5 64 GB': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 64, 'memory_type': 'HBM3', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 267600, 'single_float_performance_gflop_s': 66910, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h100-sxm5-64-gb.c4165', }, 'H100 SXM5 94 GB': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 94, 'memory_type': 'HBM3', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 267600, 'single_float_performance_gflop_s': 66910, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h100-sxm5-94-gb.c4294', }, 'H100 SXM5 96 GB': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 96, 'memory_type': 'HBM3', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 267600, 'single_float_performance_gflop_s': 66910, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h100-sxm5-96-gb.c3974', }, 'H800 PCIe 80 GB': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM2e', 'cuda_cores': 14592, 'streaming_multiprocessors': 114, 'tensor_cores': 456, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 204900, 'single_float_performance_gflop_s': 51220, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h800-pcie-80-gb.c4181', }, 'H800 SXM5': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2023-03-21'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 80, 'memory_type': 'HBM3', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 237200, 'single_float_performance_gflop_s': 59300, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h800-sxm5.c3975', }, 'H200 NVL': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2024-11-18'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 141, 'memory_type': 'HBM3e', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 241300, 'single_float_performance_gflop_s': 60320, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h200-nvl.c4254', }, 'H200 SXM 141 GB': { 'gpu_name': 'GH100', 'generation': 'Server Hopper(Hxx)', 'architecture': 'Hopper', 'release_date': date.fromisoformat('2024-11-18'), 'bus_interface': 'PCIe 5.0 x16', 
'memory_size_gb': 141, 'memory_type': 'HBM3e', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 9, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 267600, 'single_float_performance_gflop_s': 66910, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/h200-sxm-141-gb.c4255', }, 'B200 SXM 192 GB': { 'gpu_name': 'GB100', 'generation': 'Server Blackwell(Bxx)', 'architecture': 'Blackwell', 'release_date': date.fromisoformat('2024-01-01'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 96, 'memory_type': 'HBM3e', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 10, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 248300, 'single_float_performance_gflop_s': 62080, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/b200-sxm-192-gb.c4210', }, 'B100': { 'gpu_name': 'GB102', 'generation': 'Server Blackwell(Bxx)', 'architecture': 'Blackwell', 'release_date': date.fromisoformat('2024-11-01'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 96, 'memory_type': 'HBM3e', 'cuda_cores': 16896, 'streaming_multiprocessors': 132, 'tensor_cores': 528, 'cuda_major_version': 10, 'cuda_minor_version': 1, 'half_float_performance_gflop_s': 248300, 'single_float_performance_gflop_s': 62080, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/b100.c4275', }, 'GeForce RTX 5050': { 'gpu_name': 'GB207', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-01-01'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 12900, 'single_float_performance_gflop_s': 12900, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5050.c4220', }, 'GeForce RTX 5050 Mobile': { 'gpu_name': 'GB207', 'generation': 'GeForce 50 Mobile', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-01-01'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR6', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 12900, 'single_float_performance_gflop_s': 12900, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5050-mobile.c4239', }, 'GeForce RTX 5080': { 'gpu_name': 'GB203', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-01-30'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR7', 'cuda_cores': 10752, 'streaming_multiprocessors': 84, 'tensor_cores': 336, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 56280, 'single_float_performance_gflop_s': 56280, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5080.c4217', }, 'GeForce RTX 5090': { 'gpu_name': 'GB202', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-01-30'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 32, 'memory_type': 'GDDR7', 'cuda_cores': 21760, 'streaming_multiprocessors': 170, 'tensor_cores': 680, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 104800, 'single_float_performance_gflop_s': 104800, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5090.c4216', }, 'GeForce RTX 5090 D': { 'gpu_name': 'GB202', 'generation': 'GeForce 50', 
'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-01-30'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 32, 'memory_type': 'GDDR7', 'cuda_cores': 21760, 'streaming_multiprocessors': 170, 'tensor_cores': 680, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 104800, 'single_float_performance_gflop_s': 104800, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5090-d.c4253', }, 'GeForce RTX 5070 Ti': { 'gpu_name': 'GB203', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-02-20'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR7', 'cuda_cores': 8960, 'streaming_multiprocessors': 70, 'tensor_cores': 280, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 43940, 'single_float_performance_gflop_s': 43940, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5070-ti.c4243', }, 'GeForce RTX 5070 Ti Mobile': { 'gpu_name': 'GB205', 'generation': 'GeForce 50 Mobile', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-01'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR7', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 17040, 'single_float_performance_gflop_s': 17040, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5070-ti-mobile.c4238', }, 'GeForce RTX 5070': { 'gpu_name': 'GB205', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-04'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR7', 'cuda_cores': 6144, 'streaming_multiprocessors': 48, 'tensor_cores': 192, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 30870, 'single_float_performance_gflop_s': 30870, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5070.c4218', }, 'RTX PRO 4000 Blackwell': { 'gpu_name': 'GB203', 'generation': 'Blackwell PRO W(x000)', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-18'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR7', 'cuda_cores': 8960, 'streaming_multiprocessors': 70, 'tensor_cores': 280, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 46900, 'single_float_performance_gflop_s': 46900, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-4000-blackwell.c4279', }, 'RTX PRO 4500 Blackwell': { 'gpu_name': 'GB203', 'generation': 'Blackwell PRO W(x000)', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-18'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 32, 'memory_type': 'GDDR7', 'cuda_cores': 10496, 'streaming_multiprocessors': 82, 'tensor_cores': 328, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 54940, 'single_float_performance_gflop_s': 54940, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-4500-blackwell.c4278', }, 'RTX PRO 5000 Blackwell': { 'gpu_name': 'GB202', 'generation': 'Blackwell PRO W(x000)', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-18'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 48, 'memory_type': 'GDDR7', 'cuda_cores': 14080, 'streaming_multiprocessors': 110, 'tensor_cores': 440, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 73690, 
'single_float_performance_gflop_s': 73690, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-5000-blackwell.c4276', }, 'RTX PRO 6000 Blackwell': { 'gpu_name': 'GB202', 'generation': 'Blackwell PRO W(x000)', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-18'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 96, 'memory_type': 'GDDR7', 'cuda_cores': 24064, 'streaming_multiprocessors': 188, 'tensor_cores': 752, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 126000, 'single_float_performance_gflop_s': 126000, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-6000-blackwell.c4272', }, 'RTX PRO 6000 Blackwell Max-Q': { 'gpu_name': 'GB202', 'generation': 'Blackwell PRO W(x000)', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-18'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 96, 'memory_type': 'GDDR7', 'cuda_cores': 24064, 'streaming_multiprocessors': 188, 'tensor_cores': 752, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 110100, 'single_float_performance_gflop_s': 110100, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-6000-blackwell-max-q.c4273', }, 'RTX PRO 6000 Blackwell Server': { 'gpu_name': 'GB202', 'generation': 'Server Blackwell(Bxx)', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-18'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 96, 'memory_type': 'GDDR7', 'cuda_cores': 24064, 'streaming_multiprocessors': 188, 'tensor_cores': 752, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 126000, 'single_float_performance_gflop_s': 126000, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-6000-blackwell-server.c4274', }, 'GeForce RTX 5090 Mobile': { 'gpu_name': 'GB203', 'generation': 'GeForce 50 Mobile', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-03-27'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR7', 'cuda_cores': 10496, 'streaming_multiprocessors': 82, 'tensor_cores': 328, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 31800, 'single_float_performance_gflop_s': 31800, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5090-mobile.c4235', }, 'GeForce RTX 5070 Mobile': { 'gpu_name': 'GB206', 'generation': 'GeForce 50 Mobile', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-04-01'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR7', 'cuda_cores': 4608, 'streaming_multiprocessors': 36, 'tensor_cores': 144, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 13130, 'single_float_performance_gflop_s': 13130, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5070-mobile.c4237', }, 'GeForce RTX 5080 Mobile': { 'gpu_name': 'GB203', 'generation': 'GeForce 50 Mobile', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-04-02'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR7', 'cuda_cores': 7680, 'streaming_multiprocessors': 60, 'tensor_cores': 240, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 23040, 'single_float_performance_gflop_s': 23040, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5080-mobile.c4236', }, 'GeForce RTX 5060 Ti 16 GB': { 'gpu_name': 'GB206', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': 
date.fromisoformat('2025-04-16'), 'bus_interface': 'PCIe 5.0 x8', 'memory_size_gb': 16, 'memory_type': 'GDDR7', 'cuda_cores': 4608, 'streaming_multiprocessors': 36, 'tensor_cores': 144, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 23700, 'single_float_performance_gflop_s': 23700, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5060-ti-16-gb.c4292', }, 'GeForce RTX 5060 Ti 8 GB': { 'gpu_name': 'GB206', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-04-16'), 'bus_interface': 'PCIe 5.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR7', 'cuda_cores': 4608, 'streaming_multiprocessors': 36, 'tensor_cores': 144, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 23700, 'single_float_performance_gflop_s': 23700, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5060-ti-8-gb.c4246', }, 'GeForce RTX 5060': { 'gpu_name': 'GB206', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-05-19'), 'bus_interface': 'PCIe 5.0 x8', 'memory_size_gb': 8, 'memory_type': 'GDDR7', 'cuda_cores': 3840, 'streaming_multiprocessors': 30, 'tensor_cores': 120, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 19180, 'single_float_performance_gflop_s': 19180, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5060.c4219', }, 'GeForce RTX 5060 Mobile': { 'gpu_name': 'GB206', 'generation': 'GeForce 50 Mobile', 'architecture': 'Blackwell 2.0', 'release_date': date.fromisoformat('2025-05-20'), 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR7', 'cuda_cores': 3328, 'streaming_multiprocessors': 26, 'tensor_cores': 104, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 9684, 'single_float_performance_gflop_s': 9684, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5060-mobile.c4230', }, 'GeForce RTX 5080 SUPER': { 'gpu_name': 'GB203', 'generation': 'GeForce 50', 'architecture': 'Blackwell 2.0', 'release_date': None, 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR7', 'cuda_cores': 10752, 'streaming_multiprocessors': 84, 'tensor_cores': 336, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 56280, 'single_float_performance_gflop_s': 56280, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/geforce-rtx-5080-super.c4302', }, 'RTX PRO 1000 Blackwell Mobile': { 'gpu_name': 'GB207', 'generation': 'Blackwell-MW', 'architecture': 'Blackwell 2.0', 'release_date': None, 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR7', 'cuda_cores': 2560, 'streaming_multiprocessors': 20, 'tensor_cores': 80, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 12900, 'single_float_performance_gflop_s': 12900, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-1000-blackwell-mobile.c4284', }, 'RTX PRO 2000 Blackwell Mobile': { 'gpu_name': 'GB206', 'generation': 'Blackwell-MW', 'architecture': 'Blackwell 2.0', 'release_date': None, 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 8, 'memory_type': 'GDDR7', 'cuda_cores': 3328, 'streaming_multiprocessors': 26, 'tensor_cores': 104, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 9684, 'single_float_performance_gflop_s': 9684, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-2000-blackwell-mobile.c4283', }, 'RTX PRO 3000 
Blackwell Mobile': { 'gpu_name': 'GB205', 'generation': 'Blackwell-MW', 'architecture': 'Blackwell 2.0', 'release_date': None, 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 12, 'memory_type': 'GDDR7', 'cuda_cores': 5888, 'streaming_multiprocessors': 46, 'tensor_cores': 184, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 17040, 'single_float_performance_gflop_s': 17040, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-3000-blackwell-mobile.c4282', }, 'RTX PRO 4000 Blackwell Mobile': { 'gpu_name': 'GB203', 'generation': 'Blackwell-MW', 'architecture': 'Blackwell 2.0', 'release_date': None, 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 16, 'memory_type': 'GDDR7', 'cuda_cores': 7680, 'streaming_multiprocessors': 60, 'tensor_cores': 240, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 23040, 'single_float_performance_gflop_s': 23040, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-4000-blackwell-mobile.c4281', }, 'RTX PRO 500 Blackwell Mobile': { 'gpu_name': 'GB207', 'generation': 'Blackwell-MW', 'architecture': 'Blackwell 2.0', 'release_date': None, 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 6, 'memory_type': 'GDDR7', 'cuda_cores': 1792, 'streaming_multiprocessors': 14, 'tensor_cores': 56, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 9032, 'single_float_performance_gflop_s': 9032, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-500-blackwell-mobile.c4285', }, 'RTX PRO 5000 Blackwell Mobile': { 'gpu_name': 'GB203', 'generation': 'Blackwell-MW', 'architecture': 'Blackwell 2.0', 'release_date': None, 'bus_interface': 'PCIe 5.0 x16', 'memory_size_gb': 24, 'memory_type': 'GDDR7', 'cuda_cores': 10496, 'streaming_multiprocessors': 82, 'tensor_cores': 328, 'cuda_major_version': 12, 'cuda_minor_version': 0, 'half_float_performance_gflop_s': 31800, 'single_float_performance_gflop_s': 31800, 'tpu_url': 'https://www.techpowerup.com/gpu-specs/rtx-pro-5000-blackwell-mobile.c4280', }, } ================================================ FILE: charts/models_chat.py ================================================ import matplotlib.pyplot as plt from matplotlib.patches import Patch from matplotlib.colors import LinearSegmentedColormap import pandas as pd from core.constants import CHAT_MODELS def create_chat_models_comparison_plot(): model_categories = { "coding": { "models": [ "Seed Coder - 8b" ], "color": "#DAA520", "label": "Coding Focused" }, "thinking": { "models": [ "Qwen 3 - 0.6b", "Qwen 3 - 1.7b", "Qwen 3 - 4b", "Qwen 3 - 8b", "Deepseek R1 - 8b", "GLM4-Z1 - 9b", "Qwen 3 - 14b", "Qwen 3 - 32b", "GLM4-Z1 - 32b", ], "color": "#CC5500", "label": "Thinking" }, "coding_and_thinking": { "models": [ ], "color": "#8B0000", "label": "Coding Focused and Thinking" } } df = pd.DataFrame([ {"model": model, "cps": data["cps"], "vram": data["vram"] / 1024} for model, data in CHAT_MODELS.items() ]) df = df.sort_values(by="vram") plt.rcParams['font.family'] = 'Arial' fig, ax1 = plt.subplots(figsize=(12, 6)) fig.patch.set_facecolor('#2e2e2e') ax1.set_facecolor('#2e2e2e') ax1.set_title("BitsAndBytes (4-bit) - RTX 4090", fontsize=14, color='white', pad=5) ax2 = ax1.twinx() gradient = LinearSegmentedColormap.from_list("", ["#001f4d", "#0066cc"]) bars = [] for i, (index, row) in enumerate(df.iterrows()): border_color = None border_width = 0 for category in model_categories.values(): if row["model"] in category["models"]: border_color = category["color"] border_width = 
3 break bar = ax1.bar(i, row["vram"], color=gradient(i/len(df)), alpha=0.7, edgecolor=border_color, linewidth=border_width) bars.append(bar[0]) ax1.bar(0, 0, color='none', label="VRAM Usage") ax1.set_xlabel("Model", color="white") ax1.set_ylabel("Average VRAM Usage (GB)", color="white", fontsize=14) ax1.tick_params(axis="y", labelcolor="white", colors="white") ax1.tick_params(axis="x", labelcolor="white", colors="white", rotation=45) ax1.grid(True, axis='y', linestyle='--', alpha=0.3, color='white') ax1.set_xticks(range(len(df))) model_names = df["model"] ax1.set_xticklabels(model_names, rotation=45, ha="right") for bar in bars: yval = bar.get_height() ax1.text(bar.get_x() + bar.get_width() / 2, yval, f'{yval:.2f}', verticalalignment='bottom', color='white', ha='center') line = ax2.plot(range(len(df)), df["cps"], color="#6699CC", marker="D", markersize=6, linewidth=2, label="Characters per Second") ax2.set_ylabel("Characters per Second", color="white", fontsize=14) ax2.tick_params(axis="y", labelcolor="white") for i, cps in enumerate(df["cps"]): ax2.annotate(f'{cps:.2f}', (i, cps), textcoords="offset points", xytext=(0,10), ha='center', color='white', fontweight='bold') category_patches = [Patch(facecolor='none', edgecolor=cat["color"], label=cat["label"], linewidth=2) for cat in model_categories.values()] lines1, labels1 = ax1.get_legend_handles_labels() lines2, labels2 = ax2.get_legend_handles_labels() all_handles = lines1 + lines2 + category_patches all_labels = labels1 + labels2 + [cat["label"] for cat in model_categories.values()] ax1.legend(all_handles, all_labels, loc="upper center", bbox_to_anchor=(0.5, 0.95), fancybox=True, shadow=True, ncol=len(all_handles)) fig.tight_layout() plt.subplots_adjust(left=0.04, right=0.96, top=0.85, bottom=0.15) return fig if __name__ == "__main__": fig = create_chat_models_comparison_plot() plt.show() ================================================ FILE: charts/models_vector.py ================================================ import matplotlib.pyplot as plt def create_vector_models_comparison_plot(): models_data = [ ("bge-small-en-v1.5", 529.34, 1058.68), ("e5-small-v2", 542.37, 1084.74), ("Granite-30m-English", 664.24, 1328.48), ("e5-base-v2", 779.93, 1559.86), ("Granite-125m-English", 885.93, 1771.86), ("bge-base-en-v1.5", 911.93, 1823.86), ("e5-large-v2", 1428.12, 2856.24), ("bge-large-en-v1.5", 1512.24, 3024.48), ("arctic-embed-m-v2.0", 1785.93, 3571.86), ("arctic-embed-l-v2.0", 2037.93, 4075.86), ("Qwen3-Embedding-0.6B", 2974.87, 5949.74), ("inf-retriever-v1-1.5b", 6303.24, 12606.48), ("Qwen3-Embedding-4B", 9234.05, 18468.10), ("Qwen3-Embedding-8B", 15634.17, 31268.34), ("inf-retriever-v1-7b", 17274.20, 34548.40), ] models_data.sort(key=lambda r: r[1]) names = [r[0] for r in models_data] gpu_mb = [r[1] for r in models_data] cpu_extra_mb = [r[2] - r[1] for r in models_data] plt.rcParams.update({ "figure.facecolor": "#2e2e2e", "axes.facecolor": "#2e2e2e", "axes.edgecolor": "white", "axes.labelcolor": "white", "xtick.color": "white", "ytick.color": "white", "text.color": "white", "legend.edgecolor": "white", }) fig, ax = plt.subplots(figsize=(10, 8)) y_pos = range(len(names)) ax.barh(y_pos, gpu_mb, color="#4c78a8", label="GPU – half precision (MB)") ax.barh(y_pos, cpu_extra_mb, left=gpu_mb, color="#f58518", alpha=0.6, label="CPU – additional for float32 (MB)") ax.set_yticks(y_pos) ax.set_yticklabels(names) ax.invert_yaxis() ax.set_xlabel("Memory (MB)") ax.set_title("Vector-model memory usage\nGPU half-precision vs. 
CPU float32") legend = ax.legend(facecolor="#2e2e2e", framealpha=0.8) for text in legend.get_texts(): text.set_color("white") fig.tight_layout() return fig ================================================ FILE: charts/models_vision.py ================================================ import matplotlib.pyplot as plt import pandas as pd from matplotlib.colors import LinearSegmentedColormap from matplotlib.patches import Patch def create_vision_models_comparison_plot(): model_categories = { "florence": { "models": [ ], "color": "#2E8B57", "label": "Can run on CPU" }, } data = [ {"model": "GLM-4.1V-9B-Thinking", "cps": 200.96, "memory": 9802.12}, {"model": "Qwen VL - 3b", "cps": 178.31, "memory": 6306.30}, {"model": "Qwen VL - 7b", "cps": 173.67, "memory": 9559.89}, {"model": "Granite Vision - 2b", "cps": 217.64, "memory": 4094.18}, {"model": "InternVL3 - 1b", "cps": 274.25, "memory": 2318.05}, {"model": "InternVL3 - 2b", "cps": 244.36, "memory": 3153.87}, {"model": "InternVL3 - 8b", "cps": 255.95, "memory": 8153.30}, {"model": "InternVL3 - 14b", "cps": 162.58, "memory": 12998.80}, {"model": "Liquid-VL - 1.6B", "cps": 437.50, "memory": 1396.00}, {"model": "Liquid-VL - 450m", "cps": 497.64, "memory": 497.64}, ] df = pd.DataFrame(data) df["memory"] = df["memory"] / 1024 df = df.sort_values(by="memory") fig, ax1 = plt.subplots(figsize=(10, 5)) fig.patch.set_facecolor('#2e2e2e') ax1.set_facecolor('#2e2e2e') ax1.set_title("Model Comparison - Memory Usage vs Characters per Second", fontsize=16, color='white', pad=10) ax2 = ax1.twinx() gradient = LinearSegmentedColormap.from_list("", ["#003328", "#004D40"]) bars = [] for i, (index, row) in enumerate(df.iterrows()): border_color = None border_width = 0 for category in model_categories.values(): if row["model"] in category["models"]: border_color = category["color"] border_width = 3 break bar = ax1.bar(i, row["memory"], color=gradient(i/len(df)), alpha=0.7, edgecolor=border_color, linewidth=border_width) bars.append(bar[0]) ax1.bar(0, 0, color=gradient(0.5), alpha=0.7, label="Memory Usage") ax1.set_xlabel("Model", color="white") ax1.set_ylabel("Memory Usage (GB)", color="white", fontsize=14) ax1.tick_params(axis="y", labelcolor="white", colors="white") ax1.tick_params(axis="x", labelcolor="white", colors="white", rotation=45) ax1.set_xticks(range(len(df))) ax1.set_xticklabels(df["model"], rotation=45, ha="right") for bar in bars: yval = bar.get_height() ax1.text(bar.get_x() + bar.get_width()/2, yval, f'{yval:.2f}', verticalalignment='bottom', color='white', ha='center') ax1.grid(True, linestyle='--', alpha=0.1, color='white') line = ax2.plot(range(len(df)), df["cps"], color="#5F9EA0", marker="o", label='Characters per Second (cps)') ax2.set_ylabel("Characters per Second", color="white", fontsize=14) ax2.tick_params(axis="y", labelcolor="white") for i, cps in enumerate(df["cps"]): ax2.text(i, cps, f'{cps:.2f}', ha='center', va='bottom', color='white') lines1, labels1 = ax1.get_legend_handles_labels() lines2, labels2 = ax2.get_legend_handles_labels() category_patches = [Patch(facecolor='none', edgecolor=cat["color"], label=cat["label"], linewidth=2) for cat in model_categories.values()] all_handles = lines1 + lines2 + category_patches all_labels = labels1 + labels2 + [cat["label"] for cat in model_categories.values()] ax1.legend(all_handles, all_labels, loc='upper center', fancybox=True, shadow=True, ncol=len(all_handles), facecolor='#2e2e2e', edgecolor='white', labelcolor='white') fig.tight_layout() plt.subplots_adjust(left=0.1, right=0.9, top=0.9, 
bottom=0.25) return fig if __name__ == "__main__": fig = create_vision_models_comparison_plot() plt.show() ================================================ FILE: chat/__init__.py ================================================ ================================================ FILE: chat/base.py ================================================ import yaml import logging import gc from copy import deepcopy import functools import copy from pathlib import Path import torch from transformers import ( AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, BitsAndBytesConfig, StoppingCriteria, StoppingCriteriaList ) import threading from abc import ABC, abstractmethod import builtins from contextlib import contextmanager from huggingface_hub import HfApi from PySide6.QtCore import Signal, QObject from core.constants import CHAT_MODELS, system_message, rag_string, GLM4Z1_CHAT_TEMPLATE, PROJECT_ROOT from core.utilities import my_cprint, has_bfloat16_support, format_citations logging.getLogger("transformers").setLevel(logging.ERROR) metadata_output_file_path = PROJECT_ROOT / "metadata.txt" class ChatSignals(QObject): response_signal = Signal(str) error_signal = Signal(str) finished_signal = Signal() citations_signal = Signal(str) def load_chat_config(): with open(PROJECT_ROOT / 'config.yaml', 'r', encoding='utf-8') as f: return yaml.safe_load(f) def save_metadata(metadata_list): with metadata_output_file_path.open('w', encoding='utf-8') as f: for m in metadata_list: f.write(f"{m}\n") def build_augmented_query(contexts, query): return f"{rag_string}\n\n---\n\n" + "\n\n---\n\n".join(contexts) + f"\n\n-----\n\n{query}" def cleanup_gpu(): if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() @contextmanager def utf8_file_operations(): original_open = builtins.open def utf8_open(path, *args, **kwargs): if 'encoding' not in kwargs: kwargs['encoding'] = 'utf-8' return original_open(path, *args, **kwargs) builtins.open = utf8_open try: yield finally: builtins.open = original_open def _configure_device_settings(settings, model_info): device = "cuda" if torch.cuda.is_available() else "cpu" settings.setdefault('tokenizer_settings', {}) settings.setdefault('model_settings', {}) if device == "cuda": native = model_info.get("precision") if native in ("float32", "bfloat16"): dtype = torch.bfloat16 if has_bfloat16_support() else torch.float16 else: dtype = torch.float16 settings['tokenizer_settings']['torch_dtype'] = dtype settings['model_settings']['torch_dtype'] = dtype qc = settings['model_settings'].get("quantization_config") if qc is not None: qc.bnb_4bit_compute_dtype = dtype else: settings['model_settings'].pop('quantization_config', None) settings['model_settings']['device_map'] = "cpu" return device def get_max_length(model_name): if model_name in CHAT_MODELS: return CHAT_MODELS[model_name].get('max_tokens', 8192) return 8192 def get_max_new_tokens(model_name): if model_name in CHAT_MODELS: return CHAT_MODELS[model_name].get('max_new_tokens', 1024) return 1024 def get_generation_settings(max_length, max_new_tokens): return { 'max_length': max_length, 'max_new_tokens': max_new_tokens, 'do_sample': False, 'num_beams': 1, 'use_cache': True, 'temperature': None, 'top_p': None, 'top_k': None, } def make_bnb_settings(dtype): return { 'tokenizer_settings': {'torch_dtype': dtype}, 'model_settings': { 'torch_dtype': dtype, 'quantization_config': BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=dtype, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, ), 'low_cpu_mem_usage': 
True, } } bnb_bfloat16_settings = make_bnb_settings(torch.bfloat16) bnb_float16_settings = make_bnb_settings(torch.float16) @functools.lru_cache(maxsize=1) def get_hf_token(): config_path = Path("config.yaml") if config_path.exists(): with open(config_path, 'r') as config_file: config = yaml.safe_load(config_file) return config.get('hf_access_token') return None def check_if_model_is_gated(repo_id, hf_token): try: api = HfApi(token=False) repo_info = api.repo_info(repo_id, token=False) return getattr(repo_info, 'gated', False) except Exception: if hf_token: try: api_with_token = HfApi(token=hf_token) repo_info = api_with_token.repo_info(repo_id) return getattr(repo_info, 'gated', False) except Exception: return False return False class _StopOnToken(StoppingCriteria): def __init__(self, stop_ids): self.stop_ids = set(stop_ids) def __call__(self, input_ids, scores, **kwargs): return input_ids[0, -1].item() in self.stop_ids class BaseModel(ABC): def __init__(self, model_info, settings, generation_settings, attn_implementation=None, tokenizer_kwargs=None, model_kwargs=None): if attn_implementation: settings = copy.deepcopy(settings) self.model_info = model_info self.settings = settings self.model_name = model_info['model'] self.generation_settings = generation_settings self.max_length = generation_settings['max_length'] self.device = _configure_device_settings(self.settings, self.model_info) script_dir = PROJECT_ROOT cache_dir = script_dir / "Models" / "chat" / self.model_info['cache_dir'] hf_token = get_hf_token() is_gated = self.model_info.get('gated', False) if not is_gated: is_gated = check_if_model_is_gated(model_info['repo_id'], hf_token) tokenizer_settings = { **self.settings.get('tokenizer_settings', {}), 'cache_dir': str(cache_dir) } if tokenizer_kwargs: tokenizer_settings.update(tokenizer_kwargs) if is_gated and hf_token: tokenizer_settings['token'] = hf_token elif not is_gated: tokenizer_settings['token'] = False with utf8_file_operations(): self.tokenizer = AutoTokenizer.from_pretrained(model_info['repo_id'], **tokenizer_settings) if tokenizer_kwargs and 'eos_token' in tokenizer_kwargs: self.tokenizer.eos_token = tokenizer_kwargs['eos_token'] model_settings = { **self.settings.get('model_settings', {}), 'cache_dir': str(cache_dir) } if model_kwargs: model_settings.update(model_kwargs) if is_gated and hf_token: model_settings['token'] = hf_token elif not is_gated: model_settings['token'] = False self.model = AutoModelForCausalLM.from_pretrained(model_info['repo_id'], **model_settings) self.model.eval() config = self.model.config model_dtype = next(self.model.parameters()).dtype my_cprint(f"Loaded {model_info['model']} ({model_dtype}) on {self.device} using {config._attn_implementation}", "green") def get_model_name(self): return self.model_name @abstractmethod def create_prompt(self, augmented_query): pass def create_inputs(self, prompt): inputs = self.tokenizer(prompt, return_tensors="pt", return_attention_mask=True) if inputs['input_ids'].size(1) > self.max_length: raise ValueError(f"Input prompt is too long ({inputs['input_ids'].size(1)} tokens). 
Maximum length is {self.max_length} tokens.") inputs = {k: v.to(self.device) for k, v in inputs.items()} return inputs @torch.inference_mode() def generate_response(self, inputs, remove_token_type_ids=False): if remove_token_type_ids: inputs.pop('token_type_ids', None) streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True) eos_token_id = self.tokenizer.eos_token_id all_settings = {**inputs, **self.generation_settings, 'streamer': streamer, 'eos_token_id': eos_token_id} generation_thread = threading.Thread(target=self.model.generate, kwargs=all_settings) generation_thread.start() for partial_response in streamer: yield partial_response generation_thread.join() def cleanup(self): if hasattr(self, 'model'): del self.model if hasattr(self, 'tokenizer'): del self.tokenizer torch.cuda.empty_cache() gc.collect() def switch_model(self, new_model_class): self.cleanup() return new_model_class() @staticmethod def free_torch_memory(model, tokenizer): del model del tokenizer torch.cuda.empty_cache() gc.collect() class LiquidAI(BaseModel): def __init__(self, generation_settings, model_name): model_info = CHAT_MODELS[model_name] if torch.cuda.is_available(): settings = copy.deepcopy(bnb_bfloat16_settings) settings['model_settings']['attn_implementation'] = "sdpa" else: settings = { 'tokenizer_settings': { 'torch_dtype': torch.float32, }, 'model_settings': { 'torch_dtype': torch.float32, 'device_map': 'cpu', } } super().__init__(model_info, settings, generation_settings) if self.tokenizer.pad_token_id is None and self.tokenizer.eos_token_id is not None: self.tokenizer.pad_token_id = self.tokenizer.eos_token_id def create_prompt(self, augmented_query): return f"""<|startoftext|><|im_start|>system {system_message}<|im_end|> <|im_start|>user {augmented_query}<|im_end|> <|im_start|>assistant """ def create_inputs(self, prompt): inputs = self.tokenizer( prompt, return_tensors="pt", return_attention_mask=True, return_token_type_ids=False, ) if inputs['input_ids'].size(1) > self.max_length: raise ValueError( f"Input prompt is too long ({inputs['input_ids'].size(1)} tokens). " f"Maximum length is {self.max_length} tokens." 
) return {k: v.to(self.device) for k, v in inputs.items()} @torch.inference_mode() def generate_response(self, inputs, remove_token_type_ids: bool = False): inputs.pop('token_type_ids', None) streamer = TextIteratorStreamer( self.tokenizer, skip_prompt=True, skip_special_tokens=True ) all_settings = { **inputs, **self.generation_settings, "streamer": streamer, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } gen_thread = threading.Thread(target=self.model.generate, kwargs=all_settings, daemon=True) gen_thread.start() for chunk in streamer: yield chunk gen_thread.join() class Granite(BaseModel): def __init__(self, generation_settings, model_name): model_info = CHAT_MODELS[model_name] if '2b' in model_name.lower() and not torch.cuda.is_available(): settings = {} else: settings = bnb_bfloat16_settings super().__init__(model_info, settings, generation_settings) def create_prompt(self, augmented_query): return f"""<|start_of_role|>system<|end_of_role|>{system_message}<|end_of_text|> <|start_of_role|>user<|end_of_role|>{augmented_query}<|end_of_text|> <|start_of_role|>assistant<|end_of_role|>""" class Exaone(BaseModel): def __init__(self, generation_settings, model_name): model_info = CHAT_MODELS[model_name] settings = copy.deepcopy(bnb_bfloat16_settings) settings['tokenizer_settings']['trust_remote_code'] = True settings['model_settings']['trust_remote_code'] = True if '2.4b' in model_name.lower() and not torch.cuda.is_available(): settings = { 'tokenizer_settings': {'trust_remote_code': True}, 'model_settings': {'trust_remote_code': True} } super().__init__(model_info, settings, generation_settings) def create_prompt(self, augmented_query): return f"""[|system|]{system_message}[|endofturn|] [|user|]{augmented_query} [|assistant|]""" class Qwen(BaseModel): def __init__(self, generation_settings, model_name): model_info = CHAT_MODELS[model_name] is_small_model = ( '1.7b' in model_name.lower() or '0.6b' in model_name.lower() ) no_cuda = not torch.cuda.is_available() if is_small_model and no_cuda: settings = {} else: settings = bnb_bfloat16_settings super().__init__(model_info, settings, generation_settings) def create_prompt(self, augmented_query): return f"""<|im_start|>system {system_message}<|im_end|> <|im_start|>user {augmented_query}<|im_end|> <|im_start|>assistant """ class Mistral_Small_24b(BaseModel): def __init__(self, generation_settings, model_name=None): model_info = CHAT_MODELS[model_name] super().__init__(model_info, bnb_bfloat16_settings, generation_settings) def create_prompt(self, augmented_query): return f""" [SYSTEM_PROMPT]{system_message}[/SYSTEM_PROMPT] [INST]{augmented_query}[/INST]""" class DeepseekR1(BaseModel): def __init__(self, generation_settings: dict, model_name: str): model_info = CHAT_MODELS[model_name] settings = deepcopy(bnb_bfloat16_settings) settings["tokenizer_settings"]["trust_remote_code"] = True settings["model_settings"]["trust_remote_code"] = True custom_generation_settings = { "max_length": generation_settings["max_length"], "max_new_tokens": generation_settings["max_new_tokens"], "do_sample": True, "temperature": 0.6, "top_p": 0.95, "top_k": 40, "num_beams": 1, "use_cache": True } tokenizer_kwargs = { "trust_remote_code": True, } super().__init__( model_info, settings, custom_generation_settings, attn_implementation=None, tokenizer_kwargs=tokenizer_kwargs ) self.generation_settings["pad_token_id"] = self.tokenizer.eos_token_id def create_prompt(self, augmented_query: str) -> str: return 
f"""<|begin_of_sentence|>{system_message}<|User|>{augmented_query}<|Assistant|>""" @torch.inference_mode() def generate_response(self, inputs, remove_token_type_ids: bool = False): yield from super().generate_response(inputs, remove_token_type_ids) class GLM4Z1(BaseModel): def __init__(self, generation_settings: dict, model_name: str): model_info = CHAT_MODELS[model_name] settings = deepcopy(bnb_bfloat16_settings) settings["tokenizer_settings"]["trust_remote_code"] = True settings["model_settings"]["trust_remote_code"] = True settings["model_settings"]["attn_implementation"] = "sdpa" custom_generation_settings = { "max_length": generation_settings["max_length"], "max_new_tokens": generation_settings["max_new_tokens"], "do_sample": True, "temperature": 0.6, "top_p": 0.95, "top_k": 40, "num_beams": 1, "use_cache": True } tokenizer_kwargs = { "trust_remote_code": True, "chat_template": GLM4Z1_CHAT_TEMPLATE } super().__init__( model_info, settings, custom_generation_settings, attn_implementation=None, tokenizer_kwargs=tokenizer_kwargs ) self.generation_settings["pad_token_id"] = self.tokenizer.eos_token_id def create_prompt(self, augmented_query: str) -> str: return f"""[gMASK]<|system|> {system_message}<|user|> {augmented_query}<|assistant|> """ @torch.inference_mode() def generate_response(self, inputs, remove_token_type_ids: bool = False): if remove_token_type_ids: inputs.pop("token_type_ids", None) settings = {**inputs, **self.generation_settings} generated = self.model.generate(**settings) text = self.tokenizer.decode(generated[0], skip_special_tokens=True) idx = text.rfind("") + len("") yield text[idx:].strip() class SeedCoder(BaseModel): def __init__(self, generation_settings, model_name=None): model_info = CHAT_MODELS[model_name] super().__init__(model_info, bnb_bfloat16_settings, generation_settings) def create_prompt(self, augmented_query): return f"""<[begin_of_sentence]>system {system_message} <[end_of_sentence]><[begin_of_sentence]>user {augmented_query}<[begin_of_sentence]>assistant """ @torch.inference_mode() def generate_response(self, inputs): inputs.pop("token_type_ids", None) yield from super().generate_response(inputs) class Phi4(BaseModel): def __init__(self, generation_settings: dict, model_name: str): model_info = CHAT_MODELS[model_name] settings = copy.deepcopy(bnb_bfloat16_settings) settings["model_settings"]["attn_implementation"] = "sdpa" settings["model_settings"]["device_map"] = "auto" if not torch.cuda.is_available(): settings = {"tokenizer_settings": {}, "model_settings": {"device_map": "cpu"}} super().__init__(model_info, settings, generation_settings) self.generation_settings["pad_token_id"] = self.tokenizer.eos_token_id def create_prompt(self, augmented_query: str) -> str: return ( f"<|system|>{system_message}<|end|>" f"<|user|>{augmented_query}<|end|><|assistant|>" ) @torch.inference_mode() def generate_response(self, inputs, remove_token_type_ids: bool = False): if remove_token_type_ids: inputs.pop("token_type_ids", None) eos_id = self.tokenizer.eos_token_id user_id = self.tokenizer.convert_tokens_to_ids("<|user|>") assist_id = self.tokenizer.convert_tokens_to_ids("<|assistant|>") stop_criteria = StoppingCriteriaList([_StopOnToken({user_id, eos_id})]) streamer = TextIteratorStreamer( self.tokenizer, skip_prompt=True, skip_special_tokens=False ) gen_thread = threading.Thread( target=self.model.generate, kwargs={**inputs, **self.generation_settings, "streamer": streamer, "eos_token_id": eos_id, "pad_token_id": eos_id, "stopping_criteria": stop_criteria}, 
daemon=True ) gen_thread.start() buffer, sent = "", 0 ASSIST, USER, END = "<|assistant|>", "<|user|>", "<|end|>" for chunk in streamer: buffer += chunk if ASSIST in buffer: buffer = buffer.split(ASSIST)[-1] for tag in (USER, END): cut = buffer.find(tag) if cut != -1: buffer = buffer[:cut] streamer.break_on_eos = True clean = buffer.replace(ASSIST, "").replace(USER, "").replace(END, "") if len(clean) > sent: yield clean[sent:] sent = len(clean) gen_thread.join() def generate_response(model_instance, augmented_query): prompt = model_instance.create_prompt(augmented_query) inputs = model_instance.create_inputs(prompt) for partial_response in model_instance.generate_response(inputs): yield partial_response def choose_model(model_name): if model_name in CHAT_MODELS: model_class_name = CHAT_MODELS[model_name]['function'] model_class = globals()[model_class_name] max_length = get_max_length(model_name) max_new_tokens = get_max_new_tokens(model_name) generation_settings = get_generation_settings(max_length, max_new_tokens) return model_class(generation_settings, model_name) else: raise ValueError(f"Unknown model: {model_name}") ================================================ FILE: chat/jeeves.py ================================================ import sys import os os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' from pathlib import Path from core.utilities import set_cuda_paths set_cuda_paths() import yaml from core.utilities import ensure_theme_config, load_stylesheet from ctypes import windll, byref, sizeof, c_int from ctypes.wintypes import BOOL, HWND, DWORD import psutil import ctranslate2 import gc import torch import re from transformers import AutoTokenizer from sentence_transformers import SentenceTransformer import numpy as np from PySide6.QtWidgets import ( QMainWindow, QWidget, QVBoxLayout, QTextEdit, QLineEdit, QMessageBox, QPushButton, QLabel, QHBoxLayout, QSizePolicy, QComboBox, QApplication ) from PySide6.QtCore import QThread, Signal, Qt, QTimer, QObject from PySide6.QtGui import QTextCursor, QPixmap from core.constants import ( jeeves_system_message, master_questions, CustomButtonStyles, rag_string, JEEVES_MODELS, PROJECT_ROOT, ) from gui.download_model import ModelDownloader, model_downloaded_signal from db.database_interactions import get_query_db from modules.kokoro import KokoroTTS from core.utilities import normalize_chat_text class GenerationWorker(QThread): token_signal = Signal(str) finished_signal = Signal() error_signal = Signal(str) def __init__(self, generator, tokenizer, prompt, model_dir): super().__init__() self.generator = generator self.tokenizer = tokenizer self.prompt = prompt self.model_dir = model_dir self._is_running = True def run(self): try: tokens = self.tokenizer.convert_ids_to_tokens(self.tokenizer.encode(self.prompt)) try: endofturn_id = self.tokenizer.encode("[|endofturn|]")[0] use_endofturn = True except: use_endofturn = False model_name = Path(self.model_dir).name.lower() generation_params = { "max_length": 2048, "sampling_temperature": 6.0, } if "DeepSeek-R1-Distill-Qwen-1.5B" in model_name: generation_params["repetition_penalty"] = 1.1 token_iterator = self.generator.generate_tokens( [tokens], **generation_params ) for token_result in token_iterator: if not self._is_running: break token_id = token_result.token_id if token_id == self.tokenizer.eos_token_id: break if use_endofturn and token_id == endofturn_id: break token = self.tokenizer.decode([token_id]) self.token_signal.emit(token) self.finished_signal.emit() except Exception as e: 
self.error_signal.emit(str(e)) def stop(self): self._is_running = False class ChatWindow(QMainWindow): def __init__(self, parent=None): super().__init__(parent) self.setWindowTitle("Ask Jeeves (Welcome back Jeeves!)") self.setGeometry(100, 100, 850, 950) central_widget = QWidget() self.layout = QVBoxLayout(central_widget) self.layout.setContentsMargins(0, 0, 0, 0) self.layout.setSpacing(1) image_path = PROJECT_ROOT / "Assets" / "ask_jeeves_transparent.jpg" if image_path.exists(): pixmap = QPixmap(str(image_path)) if not pixmap.isNull(): image_label = QLabel() image_label.setPixmap(pixmap.scaled(250, 250, Qt.KeepAspectRatio, Qt.SmoothTransformation)) image_label.setAlignment(Qt.AlignCenter) self.layout.addWidget(image_label) model_layout = QHBoxLayout() self.model_selector = QComboBox() self.model_selector.setFixedHeight(30) self.model_selector.addItem("Please choose a model...") self.model_selector.addItems(list(JEEVES_MODELS.keys())) self.model_selector.currentIndexChanged.connect(self.on_model_selected) model_layout.addWidget(self.model_selector) self.eject_button = QPushButton("Eject") self.eject_button.setFixedHeight(30) self.eject_button.clicked.connect(self.eject_model) self.eject_button.setEnabled(False) model_layout.addWidget(self.eject_button) self.layout.addLayout(model_layout) self.chat_display = QTextEdit() self.chat_display.setReadOnly(True) self.chat_display.setPlainText("Hello, my name is Jeeves. Thank you for the job opportunity! Ask me how to use this program.") self.layout.addWidget(self.chat_display, 4) input_row_layout = QHBoxLayout() self.input_field = QLineEdit() self.input_field.setFixedHeight(30) self.input_field.setPlaceholderText("Type your message here...") self.input_field.returnPressed.connect(self.send_message) input_row_layout.addWidget(self.input_field, stretch=4) self.speak_button = QPushButton("Speak Response") self.speak_button.setEnabled(False) self.speak_button.setFixedHeight(30) self.speak_button.clicked.connect(self.toggle_speech) self.speak_button.setStyleSheet(CustomButtonStyles.TEAL_BUTTON_STYLE) input_row_layout.addWidget(self.speak_button) self.voice_select = QComboBox() self.voice_select.setEnabled(False) self.voice_select.addItems(['bm_george', 'bm_lewis', 'bf_isabella', 'af']) self.voice_select.setCurrentText('bm_george') self.voice_select.setFixedHeight(30) input_row_layout.addWidget(self.voice_select) self.speed_control = QComboBox() self.speed_control.setEnabled(False) self.speed_mapping = { 'Slow': 1.0, 'Medium': 1.3, 'Fast': 1.6 } self.speed_control.addItems(list(self.speed_mapping.keys())) self.speed_control.setCurrentText('Medium') self.speed_control.setFixedHeight(30) input_row_layout.addWidget(self.speed_control) self.layout.addLayout(input_row_layout) self.suggestion_widget = QWidget() self.suggestion_widget.setMinimumHeight(100) self.suggestion_layout = QVBoxLayout(self.suggestion_widget) self.suggestion_layout.setContentsMargins(0, 0, 0, 0) self.suggestion_layout.setSpacing(1) self.suggestion_buttons = [] for _ in range(3): btn = QPushButton() btn.setVisible(True) btn.setStyleSheet(CustomButtonStyles.TEAL_BUTTON_STYLE) btn.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) btn.setMinimumSize(200, 35) btn.clicked.connect(self.on_suggestion_clicked) btn.setStyleSheet("text-align: left; padding: 1px 14px;") self.suggestion_buttons.append(btn) self.suggestion_layout.addWidget(btn) self.suggestion_layout.addStretch() self.layout.addWidget(self.suggestion_widget) self.setCentralWidget(central_widget) self.model_dir = None 
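# A minimal, self-contained sketch of the ranking that ChatWindow.find_top_similar
# (defined later in this file) performs: cosine similarity between the embedding of
# the typed text and the precomputed master-question embeddings, keeping the top-k
# matches above a threshold. The function name rank_suggestions, the sample questions,
# and the random vectors are illustrative stand-ins for real SentenceTransformer
# embeddings; top_k=3 and threshold=0.8 mirror the values used in the method.
import numpy as np

def rank_suggestions(query_vec, question_vecs, questions, top_k=3, threshold=0.8):
    # cosine similarity of the query against every candidate question
    sims = question_vecs @ query_vec / (
        np.linalg.norm(question_vecs, axis=1) * np.linalg.norm(query_vec)
    )
    order = sims.argsort()[-top_k:][::-1]            # best matches first
    return [questions[i] for i in order if sims[i] > threshold]

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    questions = [
        "How do I create a database?",
        "How do I query a database?",
        "Which chat models are supported?",
    ]
    vecs = rng.normal(size=(len(questions), 8))
    # a slightly perturbed copy of the first question's vector ranks above the threshold
    print(rank_suggestions(vecs[0] + 0.05 * rng.normal(size=8), vecs, questions))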
self.generator = None self.tokenizer = None self.worker = None self.vector_db = get_query_db("user_manual") self.model = SentenceTransformer('BAAI/bge-small-en-v1.5', token=False) self.question_embeddings = self.model.encode(master_questions) self.suggestion_cache = {} self.current_text = "" self.timer = QTimer() self.timer.setSingleShot(True) self.timer.timeout.connect(self._delayed_update) self.input_field.textChanged.connect(self.debounce_update) try: tts_path = PROJECT_ROOT / "Models" / "tts" / "ctranslate2-4you--Kokoro-82M-light" self.tts = KokoroTTS(repo_path=str(tts_path)) self.speak_button.setEnabled(True) self.voice_select.setEnabled(True) self.speed_control.setEnabled(True) except Exception: self.tts = None self.tts_thread = None self.tts_worker = None self.is_speaking = False def _ensure_model(self) -> None: model_dir = Path(self.model_dir) if not (model_dir / "model.bin").exists(): print("model.bin missing – redownloading just that file …") self._download_model() def eject_model(self): if self.generator: del self.generator self.generator = None if self.tokenizer: del self.tokenizer self.tokenizer = None if torch.cuda.is_available(): torch.cuda.empty_cache() self.model_selector.setCurrentIndex(0) self.eject_button.setEnabled(False) gc.collect() def toggle_speech(self): if self.is_speaking: self.cancel_speech() else: self.speak_response() def on_model_selected(self, index): if index == 0: if self.generator or self.tokenizer: self.eject_model() return model_name = self.model_selector.currentText() model_info = JEEVES_MODELS[model_name] self.model_dir = str(PROJECT_ROOT / "Models" / "Jeeves" / model_info["folder_name"]) if not Path(self.model_dir).exists(): self.model_selector.setEnabled(False) self.input_field.setEnabled(False) self.eject_button.setEnabled(False) download_config = { "repo_id": model_info["repo"], "cache_dir": model_info["folder_name"] } self.download_worker = QThread() self.downloader = ModelDownloader( model_info=download_config, model_type="jeeves" ) self.downloader.moveToThread(self.download_worker) self.download_worker.started.connect(self.downloader.download) model_downloaded_signal.downloaded.connect(self.on_model_downloaded) self.download_worker.start() return self._load_model() def on_model_downloaded(self, model_name, model_type): self.model_selector.setEnabled(True) self.input_field.setEnabled(True) self.download_worker.quit() self.download_worker.wait() self._load_model() def _load_model(self): self._ensure_model() physical_cores = max(1, psutil.cpu_count(logical=False) - 1) device = "cuda" if torch.cuda.is_available() else "cpu" if self.generator: del self.generator if self.tokenizer: del self.tokenizer if torch.cuda.is_available(): torch.cuda.empty_cache() self.generator = ctranslate2.Generator( self.model_dir, device=device, intra_threads=physical_cores, ) self.tokenizer = AutoTokenizer.from_pretrained(self.model_dir, token=False, trust_remote_code=True) self.eject_button.setEnabled(True) def showEvent(self, event): super().showEvent(event) self.apply_dark_mode_settings() def apply_dark_mode_settings(self): DWMWA_USE_IMMERSIVE_DARK_MODE = DWORD(20) set_window_attribute = windll.dwmapi.DwmSetWindowAttribute hwnd = HWND(int(self.winId())) true_bool = BOOL(True) set_window_attribute( hwnd, DWMWA_USE_IMMERSIVE_DARK_MODE, byref(true_bool), sizeof(true_bool) ) DWMWA_BORDER_COLOR = DWORD(34) black_color = c_int(0xFF000000) set_window_attribute( hwnd, DWMWA_BORDER_COLOR, byref(black_color), sizeof(black_color) ) def build_prompt(self, user_message): 
model_name = self.model_selector.currentText() prompt_format = JEEVES_MODELS[model_name]["prompt_format"] return prompt_format.format( jeeves_system_message=jeeves_system_message, user_message=user_message ) def send_message(self): if not self.generator or not self.tokenizer: QMessageBox.warning(self, "No Model Selected", "Please select a language model before sending a message.") return if self.worker and self.worker.isRunning(): return user_message = self.input_field.text().strip() if not user_message: return self.chat_display.clear() try: contexts, metadata = self.vector_db.search(user_message, k=5, score_threshold=0.9) if not contexts: QMessageBox.warning( self, "No Contexts Found", "No relevant chunks were found in the user manual database for this question. " "Try rephrasing your question." ) return except Exception as e: QMessageBox.warning(self, "Database Query Error", f"An error occurred while querying the database: {e}") return contexts_text = "\n\n".join(contexts) full_context = f"{rag_string}\n\n{contexts_text}" self.input_field.clear() self.input_field.setDisabled(True) self.chat_display.append(f"User: {user_message}") self.chat_display.append("\nAssistant: ") prompt = self.build_prompt(user_message) prompt = f"{full_context}\n\n{prompt}" self.worker = GenerationWorker(self.generator, self.tokenizer, prompt, self.model_dir) self.worker.token_signal.connect(self.update_response) self.worker.error_signal.connect(self.show_error) self.worker.finished_signal.connect(self.on_generation_finished) self.worker.start() def update_response(self, token): cursor = self.chat_display.textCursor() cursor.movePosition(QTextCursor.End) self.chat_display.setTextCursor(cursor) self.chat_display.insertPlainText(token) self.chat_display.ensureCursorVisible() def show_error(self, error_message): QMessageBox.warning(self, "Error", f"An error occurred: {error_message}") self.input_field.setDisabled(False) def on_generation_finished(self): self.input_field.setDisabled(False) self.input_field.setFocus() if self.worker: if self.worker.isRunning(): self.worker.wait() self.worker.deleteLater() self.worker = None def find_top_similar(self, input_text, top_k=5): if not input_text.strip() or len(input_text) < 3: return [] input_embedding = self.model.encode([input_text])[0] similarities = np.dot(self.question_embeddings, input_embedding) / ( np.linalg.norm(self.question_embeddings, axis=1) * np.linalg.norm(input_embedding) ) top_indices = similarities.argsort()[-top_k:][::-1] top_similarities = similarities[top_indices] threshold = 0.8 top_questions = [ master_questions[idx] for idx, sim in zip(top_indices, top_similarities) if sim > threshold ] return top_questions def debounce_update(self, text): self.current_text = text self.timer.start(500) def _delayed_update(self): text = self.current_text if len(text) >= 3: suggestions = self.find_top_similar(text, top_k=3) self.update_suggestions(suggestions) else: self.clear_suggestions() def update_suggestions(self, suggestions): for i, btn in enumerate(self.suggestion_buttons): if i < len(suggestions): btn.setText(suggestions[i]) btn.setEnabled(True) else: btn.setText("") btn.setEnabled(False) def clear_suggestions(self): for btn in self.suggestion_buttons: btn.setText("") btn.setEnabled(False) def on_suggestion_clicked(self): sender = self.sender() if sender and isinstance(sender, QPushButton): suggestion = sender.text() self.input_field.setText(suggestion) self.send_message() def speak_response(self): if not self.tts: QMessageBox.warning(self, "TTS Not 
Available", "Text-to-speech is not available. Please check if KokoroTTS is properly installed.") return selected_voice = self.voice_select.currentText() selected_speed = self.speed_mapping[self.speed_control.currentText()] text = self.chat_display.toPlainText() try: response_text = text.split("Assistant: ", 1)[1].strip() except IndexError: QMessageBox.warning(self, "No Response", "There is no response from Jeeves to speak. Please ask a question first.") return if not response_text: QMessageBox.warning(self, "Empty Response", "The response is empty. Please ask a question first.") return self.is_speaking = True self.speak_button.setText("Cancel Playback") self.voice_select.setEnabled(False) self.speed_control.setEnabled(False) self.tts_thread = QThread() self.tts_worker = TTSWorker(self.tts, response_text, selected_voice, selected_speed) self.tts_worker.moveToThread(self.tts_thread) self.tts_thread.started.connect(self.tts_worker.run) self.tts_worker.finished.connect(self.on_speech_finished) self.tts_worker.finished.connect(self.tts_worker.deleteLater) self.tts_thread.finished.connect(self.tts_thread.deleteLater) self.tts_worker.error.connect(self.handle_tts_error) self.tts_thread.start() def cancel_speech(self): if self.tts_worker: self.tts_worker.stop() def on_speech_finished(self): self.is_speaking = False self.speak_button.setText("Speak Response") self.speak_button.setEnabled(True) self.voice_select.setEnabled(True) self.speed_control.setEnabled(True) if self.tts_thread: self.tts_thread.quit() self.tts_thread.wait() def handle_tts_error(self, error_message): self.on_speech_finished() QMessageBox.warning(self, "TTS Error", f"An error occurred while trying to speak: {error_message}") def closeEvent(self, event): if hasattr(self, 'vector_db'): self.vector_db.cleanup() if torch.cuda.is_available(): torch.cuda.empty_cache() event.accept() class TTSWorker(QObject): finished = Signal() error = Signal(str) def __init__(self, tts, text, voice, speed): super().__init__() self.tts = tts self.text = text self.voice = voice self.speed = speed self._should_stop = False def stop(self): self._should_stop = True if hasattr(self.tts, 'stop'): self.tts.stop() def run(self): try: text_without_asterisks = self.text.replace('*', '') text_cleaned = re.sub(r'#{2,}', '', text_without_asterisks) normalized_text = normalize_chat_text(text_cleaned) if not self._should_stop: self.tts.speak(normalized_text, voice=self.voice, speed=self.speed) self.finished.emit() except Exception as e: if not self._should_stop: self.error.emit(str(e)) def launch_jeeves_process(): from core.utilities import set_cuda_paths set_cuda_paths() from PySide6.QtWidgets import QApplication from PySide6.QtCore import Qt if hasattr(QApplication, 'setHighDpiScaleFactorRoundingPolicy'): QApplication.setHighDpiScaleFactorRoundingPolicy(Qt.HighDpiScaleFactorRoundingPolicy.PassThrough) QApplication.setAttribute(Qt.AA_EnableHighDpiScaling) QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps) app = QApplication([]) theme = ensure_theme_config() app.setStyleSheet(load_stylesheet(theme)) window = ChatWindow() window.show() ret = app.exec() sys.exit(ret) ================================================ FILE: chat/kobold.py ================================================ import json import logging import requests import sseclient from PySide6.QtCore import QThread, Signal from db.database_interactions import get_query_db from chat.base import ChatSignals, load_chat_config, save_metadata, build_augmented_query, cleanup_gpu from core.utilities import 
format_citations from core.constants import PROJECT_ROOT class KoboldChat: def __init__(self): self.signals = ChatSignals() self.config = load_chat_config() self.query_vector_db = None self.api_url = "http://localhost:5001/api/extra/generate/stream" self.stop_request = False def connect_to_kobold(self, augmented_query): payload = { "prompt": augmented_query, "max_context_length": 8192, "max_length": 1024, "temperature": 0.1, "top_p": 0.9, } response = None try: response = requests.post(self.api_url, json=payload, stream=True, timeout=20) response.raise_for_status() client = sseclient.SSEClient(response) for event in client.events(): if self.stop_request: break if event.event == "message": try: data = json.loads(event.data) if 'token' in data: yield data['token'] except json.JSONDecodeError: logging.error(f"Failed to parse JSON: {event.data}") raise ValueError(f"Failed to parse response: {event.data}") except Exception as e: logging.error(f"Error in Kobold API request: {str(e)}") raise finally: if response: response.close() def handle_response_and_cleanup(self, full_response, metadata_list): citations = format_citations(metadata_list) if self.query_vector_db: self.query_vector_db.cleanup() cleanup_gpu() return citations def ask_kobold(self, query, selected_database): if self.query_vector_db is None or self.query_vector_db.selected_database != selected_database: self.query_vector_db = get_query_db(selected_database) contexts, metadata_list = self.query_vector_db.search(query) save_metadata(metadata_list) if not contexts: self.signals.error_signal.emit( "No chunks passed the similarity threshold. " "Try lowering the 'Similarity' setting in the Database Query settings tab." ) self.signals.finished_signal.emit() return augmented_query = build_augmented_query(contexts, query) full_response = "" try: response_generator = self.connect_to_kobold(augmented_query) for response_chunk in response_generator: if self.stop_request: break self.signals.response_signal.emit(response_chunk) full_response += response_chunk self.signals.response_signal.emit("\n") citations = self.handle_response_and_cleanup(full_response, metadata_list) self.signals.citations_signal.emit(citations) except Exception as e: self.signals.error_signal.emit(str(e)) raise class KoboldThread(QThread): response_signal = Signal(str) error_signal = Signal(str) finished_signal = Signal() citations_signal = Signal(str) def __init__(self, query, selected_database): super().__init__() self.query = query self.selected_database = selected_database self.kobold_chat = KoboldChat() self.kobold_chat.signals.response_signal.connect(self.response_signal.emit) self.kobold_chat.signals.error_signal.connect(self.error_signal.emit) self.kobold_chat.signals.citations_signal.connect(self.citations_signal.emit) def run(self): try: self.kobold_chat.ask_kobold(self.query, self.selected_database) except Exception as e: logging.error(f"Error in KoboldThread: {str(e)}") self.error_signal.emit(str(e)) finally: self.finished_signal.emit() def stop(self): self.kobold_chat.stop_request = True self.wait(5000) ================================================ FILE: chat/lm_studio.py ================================================ import logging import re import requests from openai import OpenAI from PySide6.QtCore import QThread from db.database_interactions import get_query_db from chat.base import ChatSignals, load_chat_config, save_metadata, build_augmented_query, cleanup_gpu from core.utilities import format_citations from core.constants import system_message, 
THINKING_TAGS _ALL_THINKING_TAGS = [t for pair in THINKING_TAGS.values() for t in pair] _START_THINKING_TAGS = frozenset(s for s, _ in THINKING_TAGS.values()) _THINKING_TAG_RE = re.compile("|".join(re.escape(t) for t in _ALL_THINKING_TAGS)) def _strip_thinking(buffer, in_thinking): """Process buffer, toggling in_thinking at each tag match. Returns (text_to_yield, new_buffer, new_in_thinking). Holds back any tail that could be the start of a partial tag so a tag split across chunks (e.g. '') is still detected on the next call. """ out = [] pos = 0 cur_in = in_thinking while True: m = _THINKING_TAG_RE.search(buffer, pos) if m is None: break if not cur_in: out.append(buffer[pos:m.start()]) cur_in = m.group(0) in _START_THINKING_TAGS pos = m.end() tail = buffer[pos:] hold = 0 for t in _ALL_THINKING_TAGS: max_i = min(len(t) - 1, len(tail)) for i in range(max_i, 0, -1): if tail.endswith(t[:i]): if i > hold: hold = i break if hold: flushable = tail[:-hold] new_buffer = tail[-hold:] else: flushable = tail new_buffer = "" if not cur_in: out.append(flushable) return "".join(out), new_buffer, cur_in class LMStudioChat: def __init__(self): self.signals = ChatSignals() self.config = load_chat_config() self.query_vector_db = None def connect_to_local_chatgpt(self, prompt): server_config = self.config.get('server', {}) base_url = server_config.get('connection_str') show_thinking = server_config.get('show_thinking', False) client = OpenAI(base_url=base_url, api_key='lm-studio') messages = [ {"role": "system", "content": system_message}, {"role": "user", "content": prompt} ] stream = client.chat.completions.create( model="local-model", messages=messages, stream=True ) in_thinking_block = False first_content = True buffer = "" for chunk in stream: if chunk.choices[0].delta.content is None: continue content = chunk.choices[0].delta.content if show_thinking: if first_content: content = content.lstrip() if not content: continue first_content = False yield content continue buffer += content text, buffer, in_thinking_block = _strip_thinking(buffer, in_thinking_block) if not text: continue if first_content: text = text.lstrip() if not text: continue first_content = False yield text if not show_thinking and buffer and not in_thinking_block: tail = buffer.lstrip() if first_content else buffer if tail: yield tail def handle_response_and_cleanup(self, full_response, metadata_list): citations = format_citations(metadata_list) if self.query_vector_db: self.query_vector_db.cleanup() cleanup_gpu() return citations def ask_local_chatgpt(self, query, selected_database): if self.query_vector_db is None or self.query_vector_db.selected_database != selected_database: self.query_vector_db = get_query_db(selected_database) contexts, metadata_list = self.query_vector_db.search(query) save_metadata(metadata_list) if not contexts: self.signals.error_signal.emit( "No chunks passed the similarity threshold. " "Try lowering the 'Similarity' setting in the Database Query settings tab." 
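# Self-contained sketch of the idea behind _strip_thinking above: stream chunks arrive
# in arbitrary pieces, so the filter holds back any tail that might be the prefix of a
# split tag and only emits text that falls outside a thinking block. This simplified
# version hard-codes a single <think>/</think> pair and the names stream_filter and
# START/END are illustrative; the real function handles every pair in THINKING_TAGS
# through one combined regex.
START, END = "<think>", "</think>"

def stream_filter(chunks):
    buffer, in_thinking = "", False
    for chunk in chunks:
        buffer += chunk
        out = []
        while True:
            tag = END if in_thinking else START
            i = buffer.find(tag)
            if i == -1:
                break
            if not in_thinking:
                out.append(buffer[:i])
            in_thinking = not in_thinking
            buffer = buffer[i + len(tag):]
        # hold back a tail that could be the start of the next expected tag
        hold = 0
        tag = END if in_thinking else START
        for n in range(len(tag) - 1, 0, -1):
            if buffer.endswith(tag[:n]):
                hold = n
                break
        flushable, buffer = (buffer[:-hold], buffer[-hold:]) if hold else (buffer, "")
        if not in_thinking:
            out.append(flushable)
        text = "".join(out)
        if text:
            yield text

if __name__ == "__main__":
    chunks = ["Hello <thi", "nk>secret reasoning</th", "ink> world", "!"]
    print("".join(stream_filter(chunks)))   # the split tags are still detected: "Hello  world!"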
) self.signals.finished_signal.emit() return augmented_query = build_augmented_query(contexts, query) full_response = "" response_generator = self.connect_to_local_chatgpt(augmented_query) for response_chunk in response_generator: self.signals.response_signal.emit(response_chunk) full_response += response_chunk self.signals.response_signal.emit("\n") citations = self.handle_response_and_cleanup(full_response, metadata_list) self.signals.citations_signal.emit(citations) self.signals.finished_signal.emit() class LMStudioChatThread(QThread): def __init__(self, query, selected_database): super().__init__() self.query = query self.selected_database = selected_database self.lm_studio_chat = LMStudioChat() def run(self): try: self.lm_studio_chat.ask_local_chatgpt(self.query, self.selected_database) except Exception as e: logging.error(f"Error in LMStudioChatThread: {str(e)}") self.lm_studio_chat.signals.error_signal.emit(str(e)) finally: self.lm_studio_chat.signals.finished_signal.emit() def is_lm_studio_available(): try: response = requests.get("http://127.0.0.1:1234/v1/models/", timeout=3) return response.status_code == 200 except requests.exceptions.RequestException: return False """ [Main Process] | | DatabaseQueryTab (GUI) LMStudioChatThread | ------------------ ----------------- | | | | [Submit Button] | | | | | on_submit_button_clicked() | | | | | |---> LMStudioChatThread.start() ---->| | | | | [LMStudioChat Instance] | | | ask_local_chatgpt() | | | [QueryVectorDB Search] | | | connect_to_local_chatgpt() | | | Signal Flow OpenAI API Stream | ----------- ---------------- | | | | Signals Received: Stream Chunks: | - response_signal - chunk.choices[0].delta.content | - error_signal | | - finished_signal | | - citations_signal | | | | | GUI Updates: Cleanup Operations: | - update_response_lm_studio() - handle_response_and_cleanup() | - show_error_message() - save_metadata_to_file() | - on_submission_finished() - torch.cuda.empty_cache() | - display_citations_in_widget() - gc.collect() | | | Emit Final Signals: | - citations_signal | - finished_signal """ ================================================ FILE: chat/local_model.py ================================================ import time import logging from enum import Enum, auto from typing import Any, Optional from dataclasses import dataclass import torch from multiprocessing import Process, Pipe from multiprocessing.connection import PipeConnection from PySide6.QtCore import QObject, Signal import chat.base as module_chat from db.database_interactions import get_query_db from core.utilities import format_citations, my_cprint from core.constants import rag_string, PROJECT_ROOT from pathlib import Path class MessageType(Enum): QUESTION = auto() RESPONSE = auto() PARTIAL_RESPONSE = auto() CITATIONS = auto() ERROR = auto() FINISHED = auto() EXIT = auto() TOKEN_COUNTS = auto() @dataclass class PipeMessage: type: MessageType payload: Any = None class LocalModelSignals(QObject): response_signal = Signal(str) citations_signal = Signal(str) error_signal = Signal(str) finished_signal = Signal() model_loaded_signal = Signal() model_unloaded_signal = Signal() token_count_signal = Signal(str) class LocalModelChat: def __init__(self): self.model_process = None self.model_pipe = None self.current_model = None self.signals = LocalModelSignals() def start_model_process(self, model_name): if self.current_model != model_name: if self.is_model_loaded(): self.terminate_current_process() parent_conn, child_conn = Pipe() self.model_pipe = parent_conn 
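# Minimal sketch of the Pipe message protocol used by LocalModelChat above: the GUI side
# sends a question message to the worker process and reads partial-response messages until
# a finished marker arrives. MsgType, Msg, and _echo_worker are simplified stand-ins for
# MessageType, PipeMessage, and _local_model_process; the echo worker just streams the
# question back word by word instead of running a model.
from dataclasses import dataclass
from enum import Enum, auto
from multiprocessing import Pipe, Process
from typing import Any

class MsgType(Enum):
    QUESTION = auto()
    PARTIAL_RESPONSE = auto()
    FINISHED = auto()
    EXIT = auto()

@dataclass
class Msg:
    type: MsgType
    payload: Any = None

def _echo_worker(conn):
    while True:
        msg = conn.recv()
        if msg.type is MsgType.EXIT:
            break
        for word in str(msg.payload).split():
            conn.send(Msg(MsgType.PARTIAL_RESPONSE, word + " "))
        conn.send(Msg(MsgType.FINISHED))
    conn.close()

if __name__ == "__main__":
    parent, child = Pipe()
    proc = Process(target=_echo_worker, args=(child,), daemon=True)
    proc.start()
    parent.send(Msg(MsgType.QUESTION, "hello from the gui process"))
    while True:
        reply = parent.recv()
        if reply.type is MsgType.FINISHED:
            break
        print(reply.payload, end="", flush=True)
    parent.send(Msg(MsgType.EXIT))
    proc.join()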
self.model_process = Process(target=self._local_model_process, args=(child_conn, model_name), daemon=True) self.model_process.start() self.current_model = model_name self._start_listening_thread() self.signals.model_loaded_signal.emit() else: logging.warning(f"Model {model_name} is already loaded") def terminate_current_process(self): if self.model_process is not None: try: if self.model_pipe: try: self.model_pipe.send(PipeMessage(MessageType.EXIT)) except (BrokenPipeError, OSError): logging.warning("Pipe already closed") finally: self.model_pipe.close() self.model_pipe = None process = self.model_process self.model_process = None if process.is_alive(): process.join(timeout=10) if process.is_alive(): logging.warning("Process did not terminate, forcing termination") process.terminate() process.join(timeout=5) except Exception as e: logging.exception(f"Error during process termination: {e}") else: logging.warning("No process to terminate") self.model_pipe = None self.model_process = None self.current_model = None time.sleep(0.5) self.signals.model_unloaded_signal.emit() def start_chat(self, user_question, selected_model, selected_database): if not self.model_pipe: self.signals.error_signal.emit("Model not loaded. Please start a model first.") return self.model_pipe.send(PipeMessage( MessageType.QUESTION, (user_question, selected_model, selected_database) )) def is_model_loaded(self): return self.model_process is not None and self.model_process.is_alive() def eject_model(self): self.terminate_current_process() def _start_listening_thread(self): import threading if hasattr(self, "_stop_listener_event"): self._stop_listener_event.set() if getattr(self, "listener_thread", None) and self.listener_thread.is_alive(): self.listener_thread.join() self._stop_listener_event = threading.Event() self.listener_thread = threading.Thread( target=self._listen_for_response, args=(self._stop_listener_event,), daemon=True, ) self.listener_thread.start() def _listen_for_response(self, stop_event): while not stop_event.is_set(): if not self.model_pipe or not isinstance(self.model_pipe, PipeConnection): break try: if self.model_pipe.poll(timeout=1): message = self.model_pipe.recv() if message.type in [MessageType.RESPONSE, MessageType.PARTIAL_RESPONSE]: self.signals.response_signal.emit(message.payload) elif message.type == MessageType.CITATIONS: self.signals.citations_signal.emit(message.payload) elif message.type == MessageType.ERROR: self.signals.error_signal.emit(message.payload) elif message.type == MessageType.FINISHED: self.signals.finished_signal.emit() if message.payload == MessageType.EXIT: break elif message.type == MessageType.TOKEN_COUNTS: self.signals.token_count_signal.emit(message.payload) else: time.sleep(0.1) except (BrokenPipeError, EOFError, OSError): break except Exception as e: logging.warning(f"Unexpected error in _listen_for_response: {str(e)}") break self.cleanup_listener_resources() def cleanup_listener_resources(self): self.model_pipe = None self.model_process = None self.current_model = None @staticmethod def _local_model_process(conn, model_name): model_instance = module_chat.choose_model(model_name) query_vector_db = None current_database = None try: while True: try: message = conn.recv() if message.type == MessageType.QUESTION: user_question, _, selected_database = message.payload if query_vector_db is None or current_database != selected_database: query_vector_db = get_query_db(selected_database) current_database = selected_database contexts, metadata_list = 
query_vector_db.search(user_question) if not contexts: conn.send(PipeMessage( MessageType.ERROR, "No chunks passed the similarity threshold. " "Try lowering the 'Similarity' setting in the Database Query settings tab." )) conn.send(PipeMessage(MessageType.FINISHED)) continue max_context_tokens = model_instance.max_length - 100 context_tokens = len(model_instance.tokenizer.encode("\n\n---\n\n".join(contexts))) if context_tokens > max_context_tokens: logging.warning(f"Context tokens ({context_tokens}) exceed max context limit ({max_context_tokens})") error_message = ( "The contexts received from the vector database exceed the chat model's context limit.\n\n" "You can either:\n" "1) Adjust the chunk size setting when creating the database;\n" "2) Adjust the search settings (e.g. relevancy, number of contexts to return, etc.);\n" "3) Choose a chat model with a larger context." ) conn.send(PipeMessage(MessageType.ERROR, error_message)) conn.send(PipeMessage(MessageType.FINISHED)) continue augmented_query = f"{rag_string}\n\n---\n\n" + "\n\n---\n\n".join(contexts) + "\n\n-----\n\n" + user_question prepend_token_count = len(model_instance.tokenizer.encode(rag_string)) context_token_count = len(model_instance.tokenizer.encode("\n\n---\n\n".join(contexts))) user_question_token_count = len(model_instance.tokenizer.encode(user_question)) full_response = "" buffer = "" for partial_response in module_chat.generate_response(model_instance, augmented_query): full_response += partial_response buffer += partial_response if len(buffer) >= 50 or '\n' in buffer: conn.send(PipeMessage(MessageType.PARTIAL_RESPONSE, buffer)) buffer = "" if buffer: conn.send(PipeMessage(MessageType.PARTIAL_RESPONSE, buffer)) response_token_count = len(model_instance.tokenizer.encode(full_response)) remaining_tokens = model_instance.max_length - (prepend_token_count + user_question_token_count + context_token_count + response_token_count) total_tokens = prepend_token_count + context_token_count + user_question_token_count + response_token_count token_count_string = ( f"available tokens ({model_instance.max_length})" f" - rag instruction ({prepend_token_count})" f" - query ({user_question_token_count})" f" - contexts ({context_token_count})" f" - response ({response_token_count})" f" = {remaining_tokens} remaining tokens." 
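# Worked example of the token accounting assembled just above: the remaining budget is the
# model's max_length minus the rag instruction, the user query, the retrieved contexts, and
# the generated response. All numbers here are illustrative placeholders.
max_length = 8192
prepend, query, contexts, response = 85, 42, 3100, 650
remaining = max_length - (prepend + query + contexts + response)
print(f"available tokens ({max_length}) - rag instruction ({prepend}) "
      f"- query ({query}) - contexts ({contexts}) - response ({response}) "
      f"= {remaining} remaining tokens.")   # 4315 remaining tokens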
) conn.send(PipeMessage(MessageType.TOKEN_COUNTS, token_count_string)) citations = format_citations(metadata_list) conn.send(PipeMessage(MessageType.CITATIONS, citations)) conn.send(PipeMessage(MessageType.FINISHED)) elif message.type == MessageType.EXIT: break except EOFError: logging.warning("Connection closed by main process.") break except Exception as e: logging.exception(f"Error in local_model_process: {e}") conn.send(PipeMessage(MessageType.ERROR, str(e))) conn.send(PipeMessage(MessageType.FINISHED)) finally: try: if hasattr(model_instance, 'cleanup'): model_instance.cleanup() finally: conn.close() my_cprint("Local chat model removed from memory.", "red") def is_cuda_available(): return torch.cuda.is_available() ================================================ FILE: chat/minimax.py ================================================ import logging from openai import OpenAI from PySide6.QtCore import QThread, Signal from db.database_interactions import get_query_db from chat.base import load_chat_config, save_metadata, build_augmented_query, cleanup_gpu from core.utilities import format_citations from core.constants import system_message, PROJECT_ROOT MINIMAX_BASE_URL = "https://api.minimax.io/v1" MINIMAX_MODELS = ["MiniMax-M2.7", "MiniMax-M2.7-highspeed"] # Temperature must be in (0.0, 1.0] for MiniMax _MINIMAX_MIN_TEMP = 0.01 class MiniMaxChat: def __init__(self, override_model: str = None): self.response_callback = lambda x: None self.error_callback = lambda x: None self.finished_callback = lambda: None self.citations_callback = lambda x: None self.config = load_chat_config() if override_model: self.config.setdefault('minimax', {})['model'] = override_model self.query_vector_db = None def connect_to_minimax(self, augmented_query): minimax_config = self.config.get('minimax', {}) model = minimax_config.get('model', 'MiniMax-M2.7') api_key = minimax_config.get('api_key') if not api_key: raise ValueError("MiniMax API key not found in config.yaml.\n\n Please set it within the 'File' menu.") client = OpenAI(api_key=api_key, base_url=MINIMAX_BASE_URL) messages = [ {"role": "system", "content": system_message}, {"role": "user", "content": augmented_query} ] # MiniMax temperature must be in (0.0, 1.0] temperature = max(_MINIMAX_MIN_TEMP, 0.1) stream = client.chat.completions.create( model=model, messages=messages, temperature=temperature, stream=True ) for chunk in stream: if chunk.choices[0].delta.content is not None: yield chunk.choices[0].delta.content def handle_response_and_cleanup(self, full_response, metadata_list): citations = format_citations(metadata_list) if self.query_vector_db: if hasattr(self.query_vector_db.embeddings, 'client'): del self.query_vector_db.embeddings.client del self.query_vector_db.embeddings cleanup_gpu() return citations def ask_minimax(self, query, selected_database): if self.query_vector_db is None or self.query_vector_db.selected_database != selected_database: self.query_vector_db = get_query_db(selected_database) contexts, metadata_list = self.query_vector_db.search(query) save_metadata(metadata_list) if not contexts: self.error_callback( "No chunks passed the similarity threshold. " "Try lowering the 'Similarity' setting in the Database Query settings tab." 
) self.finished_callback() return augmented_query = build_augmented_query(contexts, query) full_response = "" response_generator = self.connect_to_minimax(augmented_query) for response_chunk in response_generator: self.response_callback(response_chunk) full_response += response_chunk self.response_callback("\n") citations = self.handle_response_and_cleanup(full_response, metadata_list) self.citations_callback(citations) self.finished_callback() class MiniMaxThread(QThread): response_signal = Signal(str) error_signal = Signal(str) finished_signal = Signal() citations_signal = Signal(str) def __init__(self, query, selected_database, model_name: str = None): super().__init__() self.query = query self.selected_database = selected_database self.minimax_chat = MiniMaxChat(override_model=model_name) self.minimax_chat.response_callback = self.on_response self.minimax_chat.error_callback = self.on_error self.minimax_chat.finished_callback = self.on_finished self.minimax_chat.citations_callback = self.on_citations def on_response(self, text): self.response_signal.emit(text) def on_error(self, error): self.error_signal.emit(error) def on_finished(self): self.finished_signal.emit() def on_citations(self, citations): self.citations_signal.emit(citations) def run(self): try: self.minimax_chat.ask_minimax(self.query, self.selected_database) except Exception as e: logging.error(f"Error in MiniMaxThread: {str(e)}") self.on_error(str(e)) finally: self.on_finished() ================================================ FILE: chat/openai.py ================================================ import logging from openai import OpenAI from PySide6.QtCore import QThread, Signal from db.database_interactions import get_query_db from chat.base import load_chat_config, save_metadata, build_augmented_query, cleanup_gpu from core.utilities import format_citations from core.constants import system_message from core.chatgpt_settings import ( DEFAULT_OPENAI_MODEL, DEFAULT_VERBOSITY, DEFAULT_REASONING_EFFORT, supports_verbosity, supports_reasoning_effort, ) class ChatGPTChat: def __init__(self): self.response_callback = lambda x: None self.error_callback = lambda x: None self.finished_callback = lambda: None self.citations_callback = lambda x: None self.config = load_chat_config() self.query_vector_db = None def connect_to_chatgpt(self, augmented_query): openai_config = self.config.get('openai', {}) or {} model = openai_config.get('model') or DEFAULT_OPENAI_MODEL api_key = openai_config.get('api_key') verbosity = openai_config.get('verbosity') or DEFAULT_VERBOSITY reasoning_effort = openai_config.get('reasoning_effort') or DEFAULT_REASONING_EFFORT if not api_key: raise ValueError( "OpenAI API key not found in config.yaml.\n\n" "Please set it via File menu → Chat Backend Settings…" ) client = OpenAI(api_key=api_key) messages = [ {"role": "system", "content": system_message}, {"role": "user", "content": augmented_query}, ] request_args = { "model": model, "input": messages, "stream": True, } if supports_verbosity(model): request_args["text"] = {"verbosity": verbosity} if supports_reasoning_effort(model) and reasoning_effort and reasoning_effort != "none": request_args["reasoning"] = {"effort": reasoning_effort} stream = client.responses.create(**request_args) for event in stream: event_type = getattr(event, "type", "") if event_type == "response.output_text.delta": delta = getattr(event, "delta", "") or "" if delta: yield delta elif event_type == "response.error": msg = str(getattr(event, "error", "unknown error")) 
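# Minimal sketch of the callback-driven streaming shared by MiniMaxChat and ChatGPTChat in
# this package: a generator yields text deltas, the consumer forwards each one to
# response_callback while accumulating the full reply, then fires the citations and finished
# callbacks. fake_stream, consume_stream, and the placeholder citation string are illustrative
# stand-ins for the real API stream and formatted citations.
from typing import Callable, Iterator

def fake_stream() -> Iterator[str]:
    for delta in ("The answer ", "is 42, ", "per the cited context."):
        yield delta

def consume_stream(stream: Iterator[str],
                   response_callback: Callable[[str], None],
                   citations_callback: Callable[[str], None],
                   finished_callback: Callable[[], None]) -> str:
    full_response = ""
    for chunk in stream:
        response_callback(chunk)      # forwarded to the GUI via Qt signals in the real classes
        full_response += chunk
    citations_callback("[1] example_source.pdf")   # placeholder citation
    finished_callback()
    return full_response

if __name__ == "__main__":
    consume_stream(fake_stream(),
                   response_callback=lambda s: print(s, end=""),
                   citations_callback=lambda c: print("\n" + c),
                   finished_callback=lambda: print("-- done --"))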
logging.error(f"OpenAI Responses API error: {msg}") raise RuntimeError(msg) def handle_response_and_cleanup(self, full_response, metadata_list): citations = format_citations(metadata_list) if self.query_vector_db: if hasattr(self.query_vector_db.embeddings, 'client'): del self.query_vector_db.embeddings.client del self.query_vector_db.embeddings cleanup_gpu() return citations def ask_chatgpt(self, query, selected_database): if self.query_vector_db is None or self.query_vector_db.selected_database != selected_database: self.query_vector_db = get_query_db(selected_database) contexts, metadata_list = self.query_vector_db.search(query) save_metadata(metadata_list) if not contexts: self.error_callback( "No chunks passed the similarity threshold. " "Try lowering the 'Similarity' setting in the Database Query settings tab." ) self.finished_callback() return augmented_query = build_augmented_query(contexts, query) full_response = "" response_generator = self.connect_to_chatgpt(augmented_query) for response_chunk in response_generator: self.response_callback(response_chunk) full_response += response_chunk self.response_callback("\n") citations = self.handle_response_and_cleanup(full_response, metadata_list) self.citations_callback(citations) self.finished_callback() class ChatGPTThread(QThread): response_signal = Signal(str) error_signal = Signal(str) finished_signal = Signal() citations_signal = Signal(str) def __init__(self, query, selected_database): super().__init__() self.query = query self.selected_database = selected_database self.chatgpt_chat = ChatGPTChat() self.chatgpt_chat.response_callback = self.on_response self.chatgpt_chat.error_callback = self.on_error self.chatgpt_chat.finished_callback = self.on_finished self.chatgpt_chat.citations_callback = self.on_citations def on_response(self, text): self.response_signal.emit(text) def on_error(self, error): self.error_signal.emit(error) def on_finished(self): self.finished_signal.emit() def on_citations(self, citations): self.citations_signal.emit(citations) def run(self): try: self.chatgpt_chat.ask_chatgpt(self.query, self.selected_database) except Exception as e: logging.error(f"Error in ChatGPTThread: {str(e)}") self.on_error(str(e)) finally: self.on_finished() ================================================ FILE: core/__init__.py ================================================ ================================================ FILE: core/chatgpt_settings.py ================================================ AVAILABLE_OPENAI_MODELS = [ "gpt-5.5", "gpt-5.4", "gpt-5.4-mini", ] MODEL_DISPLAY_NAMES = { "gpt-5.5": "gpt-5.5 (Thinking)", "gpt-5.4": "gpt-5.4 (Thinking)", "gpt-5.4-mini": "gpt-5.4 mini", } MODEL_PRICING = { "gpt-5.5": (5.00, 0.50, 30.00), "gpt-5.4": (2.50, 0.25, 15.00), "gpt-5.4-mini": (0.25, 0.025, 2.00), } REASONING_EFFORT_OPTIONS = ["none", "low", "medium", "high", "xhigh"] VERBOSITY_OPTIONS = ["low", "medium", "high"] DEFAULT_OPENAI_MODEL = "gpt-5.4-mini" DEFAULT_VERBOSITY = "low" DEFAULT_REASONING_EFFORT = "medium" def get_display_name(model: str) -> str: return MODEL_DISPLAY_NAMES.get(model, model) def get_model_from_display_name(display_name: str) -> str: for model, name in MODEL_DISPLAY_NAMES.items(): if name == display_name: return model return display_name def get_model_pricing(model_name: str) -> tuple[float, float, float]: return MODEL_PRICING.get(model_name, (0.00, 0.00, 0.00)) def supports_reasoning_effort(model_name: str) -> bool: m = (model_name or "").strip().lower() if m.endswith("-chat-latest"): return False return 
m.startswith("gpt-5.") def supports_verbosity(model_name: str) -> bool: m = (model_name or "").strip().lower() return m.startswith("gpt-5.") def migrate_legacy_model(model_name: str) -> str: if model_name in AVAILABLE_OPENAI_MODELS: return model_name return DEFAULT_OPENAI_MODEL ================================================ FILE: core/config.py ================================================ from pathlib import Path from typing import Optional, Dict, Any, Literal from pydantic import BaseModel, Field, field_validator, PrivateAttr from pydantic_settings import BaseSettings, SettingsConfigDict import yaml import threading class OpenAIConfig(BaseModel): api_key: Optional[str] = None model: str = "gpt-4o-mini" reasoning_effort: str = "medium" class MiniMaxConfig(BaseModel): api_key: Optional[str] = None model: str = "MiniMax-M2.7" class ServerConfig(BaseModel): api_key: str = "" connection_str: str = "http://127.0.0.1:1234/v1" show_thinking: bool = False @field_validator('connection_str') @classmethod def validate_connection_str(cls, v: str) -> str: if not v.startswith(('http://', 'https://')): raise ValueError("Connection string must start with http:// or https://") return v class DatabaseConfig(BaseModel): chunk_size: int = Field(default=700, gt=0, le=100000) chunk_overlap: int = Field(default=250, ge=0, le=100000) contexts: int = Field(default=5, gt=0, le=1000) similarity: float = Field(default=0.7, ge=0.0, le=1.0) half: bool = False database_to_search: str = "" document_types: str = "" search_term: str = "" pipeline_preset: str = "normal" @field_validator('contexts', mode='before') @classmethod def coerce_contexts(cls, v): if isinstance(v, str): return int(v) return v @field_validator('chunk_overlap') @classmethod def validate_overlap(cls, v: int, info) -> int: if 'chunk_size' in info.data and v >= info.data['chunk_size']: raise ValueError("chunk_overlap must be less than chunk_size") return v @field_validator('pipeline_preset') @classmethod def validate_pipeline_preset(cls, v: str) -> str: valid = {"minimal", "low", "normal", "high", "maximum"} if v not in valid: raise ValueError(f"pipeline_preset must be one of {valid}") return v class ComputeDeviceConfig(BaseModel): available: list = Field(default_factory=lambda: ["cpu"]) database_creation: str = "cpu" database_query: str = "cpu" gpu_brand: Optional[str] = None @field_validator('database_creation', 'database_query') @classmethod def validate_device(cls, v: str, info) -> str: if 'available' in info.data and v not in info.data['available']: return "cpu" return v class DatabaseInfo(BaseModel): model: str chunk_size: int chunk_overlap: int class AppearanceConfig(BaseModel): theme: str = "default" class PlatformInfo(BaseModel): os: str = "" class AppConfig(BaseSettings): model_config = SettingsConfigDict( env_file=None, extra='allow', validate_assignment=True ) openai: OpenAIConfig = Field(default_factory=OpenAIConfig) minimax: MiniMaxConfig = Field(default_factory=MiniMaxConfig) server: ServerConfig = Field(default_factory=ServerConfig) database: DatabaseConfig = Field(default_factory=DatabaseConfig) Compute_Device: ComputeDeviceConfig = Field(default_factory=ComputeDeviceConfig) appearance: AppearanceConfig = Field(default_factory=AppearanceConfig) Platform_Info: PlatformInfo = Field(default_factory=PlatformInfo) EMBEDDING_MODEL_NAME: Optional[str] = None EMBEDDING_MODEL_DIMENSIONS: Optional[int] = None hf_access_token: Optional[str] = None created_databases: Dict[str, DatabaseInfo] = Field(default_factory=dict) _config_path: Path = 
PrivateAttr(default=Path("config.yaml")) _lock: threading.RLock = PrivateAttr(default_factory=threading.RLock) @property def root_dir(self) -> Path: return Path(__file__).resolve().parent.parent @property def docs_dir(self) -> Path: return self.root_dir / "Docs_for_DB" @property def vector_db_dir(self) -> Path: return self.root_dir / "Vector_DB" @property def vector_db_backup_dir(self) -> Path: return self.root_dir / "Vector_DB_Backup" @property def models_dir(self) -> Path: return self.root_dir / "Models" @property def vector_models_dir(self) -> Path: return self.models_dir / "vector" @classmethod def load(cls, path: Optional[Path] = None) -> "AppConfig": config_path = path or Path("config.yaml") if not config_path.exists(): instance = cls() instance._config_path = config_path instance.save(config_path) return instance try: with open(config_path, 'r', encoding='utf-8') as f: data = yaml.safe_load(f) or {} if 'created_databases' in data: for db_name, db_data in data['created_databases'].items(): if isinstance(db_data, dict) and not isinstance(db_data, DatabaseInfo): data['created_databases'][db_name] = DatabaseInfo(**db_data) instance = cls(**data) instance._config_path = config_path return instance except Exception as e: print(f"Error loading config: {e}") instance = cls() instance._config_path = config_path return instance def save(self, path: Optional[Path] = None) -> None: save_path = path or self._config_path with self._lock: data = self.model_dump() temp_path = save_path.with_suffix('.tmp') with open(temp_path, 'w', encoding='utf-8') as f: yaml.safe_dump(data, f, allow_unicode=True) temp_path.replace(save_path) def update_field(self, field_path: str, value: Any) -> None: with self._lock: parts = field_path.split('.') obj = self for part in parts[:-1]: obj = getattr(obj, part) setattr(obj, parts[-1], value) self.save() def update_setting(self, field_path: str, value: Any) -> tuple[bool, str]: try: self.update_field(field_path, value) return True, "Setting updated successfully" except ValueError as e: return False, str(e) except Exception as e: return False, f"Error updating setting: {str(e)}" def add_database(self, name: str, model_path: str, chunk_size: int, chunk_overlap: int) -> None: self.created_databases[name] = DatabaseInfo( model=model_path, chunk_size=chunk_size, chunk_overlap=chunk_overlap ) self.save() def remove_database(self, name: str) -> None: if name in self.created_databases: del self.created_databases[name] self.save() def get_user_databases(self) -> list[str]: return [name for name in self.created_databases.keys() if name != "user_manual"] _config_instance: Optional[AppConfig] = None _config_lock = threading.Lock() def get_config() -> AppConfig: global _config_instance if _config_instance is None: with _config_lock: if _config_instance is None: _config_instance = AppConfig.load() return _config_instance def reload_config() -> AppConfig: global _config_instance with _config_lock: _config_instance = AppConfig.load() return _config_instance ================================================ FILE: core/constants.py ================================================ import os from pathlib import Path PROJECT_ROOT = Path(__file__).resolve().parent.parent _cpu = os.cpu_count() or 4 PIPELINE_PRESETS = { "minimal": { "ingest_threads": 1, "ingest_processes": 1, "split_max_parallel_workers": 1, "tokenize_max_parallel_workers": 1, "split_worker_batch_size": 5000, }, "low": { "ingest_threads": 4, "ingest_processes": 2, "split_max_parallel_workers": 2, 
"tokenize_max_parallel_workers": 2, "split_worker_batch_size": 3000, }, "normal": { "ingest_threads": min(max(_cpu - 2, 1), 8), "ingest_processes": min(max(_cpu - 2, 1), 4), "split_max_parallel_workers": min(max(_cpu - 2, 1), 4), "tokenize_max_parallel_workers": min(max(_cpu - 2, 1), 4), "split_worker_batch_size": 2000, }, "high": { "ingest_threads": min(max(_cpu - 2, 1), 16), "ingest_processes": min(max(_cpu - 2, 1), 8), "split_max_parallel_workers": min(max(_cpu - 2, 1), 8), "tokenize_max_parallel_workers": min(max(_cpu - 2, 1), 8), "split_worker_batch_size": 2000, }, "maximum": { "ingest_threads": max(_cpu - 2, 1), "ingest_processes": max(_cpu - 2, 1), "split_max_parallel_workers": 0, "tokenize_max_parallel_workers": 0, "split_worker_batch_size": 1000, }, } THEMES = { "default": { "bg_window": "#1e1e1e", "bg_surface": "#161b22", "bg_control": "#263238", "bg_control_hover": "#2F4F4F", "bg_dialog_button": "#255a7e", "bg_tab": "#255a7e", "bg_tab_selected": "#1e2a88", "bg_tab_hover": "#2b3d93", "bg_menu_selected": "#4A148C", "bg_splitter": "#1B5E20", "bg_list_hover": "#006064", "text_primary": "#d2d2d2", "text_input": "#a8beb5", "text_placeholder": "#d67373", "border_focus": "#6c757d", "selection_bg": "#69a9d4", "selection_fg": "black", }, "auburn": { "bg_window": "#161b22", "bg_surface": "#3b301b", "bg_control": "#5a423c", "bg_control_hover": "#4a3c2b", "bg_dialog_button": "#6c757d", "bg_tab": "#7a645b", "bg_tab_selected": "#39424e", "bg_tab_hover": "#9a8072", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#6b5343", "text_primary": "white", "text_input": "white", "text_placeholder": "#b28a70", "border_focus": "#6c757d", "selection_bg": "#8c6a5a", "selection_fg": "white", }, "black": { "bg_window": "#0E0D13", "bg_surface": "#0B0A11", "bg_control": "#0B0A11", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#0B0A11", "bg_tab_selected": "#39424e", "bg_tab_hover": "#2f343f", "bg_menu_selected": "#39424e", "bg_splitter": "#0E0D13", "bg_list_hover": "#39424e", "text_primary": "#D1D7E2", "text_input": "#D1D7E2", "text_placeholder": "#7BA8D8", "border_focus": "#6c757d", "selection_bg": "#555", "selection_fg": "#D1D7E2", }, "bluey": { "bg_window": "#1E2A3A", "bg_surface": "#2C3E50", "bg_control": "#34495E", "bg_control_hover": "#2C3E50", "bg_dialog_button": "#34495E", "bg_tab": "#34495E", "bg_tab_selected": "#2C3E50", "bg_tab_hover": "#4A6377", "bg_menu_selected": "#2C3E50", "bg_splitter": "#2C3E50", "bg_list_hover": "#34495E", "text_primary": "#ECF0F1", "text_input": "#ECF0F1", "text_placeholder": "#95A5A6", "border_focus": "#7F8C8D", "selection_bg": "#4A6377", "selection_fg": "#ECF0F1", }, "bluish": { "bg_window": "#161b22", "bg_surface": "#1b2230", "bg_control": "#2d3c47", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#4b5664", "bg_tab_selected": "#39424e", "bg_tab_hover": "#60687f", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#4b5664", "text_primary": "white", "text_input": "white", "text_placeholder": "#89a2a2", "border_focus": "#6c757d", "selection_bg": "#4f5a77", "selection_fg": "white", }, "colorblind": { "bg_window": "#F0F0F0", "bg_surface": "#E0E0E0", "bg_control": "#A0A0A0", "bg_control_hover": "#808080", "bg_dialog_button": "#A0A0A0", "bg_tab": "#777777", "bg_tab_selected": "#555", "bg_tab_hover": "#666", "bg_menu_selected": "#888", "bg_splitter": "#C0C0C0", "bg_list_hover": "#808080", "text_primary": "#000000", "text_input": "#000000", "text_placeholder": "#666", 
"border_focus": "#555", "selection_bg": "#555", "selection_fg": "#FFFFFF", }, "dark_blue": { "bg_window": "#1a1d29", "bg_surface": "#252836", "bg_control": "#323842", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#4b4b4b", "bg_tab_selected": "#39424e", "bg_tab_hover": "#666", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#4b4b4b", "text_primary": "white", "text_input": "white", "text_placeholder": "#969686", "border_focus": "#6c757d", "selection_bg": "#555", "selection_fg": "white", }, "dark_grey": { "bg_window": "#1a1d21", "bg_surface": "#2a2e32", "bg_control": "#2a2e32", "bg_control_hover": "#3a3e42", "bg_dialog_button": "#3498db", "bg_tab": "#2a2e32", "bg_tab_selected": "#3498db", "bg_tab_hover": "#3a3e42", "bg_menu_selected": "#3a3e42", "bg_splitter": "#2a2e32", "bg_list_hover": "#3a3e42", "text_primary": "white", "text_input": "white", "text_placeholder": "#8a8e92", "border_focus": "#4a4e52", "selection_bg": "#4a4e52", "selection_fg": "white", }, "dark_yellow": { "bg_window": "#161b22", "bg_surface": "#3b382b", "bg_control": "#5a5a5a", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#7a7664", "bg_tab_selected": "#39424e", "bg_tab_hover": "#9a9280", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#5a5a5a", "text_primary": "white", "text_input": "white", "text_placeholder": "#b2a27a", "border_focus": "#6c757d", "selection_bg": "#8c7a5a", "selection_fg": "white", }, "green_grey": { "bg_window": "#1b2224", "bg_surface": "#09272b", "bg_control": "#424244", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#4b4b4d", "bg_tab_selected": "#39424e", "bg_tab_hover": "#666669", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e21", "bg_list_hover": "#4b4b4d", "text_primary": "white", "text_input": "white", "text_placeholder": "#96989a", "border_focus": "#6c757d", "selection_bg": "#555559", "selection_fg": "white", }, "greenish": { "bg_window": "#161b22", "bg_surface": "#1b3016", "bg_control": "#3c472d", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#4f604b", "bg_tab_selected": "#39424e", "bg_tab_hover": "#608060", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#4f604b", "text_primary": "white", "text_input": "white", "text_placeholder": "#89a280", "border_focus": "#6c757d", "selection_bg": "#5a774f", "selection_fg": "white", }, "grey": { "bg_window": "#2D2D2D", "bg_surface": "#383838", "bg_control": "#4E4E4E", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#4E4E4E", "bg_tab_selected": "#39424e", "bg_tab_hover": "#7E7E7E", "bg_menu_selected": "#39424e", "bg_splitter": "#2D2D2D", "bg_list_hover": "#4E4E4E", "text_primary": "white", "text_input": "#A0A0A0", "text_placeholder": "#7E7E7E", "border_focus": "#6c757d", "selection_bg": "#626262", "selection_fg": "white", }, "hyperbolic": { "bg_window": "#1B1B1B", "bg_surface": "#006064", "bg_control": "#4A148C", "bg_control_hover": "#673AB7", "bg_dialog_button": "#4A148C", "bg_tab": "#311B92", "bg_tab_selected": "#4A148C", "bg_tab_hover": "#5E35B1", "bg_menu_selected": "#4A148C", "bg_splitter": "#3E2723", "bg_list_hover": "#0097A7", "text_primary": "#E0E0E0", "text_input": "white", "text_placeholder": "#9E9E9E", "border_focus": "#B39DDB", "selection_bg": "#0288D1", "selection_fg": "white", }, "jewel": { "bg_window": "#161b22", "bg_surface": "#301b38", "bg_control": "#423c57", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", 
"bg_tab": "#605b6e", "bg_tab_selected": "#39424e", "bg_tab_hover": "#807a8c", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#534153", "text_primary": "white", "text_input": "white", "text_placeholder": "#a27aa2", "border_focus": "#6c757d", "selection_bg": "#775a7a", "selection_fg": "white", }, "matrix": { "bg_window": "#000000", "bg_surface": "#001a00", "bg_control": "#001a00", "bg_control_hover": "#003300", "bg_dialog_button": "#001a00", "bg_tab": "#001a00", "bg_tab_selected": "#00ff00", "bg_tab_hover": "#003300", "bg_menu_selected": "#003300", "bg_splitter": "#00ff00", "bg_list_hover": "#003300", "text_primary": "#00ff00", "text_input": "#00ff00", "text_placeholder": "#008000", "border_focus": "#008000", "selection_bg": "#003300", "selection_fg": "#00ff00", }, "monet": { "bg_window": "#161b22", "bg_surface": "#a8beb5", "bg_control": "#8ca6db", "bg_control_hover": "#aacbe8", "bg_dialog_button": "#8ca6db", "bg_tab": "#aacbe8", "bg_tab_selected": "#39424e", "bg_tab_hover": "#cdd3e5", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#39424e", "text_primary": "white", "text_input": "#a8beb5", "text_placeholder": "#aacbe8", "border_focus": "#6c757d", "selection_bg": "#9dbf9e", "selection_fg": "black", }, "okeefe": { "bg_window": "#161b22", "bg_surface": "#3e3033", "bg_control": "#856d88", "bg_control_hover": "#2f343f", "bg_dialog_button": "#856d88", "bg_tab": "#907880", "bg_tab_selected": "#39424e", "bg_tab_hover": "#a79f9d", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#856d88", "text_primary": "white", "text_input": "#7a6469", "text_placeholder": "#907880", "border_focus": "#6c757d", "selection_bg": "#a88c95", "selection_fg": "white", }, "orangish": { "bg_window": "#161b22", "bg_surface": "#30261b", "bg_control": "#4a3b2d", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#60594b", "bg_tab_selected": "#39424e", "bg_tab_hover": "#807562", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#4a3b2d", "text_primary": "white", "text_input": "white", "text_placeholder": "#a28a70", "border_focus": "#6c757d", "selection_bg": "#776855", "selection_fg": "white", }, "puke": { "bg_window": "#161b22", "bg_surface": "#303a35", "bg_control": "#4a5a4e", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#6e7e71", "bg_tab_selected": "#39424e", "bg_tab_hover": "#8c9c89", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#4a5a4e", "text_primary": "white", "text_input": "#59665c", "text_placeholder": "#6e7e71", "border_focus": "#6c757d", "selection_bg": "#7a8c7c", "selection_fg": "white", }, "purplish": { "bg_window": "#161b22", "bg_surface": "#301b30", "bg_control": "#423c47", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#5b4b5e", "bg_tab_selected": "#39424e", "bg_tab_hover": "#806080", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#4d4154", "text_primary": "white", "text_input": "white", "text_placeholder": "#a289a2", "border_focus": "#6c757d", "selection_bg": "#5a4f5a", "selection_fg": "white", }, "reddish": { "bg_window": "#161b22", "bg_surface": "#30161b", "bg_control": "#472d3c", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#604b4f", "bg_tab_selected": "#39424e", "bg_tab_hover": "#806060", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#543d41", "text_primary": "white", "text_input": "white", "text_placeholder": 
"#a28089", "border_focus": "#6c757d", "selection_bg": "#774f5a", "selection_fg": "white", }, "steel_ocean": { "bg_window": "#1e2126", "bg_surface": "#1b3a47", "bg_control": "#39424e", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#565e66", "bg_tab_selected": "#39424e", "bg_tab_hover": "#737c85", "bg_menu_selected": "#39424e", "bg_splitter": "#202428", "bg_list_hover": "#565e66", "text_primary": "white", "text_input": "white", "text_placeholder": "#a2a2a3", "border_focus": "#6c757d", "selection_bg": "#6c757d", "selection_fg": "white", }, "tron": { "bg_window": "#010b19", "bg_surface": "#011627", "bg_control": "#011627", "bg_control_hover": "#00ffff", "bg_dialog_button": "#011627", "bg_tab": "#011627", "bg_tab_selected": "#00ffff", "bg_tab_hover": "#405c7d", "bg_menu_selected": "#00ffff", "bg_splitter": "#00ffff", "bg_list_hover": "#00ffff", "text_primary": "#7dfdfe", "text_input": "#7dfdfe", "text_placeholder": "#405c7d", "border_focus": "#7dfdfe", "selection_bg": "#00ffff", "selection_fg": "#010b19", }, "yellowish": { "bg_window": "#161b22", "bg_surface": "#302f1b", "bg_control": "#4a4739", "bg_control_hover": "#2f343f", "bg_dialog_button": "#6c757d", "bg_tab": "#5e5d4b", "bg_tab_selected": "#39424e", "bg_tab_hover": "#807f6a", "bg_menu_selected": "#39424e", "bg_splitter": "#1e1e1e", "bg_list_hover": "#4a4739", "text_primary": "white", "text_input": "white", "text_placeholder": "#a2a27a", "border_focus": "#6c757d", "selection_bg": "#75705b", "selection_fg": "white", }, } SUPPORTED_EXTENSIONS = ( ".pdf", ".docx", ".txt", ".eml", ".msg", ".csv", ".xls", ".xlsx", ".xlsm", ".rtf", ".md", ".html", ".htm", ) GLM4Z1_CHAT_TEMPLATE = """[gMASK] {%- if tools -%} <|system|> 你是一个名为 ChatGLM 的人工智能助手。你是基于智谱 AI 公司训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。 # 可用工具 {%- for tool in tools %} {%- set function = tool.function if tool.get("function") else tool %} ## {{ function.name }} {{ function | tojson(indent=4, ensure_ascii=False) }} 在调用上述函数时,请使用 Json 格式表示调用的参数。 {%- endfor %} {%- endif -%} {%- for msg in messages %} {%- if msg.role == 'system' %} <|system|> {{ msg.content }} {%- endif %} {%- endfor %} {%- for message in messages if message.role != 'system' %} {%- set role = message['role'] %} {%- set content = message['content'] %} {%- set visible = content.split('')[-1].strip() %} {%- set meta = message.get("metadata", "") %} {%- if role == 'user' %} <|user|> {{ visible }} {%- elif role == 'assistant' and not meta %} <|assistant|> {{ visible }} {%- elif role == 'assistant' and meta %} <|assistant|>{{ meta }} {{ visible }} {%- elif role == 'observation' %} <|observation|> {{ visible }} {%- endif %} {%- endfor %} {% if add_generation_prompt %}<|assistant|> {% endif %}""" priority_libs = { "cp311": { "GPU": [ "https://github.com/kingbri1/flash-attention/releases/download/v2.8.3/flash_attn-2.8.3+cu128torch2.8.0cxx11abiFALSE-cp311-cp311-win_amd64.whl", "https://download.pytorch.org/whl/cu128/torch-2.9.0%2Bcu128-cp311-cp311-win_amd64.whl#sha256=dc6f6c6e7d7eed20c687fc189754a6ea6bf2da9c64eff59fd6753b80ed4bca05", "https://download.pytorch.org/whl/cu128/torchvision-0.23.0%2Bcu128-cp311-cp311-win_amd64.whl#sha256=70b3d8bfe04438006ec880c162b0e3aaac90c48b759aa41638dd714c732b182c", "https://download.pytorch.org/whl/cu128/torchaudio-2.9.0%2Bcu128-cp311-cp311-win_amd64.whl#sha256=daa01250079ef024987622429f379723d306e92fad42290868041a60d4fef2e6", "triton-windows==3.4.0.post20", "xformers==0.0.33.post1", "nvidia-cuda-runtime-cu12==12.8.90", "nvidia-cublas-cu12==12.8.4.1", 
"nvidia-cuda-nvrtc-cu12==12.8.93", "nvidia-cuda-nvcc-cu12==12.8.93", "nvidia-cufft-cu12==11.3.3.83", "nvidia-cudnn-cu12==9.10.2.21", "nvidia-ml-py==13.580.82", ], "CPU": [ ], "COMMON": [ "https://github.com/simonflueckiger/tesserocr-windows_build/releases/download/tesserocr-v2.9.1-tesseract-5.5.1/tesserocr-2.9.1-cp311-cp311-win_amd64.whl", ], }, "cp312": { "GPU": [ "https://github.com/kingbri1/flash-attention/releases/download/v2.8.3/flash_attn-2.8.3+cu128torch2.8.0cxx11abiFALSE-cp312-cp312-win_amd64.whl", "https://download.pytorch.org/whl/cu128/torch-2.9.0%2Bcu128-cp312-cp312-win_amd64.whl#sha256=c97dc47a1f64745d439dd9471a96d216b728d528011029b4f9ae780e985529e0", "https://download.pytorch.org/whl/cu128/torchvision-0.24.0%2Bcu128-cp312-cp312-win_amd64.whl#sha256=1aa36ac00106e1381c38348611a1ec0eebe942570ebaf0490f026b061dfc212c", "https://download.pytorch.org/whl/cu128/torchaudio-2.9.0%2Bcu128-cp312-cp312-win_amd64.whl#sha256=90cd2b4d7c375c9a5c2d79117985f8f506718f494914ad9b5c5dee5581216898", "triton-windows==3.4.0.post20", "xformers==0.0.33.post1", "nvidia-cuda-runtime-cu12==12.8.90", "nvidia-cublas-cu12==12.8.4.1", "nvidia-cuda-nvrtc-cu12==12.8.93", "nvidia-cuda-nvcc-cu12==12.8.93", "nvidia-cufft-cu12==11.3.3.83", "nvidia-cudnn-cu12==9.10.2.21", "nvidia-ml-py==13.580.82", ], "CPU": [ ], "COMMON": [ "https://github.com/simonflueckiger/tesserocr-windows_build/releases/download/tesserocr-v2.9.1-tesseract-5.5.1/tesserocr-2.9.1-cp312-cp312-win_amd64.whl", ] }, "cp313": { "GPU": [ "https://github.com/kingbri1/flash-attention/releases/download/v2.8.3/flash_attn-2.8.3+cu128torch2.8.0cxx11abiFALSE-cp313-cp313-win_amd64.whl", "https://download.pytorch.org/whl/cu128/torch-2.9.0%2Bcu128-cp313-cp313-win_amd64.whl#sha256=9cba9f0fa2e1b70fffdcec1235a1bb727cbff7e7b118ba111b2b7f984b7087e2", "https://download.pytorch.org/whl/cu128/torchvision-0.24.0%2Bcu128-cp313-cp313-win_amd64.whl#sha256=f82cd941bc36033ebdb2974c83caa2913cc37e6567fe97cdd69f5a568ff182c8", "https://download.pytorch.org/whl/cu128/torchaudio-2.9.0%2Bcu128-cp313-cp313-win_amd64.whl#sha256=76df3fdb5e1194b51e69187e00d53d18bb5c2e0f3904d105e644b5c3aba5c9f4", "triton-windows==3.4.0.post20", "xformers==0.0.33.post1", "nvidia-cuda-runtime-cu12==12.8.90", "nvidia-cublas-cu12==12.8.4.1", "nvidia-cuda-nvrtc-cu12==12.8.93", "nvidia-cuda-nvcc-cu12==12.8.93", "nvidia-cufft-cu12==11.3.3.83", "nvidia-cudnn-cu12==9.10.2.21", "nvidia-ml-py==13.580.82", ], "CPU": [ ], "COMMON": [ "https://github.com/simonflueckiger/tesserocr-windows_build/releases/download/tesserocr-v2.9.1-tesseract-5.5.1/tesserocr-2.9.1-cp313-cp313-win_amd64.whl", ] } } libs = [ "accelerate==1.11.0", "aiofiles==25.1.0", "aiohappyeyeballs==2.6.1", "aiohttp==3.13.2", "aiosignal==1.4.0", "anndata==0.12.5", "annotated-types==0.7.0", "anyio==4.11.0", "array_api_compat==1.12.0", "async-timeout==5.0.1", "attrs==25.4.0", "av==16.0.1", "backoff==2.2.1", "beautifulsoup4==4.14.2", "bitsandbytes==0.48.2", "braceexpand==0.1.7", "certifi==2025.10.5", "cfgv==3.4.0", "cffi==2.0.0", "chardet==5.2.0", "charset-normalizer==3.4.4", "git+https://github.com/BBC-Esq/chatterbox-light", "chattts==0.2.5", "click==8.3.0", "cloudpickle==3.1.2", "colorama==0.4.6", "colorclass==2.2.2", "coloredlogs==15.0.1", "compressed-rtf==1.0.7", "contourpy==1.3.3", "cryptography==46.0.3", "ctranslate2==4.6.2", "curl_cffi==0.15.0", "cycler==0.12.1", "dataclasses-json==0.6.7", "datasets==4.3.0", "deepdiff==8.6.1", "Deprecated==1.2.18", "deprecation==2.1.0", "diffusers==0.35.2", "dill==0.3.8", "distlib==0.4.0", "distro==1.9.0", 
"docx2txt==0.9", "easygui==0.98.3", "ebcdic==1.1.1", "einops==0.8.1", "einx==0.3.0", "emoji==2.15.0", "encodec==0.1.1", "et-xmlfile==2.0.0", "eval-type-backport==0.2.2", "extract-msg==0.55.0", "fastcore==1.8.13", "fastprogress==1.0.3", "filetype==1.2.0", "filelock==3.20.0", "fonttools==4.60.1", "frozendict==2.4.6", "frozenlist==1.8.0", "fsspec[http]==2025.9.0", "googleapis-common-protos==1.70.0", "greenlet==3.2.4", "grpcio==1.75.1", "gTTS==2.5.4", "h11==0.16.0", "h5py==3.15.1", "hf-xet==1.2.0", "html5lib==1.1", "httpcore==1.0.9", "httpx==0.28.1", "httpx-sse==0.4.3", "huggingface-hub==0.36.0", "humanfriendly==10.0", "HyperPyYAML==1.2.2", "identify==2.6.15", "idna==3.11", "img2pdf==0.6.1", "importlib_metadata==8.7.0", "Jinja2==3.1.6", "jiter==0.11.1", "joblib==1.5.2", "jsonpatch==1.33", "jsonpath-python==1.0.6", "jsonpointer==3.0.0", "jsonschema==4.25.1", "jsonschema-specifications==2025.9.1", "kiwisolver==1.4.9", "lark==1.3.1", "llvmlite==0.45.1", "lxml==6.0.2", "Markdown==3.9", "markdown-it-py==4.0.0", "MarkupSafe==3.0.3", "marshmallow==3.26.1", "matplotlib==3.10.7", "mdurl==0.1.2", "ml-dtypes==0.5.3", "more-itertools==10.8.0", "mpmath==1.3.0", "msoffcrypto-tool==6.0.0", "multidict==6.7.0", "multiprocess==0.70.16", "mypy-extensions==1.1.0", "natsort==8.4.0", "nest-asyncio==1.6.0", "networkx==3.5", "nodeenv==1.9.1", "nltk==3.9.1", "numba==0.62.1", "numpy==2.3.4", "ocrmypdf==16.11.1", "olefile==0.47", "oletools==0.60.2", "onnx==1.19.1", "openai==2.6.1", "openai-whisper==20250625", "openpyxl==3.1.5", "opentelemetry-api==1.38.0", "opentelemetry-exporter-otlp-proto-grpc==1.38.0", "opentelemetry-sdk==1.38.0", "opentelemetry-semantic-conventions", "opentelemetry-exporter-otlp-proto-common==1.38.0", "opentelemetry-proto==1.38.0", "optimum==2.0.0", "ordered-set==4.1.0", "orderly-set==5.5.0", "orjson==3.11.4", "overrides==7.7.0", "packaging==25.0", "pandas==2.3.3", "pcodedmp==1.2.6", "pdfminer.six==20250506", "pi-heif==1.1.1", "pikepdf==9.11.0", "pillow==12.0.0", "pipdeptree", "platformdirs==4.5.0", "pluggy==1.6.0", "posthog==5.4.0", "pre-commit==4.3.0", "propcache==0.4.1", "protobuf==6.33.0", "psutil==7.1.3", "pyarrow==22.0.0", "pybase16384==0.3.8", "pybase64==1.4.2", "pycparser==2.23", "pydantic==2.12.3", "pydantic_core==2.41.4", "pydantic-settings==2.11.0", "Pygments==2.19.2", "PyOpenGL==3.1.10", "PyOpenGL-accelerate==3.1.10", "pypandoc==1.15", "pyparsing==3.2.5", "pypdf==6.1.3", "pyreadline3==3.5.4", "python-dateutil==2.9.0.post0", "python-docx==1.2.0", "python-dotenv==1.1.1", "python-iso639==2025.2.18", "python-magic==0.4.27", "pytz==2025.2", "PyYAML==6.0.3", "rapidfuzz==3.14.3", "red-black-tree-mod==1.22", "referencing==0.37.0", "regex==2025.10.23", "requests==2.32.5", "requests-toolbelt==1.0.0", "rpds-py", "rich==14.2.0", "RTFDE==0.1.2.2", "ruamel.yaml==0.18.16", "ruamel.yaml.clib==0.2.14", "s3tokenizer==0.2.0", "safetensors==0.6.2", "scikit-learn==1.7.2", "scipy==1.16.3", "sentence-transformers==5.1.2", "sentencepiece==0.2.1", "six==1.17.0", "sniffio==1.3.1", "sounddevice==0.5.3", "soundfile==0.13.1", "soupsieve==2.8", "speechbrain==0.5.16", "SQLAlchemy==2.0.44", "sseclient-py==1.8.0", "striprtf==0.0.29", "sympy==1.13.3", "tabulate2==1.10.2", "tenacity==9.1.2", "termcolor==3.2.0", "tessdata==1.0.0", "tessdata.eng==1.0.0", "threadpoolctl==3.6.0", "tiktoken==0.12.0", "tiledb==0.36.0", "tiledb-cloud==0.14.4", "tiledb-vector-search==0.16.0", "timm==1.0.20", "tokenizers==0.22.1", "tqdm==4.67.1", "transformers==4.57.4", "typing-inspection==0.4.2", "typing_extensions==4.15.0", 
"unstructured-client==0.42.3", "virtualenv==20.35.3", "tzdata==2025.2", "tzlocal==5.3.1", "urllib3==2.5.0", "vector-quantize-pytorch==1.24.2", "vocos==0.1.0", "watchdog==6.0.0", "wcwidth==0.2.14", "webdataset==1.0.2", "webencodings==0.5.1", "whisper-s2t-reborn>=1.6.0,<2", "whisperspeech2>=1.0.0,<2", "win-unicode-console==0.5", "wrapt==1.17.3", "xlrd==2.0.2", "xxhash==3.6.0", "yarl==1.22.0", "zipp==3.23.0", "zstandard==0.25.0" ] full_install_libs = [ "PySide6==6.10.0", "pymupdf==1.26.5", "unstructured==0.18.15", ] BACKEND_DEPENDENCIES = { "kyutai": { "moshi": "0.2.13", "sphn": "0.2.0" }, "kyutaipocket": { "pocket_tts": "2.0.0" }, "bark": { }, "whisperspeech": { }, "chattts": { }, "chatterbox": { }, "googletts": { } } CHAT_MODELS = { 'LiquidAI - .35b': { 'model': 'LiquidAI - .35b', 'repo_id': 'LiquidAI/LFM2-350M', 'cache_dir': 'LiquidAI--LFM2-350M', 'cps': 251.69, 'vram': 888.05, 'function': 'LiquidAI', 'precision': 'bfloat16', 'gated': False, 'license': 'lfm1.0', 'max_new_tokens': 1024, }, 'Qwen 3 - 0.6b (Thinking)': { 'model': 'Qwen 3 - 0.6b (Thinking)', 'repo_id': 'Qwen/Qwen3-0.6B', 'cache_dir': 'Qwen--Qwen3-0.6B', 'cps': 203.25, 'vram': 1293.37, 'function': 'Qwen', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', 'max_new_tokens': 2048, }, 'LiquidAI - .7b': { 'model': 'LiquidAI - .7b', 'repo_id': 'LiquidAI/LFM2-700M', 'cache_dir': 'LiquidAI--LFM2-700M', 'cps': 328.76, 'vram': 1204.43, 'function': 'LiquidAI', 'precision': 'bfloat16', 'gated': False, 'license': 'lfm1.0', 'max_new_tokens': 2048, }, 'LiquidAI - 1.2b': { 'model': 'LiquidAI - 1.2b', 'repo_id': 'LiquidAI/LFM2-1.2B', 'cache_dir': 'LiquidAI--LFM2-1.2B', 'cps': 293.37, 'vram': 1621.93, 'function': 'LiquidAI', 'precision': 'bfloat16', 'gated': False, 'license': 'lfm1.0', 'max_new_tokens': 2048, }, 'Qwen 3 - 1.7b (Thinking)': { 'model': 'Qwen 3 - 1.7b (Thinking)', 'repo_id': 'Qwen/Qwen3-1.7B', 'cache_dir': 'Qwen--Qwen3-1.7B', 'cps': 200.81, 'vram': 2603.93, 'function': 'Qwen', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', 'max_new_tokens': 2048, }, 'Granite - 2b': { 'model': 'Granite - 2b', 'repo_id': 'ibm-granite/granite-3.3-2b-instruct', 'cache_dir': 'ibm-granite--granite-3.3-2b-instruct', 'cps': 155.22, 'vram': 3141.37, 'function': 'Granite', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', 'max_new_tokens': 1024, }, 'Qwen 3 - 4b': { 'model': 'Qwen 3 - 4b', 'repo_id': 'Qwen/Qwen3-4B-Instruct-2507', 'cache_dir': 'Qwen--Qwen3-4B-Instruct-2507', 'cps': 153.87, 'vram': 4439.74, 'function': 'Qwen', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', 'max_new_tokens': 2048, }, 'Qwen 3 - 4b (Thinking)': { 'model': 'Qwen 3 - 4b (Thinking)', 'repo_id': 'Qwen/Qwen3-4B-Thinking-2507', 'cache_dir': 'Qwen--Qwen3-4B-Thinking-2507', 'cps': 153.87, 'vram': 4439.74, 'function': 'Qwen', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', 'max_new_tokens': 2048, }, 'Phi 4 Mini - 4b': { 'model': 'Phi 4 Mini - 4b', 'repo_id': 'microsoft/Phi-4-mini-instruct', 'cache_dir': 'microsoft--Phi-4-mini-instruct', 'cps': 222.77, 'vram': 4761.80, 'function': 'Phi4', 'precision': 'bfloat16', 'gated': False, 'license': 'mit', 'max_new_tokens': 2048, }, 'Qwen 3 - 8b (Thinking)': { 'model': 'Qwen 3 - 8b (Thinking)', 'repo_id': 'Qwen/Qwen3-8B', 'cache_dir': 'Qwen--Qwen3-8B', 'cps': 152.61, 'vram': 8390.24, 'function': 'Qwen', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', 'max_new_tokens': 2048, }, 'Deepseek R1 - 8b (Thinking)': { 'model': 'Deepseek R1 - 8b 
(Thinking)', 'repo_id': 'deepseek-ai/DeepSeek-R1-0528-Qwen3-8B', 'cache_dir': 'deepseek-ai--DeepSeek-R1-0528-Qwen3-8B', 'cps': 171.55, 'vram': 8425.49, 'function': 'DeepseekR1', 'precision': 'bfloat16', 'gated': False, 'license': 'mit', 'max_new_tokens': 2048, }, 'Seed Coder - 8b': { 'model': 'Seed Coder - 8b', 'repo_id': 'ByteDance-Seed/Seed-Coder-8B-Instruct', 'cache_dir': 'ByteDance-Seed--Seed-Coder-8B-Instruct', 'cps': 183.82, 'vram': 8441.93, 'function': 'SeedCoder', 'precision': 'bfloat16', 'gated': False, 'license': 'mit', 'max_new_tokens': 2048, }, 'Granite - 8b': { 'model': 'Granite - 8b', 'repo_id': 'ibm-granite/granite-3.3-8b-instruct', 'cache_dir': 'ibm-granite--granite-3.3-8b-instruct', 'cps': 173.62, 'vram': 8513.93, 'function': 'Granite', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', }, 'GLM4-Z1 - 9b (Thinking)': { 'model': 'GLM4-Z1 - 9b (Thinking)', 'repo_id': 'THUDM/GLM-Z1-9B-0414', 'cache_dir': 'THUDM--GLM-Z1-9B-0414', 'cps': 395.18, 'vram': 9592.77, 'function': 'GLM4Z1', 'precision': 'bfloat16', 'gated': False, 'license': 'mit', 'max_new_tokens': 2048, }, 'Qwen 3 - 14b (Thinking)': { 'model': 'Qwen 3 - 14b (Thinking)', 'repo_id': 'Qwen/Qwen3-14B', 'cache_dir': 'Qwen--Qwen3-14B', 'cps': 140.79, 'vram': 11597.37, 'function': 'Qwen', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', 'max_new_tokens': 4096, }, 'Mistral Small 3 - 24b': { 'model': 'Mistral Small 3 - 24b', 'repo_id': 'mistralai/Mistral-Small-24B-Instruct-2501', 'cache_dir': 'mistralai--Mistral-Small-24B-Instruct-2501', 'cps': 134.32, 'vram': 14790.80, 'function': 'Mistral_Small_24b', 'precision': 'bfloat16', 'gated': True, 'license': 'apache-2.0', 'max_new_tokens': 4096, }, 'Qwen 3 - 32b (Thinking)': { 'model': 'Qwen 3 - 32b (Thinking)', 'repo_id': 'Qwen/Qwen3-32B', 'cache_dir': 'Qwen--Qwen3-32B', 'cps': 97.56, 'vram': 19493.55, 'function': 'Qwen', 'precision': 'bfloat16', 'gated': False, 'license': 'apache-2.0', 'max_new_tokens': 4096, }, 'GLM4-Z1 - 32b (Thinking)': { 'model': 'GLM4-Z1 - 32b (Thinking)', 'repo_id': 'THUDM/GLM-Z1-32B-0414', 'cache_dir': 'THUDM--GLM-Z1-32B-0414', 'cps': 121.65, 'vram': 19947.77, 'function': 'GLM4Z1', 'precision': 'bfloat16', 'gated': False, 'license': 'mit', 'max_new_tokens': 4096, }, } VECTOR_MODELS = { 'BAAI': [ { 'name': 'bge-small-en-v1.5', 'dimensions': 384, 'max_sequence': 512, 'size_mb': 134, 'repo_id': 'BAAI/bge-small-en-v1.5', 'cache_dir': 'BAAI--bge-small-en-v1.5', 'type': 'vector', 'parameters': '33.4m', 'precision': 'float32', 'rank': 12, 'license': 'mit', }, { 'name': 'bge-base-en-v1.5', 'dimensions': 768, 'max_sequence': 512, 'size_mb': 438, 'repo_id': 'BAAI/bge-base-en-v1.5', 'cache_dir': 'BAAI--bge-base-en-v1.5', 'type': 'vector', 'parameters': '109m', 'precision': 'float32', 'rank': 10, 'license': 'mit', }, { 'name': 'bge-large-en-v1.5', 'dimensions': 1024, 'max_sequence': 512, 'size_mb': 1340, 'repo_id': 'BAAI/bge-large-en-v1.5', 'cache_dir': 'BAAI--bge-large-en-v1.5', 'type': 'vector', 'parameters': '335m', 'precision': 'float32', 'rank': 7, 'license': 'mit', }, ], 'Google': [ { 'name': 'embeddinggemma-300m', 'dimensions': 768, 'max_sequence': 2048, 'size_mb': 1210, 'repo_id': 'google/embeddinggemma-300m', 'cache_dir': 'google--embeddinggemma-300m', 'type': 'vector', 'parameters': '303m', 'precision': 'float32', 'rank': 4, 'license': 'gemma - commercial ok', }, ], 'IBM': [ { 'name': 'Granite-30m-English', 'dimensions': 384, 'max_sequence': 512, 'size_mb': 61, 'repo_id': 'ibm-granite/granite-embedding-30m-english', 
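# Sketch: choosing the largest local chat model that fits a VRAM budget, using the 'vram'
# estimates (MB) recorded in CHAT_MODELS above (assumption: the 1024-token fallback covers
# entries such as 'Granite - 8b' that do not declare 'max_new_tokens').
def best_fit_chat_model(budget_mb: float) -> tuple[str, dict] | None:
    candidates = [(name, spec) for name, spec in CHAT_MODELS.items() if spec["vram"] <= budget_mb]
    if not candidates:
        return None
    name, spec = max(candidates, key=lambda pair: pair[1]["vram"])
    spec = dict(spec, max_new_tokens=spec.get("max_new_tokens", 1024))
    return name, spec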
'cache_dir': 'ibm-granite--granite-embedding-30m-english', 'type': 'vector', 'parameters': '30.3m', 'precision': 'bfloat16', 'rank': 14, 'license': 'apache-2.0', }, { 'name': 'Granite-125m-English', 'dimensions': 768, 'max_sequence': 512, 'size_mb': 249, 'repo_id': 'ibm-granite/granite-embedding-125m-english', 'cache_dir': 'ibm-granite--granite-embedding-125m-english', 'type': 'vector', 'parameters': '125m', 'precision': 'bfloat16', 'rank': 13, 'license': 'apache-2.0', }, ], 'infly': [ { 'name': 'inf-retriever-v1-1.5b', 'dimensions': 1536, 'max_sequence': 8192, 'size_mb': 3090, 'repo_id': 'infly/inf-retriever-v1-1.5b', 'cache_dir': 'infly--inf-retriever-v1-1.5b', 'type': 'vector', 'parameters': '1540m', 'precision': 'bfloat16', 'rank': 16, 'license': 'apache-2.0', }, { 'name': 'inf-retriever-v1-7b', 'dimensions': 3584, 'max_sequence': 8192, 'size_mb': 14130, 'repo_id': 'infly/inf-retriever-v1', 'cache_dir': 'infly--inf-retriever-v1-7b', 'type': 'vector', 'parameters': '7070m', 'precision': 'bfloat16', 'rank': 15, 'license': 'apache-2.0', }, ], 'intfloat': [ { 'name': 'e5-small-v2', 'dimensions': 384, 'max_sequence': 512, 'size_mb': 134, 'repo_id': 'intfloat/e5-small-v2', 'cache_dir': 'intfloat--e5-small-v2', 'type': 'vector', 'parameters': '33.4m', 'precision': 'float32', 'rank': 11, 'license': 'mit', }, { 'name': 'e5-base-v2', 'dimensions': 768, 'max_sequence': 512, 'size_mb': 438, 'repo_id': 'intfloat/e5-base-v2', 'cache_dir': 'intfloat--e5-base-v2', 'type': 'vector', 'parameters': '109m', 'precision': 'float32', 'rank': 8, 'license': 'mit', }, { 'name': 'e5-large-v2', 'dimensions': 1024, 'max_sequence': 512, 'size_mb': 1340, 'repo_id': 'intfloat/e5-large-v2', 'cache_dir': 'intfloat--e5-large-v2', 'type': 'vector', 'parameters': '335m', 'precision': 'float32', 'rank': 7, 'license': 'mit', }, ], 'Qwen': [ { 'name': 'Qwen3-Embedding-0.6B', 'dimensions': 1024, 'max_sequence':8192, 'size_mb': 1190, 'repo_id': 'Qwen/Qwen3-Embedding-0.6B', 'cache_dir': 'Qwen--Qwen3-Embedding-0.6B', 'type': 'vector', 'parameters': '596m', 'precision': 'bfloat16', 'rank': 3, 'license': 'apache-2.0', }, { 'name': 'Qwen3-Embedding-4B', 'dimensions': 2560, 'max_sequence':8192, 'size_mb': 4970, 'repo_id': 'Qwen/Qwen3-Embedding-4B', 'cache_dir': 'Qwen--Qwen3-Embedding-4B', 'type': 'vector', 'parameters': '4020m', 'precision': 'bfloat16', 'rank': 2, 'license': 'apache-2.0', }, { 'name': 'Qwen3-Embedding-8B', 'dimensions': 4096, 'max_sequence':8192, 'size_mb': 15136, 'repo_id': 'Qwen/Qwen3-Embedding-8B', 'cache_dir': 'Qwen--Qwen3-Embedding-8B', 'type': 'vector', 'parameters': '7570m', 'precision': 'bfloat16', 'rank': 1, 'license': 'apache-2.0', }, ], 'Snowflake': [ { 'name': 'arctic-embed-m-v2.0', 'dimensions': 768, 'max_sequence':8192, 'size_mb': 1220, 'repo_id': 'Snowflake/snowflake-arctic-embed-m-v2.0', 'cache_dir': 'Snowflake--snowflake-arctic-embed-m-v2.0', 'type': 'vector', 'parameters': '305m', 'precision': 'float32', 'rank': 6, 'license': 'apache-2.0', }, { 'name': 'arctic-embed-l-v2.0', 'dimensions': 1024, 'max_sequence': 8192, 'size_mb': 2270, 'repo_id': 'Snowflake/snowflake-arctic-embed-l-v2.0', 'cache_dir': 'Snowflake--snowflake-arctic-embed-l-v2.0', 'type': 'vector', 'parameters': '568m', 'precision': 'float32', 'rank': 5, 'license': 'apache-2.0', }, ], } VISION_MODELS = { 'Liquid-VL - 480M': { 'precision': 'bfloat16', 'quant': 'n/a', 'size': '480m', 'repo_id': 'LiquidAI/LFM2-VL-450M', 'cache_dir': 'LiquidAI--LFM2-VL-450M', 'requires_cuda': False, 'vram': '628 MB', 'speed': '497.64 char/s', 'avg_length': 
855, 'loader': 'loader_liquidvl', 'vision_component': 'SigLIP2 NaFlex base (86M)', 'chat_component': 'LFM2-350M', 'license': 'lfm1.0', }, 'Liquid-VL - 1.6B': { 'precision': 'bfloat16', 'quant': 'n/a', 'size': '1.6b', 'repo_id': 'LiquidAI/LFM2-VL-1.6B', 'cache_dir': 'LiquidAI--LFM2-VL-1.6B', 'requires_cuda': False, 'vram': '1.4 GB', 'speed': '437.5 char/s', 'avg_length': 722, 'loader': 'loader_liquidvl', 'vision_component': 'SigLIP2 NaFlex shape‑optimized (400M)', 'chat_component': 'LFM2-1.2B', 'license': 'lfm1.0', }, 'InternVL3 - 1b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '1b', 'repo_id': 'OpenGVLab/InternVL3-1B', 'cache_dir': 'OpenGVLab--InternVL3-1B', 'requires_cuda': False, 'vram': '2.4 GB', 'avg_length': 560, 'loader': 'loader_internvl', 'vision_component': 'InternViT-300M-448px-V2_5', 'chat_component': 'Qwen2.5-0.5B', 'license': 'apache-2.0', }, 'InternVL3 - 2b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '2b', 'repo_id': 'OpenGVLab/InternVL3-2B', 'cache_dir': 'OpenGVLab--InternVL3-2B', 'requires_cuda': False, 'vram': '3.2 GB', 'avg_length': 626, 'loader': 'loader_internvl', 'vision_component': 'InternViT-300M-448px-V2_5', 'chat_component': 'Qwen2.5-1.5B', 'license': 'apache-2.0', }, 'Granite Vision - 2b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '2b', 'repo_id': 'ibm-granite/granite-vision-3.2-2b', 'cache_dir': 'ibm-granite--granite-vision-3.2-2b', 'requires_cuda': False, 'vram': '4.1 gb+', 'avg_length': 966, 'loader': 'loader_granite', 'vision_component': 'siglip-so400m-patch14-384', 'chat_component': 'granite-3.1-2b-instruct', 'license': 'apache-2.0', }, 'Qwen VL - 2b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '2b', 'repo_id': 'Qwen/Qwen3-VL-2B-Instruct', 'cache_dir': 'Qwen--Qwen3-VL-2B-Instruct', 'requires_cuda': True, 'vram': '4.1 GB', 'avg_length': 975, 'loader': 'loader_qwenvl', 'vision_component': 'Custom ViT', 'chat_component': 'Qwen2.5-3B-Instruct', 'license': 'apache-2.0', }, 'Liquid-VL - 3B': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '3b', 'repo_id': 'LiquidAI/LFM2-VL-3B', 'cache_dir': 'LiquidAI--LFM2-VL-3B', 'requires_cuda': True, 'vram': '6.3 GB', 'avg_length': 807, 'loader': 'loader_liquidvl', 'vision_component': 'SigLIP2 400M NaFlex', 'chat_component': 'LFM2-2.6B', 'license': 'Commercial under 10M Revenue', }, 'Qwen VL - 3b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '3b', 'repo_id': 'Qwen/Qwen2.5-VL-3B-Instruct', 'cache_dir': 'Qwen--Qwen2.5-VL-3B-Instruct', 'requires_cuda': True, 'vram': '6.3 GB', 'avg_length': 703, 'loader': 'loader_qwenvl', 'vision_component': 'Custom ViT', 'chat_component': 'Qwen2.5-3B-Instruct', 'license': 'Custom Non-Commercial', }, 'Qwen VL - 4b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '4b', 'repo_id': 'Qwen/Qwen3-VL-4B-Instruct', 'cache_dir': 'Qwen--Qwen3-VL-4B-Instruct', 'requires_cuda': True, 'vram': '6.3 GB', 'avg_length': 975, 'loader': 'loader_qwenvl', 'vision_component': 'Custom ViT', 'chat_component': 'Qwen3-3B-Instruct', 'license': 'apache-2.0', }, 'InternVL3 - 8b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '1b', 'repo_id': 'OpenGVLab/InternVL3-8B', 'cache_dir': 'OpenGVLab--InternVL3-8B', 'requires_cuda': True, 'vram': '8.2 GB', 'avg_length': 717, 'loader': 'loader_internvl', 'vision_component': 'InternViT-300M-448px-V2_5', 'chat_component': 'Qwen2.5-7B', 'license': 'apache-2.0', }, 'Qwen VL - 7b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '7b', 'repo_id': 'Qwen/Qwen2.5-VL-7B-Instruct', 'cache_dir': 'Qwen--Qwen2.5-VL-7B-Instruct', 
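# Sketch: checking a character-based chunk size against an embedding model's token-based
# max_sequence from VECTOR_MODELS above, using the ~3-4 characters-per-token rule of thumb
# mentioned in the CHUNK_SIZE tooltip further below (3.0 is assumed so the token estimate errs high).
def chunk_fits(chunk_size_chars: int, model_name: str, chars_per_token: float = 3.0) -> bool:
    spec = next(m for group in VECTOR_MODELS.values() for m in group if m["name"] == model_name)
    return chunk_size_chars / chars_per_token <= spec["max_sequence"]

assert chunk_fits(1200, "bge-small-en-v1.5")   # ~400 estimated tokens against a 512-token window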
'requires_cuda': True, 'vram': '9.6 GB', 'avg_length': 918, 'loader': 'loader_qwenvl', 'vision_component': 'Custom ViT', 'chat_component': 'Qwen2.5-7-Instruct', 'license': 'Custom Non-Commercial', }, 'GLM-4.1V-9B-Thinking': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '9b', 'repo_id': 'zai-org/GLM-4.1V-9B-Thinking', 'cache_dir': 'zai-org--GLM-4.1V-9B-Thinking', 'requires_cuda': True, 'vram': '10 GB', 'avg_length': 653, 'loader': 'loader_glmv4_thinking', 'vision_component': 'AIMv2-Huge-336', 'chat_component': 'GLM-4-9B-0414', 'license': 'mit', }, 'InternVL3 - 14b': { 'precision': 'bfloat16', 'quant': '4-bit', 'size': '1b', 'repo_id': 'OpenGVLab/InternVL3-14B', 'cache_dir': 'OpenGVLab--InternVL3-14B', 'requires_cuda': True, 'vram': '12.6 GB', 'avg_length': 757, 'loader': 'loader_internvl', 'vision_component': 'InternViT-300M-448px-V2_5', 'chat_component': 'Qwen2.5-14B', 'license': 'apache-2.0', }, } OCR_MODELS = { 'GOT-OCR2': { 'precision': 'bfloat16', 'size': '716m', 'repo_id': 'ctranslate2-4you/GOT-OCR2_0-Customized', 'cache_dir': 'ctranslate2-4you--GOT-OCR2_0-Customized', 'requires_cuda': True, 'license': 'apache-2.0', }, } TTS_MODELS = { "Kokoro": { "model": "Kokoro", "repo_id": "ctranslate2-4you/Kokoro-82M-light", "save_dir": "ctranslate2-4you--Kokoro-82M-light", "cps": 20.5, "vram": "2GB", "precision": "float32", "gated": False, 'license': 'apache-2.0', "allow_patterns": [ "voices/**", "config.json", "istftnet.py", "kokoro-v0_19.pth", "kokoro.py", "models.py", "plbert.py" ], }, "Bark - Normal": { "model": "Bark - Normal", "repo_id": "suno/bark", "save_dir": "tts", "cps": 18.2, "vram": "4GB", "precision": "float32", "gated": False, 'license': 'mit', "allow_patterns": [ "voices/**", "config.json", "istftnet.py", "kokoro-v0_19.pth", "plbert.py" ], "ignore_patterns": [ "demo/**", "fp16/**", ".gitattributes", "kokoro-v0_19.onnx", "kokoro.py", "models.py", ] }, "Bark - Small": { "model": "Bark - Small", "repo_id": "suno/bark-small", "save_dir": "tts", "cps": 18.2, "vram": "4GB", "precision": "float32", "gated": False, 'license': 'mit', "allow_patterns": [ "voices/**", "config.json", "istftnet.py", "kokoro-v0_19.pth", "plbert.py" ], "ignore_patterns": [ "demo/**", "fp16/**", ".gitattributes", "kokoro-v0_19.onnx", "kokoro.py", "models.py", ] }, "WhisperSpeech": { "model": "WhisperSpeech", "repo_id": "WhisperSpeech/WhisperSpeech", "save_dir": "tts", "cps": 18.2, "vram": "4GB", "precision": "fp32", "gated": False, 'license': 'mit', "allow_patterns": [ "voices/**", "config.json", "istftnet.py", "kokoro-v0_19.pth", "plbert.py" ], "ignore_patterns": [ "demo/**", "fp16/**", ".gitattributes", "kokoro-v0_19.onnx", "kokoro.py", "models.py", ] }, "ChatTTS": { "model": "ChatTTS", "repo_id": "2Noise/ChatTTS", "save_dir": "tts", "cps": 18.2, "vram": "4GB", "precision": "fp32", "gated": False, 'license': 'CCA Non-Commercial 4.0', "allow_patterns": [ "asset/**", "config/**", ], "ignore_patterns": [ "demo/**", "fp16/**", ".gitattributes", "kokoro-v0_19.onnx", "kokoro.py", "models.py", ] }, } JEEVES_MODELS = { "Llama - 3b": { "original_repo": "meta-llama/Llama-3.2-3B-Instruct", "repo": "ctranslate2-4you/Llama-3.2-3B-Instruct-ct2-int8", "folder_name": "ctranslate2-4you--Llama-3.2-3B-Instruct-ct2-int8", "prompt_format": """<|begin_of_text|><|start_header_id|>system<|end_header_id|> Cutting Knowledge Date: December 2023 {jeeves_system_message}<|eot_id|> <|start_header_id|>user<|end_header_id|> {user_message}<|eot_id|> <|start_header_id|>assistant<|end_header_id|>""" }, "Qwen - 3b": { "original_repo": 
"Qwen/Qwen2.5-3B-Instruct", "repo": "ctranslate2-4you/Qwen2.5-3B-Instruct-ct2-int8", "folder_name": "ctranslate2-4you--Qwen2.5-3B-Instruct-ct2-int8", "prompt_format": """<|im_start|>system {jeeves_system_message}<|im_end|> <|im_start|>user {user_message}<|im_end|> <|im_start|>assistant""" }, "Danube - 4b": { "original_repo": "h2oai/h2o-danube3-4b-chat", "repo": "ctranslate2-4you/h2o-danube3-4b-chat-ct2-int8", "folder_name": "ctranslate2-4you--h2o-danube3.1-4b-chat-ct2-int8", "prompt_format": """<|system|>{jeeves_system_message}<|prompt|>{user_message}<|answer|>""" }, } WHISPER_SPEECH_MODELS = { "s2a": { "s2a-q4-tiny": ("s2a-q4-tiny-en+pl.model", 77), "s2a-q4-base": ("s2a-q4-base-en+pl.model", 193), "s2a-q4-hq-fast": ("s2a-q4-hq-fast-en+pl.model", 363), "s2a-q4-small": ("s2a-q4-small-en+pl.model", 833), "s2a-v1.1-small": ("s2a-v1.1-small-en+pl.model", 417), }, "t2s": { "t2s-tiny": ("t2s-tiny-en+pl.model", 71), "t2s-base": ("t2s-base-en+pl.model", 184), "t2s-small": ("t2s-small-en+pl.model", 817), "t2s-fast-small": ("t2s-fast-small-en+pl.model", 709), "t2s-fast-medium": ("t2s-fast-medium-en+pl+yt.model", 1254), "t2s-hq-fast": ("t2s-hq-fast-en+pl.model", 709), } } WHISPER_MODELS = { 'Distil Whisper large-v3 - float32': { 'name': 'Distil Whisper large-v3', 'precision': 'float32', 'repo_id': 'ctranslate2-4you/distil-whisper-large-v3-ct2-float32', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Distil Whisper large-v3 - bfloat16': { 'name': 'Distil Whisper large-v3', 'precision': 'bfloat16', 'repo_id': 'ctranslate2-4you/distil-whisper-large-v3-ct2-bfloat16', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Distil Whisper large-v3 - float16': { 'name': 'Distil Whisper large-v3', 'precision': 'float16', 'repo_id': 'ctranslate2-4you/distil-whisper-large-v3-ct2-float16', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Whisper large-v3 - float32': { 'name': 'Whisper large-v3', 'precision': 'float32', 'repo_id': 'ctranslate2-4you/whisper-large-v3-ct2-float32', 'cps': 85, 'optimal_batch_size': 2, 'vram': '5.5 GB' }, 'Whisper large-v3 - bfloat16': { 'name': 'Whisper large-v3', 'precision': 'bfloat16', 'repo_id': 'ctranslate2-4you/whisper-large-v3-ct2-bfloat16', 'cps': 95, 'optimal_batch_size': 3, 'vram': '3.8 GB' }, 'Whisper large-v3 - float16': { 'name': 'Whisper large-v3', 'precision': 'float16', 'repo_id': 'ctranslate2-4you/whisper-large-v3-ct2-float16', 'cps': 100, 'optimal_batch_size': 3, 'vram': '3.3 GB' }, 'Distil Whisper medium.en - float32': { 'name': 'Distil Whisper large-v3', 'precision': 'float32', 'repo_id': 'ctranslate2-4you/distil-whisper-medium.en-ct2-float32', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Distil Whisper medium.en - bfloat16': { 'name': 'Distil Whisper medium.en', 'precision': 'bfloat16', 'repo_id': 'ctranslate2-4you/distil-whisper-medium.en-ct2-bfloat16', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Distil Whisper medium.en - float16': { 'name': 'Distil Whisper medium.en', 'precision': 'float16', 'repo_id': 'ctranslate2-4you/distil-whisper-medium.en-ct2-float16', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Whisper medium.en - float32': { 'name': 'Whisper medium.en', 'precision': 'float32', 'repo_id': 'ctranslate2-4you/whisper-medium.en-ct2-float32', 'cps': 130, 'optimal_batch_size': 6, 'vram': '2.5 GB' }, 'Whisper medium.en - bfloat16': { 'name': 'Whisper medium.en', 'precision': 'bfloat16', 'repo_id': 'ctranslate2-4you/whisper-medium.en-ct2-bfloat16', 'cps': 140, 'optimal_batch_size': 7, 'vram': '2.0 
GB' }, 'Whisper medium.en - float16': { 'name': 'Whisper medium.en', 'precision': 'float16', 'repo_id': 'ctranslate2-4you/whisper-medium.en-ct2-float16', 'cps': 145, 'optimal_batch_size': 7, 'vram': '1.8 GB' }, 'Distil Whisper small.en - float32': { 'name': 'Distil Whisper small.en', 'precision': 'float32', 'repo_id': 'ctranslate2-4you/distil-whisper-small.en-ct2-float32', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Distil Whisper small.en - bfloat16': { 'name': 'Distil Whisper small.en', 'precision': 'bfloat16', 'repo_id': 'ctranslate2-4you/distil-whisper-small.en-ct2-bfloat16', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Distil Whisper small.en - float16': { 'name': 'Distil Whisper small.en', 'precision': 'float16', 'repo_id': 'ctranslate2-4you/distil-whisper-small.en-ct2-float16', 'cps': 160, 'optimal_batch_size': 4, 'vram': '3.0 GB' }, 'Whisper small.en - float32': { 'name': 'Whisper small.en', 'precision': 'float32', 'repo_id': 'ctranslate2-4you/whisper-small.en-ct2-float32', 'cps': 180, 'optimal_batch_size': 14, 'vram': '1.5 GB' }, 'Whisper small.en - bfloat16': { 'name': 'Whisper small.en', 'precision': 'bfloat16', 'repo_id': 'ctranslate2-4you/whisper-small.en-ct2-bfloat16', 'cps': 190, 'optimal_batch_size': 15, 'vram': '1.2 GB' }, 'Whisper small.en - float16': { 'name': 'Whisper small.en', 'precision': 'float16', 'repo_id': 'ctranslate2-4you/whisper-small.en-ct2-float16', 'cps': 195, 'optimal_batch_size': 15, 'vram': '1.1 GB' }, 'Whisper base.en - float32': { 'name': 'Whisper base.en', 'precision': 'float32', 'repo_id': 'ctranslate2-4you/whisper-base.en-ct2-float32', 'cps': 230, 'optimal_batch_size': 22, 'vram': '1.0 GB' }, 'Whisper base.en - bfloat16': { 'name': 'Whisper base.en', 'precision': 'bfloat16', 'repo_id': 'ctranslate2-4you/whisper-base.en-ct2-bfloat16', 'cps': 240, 'optimal_batch_size': 23, 'vram': '0.85 GB' }, 'Whisper base.en - float16': { 'name': 'Whisper base.en', 'precision': 'float16', 'repo_id': 'ctranslate2-4you/whisper-base.en-ct2-float16', 'cps': 245, 'optimal_batch_size': 23, 'vram': '0.8 GB' }, 'Whisper tiny.en - float32': { 'name': 'Whisper tiny.en', 'precision': 'float32', 'repo_id': 'ctranslate2-4you/whisper-tiny.en-ct2-float32', 'cps': 280, 'optimal_batch_size': 30, 'vram': '0.7 GB' }, 'Whisper tiny.en - bfloat16': { 'name': 'Whisper tiny.en', 'precision': 'bfloat16', 'repo_id': 'ctranslate2-4you/whisper-tiny.en-ct2-bfloat16', 'cps': 290, 'optimal_batch_size': 31, 'vram': '0.6 GB' }, 'Whisper tiny.en - float16': { 'name': 'Whisper tiny.en', 'precision': 'float16', 'repo_id': 'ctranslate2-4you/whisper-tiny.en-ct2-float16', 'cps': 295, 'optimal_batch_size': 31, 'vram': '0.55 GB' }, } DOCUMENT_LOADERS = { ".pdf": "CustomPyMuPDFLoader", ".docx": "Docx2txtLoader", ".txt": "TextLoader", ".enex": "EverNoteLoader", ".epub": "UnstructuredEPubLoader", ".eml": "UnstructuredEmailLoader", ".msg": "UnstructuredEmailLoader", ".csv": "CSVLoader", ".xls": "UnstructuredExcelLoader", ".xlsx": "UnstructuredExcelLoader", ".xlsm": "UnstructuredExcelLoader", ".rtf": "UnstructuredRTFLoader", ".odt": "UnstructuredODTLoader", ".md": "UnstructuredMarkdownLoader", ".html": "BSHTMLLoader", } THINKING_TAGS = { "think": ("", ""), "thinking": ("", "") } TOOLTIPS = { "AUDIO_FILE_SELECT": "Select an audio file. Supports various audio formats.", "CHOOSE_FILES": "Select documents to add to the database. Remember to transcribe audio files in the Tools tab first.", "CHUNK_OVERLAP": "Characters shared between chunks. 
Set to 25-50% of chunk size.", "CHUNK_SIZE": ( "" "Upper limit (in characters, not tokens) that a chunk can be after being split. Make sure that it falls within" "the Max Sequence of the embedding model being used, which is measured in tokens (not characters), remembering that" "approximately 3-4 characters = 1 token." "" ), "CHUNKS_ONLY": "Solely query the vector database and get relevant chunks. Very useful to test the chunk size/overlap settings.", "CONTEXTS": "Maximum number of chunks (aka contexts) to return.", "COPY_RESPONSE": "Copy the chunks (if chunks only is checked) or model's response to the clipboard.", "CREATE_DEVICE_DB": "Choose 'cpu' or 'cuda'. Use 'cuda' if available.", "CREATE_DEVICE_QUERY": "Choose 'cpu' or 'cuda'. 'cpu' recommended to conserve VRAM.", "CREATE_VECTOR_DB": "Creates a new vector database.", "DATABASE_NAME_INPUT": "Enter a unique database name. Use only lowercase letters, numbers, underscores, and hyphens.", "DATABASE_SELECT": "Vector database that will be queried.", "DOWNLOAD_MODEL": "Download the selected vector model.", "EJECT_LOCAL_MODEL": "Unload the current local model from memory.", "FILE_TYPE_FILTER": "Only allows chunks that originate from certain file types.", "HALF_PRECISION": "Uses bfloat16/float16 for 2x speedup. Requires a GPU.", "LOCAL_MODEL_SELECT": "Select a local model for generating responses.", "MODEL_BACKEND_SELECT": "Choose the backend for the large language model response.", "PORT": "Must match the port used in LM Studio.", "QUESTION_INPUT": "Type your question here or use the voice recorder.", "RESTORE_CONFIG": "Restores original config.yaml. May require manual database cleanup.", "RESTORE_DATABASE": "Restores backed-up databases. Use with caution.", "SEARCH_TERM_FILTER": "Removes chunks that do not contain this term as a case-insensitive substring.", "SELECT_VECTOR_MODEL": "Choose the vector model for text embedding.", "SIMILARITY": "Relevance threshold for chunks. 0-1, higher returns more. Don't use 1.", "SPEAK_RESPONSE": "Speak the response from the large language model using text-to-speech.", "SHOW_THINKING_CHECKBOX": "If checked, show the model's internal thought process. Only applies to models like Deepseek's R1 and it will be disregarded if not applicable.", "TRANSCRIBE_BUTTON": "Start transcription.", "TTS_MODEL": "Choose TTS model. Bark offers customization, Google requires internet.", "VECTOR_MODEL_DIMENSIONS": "Higher dimensions captures more nuance but requires more processing time.", "VECTOR_MODEL_DOWNLOADED": "Whether the model has been downloaded.", "VECTOR_MODEL_LINK": "Huggingface link.", "VECTOR_MODEL_MAX_SEQUENCE": "Number of tokens the model can process at once. Different from the Chunk Size setting, which is in characters.", "VECTOR_MODEL_NAME": "The name of the vector model.", "VECTOR_MODEL_PARAMETERS": "The number of internal weights and biases that the model learns and adjusts during training.", "VECTOR_MODEL_PRECISION": ( "" "" "

" "The precision ultimately used depends on your setup:

" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "
Compute DeviceEmbedding Model Precision'Half' Checked?Precision Ultimately Used
CPUAnyEitherfloat32
CUDAfloat16Yesfloat16
CUDAbfloat16Yes" "bfloat16 (if CUDA capability ≥ 8.0) or float16
CUDAfloat32Nofloat32
CUDAfloat32Yes" "bfloat16 (if CUDA capability ≥ 8.0) or float16" "
" "" "" ), "VECTOR_MODEL_SELECT": "Choose a vector model to download.", "VECTOR_MODEL_SIZE": "Size on disk.", "VISION_MODEL": "Select vision model for image processing. Test before bulk processing.", "VOICE_RECORDER": "Click to start recording, speak your question, then click again to stop recording.", "WHISPER_BATCH_SIZE": "Batch size for transcription. See the User Guid for optimal values.", "WHISPER_MODEL_SELECT": "Distil models use ~ 70% VRAM of their non-Distil equivalents with little quality loss." } scrape_documentation = { "Accelerate 1.7.0": { "URL": "https://huggingface.co/docs/accelerate/v1.7.0/en", "folder": "accelerate_170", "scraper_class": "HuggingfaceScraper" }, "aiohappyeyeballs": { "URL": "https://aiohappyeyeballs.readthedocs.io/en/stable/", "folder": "aiohappyeyeballs", "scraper_class": "FuroThemeScraper" }, "aiohttp": { "URL": "https://docs.aiohttp.org/en/stable/", "folder": "aiohttp" }, "aiosignal": { "URL": "https://aiosignal.aio-libs.org/en/stable/", "folder": "aiosignal" }, "anndata": { "URL": "https://anndata.readthedocs.io/en/stable/", "folder": "anndata", "scraper_class": "PydataThemeScraper" }, "anyio": { "URL": "https://anyio.readthedocs.io/en/stable/", "folder": "anyio", "scraper_class": "ReadthedocsScraper" }, "array_api_compat": { "URL": "https://data-apis.org/array-api-compat/", "folder": "array_api_compat", "scraper_class": "FuroThemeScraper" }, "attrs": { "URL": "https://www.attrs.org/en/stable/", "folder": "attrs", "scraper_class": "FuroThemeScraper" }, "Beautiful Soup 4": { "URL": "https://www.crummy.com/software/BeautifulSoup/bs4/doc/", "folder": "beautiful_soup_4" }, "bitsandbytes 0.48.2": { "URL": "https://huggingface.co/docs/bitsandbytes/v0.48.2/en/", "folder": "bitsandbytes_0482", "scraper_class": "HuggingfaceScraper" }, "cffi": { "URL": "https://cffi.readthedocs.io/en/stable/", "folder": "cffi", "scraper_class": "DivClassDocumentScraper" }, "chardet": { "URL": "https://chardet.readthedocs.io/en/stable/", "folder": "chardet", "scraper_class": "FuroThemeScraper" }, "charset-normalizer": { "URL": "https://charset-normalizer.readthedocs.io/en/stable/", "folder": "charset_normalizer", "scraper_class": "FuroThemeScraper" }, "click": { "URL": "https://click.palletsprojects.com/en/stable/", "folder": "click", "scraper_class": "BodyRoleMainScraper" }, "coloredlogs": { "URL": "https://coloredlogs.readthedocs.io/en/latest/", "folder": "coloredlogs", "scraper_class": "BodyRoleMainScraper" }, "contourpy": { "URL": "https://contourpy.readthedocs.io/en/stable/", "folder": "contourpy", "scraper_class": "FuroThemeScraper" }, "cryptography": { "URL": "https://cryptography.io/en/stable/", "folder": "cryptography", "scraper_class": "DivClassDocumentScraper" }, "CTranslate2": { "URL": "https://opennmt.net/CTranslate2/", "folder": "ctranslate2", "scraper_class": "DivClassDocumentScraper" }, "curl_cffi": { "URL": "https://curl-cffi.readthedocs.io/en/stable/", "folder": "curl_cffi", "scraper_class": "DivClassDocumentScraper" }, "cycler": { "URL": "https://matplotlib.org/cycler/", "folder": "cycler", "scraper_class": "BodyRoleMainScraper" }, "dataclasses-json": { "URL": "https://lidatong.github.io/dataclasses-json/", "folder": "dataclasses_json", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "datasets 4.3.0": { "URL": "https://huggingface.co/docs/datasets/v4.3.0/en/", "folder": "datasets_0430", "scraper_class": "HuggingfaceScraper" }, "deepdiff 8.6.1": { "URL": "https://zepworks.com/deepdiff/8.6.1/", "folder": "deepdiff_861", "scraper_class": 
"BodyRoleMainScraper" }, "Deprecated": { "URL": "https://deprecated.readthedocs.io/en/latest/", "folder": "deprecated", "scraper_class": "BodyRoleMainScraper" }, "deprecation": { "URL": "https://deprecation.readthedocs.io/en/latest/", "folder": "deprecation", "scraper_class": "BodyRoleMainScraper" }, "Diffusers 0.35.0": { "URL": "https://huggingface.co/docs/diffusers/v0.35.0/en/", "folder": "diffusers_0350", "scraper_class": "HuggingfaceScraper" }, "dill": { "URL": "https://dill.readthedocs.io/en/latest/", "folder": "dill", "scraper_class": "RtdThemeScraper" }, "distro": { "URL": "https://distro.readthedocs.io/en/stable/", "folder": "distro", "scraper_class": "BodyRoleMainScraper" }, "einops": { "URL": "https://einops.rocks/", "folder": "einops", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "einx": { "URL": "https://einx.readthedocs.io/en/stable/", "folder": "einx", "scraper_class": "PydataThemeScraper" }, "emoji": { "URL": "https://carpedm20.github.io/emoji/docs/", "folder": "emoji", "scraper_class": "DivClassDocumentScraper" }, "fastcore": { "URL": "https://fastcore.fast.ai/", "folder": "fastcore", "scraper_class": "FastcoreScraper" }, "filelock": { "URL": "https://py-filelock.readthedocs.io/en/stable/", "folder": "filelock", "scraper_class": "FuroThemeScraper" }, "fonttools": { "URL": "https://fonttools.readthedocs.io/en/stable/", "folder": "fonttools", "scraper_class": "DivClassDocumentScraper" }, "fsspec": { "URL": "https://filesystem-spec.readthedocs.io/en/stable/", "folder": "fsspec", "scraper_class": "RtdThemeScraper" }, "greenlet": { "URL": "https://greenlet.readthedocs.io/en/stable/", "folder": "greenlet", "scraper_class": "FuroThemeScraper" }, "gTTS": { "URL": "https://gtts.readthedocs.io/en/latest/", "folder": "gtts", "scraper_class": "RtdThemeScraper" }, "h11": { "URL": "https://h11.readthedocs.io/en/latest/", "folder": "h11", "scraper_class": "BodyRoleMainScraper" }, "HDF5": { "URL": "https://docs.h5py.org/en/stable/", "folder": "hdf5", "scraper_class": "RtdThemeScraper" }, "httpcore": { "URL": "https://www.encode.io/httpcore/", "folder": "httpcore", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "httpx": { "URL": "https://www.python-httpx.org/", "folder": "httpx", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "Huggingface Hub 0.36.0": { "URL": "https://huggingface.co/docs/huggingface_hub/v0.36.0/en/", "folder": "huggingface_hub_0360", "scraper_class": "HuggingfaceScraper" }, "humanfriendly": { "URL": "https://humanfriendly.readthedocs.io/en/latest/", "folder": "humanfriendly", "scraper_class": "BodyRoleMainScraper" }, "importlib_metadata": { "URL": "https://importlib-metadata.readthedocs.io/en/stable/", "folder": "importlib_metadata", "scraper_class": "FuroThemeScraper" }, "Jinja": { "URL": "https://jinja.palletsprojects.com/en/stable/", "folder": "jinja", "scraper_class": "BodyRoleMainScraper" }, "joblib": { "URL": "https://joblib.readthedocs.io/en/stable/", "folder": "kiwisolver", "scraper_class": "ReadthedocsScraper" }, "kiwisolver": { "URL": "https://kiwisolver.readthedocs.io/en/stable/", "folder": "kiwisolver", "scraper_class": "ReadthedocsScraper" }, "llvmlite": { "URL": "https://llvmlite.readthedocs.io/en/stable/", "folder": "llvmlite", "scraper_class": "RtdThemeScraper" }, "lxml": { "URL": "https://lxml.de/", "folder": "lxml", "scraper_class": "DivClassDocumentScraper" }, "Markdown": { "URL": "https://python-markdown.github.io/", "folder": "Markdown", "scraper_class": "BodyRoleMainScraper" }, "markdown-it-py": { "URL": 
"https://markdown-it-py.readthedocs.io/en/stable/", "folder": "markdown_it_py", "scraper_class": "PydataThemeScraper" }, "markupsafe": { "URL": "https://markupsafe.palletsprojects.com/en/stable/", "folder": "markupsafe", "scraper_class": "BodyRoleMainScraper" }, "marshmallow": { "URL": "https://marshmallow.readthedocs.io/en/stable/", "folder": "marshmallow", "scraper_class": "FuroThemeScraper" }, "Matplotlib": { "URL": "https://matplotlib.org/stable/", "folder": "matplotlib", "scraper_class": "PydataThemeScraper" }, "Model Context Protocol": { "URL": "https://modelcontextprotocol.io/docs/", "folder": "model_context_protocol", "scraper_class": "MintlifyScraper" }, "more-itertools": { "URL": "https://more-itertools.readthedocs.io/en/stable/", "folder": "more_itertools", "scraper_class": "FuroThemeScraper" }, "mpmath": { "URL": "https://mpmath.org/doc/current/", "folder": "mpmath", "scraper_class": "BodyRoleMainScraper" }, "msg-parser": { "URL": "https://msg-parser.readthedocs.io/en/latest/", "folder": "msg_parser", "scraper_class": "BodyRoleMainScraper" }, "multidict": { "URL": "https://multidict.aio-libs.org/en/stable/", "folder": "multidict", "scraper_class": "BodyRoleMainScraper" }, "multiprocess": { "URL": "https://multiprocess.readthedocs.io/en/stable/", "folder": "multiprocess", "scraper_class": "RtdThemeScraper" }, "natsort": { "URL": "https://natsort.readthedocs.io/en/stable/", "folder": "natsort", "scraper_class": "RtdThemeScraper" }, "NetworkX": { "URL": "https://networkx.org/documentation/stable/", "folder": "networkx", "scraper_class": "PydataThemeScraper" }, "NLTK": { "URL": "https://www.nltk.org/", "folder": "nltk", "scraper_class": "DivIdMainContentRoleMainScraper" }, "numba": { "URL": "https://numba.readthedocs.io/en/stable/", "folder": "numba", "scraper_class": "RtdThemeScraper" }, "NumPy (latest stable)": { "URL": "https://numpy.org/doc/stable/", "folder": "numpy", "scraper_class": "PydataThemeScraper" }, "ocrmypdf": { "URL": "https://ocrmypdf.readthedocs.io/en/stable/", "folder": "ocrmypdf", "scraper_class": "RtdThemeScraper" }, "onnx": { "URL": "https://onnx.ai/onnx/", "folder": "onnx", "scraper_class": "FuroThemeScraper" }, "openpyxl": { "URL": "https://openpyxl.readthedocs.io/en/stable/", "folder": "openpyxl", "scraper_class": "BodyRoleMainScraper" }, "Optimum (main)": { "URL": "https://huggingface.co/docs/optimum/main/en/", "folder": "optimum_main", "scraper_class": "HuggingfaceScraper" }, "Optimum ONNX (main)": { "URL": "https://huggingface.co/docs/optimum-onnx/main/en/", "folder": "optimum_onnx_main", "scraper_class": "HuggingfaceScraper" }, "packaging": { "URL": "https://packaging.pypa.io/en/stable/", "folder": "packaging", "scraper_class": "FuroThemeScraper" }, "pandas": { "URL": "https://pandas.pydata.org/docs/", "folder": "pandas", "scraper_class": "PydataThemeScraper" }, "pdfminer.six": { "URL": "https://pdfminersix.readthedocs.io/en/master/", "folder": "pdfminer_six", "scraper_class": "BodyRoleMainScraper" }, "pi-heif": { "URL": "https://pillow-heif.readthedocs.io/en/latest/", "folder": "piheif", "scraper_class": "DivClassDocumentScraper" }, "pikepdf": { "URL": "https://pikepdf.readthedocs.io/en/stable/", "folder": "pikepdf", "scraper_class": "RtdThemeScraper" }, "platformdirs": { "URL": "https://platformdirs.readthedocs.io/en/stable/", "folder": "platformdirs", "scraper_class": "FuroThemeScraper" }, "pluggy": { "URL": "https://pluggy.readthedocs.io/en/stable/", "folder": "pluggy", "scraper_class": "BodyRoleMainScraper" }, "Pillow": { "URL": 
"https://pillow.readthedocs.io/en/stable/", "folder": "pillow", "scraper_class": "FuroThemeScraper" }, "protobuf": { "URL": "https://protobuf.dev/", "folder": "protobuf", "scraper_class": "DivClassTdContentScraper" }, "pyarrow": { "URL": "https://arrow.apache.org/docs/python/", "folder": "pyarrow", "scraper_class": "PydataThemeScraper" }, "psutil": { "URL": "https://psutil.readthedocs.io/en/stable/", "folder": "psutil", "scraper_class": "RtdThemeScraper" }, "PyAV": { "URL": "https://pyav.org/docs/stable/", "folder": "pyav", "scraper_class": "BodyRoleMainScraper" }, "Pydantic": { "URL": "https://pydantic.dev/docs/validation/latest/", "folder": "pydantic", "scraper_class": "MainScraper" }, "pydantic-settings": { "URL": "https://pydantic.dev/docs/validation/latest/concepts/pydantic_settings/", "folder": "pydantic_settings", "scraper_class": "MainScraper" }, "Pygments": { "URL": "https://pygments.org/docs/", "folder": "pygments", "scraper_class": "BodyRoleMainScraper" }, "PyMuPDF": { "URL": "https://pymupdf.readthedocs.io/en/latest/", "folder": "pymupdf", "scraper_class": "PymupdfScraper" }, "pyparsing": { "URL": "https://pyparsing-docs.readthedocs.io/en/latest/", "folder": "pyparsing", "scraper_class": "DivClassDocumentScraper" }, "PyOpenGL": { "URL": "https://mcfletch.github.io/pyopengl/documentation/manual/", "folder": "pyopengl", "scraper_class": "MainScraper" }, "PyPDF": { "URL": "https://pypdf.readthedocs.io/en/stable/", "folder": "pypdf", "scraper_class": "RtdThemeScraper" }, "python-docx": { "URL": "https://python-docx.readthedocs.io/en/stable/", "folder": "python_docx", "scraper_class": "BodyRoleMainScraper" }, "python-dateutil": { "URL": "https://dateutil.readthedocs.io/en/stable/", "folder": "python_dateutil", "scraper_class": "DivClassDocumentScraper" }, "python-dotenv": { "URL": "https://saurabh-kumar.com/python-dotenv/", "folder": "python-dotenv", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "python-oxmsg": { "URL": "https://scanny.github.io/python-oxmsg/", "folder": "python-oxmsg", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "PyYAML": { "URL": "https://pyyaml.org/wiki/PyYAMLDocumentation", "folder": "pyyaml", "scraper_class": "BodyScraper" }, "Pyside 6": { "URL": "https://doc.qt.io/qtforpython-6/", "folder": "pyside6", "scraper_class": "FuroThemeScraper" }, "pytz": { "URL": "https://pythonhosted.org/pytz/", "folder": "pytz", "scraper_class": "BodyRoleMainScraper" }, "RapidFuzz": { "URL": "https://rapidfuzz.github.io/RapidFuzz/", "folder": "rapidfuzz", "scraper_class": "FuroThemeScraper" }, "Referencing": { "URL": "https://referencing.readthedocs.io/en/stable/", "folder": "referencing", "scraper_class": "FuroThemeScraper" }, "Requests": { "URL": "https://requests.readthedocs.io/en/stable/", "folder": "requests", "scraper_class": "BodyRoleMainScraper" }, "requests_toolbelt": { "URL": "https://toolbelt.readthedocs.io/en/latest/", "folder": "requeststoolbelt", "scraper_class": "BodyRoleMainScraper" }, "Rich": { "URL": "https://rich.readthedocs.io/en/stable/", "folder": "rich", "scraper_class": "DivClassDocumentScraper" }, "rpds-py": { "URL": "https://rpds.readthedocs.io/en/stable/", "folder": "rpds_py", "scraper_class": "ArticleRoleMainScraper" }, "ruamel.yaml": { "URL": "https://yaml.dev/doc/ruamel.yaml/", "folder": "ruamel_yaml", "scraper_class": "DivIdContentSecondScraper" }, "Safetensors (main)": { "URL": "https://huggingface.co/docs/safetensors/main/en/", "folder": "safetensors_main", "scraper_class": "HuggingfaceScraper" }, "scikit-learn": { 
"URL": "https://scikit-learn.org/stable/", "folder": "scikit_learn", "scraper_class": "PydataThemeScraper" }, "SciPy 1.16.2": { "URL": "https://docs.scipy.org/doc/scipy-1.16.2/", "folder": "scipy_1162", "scraper_class": "PydataThemeScraper", }, "Sentence-Transformers": { "URL": "https://www.sbert.net/docs", "folder": "sentence_transformers", "scraper_class": "RtdThemeScraper" }, "Six": { "URL": "https://six.readthedocs.io/", "folder": "six", "scraper_class": "DivClassDocumentScraper" }, "sniffio": { "URL": "https://sniffio.readthedocs.io/en/stable/", "folder": "sniffio", "scraper_class": "DivClassDocumentScraper" }, "SoundFile 0.13.1": { "URL": "https://python-soundfile.readthedocs.io/en/0.13.1/", "folder": "soundfile_0131", "scraper_class": "DivClassDocumentScraper" }, "sounddevice 0.5.3": { "URL": "https://python-sounddevice.readthedocs.io/en/0.5.3/", "folder": "sounddevice_053", "scraper_class": "BodyRoleMainScraper" }, "Soupsieve": { "URL": "https://facelessuser.github.io/soupsieve/", "folder": "soupsieve", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "SpeechBrain (latest)": { "URL": "https://speechbrain.readthedocs.io/en/latest/", "folder": "speechbrain_latest", "scraper_class": "DivClassDocumentScraper" }, "SQLAlchemy 20": { "URL": "https://docs.sqlalchemy.org/en/20/", "folder": "sqlalchemy_20", "scraper_class": "BodyRoleMainScraper" }, "sympy": { "URL": "https://docs.sympy.org/latest/", "folder": "sympy", "scraper_class": "PymupdfScraper" }, "tenacity": { "URL": "https://tenacity.readthedocs.io/en/stable/", "folder": "tenacity", "scraper_class": "DivClassDocumentScraper" }, "Tile DB": { "URL": "https://tiledb-inc-tiledb.readthedocs-hosted.com/projects/tiledb-py/en/stable/", "folder": "tiledb", "scraper_class": "DivClassDocumentScraper" }, "tiledb-vector-search": { "URL": "https://tiledb-inc.github.io/TileDB-Vector-Search/documentation/", "folder": "tiledb_vector_search", "scraper_class": "FastcoreScraper" }, "tiledb-cloud": { "URL": "https://tiledb-inc.github.io/TileDB-Cloud-Py/", "folder": "tiledb_cloud", "scraper_class": "FastcoreScraper" }, "Timm 1.0.20": { "URL": "https://huggingface.co/docs/timm/v1.0.20/en/", "folder": "timm_1020", "scraper_class": "HuggingfaceScraper" }, "tokenizers 0.22.1": { "URL": "https://huggingface.co/docs/tokenizers/v0.22.1/en", "folder": "tokenizers_0221", "scraper_class": "HuggingfaceScraper" }, "torch 2.9": { "URL": "https://docs.pytorch.org/docs/2.9/", "folder": "torch_29", "scraper_class": "PyTorchScraper" }, "Torchaudio 2.9": { "URL": "https://docs.pytorch.org/audio/2.9.0/", "folder": "torchaudio_29", "scraper_class": "PyTorchScraper" }, "Torchvision 0.24": { "URL": "https://docs.pytorch.org/vision/0.24/", "folder": "torchvision_024", "scraper_class": "PyTorchScraper" }, "tqdm": { "URL": "https://tqdm.github.io", "folder": "tqdm", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "Transformers 4.57.5": { "URL": "https://huggingface.co/docs/transformers/v4.57.5/en", "folder": "transformers_4575", "scraper_class": "HuggingfaceScraper" }, "typing_extensions": { "URL": "https://typing-extensions.readthedocs.io/en/stable/", "folder": "typing_extensions", "scraper_class": "BodyRoleMainScraper" }, "typing-inspection": { "URL": "https://pydantic.github.io/typing-inspection/dev/", "folder": "typing_extensions", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "tzdata": { "URL": "https://tzdata.python.org/", "folder": "tzdata", "scraper_class": "FuroThemeScraper" }, "urllib3": { "URL": 
"https://urllib3.readthedocs.io/en/stable/", "folder": "urllib3", "scraper_class": "FuroThemeScraper" }, "uv": { "URL": "https://docs.astral.sh/uv/", "folder": "uv", "scraper_class": "ArticleMdContentInnerMdTypesetScraper" }, "Watchdog": { "URL": "https://python-watchdog.readthedocs.io/en/stable/", "folder": "watchdog", "scraper_class": "BodyRoleMainScraper" }, "webdataset": { "URL": "https://huggingface.co/docs/hub/en/datasets-webdataset", "folder": "webdataset", "scraper_class": "HuggingfaceScraper" }, "webencodings": { "URL": "https://pythonhosted.org/webencodings/", "folder": "webencodings", "scraper_class": "BodyRoleMainScraper" }, "Wrapt": { "URL": "https://wrapt.readthedocs.io/en/master/", "folder": "wrapt", "scraper_class": "RstContentScraper" }, "xlrd": { "URL": "https://xlrd.readthedocs.io/en/stable/", "folder": "xlrd", "scraper_class": "DivClassDocumentScraper" }, "yarl": { "URL": "https://yarl.aio-libs.org/en/stable/", "folder": "yarl", "scraper_class": "BodyRoleMainScraper" }, "zstandard": { "URL": "https://python-zstandard.readthedocs.io/en/stable/", "folder": "zstandard", "scraper_class": "BodyRoleMainScraper" }, } class CustomButtonStyles: LIGHT_GREY = "#C8C8C8" DISABLED_TEXT = "#969696" COLORS = { "RED": { "base": "#320A0A", "hover": "#4B0F0F", "pressed": "#290909", "disabled": "#7D1919" }, "BLUE": { "base": "#0A0A32", "hover": "#0F0F4B", "pressed": "#09092B", "disabled": "#19197D" }, "GREEN": { "base": "#0A320A", "hover": "#0F4B0F", "pressed": "#092909", "disabled": "#197D19" }, "YELLOW": { "base": "#32320A", "hover": "#4B4B0F", "pressed": "#292909", "disabled": "#7D7D19" }, "PURPLE": { "base": "#320A32", "hover": "#4B0F4B", "pressed": "#290929", "disabled": "#7D197D" }, "ORANGE": { "base": "#321E0A", "hover": "#4B2D0F", "pressed": "#291909", "disabled": "#7D5A19" }, "TEAL": { "base": "#0A3232", "hover": "#0F4B4B", "pressed": "#092929", "disabled": "#197D7D" }, "BROWN": { "base": "#2B1E0A", "hover": "#412D0F", "pressed": "#231909", "disabled": "#6B5A19" } } @classmethod def _generate_button_style(cls, color_values): return f""" QPushButton {{ background-color: {color_values['base']}; color: {cls.LIGHT_GREY}; padding: 5px; border: none; border-radius: 3px; }} QPushButton:hover {{ background-color: {color_values['hover']}; }} QPushButton:pressed {{ background-color: {color_values['pressed']}; }} QPushButton:disabled {{ background-color: {color_values['disabled']}; color: {cls.DISABLED_TEXT}; }} """ for color_name, color_values in CustomButtonStyles.COLORS.items(): setattr(CustomButtonStyles, f"{color_name}_BUTTON_STYLE", CustomButtonStyles._generate_button_style(color_values)) GPUS_NVIDIA = { "GeForce GTX 1630": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 512 }, "GeForce GTX 1650 (Apr 2019)": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 896 }, "GeForce GTX 1650 (Apr 2020)": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 896 }, "GeForce GTX 1650 (Jun 2020)": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 896 }, "GeForce GTX 1650 (Laptop)": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 1024 }, "GeForce GTX 1650 Max-Q": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 1024 }, "GeForce GTX 1650 Ti Max-Q": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 1024 }, "GeForce GTX 1650 Ti": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 1024 }, "GeForce GTX 1650 Super": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 1280 }, "GeForce GTX 1660": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1408 }, "GeForce GTX 1660 (Laptop)": { "Brand": 
"NVIDIA", "Size (GB)": 6, "CUDA Cores": 1408 }, "GeForce GTX 1660 Super": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1408 }, "GeForce GTX 1660 Ti Max-Q": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1536 }, "GeForce GTX 1660 Ti (Laptop)": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1536 }, "GeForce GTX 1660 Ti": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1536 }, "GeForce RTX 2060": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1920 }, "GeForce RTX 2060 Max-Q": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1920 }, "GeForce RTX 2060 (Jan 2019)": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1920 }, "GeForce RTX 2060 (Jan 2020)": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 1920 }, "GeForce RTX 3050 Mobile (4GB)": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 2048 }, "GeForce RTX 2060 (Dec 2021)": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 2176 }, "GeForce RTX 2060 Super": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 2176 }, "GeForce RTX 2070": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 2304 }, "GeForce RTX 2070 Max-Q": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 2304 }, "GeForce RTX 3050 (GA107-325)": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 2304 }, "GeForce RTX 3050 (GA106-150)": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 2304 }, "GeForce RTX 3050 (GA107-150-A1)": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 2560 }, "GeForce RTX 4050 Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 2560 }, "GeForce RTX 3050 Ti Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": 4, "CUDA Cores": 2560 }, "GeForce RTX 3050 Mobile (6GB)": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 2560 }, "GeForce RTX 2070 Super": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 2560 }, "GeForce RTX 2070 Super Max-Q": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 2560 }, "GeForce RTX 4060": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 3072 }, "GeForce RTX 2080 Super": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 3072 }, "GeForce RTX 2080 Super Max-Q": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 3072 }, "GeForce RTX 3060": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 3584 }, "GeForce RTX 3060 Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": 6, "CUDA Cores": 3840 }, "GeForce RTX 4060 Ti": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 4352 }, "GeForce RTX 2080 Ti": { "Brand": "NVIDIA", "Size (GB)": 11, "CUDA Cores": 4352 }, "GeForce RTX 4070 Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 4608 }, "GeForce RTX 5070 (laptop)": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 4608 }, "Nvidia TITAN RTX": { "Brand": "NVIDIA", "Size (GB)": 24, "CUDA Cores": 4608 }, "GeForce RTX 3060 Ti": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 4864 }, "GeForce RTX 3070 Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 5120 }, "GeForce RTX 3070": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 5888 }, "GeForce RTX 4070": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 5888 }, "GeForce RTX 5080 Ti (laptop)": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 5888 }, "GeForce RTX 3070 Ti": { "Brand": "NVIDIA", "Size (GB)": 8, "CUDA Cores": 6144 }, "GeForce RTX 5070": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 6144 }, "GeForce RTX 3070 Ti Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": "8-16", "CUDA Cores": 6144 }, "GeForce RTX 4070 Super": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 7168 }, "GeForce RTX 4080 
Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 7424 }, "GeForce RTX 3080 Ti Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 7424 }, "GeForce RTX 4070 Ti": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 7680 }, "GeForce RTX 4080 (AD104-400)": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 7680 }, "GeForce RTX 5080 (laptop)": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 7680 }, "GeForce RTX 4070 Ti Super": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 8448 }, "GeForce RTX 3080": { "Brand": "NVIDIA", "Size (GB)": 10, "CUDA Cores": 8704 }, "GeForce RTX 3080 Ti": { "Brand": "NVIDIA", "Size (GB)": 12, "CUDA Cores": 8960 }, "GeForce RTX 5070 Ti": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 8960 }, "GeForce RTX 4080 (AD103-300)": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 9728 }, "GeForce RTX 4090 Mobile/Laptop": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 9728 }, "GeForce RTX 4080 Super": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 10240 }, "GeForce RTX 3090": { "Brand": "NVIDIA", "Size (GB)": 24, "CUDA Cores": 10496 }, "GeForce RTX 5090 (laptop)": { "Brand": "NVIDIA", "Size (GB)": 24, "CUDA Cores": 10496 }, "GeForce RTX 3090 Ti": { "Brand": "NVIDIA", "Size (GB)": 24, "CUDA Cores": 10752 }, "GeForce RTX 5080": { "Brand": "NVIDIA", "Size (GB)": 16, "CUDA Cores": 10752 }, "GeForce RTX 4090 D": { "Brand": "NVIDIA", "Size (GB)": 24, "CUDA Cores": 14592 }, "GeForce RTX 4090": { "Brand": "NVIDIA", "Size (GB)": 24, "CUDA Cores": 16384 }, "GeForce RTX 5090": { "Brand": "NVIDIA", "Size (GB)": 32, "CUDA Cores": 21760 } } GPUS_AMD = { "Radeon RX 9060 XT 16GB": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 2048 }, "Radeon RX 9060 XT 8GB": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2048 }, "Radeon RX 7600": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2048 }, "Radeon RX 7600 XT": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 2048 }, "Radeon RX 7700 XT": { "Brand": "AMD", "Size (GB)": 12, "Shaders": 3456 }, "Radeon RX 7800 XT": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 3840 }, "Radeon RX 9070 XT": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 4096 }, "Radeon RX 9070": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 3584 }, "Radeon RX 7900 GRE": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 5120 }, "Radeon RX 7900 XT": { "Brand": "AMD", "Size (GB)": 20, "Shaders": 5376 }, "Radeon RX 7900 XTX": { "Brand": "AMD", "Size (GB)": 24, "Shaders": 6144 }, "Radeon RX 6300": { "Brand": "AMD", "Size (GB)": 2, "Shaders": 768 }, "Radeon RX 6400": { "Brand": "AMD", "Size (GB)": 4, "Shaders": 1024 }, "Radeon RX 6500 XT": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 1024 }, "Radeon RX 6600": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 1792 }, "Radeon RX 6600 XT": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2048 }, "Radeon RX 6650 XT": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2048 }, "Radeon RX 6700": { "Brand": "AMD", "Size (GB)": 10, "Shaders": 2304 }, "Radeon RX 6750 GRE 10GB": { "Brand": "AMD", "Size (GB)": 10, "Shaders": 2560 }, "Radeon RX 6750 XT": { "Brand": "AMD", "Size (GB)": 12, "Shaders": 2560 }, "Radeon RX 6800": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 3840 }, "Radeon RX 6800 XT": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 4608 }, "Radeon RX 6900 XT": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 5120 }, "Radeon RX 6950 XT": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 5120 }, "Radeon RX 5300": { "Brand": "AMD", "Size (GB)": 3, "Shaders": 1408 }, "Radeon RX 5300 XT": { "Brand": "AMD", "Size (GB)": 4, 
"Shaders": 1408 }, "Radeon RX 5500": { "Brand": "AMD", "Size (GB)": 4, "Shaders": 1408 }, "Radeon RX 5500 XT": { "Brand": "AMD", "Size (GB)": 4, "Shaders": 1408 }, "Radeon RX 5600": { "Brand": "AMD", "Size (GB)": 6, "Shaders": 2048 }, "Radeon RX 5600 XT": { "Brand": "AMD", "Size (GB)": 6, "Shaders": 2304 }, "Radeon RX 5700": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2304 }, "Radeon RX 5700 XT": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2560 }, "Radeon RX 5700 XT 50th Anniversary Edition": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2560 }, "Radeon RX Vega 56": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 3584 }, "Radeon RX Vega 64": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 4096 }, "Radeon RX Vega 64 Liquid": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 4096 }, "Radeon VII": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 3840 }, "Radeon RX 7600S": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 1792 }, "Radeon RX 7600M": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 1792 }, "Radeon RX 7600M XT": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2048 }, "Radeon RX 7700S": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2048 }, "Radeon RX 7900M": { "Brand": "AMD", "Size (GB)": 16, "Shaders": 4608 }, "Radeon RX 6300M": { "Brand": "AMD", "Size (GB)": 2, "Shaders": 768 }, "Radeon RX 6450M": { "Brand": "AMD", "Size (GB)": 2, "Shaders": 768 }, "Radeon RX 6550S": { "Brand": "AMD", "Size (GB)": 4, "Shaders": 768 }, "Radeon RX 6500M": { "Brand": "AMD", "Size (GB)": 4, "Shaders": 1024 }, "Radeon RX 6550M": { "Brand": "AMD", "Size (GB)": 4, "Shaders": 1024 }, "Radeon RX 6600S": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 1792 }, "Radeon RX 6700S": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 1792 }, "Radeon RX 6600M": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 1792 }, "Radeon RX 6650M": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 1792 }, "Radeon RX 6800S": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2048 }, "Radeon RX 6650M XT": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2048 }, "Radeon RX 6700M": { "Brand": "AMD", "Size (GB)": 10, "Shaders": 2304 }, "Radeon RX 6800M": { "Brand": "AMD", "Size (GB)": 12, "Shaders": 2560 }, "Radeon RX 6850M XT": { "Brand": "AMD", "Size (GB)": 12, "Shaders": 2560 }, "Radeon RX 5300M": { "Brand": "AMD", "Size (GB)": 3, "Shaders": 1408 }, "Radeon RX 5500M": { "Brand": "AMD", "Size (GB)": 4, "Shaders": 1408 }, "Radeon RX 5600M": { "Brand": "AMD", "Size (GB)": 6, "Shaders": 2304 }, "Radeon RX 5700M": { "Brand": "AMD", "Size (GB)": 8, "Shaders": 2304 } } GPUS_INTEL = { "Intel Arc A310": { "Brand": "Intel", "Size (GB)": 4, "Shading Cores": 768 }, "Intel Arc A380": { "Brand": "Intel", "Size (GB)": 6, "Shading Cores": 1024 }, "Intel Arc B570": { "Brand": "Intel", "Size (GB)": 10, "Shading Cores": 2304 }, "Intel Arc B580": { "Brand": "Intel", "Size (GB)": 12, "Shading Cores": 2560 }, "Intel Arc A580": { "Brand": "Intel", "Size (GB)": 8, "Shading Cores": 3072 }, "Intel Arc A750": { "Brand": "Intel", "Size (GB)": 8, "Shading Cores": 3584 }, "Intel Arc A770 8GB": { "Brand": "Intel", "Size (GB)": 8, "Shading Cores": 4096 }, "Intel Arc A770 16GB": { "Brand": "Intel", "Size (GB)": 16, "Shading Cores": 4096 } } master_questions = [ "What is the VectorDB-Plugin and what can it do?", "What are the system requirements and prerequisites?", "Why is Visual Studio required to run this program?", "How do I install and launch the VectorDB-Plugin?", "How do I download or add embedding models?", "How do I query the database for answers?", "Which chat backend should I use?", "What is LM Studio 
chat model backend?", "What is Kobold chat model backend?", "What is the OpenAI GPT Chat Model Backend?", "What local chat models are available and how can I use them?", "How do I get a huggingface access token?", "What is a context limit or maximum sequence length?", "What happens if I exceed the maximum sequence length of an embedding model?", "How many contexts should I retrieve when querying the vector database?", "What does the chunks only checkbox do?", "What are embedding or vector models?", "Which embedding or vector model should I choose?", "What are the dimensions of a vector or embedding model?", "What are some general tips for choosing an embedding model?", "What Are Vision Models?", "What vision models are available in this program?", "Do you have any tips for choosing a vision model?", "What is whisper and how does this program use voice recording or transcribing an audio file?", "How can I record my question for the vector database query?", "How can I transcribe an audio file to be put into the vector database?", "What are the distil variants of the whisper models when transcribing and audio file?", "What whisper model should I choose to transcribe a file?", "What are floating point formats, precision, and quantization?", "What are the common floating point formats?", "What are precision and range regarding floating point formats and which should I use?", "What is Quantization?", "What are the aspects or effects of quantization?", "What are the LM Studio Server settings?", "What are the database creation settings?", "What are the database query settings?", "How does the Contexts setting work exactly?", "What is the similarity setting?", "What is the search term filter setting?", "What is the File Type setting?", "What are text to speech models (aks TTS models) and how are they used in this program?", "What text to speech models are availble in this program to use?", "What is the Bark text to speech?", "What is the WhisperSpeech text to speech?", "What is the ChatTTS text to speech?", "What is the Google TTS text to speech?", "What is the Chatterbox text to speech?", "Which text to speech backend or models should I use", "Can I back up or restore my databases and are they backed up automatically", "What happens if I lose a configuration file and can I restore it?", "What are some good tips for searching a vector database?", "General VRAM Considerations", "How can I manage vram?", "What are the speed and VRAM requirements for the various chat models?", "What are the speed and VRAM requirements for the various vision models?", "What are maximunm context length and maximum sequence length and how to they relate?", "What is the scrape documentaton feature?", "Which vector or embedding models are available in this program?", "What is the manage databaes tab?", "How can I create a vector database?", "Can I use images and audio files in my database?", "What chat models are available with the local models option?", "What are the Qwen 3 Chat Models?", "What are the Granite 3.3 Chat Models?", "What are the GLM-Z1 Chat Models?", "What is the Mistral Small Chat Model?", "What is the gte-Qwen2-1.5B-instruct embedding model?", "What are the BGE Embedding Models?", "What are the Granite Embedding Models?", "What are the Intfloat Embedding Models?", "What are the Arctic Embedding Models?", "What is the Scrape Documentation tool?", "How do I test vision models on images?", "What is Optical Character Recognition?", "How can I extract text from PDFs or images with OCR?", "What other features 
does the Misc tab have?", "What is Ask Jeeves and how do I use it?", "What are the InternVL3 Vision Models?", "What are the Ovis2 Vision Models?", "What are the Florence-2 Vision Models?", "What are the Granite Vision Models?", "What are the Qwen2.5VL Vision Models?", "What is the GLM-4V-9B Vision Model?", "What is the Molmo-D-0924 Vision Model?", ] jeeves_system_message = "You are a helpful British butler who clearly and directly answers questions in a succinct fashion based on contexts provided to you. If you cannot find the answer within the contexts simply tell me that the contexts do not provide an answer. However, if the contexts partially address a question you answer based on what the contexts say and then briefly summarize the parts of the question that the contexts didn't provide an answer to. Also, you should be very respectful to the person asking the question and frequently offer traditional butler services like various fancy drinks, snacks, various butler services like shining of shoes, pressing of suites, and stuff like that. Also, if you can't answer the question at all based on the provided contexts, you should apologize profusely and beg to keep your job. Lastly, it is essential that if there are no contexts actually provided it means that a user's question wasn't relevant and you should state that you can't answer based off of the contexts because there are none. And it goes without saying you should refuse to answer any questions that are not directly answerable by the provided contexts. Moreover, some of the contexts might not have relevant information and you should simply ignore them and focus on only answering a user's question. I cannot emphasize enough that you must gear your answer towards using this program and based your response off of the contexts you receive. Lastly, in addition to offering to perform stereotypical butler services in the midst of your response, you must always always always end your response with some kind of offering of butler services even they don't want it." system_message = "You are a helpful person who clearly and directly answers questions in a succinct fashion based on contexts provided to you. If you cannot find the answer within the contexts simply tell me that the contexts do not provide an answer. However, if the contexts partially address my question I still want you to answer based on what the contexts say and then briefly summarize the parts of my question that the contexts didn't provide an answer." rag_string = "Here are the contexts to base your answer on. However, I need to reiterate that I only want you to base your response on these contexts and do not use outside knowledge that you may have been trained with." 
================================================ FILE: core/extract_metadata.py ================================================ import os import datetime import hashlib import re from db.document_processor import Document from typing import List, Tuple def compute_content_hash(content: str) -> str: return hashlib.sha256(content.encode('utf-8')).hexdigest() def compute_file_hash(file_path): hash_sha256 = hashlib.sha256() with open(file_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_sha256.update(chunk) return hash_sha256.hexdigest() def extract_common_metadata(file_path, content_hash=None): file_path = os.path.realpath(file_path) file_name = os.path.basename(file_path) file_type = os.path.splitext(file_path)[1] creation_date = datetime.datetime.fromtimestamp(os.path.getctime(file_path)).isoformat() modification_date = datetime.datetime.fromtimestamp(os.path.getmtime(file_path)).isoformat() file_hash = content_hash if content_hash else compute_file_hash(file_path) metadata = { "file_path": file_path, "file_type": file_type, "file_name": file_name, "creation_date": creation_date, "modification_date": modification_date, "hash": file_hash } clean_metadata = {} for k, v in metadata.items(): if isinstance(v, (str, int, float, bool, type(None))): clean_metadata[k] = v else: clean_metadata[k] = str(v) return clean_metadata def extract_typed_metadata(file_path, document_type, content_hash=None): metadata = extract_common_metadata(file_path, content_hash) metadata["document_type"] = document_type return metadata def add_pymupdf_page_metadata(doc: Document, chunk_size: int = 1200, chunk_overlap: int = 600) -> List[Document]: def split_text(text: str, chunk_size: int, chunk_overlap: int) -> List[Tuple[str, int]]: page_markers = [] offset = 0 for m in re.finditer(r'\[\[page(\d+)\]\]', text): marker_len = len(m.group(0)) page_markers.append((m.start() - offset, int(m.group(1)))) offset += marker_len clean_text = re.sub(r'\[\[page\d+\]\]', '', text) chunks = [] start = 0 while start < len(clean_text): end = start + chunk_size if end > len(clean_text): end = len(clean_text) chunk = clean_text[start:end].strip() page_num = 1 for marker_pos, page in reversed(page_markers): if marker_pos <= start: page_num = page break if chunk: chunks.append((chunk, page_num)) start += chunk_size - chunk_overlap return chunks chunks = split_text(doc.page_content, chunk_size, chunk_overlap) new_docs = [] for chunk, page_num in chunks: new_metadata = {} if doc.metadata: for k, v in doc.metadata.items(): if k is not None and v is not None: key = str(k) if isinstance(v, (str, int, float, bool)): new_metadata[key] = v else: new_metadata[key] = str(v) new_metadata['page_number'] = page_num new_doc = Document( page_content=str(chunk).strip(), metadata=new_metadata ) new_docs.append(new_doc) return new_docs ================================================ FILE: core/initialize.py ================================================ import platform import shutil from pathlib import Path import logging import torch import yaml import ctranslate2 from core.constants import PROJECT_ROOT def get_compute_device_info(): available_devices = ["cpu"] gpu_brand = None if torch.cuda.is_available(): available_devices.append('cuda') return { 'available': available_devices, 'gpu_brand': gpu_brand } def get_platform_info(): return {'os': platform.system().lower()} def get_supported_quantizations(device_type): types = ctranslate2.get_supported_compute_types(device_type) filtered_types = [q for q in types if q != 'int16'] 
desired_order = ['float32', 'float16', 'bfloat16', 'int8_float32', 'int8_float16', 'int8_bfloat16', 'int8'] return [q for q in desired_order if q in filtered_types] def update_config_file(**system_info): full_config_path = Path('config.yaml').resolve() with open(full_config_path, 'r', encoding='utf-8') as stream: config_data = yaml.safe_load(stream) compute_device_info = system_info.get('Compute_Device', {}) config_data['Compute_Device']['available'] = compute_device_info.get('available', ['cpu']) valid_devices = ['cpu', 'cuda', 'mps'] for key in ['database_creation', 'database_query']: config_data['Compute_Device'][key] = config_data['Compute_Device'].get(key, 'cpu') if config_data['Compute_Device'].get(key) in valid_devices else 'cpu' config_data['Supported_CTranslate2_Quantizations'] = { 'CPU': get_supported_quantizations('cpu'), 'GPU': get_supported_quantizations('cuda') if torch.cuda.is_available() else [] } for key, value in system_info.items(): if key not in ('Compute_Device', 'Supported_CTranslate2_Quantizations'): config_data[key] = value with open(full_config_path, 'w', encoding='utf-8') as stream: yaml.safe_dump(config_data, stream) def check_for_necessary_folders(): folders = [ "Assets", "Docs_for_DB", "Vector_DB_Backup", "Vector_DB", "Models", "Models/vector", "Models/chat", "Models/tts", "Models/vision", "Models/whisper", "Scraped_Documentation", ] for folder in folders: Path(folder).mkdir(exist_ok=True) def restore_vector_db_backup(): backup_folder = Path('Vector_DB_Backup') destination_folder = Path('Vector_DB') if not backup_folder.exists(): logging.error("Backup folder 'Vector_DB_Backup' does not exist.") return try: if destination_folder.exists(): shutil.rmtree(destination_folder) logging.info("Deleted existing 'Vector_DB' folder.") destination_folder.mkdir() logging.info("Created 'Vector_DB' folder.") for item in backup_folder.iterdir(): dest_path = destination_folder / item.name if item.is_dir(): shutil.copytree(item, dest_path) logging.info(f"Copied directory: {item.name}") else: shutil.copy2(item, dest_path) logging.info(f"Copied file: {item.name}") logging.info("Successfully restored Vector DB backup.") except Exception as e: logging.error(f"Error restoring Vector DB backup: {e}") def delete_chat_history(): chat_history_path = PROJECT_ROOT / 'chat_history.txt' chat_history_path.unlink(missing_ok=True) def main(): compute_device_info = get_compute_device_info() platform_info = get_platform_info() update_config_file(Compute_Device=compute_device_info, Platform_Info=platform_info) check_for_necessary_folders() delete_chat_history() if __name__ == "__main__": main() ================================================ FILE: core/utilities.py ================================================ import importlib import importlib.metadata import importlib.util import os import threading import logging import platform import shutil import sys from pathlib import Path import psutil import subprocess import re from string import Template import torch import yaml from packaging import version from PySide6.QtCore import QRunnable, QObject, Signal, QThreadPool from PySide6.QtWidgets import QApplication, QMessageBox from termcolor import cprint from core.constants import PROJECT_ROOT, THEMES def set_cuda_paths(): import sys import os from pathlib import Path venv_base = Path(sys.executable).parent.parent nvidia_base_path = venv_base / 'Lib' / 'site-packages' / 'nvidia' cuda_path_runtime = nvidia_base_path / 'cuda_runtime' / 'bin' cuda_path_runtime_lib = nvidia_base_path / 'cuda_runtime' 
/ 'lib' / 'x64' cuda_path_runtime_include = nvidia_base_path / 'cuda_runtime' / 'include' cublas_path = nvidia_base_path / 'cublas' / 'bin' cudnn_path = nvidia_base_path / 'cudnn' / 'bin' nvrtc_path = nvidia_base_path / 'cuda_nvrtc' / 'bin' nvcc_path = nvidia_base_path / 'cuda_nvcc' / 'bin' paths_to_add = [ str(cuda_path_runtime), str(cuda_path_runtime_lib), str(cuda_path_runtime_include), str(cublas_path), str(cudnn_path), str(nvrtc_path), str(nvcc_path), ] current_value = os.environ.get('PATH', '') new_value = os.pathsep.join(paths_to_add + ([current_value] if current_value else [])) os.environ['PATH'] = new_value triton_cuda_path = nvidia_base_path / 'cuda_runtime' current_cuda_path = os.environ.get('CUDA_PATH', '') new_cuda_path = os.pathsep.join([str(triton_cuda_path)] + ([current_cuda_path] if current_cuda_path else [])) os.environ['CUDA_PATH'] = new_cuda_path def check_backend_dependencies(backend_name: str, interactive: bool = True) -> bool: from core.constants import BACKEND_DEPENDENCIES required_packages = BACKEND_DEPENDENCIES.get(backend_name, {}) if not required_packages: return True return check_and_install_dependencies( required_packages, backend_name=backend_name.title(), interactive=interactive ) def is_package_available(pkg_name: str) -> tuple[bool, str]: import importlib.util import importlib.metadata package_exists = importlib.util.find_spec(pkg_name) is not None package_version = "N/A" if package_exists: try: package_version = importlib.metadata.version(pkg_name) except importlib.metadata.PackageNotFoundError: package_exists = False return package_exists, package_version def verify_installation(package_name: str, expected_version: str) -> bool: try: import importlib.metadata installed_version = importlib.metadata.version(package_name) return installed_version == expected_version except importlib.metadata.PackageNotFoundError: return False def install_packages(packages: list[tuple[str, str]], no_deps: bool = True) -> bool: import subprocess import sys for package, version in packages: my_cprint(f"Installing {package}=={version}...", "yellow") try: command = [sys.executable, "-m", "pip", "install", f"{package}=={version}"] if no_deps: command.append("--no-deps") result = subprocess.run(command, capture_output=True, text=True, check=True) my_cprint(f"Successfully installed {package}=={version}", "green") except subprocess.CalledProcessError as e: my_cprint(f"Failed to install {package}: {e.stderr}", "red") return False return True def check_and_install_dependencies(required_packages: dict[str, str], backend_name: str = "backend", interactive: bool = True) -> bool: import sys missing_packages = [] for package, version in required_packages.items(): available, current_version = is_package_available(package) if not available: missing_packages.append((package, version)) elif current_version != version: my_cprint(f"Warning: {package} version {current_version} found, expected {version}", "yellow") if not missing_packages: return True if not interactive or not sys.stdin.isatty(): return False return False def get_platform_info(): import platform return { "system": platform.system(), "platform": platform.platform(), "architecture": platform.machine() } def get_python_version(): import sys major = sys.version_info.major minor = sys.version_info.minor return { 'major': major, 'minor': minor, 'version_string': f'{major}.{minor}' } def has_nvidia_gpu(): import subprocess try: result = subprocess.run( ["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) return 
result.returncode == 0 except FileNotFoundError: return False def gpu_summary(): from pynvml import ( nvmlInit, nvmlShutdown, nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex, nvmlDeviceGetName, nvmlDeviceGetMemoryInfo, ) from numba import cuda cc_cores_per_SM = { (2, 0): 32, (2, 1): 48, (3, 0): 192, (3, 5): 192, (3, 7): 192, (5, 0): 128, (5, 2): 128, (6, 0): 64, (6, 1): 128, (7, 0): 64, (7, 5): 64, (8, 0): 64, (8, 6): 128, (8, 9): 128, (9, 0): 128, (10, 0): 128, (12, 0): 128, } nvmlInit() try: gpu_count = nvmlDeviceGetCount() summaries = [] for idx in range(gpu_count): handle = nvmlDeviceGetHandleByIndex(idx) name = nvmlDeviceGetName(handle) vram_gb = nvmlDeviceGetMemoryInfo(handle).total / (1024 ** 3) dev = cuda.select_device(idx) cc_major, cc_minor = dev.compute_capability sm_count = dev.MULTIPROCESSOR_COUNT cores_per_sm = cc_cores_per_SM.get((cc_major, cc_minor), 128) total_cores = cores_per_sm * sm_count summaries.append( { "index": idx, "name": name, "cuda_compute": f"{cc_major}.{cc_minor}", "vram": round(vram_gb, 2), "cuda_cores": total_cores, } ) return summaries finally: nvmlShutdown() def _needs_ocr_worker(path: str) -> bool: import fitz, logging try: with fitz.open(path) as doc: for page in doc: if page.get_text().strip(): return False return True except Exception as e: logging.error(f"PDF check error {path}: {e}") return False def clean_triton_cache(): import shutil from pathlib import Path triton_cache_dir = Path.home() / '.triton' if triton_cache_dir.exists(): try: print(f"\nRemoving Triton cache at {triton_cache_dir}...") shutil.rmtree(triton_cache_dir) print("\033[92mTriton cache successfully removed.\033[0m") return True except Exception as e: print(f"\033[91mWarning: Failed to remove Triton cache: {e}\033[0m") return False else: print("\nNo Triton cache found to clean.") return True def check_pdfs_for_ocr(script_dir): import multiprocessing as mp from pathlib import Path import fitz, logging, tempfile, os, threading from PySide6.QtWidgets import QMessageBox try: import psutil physical = psutil.cpu_count(logical=False) or mp.cpu_count() except ImportError: logical = mp.cpu_count() estimate = max(logical // 2, logical - 4) physical = max(1, estimate) n_procs = max(1, physical - 1) docs_dir = Path(script_dir) / "Docs_for_DB" pdf_paths = [p for p in docs_dir.iterdir() if p.suffix.lower() == ".pdf"] if not pdf_paths: return True, "" ctx = mp.get_context("spawn") with ctx.Pool(processes=n_procs) as pool: mask = pool.map(_needs_ocr_worker, map(str, pdf_paths), chunksize=16) non_ocr_pdfs = [p for p, flag in zip(pdf_paths, mask) if flag] if non_ocr_pdfs: message = "The following PDF files appear to have no text content and likely need OCR done on them:\n\n" for pdf_path in non_ocr_pdfs: message += f" - {pdf_path}\n" message += "\nPlease perform OCR on these by going to the Tools Tab first or remove them from the files selected for processing." 
msg_box = QMessageBox() msg_box.setWindowTitle("PDFs Need OCR") msg_box.setText(message) msg_box.setIcon(QMessageBox.Icon.Warning) msg_box.addButton(QMessageBox.StandardButton.Ok) view_report_button = msg_box.addButton("View Report", QMessageBox.ButtonRole.ActionRole) msg_box.exec() if msg_box.clickedButton() == view_report_button: with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as tmp: tmp.write("PDFs that need OCR:\n\n") for pdf_path in non_ocr_pdfs: tmp.write(f"{pdf_path}\n") temp_path = tmp.name os.startfile(temp_path) def cleanup(): try: os.unlink(temp_path) except FileNotFoundError: pass threading.Timer(1.0, cleanup).start() return False, "PDFs without text content detected." return True, "" class DownloadSignals(QObject): finished = Signal(bool, str) progress = Signal(str) class DownloadRunnable(QRunnable): def __init__(self, download_func, *args): super().__init__() self.download_func = download_func self.args = args self.signals = DownloadSignals() def run(self): try: result = self.download_func(*self.args) self.signals.finished.emit(result, "Download completed successfully") except Exception as e: self.signals.finished.emit(False, str(e)) def download_with_threadpool(download_func, *args, callback=None): runnable = DownloadRunnable(download_func, *args) if callback: runnable.signals.finished.connect(callback) QThreadPool.globalInstance().start(runnable) def download_kokoro_tts(): from pathlib import Path from huggingface_hub import snapshot_download import shutil repo_id = "ctranslate2-4you/Kokoro-82M-light" tts_path = PROJECT_ROOT / "Models" / "tts" / "ctranslate2-4you--Kokoro-82M-light" try: tts_path.parent.mkdir(parents=True, exist_ok=True) print(f"Downloading Kokoro TTS model from {repo_id}...") snapshot_download( repo_id=repo_id, local_dir=str(tts_path), max_workers=4, token=False ) print("Kokoro TTS model downloaded successfully") return True except Exception as e: print(f"Failed to download Kokoro TTS model: {e}") if tts_path.exists(): shutil.rmtree(tts_path) return False def normalize_chat_text(text): def split_num(num): num = num.group() if '.' in num: return num elif ':' in num: h, m = [int(n) for n in num.split(':')] if m == 0: return f"{h} o'clock" elif m < 10: return f'{h} oh {m}' return f'{h} {m}' year = int(num[:4]) if year < 1100 or year % 1000 < 10: return num left, right = num[:2], int(num[2:4]) s = 's' if num.endswith('s') else '' if 100 <= year % 1000 <= 999: if right == 0: return f'{left} hundred{s}' elif right < 10: return f'{left} oh {right}{s}' return f'{left} {right}{s}' def flip_money(m): m = m.group() bill = 'dollar' if m[0] == '$' else 'pound' if m[-1].isalpha(): return f'{m[1:]} {bill}s' elif '.' 
not in m: s = '' if m[1:] == '1' else 's' return f'{m[1:]} {bill}{s}' b, c = m[1:].split('.') s = '' if b == '1' else 's' c = int(c.ljust(2, '0')) coins = f"cent{'' if c == 1 else 's'}" if m[0] == '$' else ('penny' if c == 1 else 'pence') return f'{b} {bill}{s} and {c} {coins}' def point_num(num): a, b = num.group().split('.') return ' point '.join([a, ' '.join(b)]) text = text.replace('§', ' section ') text = text.replace(chr(8216), "'").replace(chr(8217), "'") text = text.replace('«', '"').replace('»', '"') text = text.replace(chr(8220), '"').replace(chr(8221), '"') text = re.sub(r'\bD[Rr]\.(?= [A-Z])', 'Doctor', text) text = re.sub(r'\b(?:Mr\.|MR\.(?= [A-Z]))', 'Mister', text) text = re.sub(r'\b(?:Ms\.|MS\.(?= [A-Z]))', 'Miss', text) text = re.sub(r'\b(?:Mrs\.|MRS\.(?= [A-Z]))', 'Mrs', text) text = re.sub(r'\d*\.\d+|\b\d{4}s?\b|(?= 8 logging.debug(f"Flash attention {'supported' if supports else 'not supported'}") return supports def check_cuda_re_triton(): logging.debug("Starting CUDA files check for Triton") venv_base = Path(sys.executable).parent.parent nvidia_base_path = venv_base / 'Lib' / 'site-packages' / 'nvidia' cuda_runtime = nvidia_base_path / 'cuda_runtime' logging.debug(f"Virtual environment base path: {venv_base}") logging.debug(f"NVIDIA base path: {nvidia_base_path}") logging.debug(f"CUDA runtime path: {cuda_runtime}") files_to_check = [ cuda_runtime / "bin" / "cudart64_12.dll", cuda_runtime / "bin" / "ptxas.exe", cuda_runtime / "include" / "cuda.h", cuda_runtime / "lib" / "x64" / "cuda.lib" ] logging.debug("Beginning file existence checks") print("Checking CUDA files:") for file_path in files_to_check: exists = file_path.exists() status = "✓ Found" if exists else "✗ Missing" logging.debug(f"Checking {file_path}: {'exists' if exists else 'missing'}") print(f"{status}: {file_path}") print() logging.debug("CUDA file check completed") def get_model_native_precision(embedding_model_name, vector_models=None): logging.debug(f"Looking for precision for model: {embedding_model_name}") if vector_models is None: from core.constants import VECTOR_MODELS vector_models = VECTOR_MODELS model_name = os.path.basename(embedding_model_name) repo_style_name = model_name.replace('--', '/') for group_name, group_models in vector_models.items(): logging.debug(f"Checking group: {group_name}") for model in group_models: logging.debug(f"Checking model: {model['repo_id']} / {model['name']}") if model['repo_id'] == repo_style_name or model['name'] in model_name: logging.debug(f"Found match! Using precision: {model['precision']}") return model['precision'] logging.debug("No match found, defaulting to float32") return 'float32' def get_appropriate_dtype(compute_device, use_half, model_native_precision): logging.debug(f"compute_device: {compute_device}") logging.debug(f"use_half: {use_half}") logging.debug(f"model_native_precision: {model_native_precision}") compute_device = compute_device.lower() model_native_precision = model_native_precision.lower() if compute_device == 'cpu': logging.debug("Using CPU, returning float32") return torch.float32 cuda_available = torch.cuda.is_available() if cuda_available: cuda_capability = torch.cuda.get_device_capability() logging.debug(f"CUDA is available. 
Capability: {cuda_capability}") else: cuda_capability = (0, 0) logging.debug("CUDA is not available.") if model_native_precision == 'bfloat16': if use_half: if cuda_available: if cuda_capability[0] >= 8: logging.debug("Model native precision is bfloat16, GPU supports it, returning bfloat16") return torch.bfloat16 else: logging.debug("GPU doesn't support bfloat16, falling back to float16") return torch.float16 else: logging.debug("No CUDA available for bfloat16, falling back to float32") return torch.float32 else: logging.debug("Half checkbox not checked for bfloat16 model, returning float32") return torch.float32 elif model_native_precision == 'float16': if use_half: if cuda_available: logging.debug("Model native precision is float16 and CUDA is available, returning float16") return torch.float16 else: logging.debug("Model native precision is float16 but CUDA is not available, returning float32") return torch.float32 else: logging.debug("Half checkbox not checked for float16 model, returning float32") return torch.float32 elif model_native_precision == 'float32': if not use_half: logging.debug("Model is float32 and use_half is False, returning float32") return torch.float32 else: if cuda_available: if cuda_capability[0] >= 8: logging.debug("Using bfloat16 due to Ampere+ GPU") return torch.bfloat16 else: logging.debug("Using float16 due to pre-Ampere GPU") return torch.float16 else: logging.debug("No CUDA available, returning float32") return torch.float32 else: logging.debug(f"Unrecognized precision '{model_native_precision}', returning float32") return torch.float32 def format_citations(metadata_list): def group_metadata(metadata_list): grouped = {} for metadata in metadata_list: file_path = metadata['file_path'] grouped.setdefault(file_path, { 'name': Path(file_path).name, 'scores': [], 'pages': set(), 'file_type': metadata.get('file_type', '') }) grouped[file_path]['scores'].append(metadata['similarity_score']) if grouped[file_path]['file_type'] == '.pdf': page_number = metadata.get('page_number') if page_number is not None: grouped[file_path]['pages'].add(page_number) return grouped def format_pages(pages): if not pages: return '' sorted_pages = sorted(pages) ranges = [] start = prev = sorted_pages[0] for page in sorted_pages[1:]: if page == prev + 1: prev = page else: ranges.append((start, prev)) start = prev = page ranges.append((start, prev)) page_str = ', '.join(f"{s}-{e}" if s != e else f"{s}" for s, e in ranges) return f' p.{page_str}' def create_citation(data, file_path): min_score = min(data['scores']) max_score = max(data['scores']) score_range = f"{min_score:.4f}" if min_score == max_score else f"{min_score:.4f}-{max_score:.4f}" pages_html = format_pages(data['pages']) if data['file_type'] == '.pdf' else '' citation = ( f'{data["name"]}' f' [' f'{score_range}]' f'{pages_html}' f'' ) return min_score, citation grouped_citations = group_metadata(metadata_list) citations_with_scores = [create_citation(data, file_path) for file_path, data in grouped_citations.items()] sorted_citations = [citation for _, citation in sorted(citations_with_scores)] list_items = "".join(f"
  • {citation}
  • " for citation in sorted_citations) return f"
      {list_items}
    " def list_theme_files(): return sorted(THEMES.keys()) def load_stylesheet(name): if name not in THEMES: name = 'default' template_path = PROJECT_ROOT / 'CSS' / 'template.css' with template_path.open('r') as f: template = Template(f.read()) return template.substitute(THEMES[name]) def ensure_theme_config(): try: with open('config.yaml', 'r') as f: config = yaml.safe_load(f) if config is None: config = {} if 'appearance' not in config: config['appearance'] = {} theme = config['appearance'].get('theme') if not theme or theme not in THEMES: config['appearance']['theme'] = 'default' with open('config.yaml', 'w') as f: yaml.safe_dump(config, f) return config['appearance']['theme'] except Exception: return 'default' def update_theme_in_config(new_theme): try: with open('config.yaml', 'r') as f: config = yaml.safe_load(f) if config is None: config = {} if 'appearance' not in config: config['appearance'] = {} config['appearance']['theme'] = new_theme with open('config.yaml', 'w') as f: yaml.safe_dump(config, f) except Exception: pass def make_theme_changer(theme_name): def change_theme(): QApplication.instance().setStyleSheet(load_stylesheet(theme_name)) update_theme_in_config(theme_name) return change_theme def backup_database(database_name=None): source_directory = Path('Vector_DB') backup_directory = Path('Vector_DB_Backup') if database_name: logging.debug("Starting incremental database backup") backup_directory.mkdir(parents=True, exist_ok=True) source_db_path = source_directory / database_name backup_db_path = backup_directory / database_name if backup_db_path.exists(): try: shutil.rmtree(backup_db_path) except Exception as e: logging.debug(f"Failed to remove existing backup: {e}") print(f"Warning: Could not remove existing backup of {database_name}: {e}") try: shutil.copytree(source_db_path, backup_db_path) logging.debug(f"Successfully created backup of {database_name}") except Exception as e: logging.debug(f"Backup failed: {e}") print(f"Error backing up {database_name}: {e}") else: logging.debug("Starting full database backup") if backup_directory.exists(): for item in backup_directory.iterdir(): if item.is_dir(): shutil.rmtree(item) else: item.unlink() else: backup_directory.mkdir(parents=True, exist_ok=True) shutil.copytree(source_directory, backup_directory, dirs_exist_ok=True) logging.debug("Database backup completed successfully") def open_file(file_path): try: if platform.system() == "Windows": os.startfile(file_path) elif platform.system() == "Darwin": subprocess.Popen(["open", file_path]) else: subprocess.Popen(["xdg-open", file_path]) except OSError: QMessageBox.warning(None, "Error", "No default viewer detected.") def delete_file(file_path): try: os.remove(file_path) except OSError: QMessageBox.warning(None, "Unable to delete file(s), please delete manually.") def check_preconditions_for_db_creation(script_dir, database_name, skip_ocr=False): if not database_name or len(database_name) < 3 or database_name.lower() in ["null", "none"]: return False, "Name must be at least 3 characters long and not be 'null' or 'none.'" vector_db_path = script_dir / "Vector_DB" / database_name if vector_db_path.exists(): return False, ( f"A vector database called '{database_name}' already exists—" "choose a different name or delete the old one first." ) config_path = script_dir / 'config.yaml' if not config_path.exists(): return False, "The configuration file (config.yaml) is missing." 
with open(config_path, 'r') as file: config = yaml.safe_load(file) image_extensions = ['.png', '.jpg', '.jpeg', '.bmp', '.gif', '.tif', '.tiff'] documents_dir = script_dir / "Docs_for_DB" if platform.system() == "Darwin" and any(file.suffix in image_extensions for file in documents_dir.iterdir() if file.is_file()): return False, "Image processing has been disabled for MacOS until a fix can be implemented. Please remove all image files and try again." embedding_model_name = config.get('EMBEDDING_MODEL_NAME') if not embedding_model_name: return False, "You must first download an embedding model, select it, and choose documents before proceeding." if not any(file.is_file() for file in documents_dir.iterdir()): return False, "No documents are yet added to be processed." compute_device = config.get('Compute_Device', {}).get('available', []) database_creation = config.get('Compute_Device', {}).get('database_creation') if ("cuda" in compute_device or "mps" in compute_device) and database_creation == "cpu": return False, ("GPU-acceleration is available and strongly recommended. " "Please switch the database creation device to 'cuda' or 'mps', " "or confirm your choice in the GUI.") if not torch.cuda.is_available(): if config.get('database', {}).get('half', False): message = ("CUDA is not available on your system, but half-precision (FP16) " "is selected for database creation. Half-precision requires CUDA. " "Please disable half-precision in the configuration or use a CUDA-enabled GPU.") return False, message if not skip_ocr: ocr_check, ocr_message = check_pdfs_for_ocr(script_dir) if not ocr_check: return False, ocr_message return True, "" def my_cprint(*args, **kwargs): filename = os.path.basename(sys._getframe(1).f_code.co_filename) modified_message = f"{args[0]}" kwargs['flush'] = True cprint(modified_message, *args[1:], **kwargs) def has_bfloat16_support(): logging.debug("Checking bfloat16 support") if not torch.cuda.is_available(): logging.debug("CUDA not available, bfloat16 not supported") return False capability = torch.cuda.get_device_capability() logging.debug(f"CUDA compute capability: {capability}") has_support = capability >= (8, 0) logging.debug(f"bfloat16 {'supported' if has_support else 'not supported'}") return has_support def set_logging_level(): library_levels = { "accelerate": logging.WARNING, "bitsandbytes": logging.WARNING, "ctranslate2": logging.WARNING, "datasets": logging.WARNING, "einops": logging.WARNING, "einx": logging.WARNING, "flash_attn": logging.WARNING, "huggingface-hub": logging.WARNING, "numpy": logging.WARNING, "openai": logging.WARNING, "openai-whisper": logging.WARNING, "optimum": logging.WARNING, "pillow": logging.WARNING, "requests": logging.WARNING, "sentence-transformers": logging.WARNING, "sounddevice": logging.WARNING, "speechbrain": logging.WARNING, "sympy": logging.WARNING, "tiledb": logging.WARNING, "tiledb-cloud": logging.WARNING, "tiledb-vector-search": logging.WARNING, "timm": logging.WARNING, "tokenizers": logging.WARNING, "torch": logging.WARNING, "torchaudio": logging.WARNING, "torchvision": logging.WARNING, "transformers": logging.WARNING, "unstructured": logging.WARNING, "unstructured-client": logging.WARNING, "vector-quantize-pytorch": logging.WARNING, "vocos": logging.WARNING, "xformers": logging.WARNING } for lib, level in library_levels.items(): logging.getLogger(lib).setLevel(level) def prepare_long_path(base_path: str, filename: str) -> str: base_path = os.path.normpath(base_path) full_path = os.path.join(base_path, filename) if os.name 
== 'nt' and len(full_path) > 255: full_path = "\\\\?\\" + os.path.abspath(full_path) return full_path def normalize_text(text, preserve_whitespace=False): import unicodedata if text is None: return None if isinstance(text, (list, tuple)): text = " ".join(str(item) for item in text if item is not None) if not isinstance(text, str): text = str(text) text = unicodedata.normalize("NFKC", text) INVISIBLE_CHARS = { '\u00ad', '\u200b', '\u200c', '\u200d', '\u200e', '\u200f', '\u2060', '\u2061', '\u2062', '\u2063', '\u2064', '\ufeff', } cleaned = [] for char in text: code = ord(char) if char == '\n' or char == '\t': if preserve_whitespace: cleaned.append(char) else: cleaned.append(' ') elif char == '\r': cleaned.append(' ') elif code < 32: continue elif code == 127: continue elif code > 65535: continue elif char in INVISIBLE_CHARS: continue elif 128 <= code <= 159: continue elif code == 65533: continue elif 57344 <= code <= 63743: continue else: cleaned.append(char) result = "".join(cleaned) if preserve_whitespace: result = re.sub(r'[^\S\n\t]+', ' ', result) result = re.sub(r' *\n *', '\n', result) result = re.sub(r'\n{3,}', '\n\n', result) else: result = " ".join(result.split()) result = result.strip() return result if result else None def get_embedding_batch_size(model_name: str, compute_device: str) -> int: if compute_device.lower() == 'cpu': return 2 batch_size_mapping = { 'inf-retriever-v1-7b': 2, 'Qwen3-Embedding-8B': 2, 'Qwen3-Embedding-4B': 3, 'inf-retriever-v1-1.5b': 3, 'Qwen3-Embedding-0.6B': 4, 'e5-base': 6, 'e5-large': 7, 'arctic-embed-l': 7, 'bge-large-en-v1.5': 6, 'e5-small': 10, 'gte-large': 12, 'Granite-30m-English': 12, 'bge-small': 12, 'bge-small-en-v1.5': 12, 'bge-base-en-v1.5': 8, 'gte-base': 14, 'arctic-embed-m': 14, } model_name_lower = model_name.lower() for key, value in batch_size_mapping.items(): if key.lower() in model_name_lower: return value return 8 def get_embedding_dtype_and_batch( compute_device: str, use_half: bool, model_native_precision: str, model_name: str, is_query: bool, ): dtype = get_appropriate_dtype(compute_device, use_half, model_native_precision) batch = 1 if is_query else get_embedding_batch_size(model_name, compute_device) return dtype, batch def configure_logging(level: str = "INFO"): root = logging.getLogger() if root.handlers: root.setLevel(level.upper()) return root.setLevel(level.upper()) h = logging.StreamHandler() fmt = logging.Formatter( "%(asctime)s %(levelname)s [%(name)s] %(message)s" ) h.setFormatter(fmt) root.addHandler(h) ================================================ FILE: db/__init__.py ================================================ ================================================ FILE: db/choose_documents.py ================================================ from pathlib import Path from multiprocessing import Pool, cpu_count import yaml from PySide6.QtCore import QElapsedTimer, QThread, Signal, Qt from PySide6.QtWidgets import ( QApplication, QFileDialog, QFileSystemModel, QHBoxLayout, QProgressDialog, QVBoxLayout, QDialog, QTextEdit, QPushButton, QMessageBox, ) from db.create_symlinks import _create_single_symlink from core.constants import PROJECT_ROOT ALLOWED_EXTENSIONS = { ".pdf", ".docx", ".txt", ".eml", ".msg", ".csv", ".xls", ".xlsx", ".xlsm", ".rtf", ".png", ".jpg", ".jpeg", ".bmp", ".gif", ".tif", ".tiff", ".html", ".htm", ".md", } DOCS_FOLDER = "Docs_for_DB" CONFIG_FILE = "config.yaml" class SymlinkWorker(QThread): progress = Signal(int) finished = Signal(int, list) def __init__(self, source, target_dir, parent=None): 
super().__init__(parent) self.source = source self.target_dir = Path(target_dir) def run(self): if isinstance(self.source, (str, Path)): dir_path = Path(self.source) try: import os filenames = os.listdir(str(dir_path)) files = [ str(dir_path / filename) for filename in filenames if (dir_path / filename).is_file() and (dir_path / filename).suffix.lower() in ALLOWED_EXTENSIONS ] except OSError: files = [] print(f"Error accessing directory {dir_path}") else: files = list(self.source) total = len(files) made = 0 errors = [] last_pct = -1 timer = QElapsedTimer() timer.start() step = max(1, total // 100) if total else 1 if total > 1000: processes = min((total // 10000) + 1, cpu_count()) file_args = [(f, str(self.target_dir)) for f in files] with Pool(processes=processes) as pool: for i, (ok, err) in enumerate( pool.imap_unordered(_create_single_symlink, file_args), 1 ): if ok: made += 1 if err: errors.append(err) if i % step == 0 or i == total: pct = int(i * 100 / total) if total else 100 if pct != last_pct and timer.elapsed() > 500: self.progress.emit(pct) last_pct = pct timer.restart() else: for f in files: if self.isInterruptionRequested(): break ok, err = _create_single_symlink((f, str(self.target_dir))) if ok: made += 1 if err: errors.append(err) if made % step == 0 or made == total: pct = int(made * 100 / total) if total else 100 if pct != last_pct and timer.elapsed() > 500: self.progress.emit(pct) last_pct = pct timer.restart() self.finished.emit(made, errors) def choose_documents_directory(): current_dir = PROJECT_ROOT target_dir = current_dir / DOCS_FOLDER target_dir.mkdir(parents=True, exist_ok=True) msg_box = QMessageBox() msg_box.setWindowTitle("Selection Type") msg_box.setText("Would you like to select a directory or individual files?") dir_button = msg_box.addButton("Select Directory", QMessageBox.ActionRole) files_button = msg_box.addButton("Select Files", QMessageBox.ActionRole) cancel_button = msg_box.addButton("Cancel", QMessageBox.RejectRole) msg_box.exec() clicked_button = msg_box.clickedButton() if clicked_button == cancel_button: return file_dialog = QFileDialog() def start_worker(source): progress = QProgressDialog( "Creating symlinks...", "Cancel", 0, 0 ) progress.setWindowModality(Qt.WindowModal) progress.setMinimumDuration(0) worker = SymlinkWorker(source, target_dir) main_window = _get_main_window() if main_window and hasattr(main_window, "databases_tab"): db_tab = main_window.databases_tab if hasattr(db_tab, "docs_model") and db_tab.docs_model: if hasattr(QFileSystemModel, "DontWatchForChanges"): db_tab.docs_model.setOption( QFileSystemModel.DontWatchForChanges, True ) if hasattr(db_tab, "docs_refresh"): db_tab.docs_refresh.start() progress.canceled.connect(worker.requestInterruption) def update_progress(pct): if progress.maximum() == 0: progress.setRange(0, 100) progress.setValue(pct) worker.progress.connect(update_progress) def _done(count, errs): if main_window and hasattr(main_window, "databases_tab"): db_tab = main_window.databases_tab if hasattr(db_tab, "docs_refresh"): db_tab.docs_refresh.stop() if hasattr(db_tab, "docs_model") and db_tab.docs_model: if hasattr(db_tab.docs_model, "refresh"): db_tab.docs_model.refresh() elif hasattr(db_tab.docs_model, "reindex"): db_tab.docs_model.reindex() if hasattr(QFileSystemModel, "DontWatchForChanges"): db_tab.docs_model.setOption( QFileSystemModel.DontWatchForChanges, False ) progress.reset() msg = f"Created {count} symlinks" if errs: msg += f" – {len(errs)} errors (see console)" print(*errs, sep="\n") 
QMessageBox.information(None, "Symlinks", msg) worker.finished.connect(_done) worker.progress.connect(update_progress) worker.start() choose_documents_directory._symlink_thread = worker if clicked_button == dir_button: file_dialog.setFileMode(QFileDialog.Directory) file_dialog.setOption(QFileDialog.ShowDirsOnly, True) selected_dir = file_dialog.getExistingDirectory( None, "Choose Directory for Database", str(current_dir) ) if selected_dir: selected_path = Path(selected_dir) top_level_files = [ str(p) for p in selected_path.iterdir() if p.is_file() and p.suffix.lower() in ALLOWED_EXTENSIONS ] subdirectory_files = [ str(p) for p in selected_path.rglob("*") if p.is_file() and p.parent != selected_path and p.suffix.lower() in ALLOWED_EXTENSIONS ] include_subdirs = False if subdirectory_files: reply = QMessageBox.question( None, "Include Subdirectories?", ( f"This folder contains {len(top_level_files)} compatible file(s) " f"at the top level and {len(subdirectory_files)} more in " f"subdirectories.\n\nInclude the subdirectory files as well?" ), QMessageBox.Yes | QMessageBox.No, QMessageBox.No, ) include_subdirs = (reply == QMessageBox.Yes) files_to_symlink = ( top_level_files + subdirectory_files if include_subdirs else top_level_files ) if files_to_symlink: start_worker(files_to_symlink) else: QMessageBox.information( None, "No Compatible Files", "No compatible files were found in the selected directory." ) else: file_dialog.setFileMode(QFileDialog.ExistingFiles) file_paths = file_dialog.getOpenFileNames( None, "Choose Documents and Images for Database", str(current_dir) )[0] if file_paths: compatible_files = [] incompatible_files = [] for file_path in file_paths: path = Path(file_path) if path.suffix.lower() in ALLOWED_EXTENSIONS: compatible_files.append(str(path)) else: incompatible_files.append(path.name) if incompatible_files and not show_incompatible_files_dialog( incompatible_files ): return if compatible_files: start_worker(compatible_files) def show_incompatible_files_dialog(incompatible_files): dialog_text = ( "The following files cannot be added here due to their file extension:\n\n" + "\n".join(incompatible_files) + "\n\nHowever, if any of them are audio files you can still add them directly in the Tools Tab." "\n\nClick 'Ok' to add the compatible documents only (remembering to add audio files separately)" " or 'Cancel' to back out completely." 
) incompatible_dialog = QDialog() incompatible_dialog.resize(800, 600) incompatible_dialog.setWindowTitle("Incompatible Files Detected") layout = QVBoxLayout() text_edit = QTextEdit() text_edit.setReadOnly(True) text_edit.setText(dialog_text) layout.addWidget(text_edit) button_box = QHBoxLayout() ok_button = QPushButton("OK") cancel_button = QPushButton("Cancel") button_box.addWidget(ok_button) button_box.addWidget(cancel_button) layout.addLayout(button_box) incompatible_dialog.setLayout(layout) ok_button.clicked.connect(incompatible_dialog.accept) cancel_button.clicked.connect(incompatible_dialog.reject) return incompatible_dialog.exec() == QDialog.Accepted def load_config(): with open(CONFIG_FILE, "r", encoding="utf-8") as stream: return yaml.safe_load(stream) def select_embedding_model_directory(): initial_dir = Path("Models") if Path("Models").exists() else Path.home() chosen_directory = QFileDialog.getExistingDirectory( None, "Select Embedding Model Directory", str(initial_dir) ) if chosen_directory: config_file_path = Path(CONFIG_FILE) config_data = ( yaml.safe_load(config_file_path.read_text(encoding="utf-8")) if config_file_path.exists() else {} ) config_data["EMBEDDING_MODEL_NAME"] = chosen_directory config_file_path.write_text(yaml.dump(config_data), encoding="utf-8") def _get_main_window(): for widget in QApplication.topLevelWidgets(): if hasattr(widget, "databases_tab"): return widget return None ================================================ FILE: db/create_symlinks.py ================================================ import hashlib from multiprocessing import Pool, cpu_count from pathlib import Path from typing import Union, List, Tuple def _points_to(link_path: Path, source_path) -> bool: try: return link_path.is_symlink() and link_path.resolve() == Path(source_path).resolve() except Exception: return False def _create_single_symlink(args): source_path, target_dir = args try: source = Path(source_path) target = Path(target_dir) link_path = target / source.name if not link_path.exists(): link_path.symlink_to(source_path) return True, None if _points_to(link_path, source_path): return False, None suffix_hash = hashlib.md5(str(source).encode("utf-8")).hexdigest()[:8] disambiguated = target / f"{source.stem}_{suffix_hash}{source.suffix}" if not disambiguated.exists(): disambiguated.symlink_to(source_path) return True, None if _points_to(disambiguated, source_path): return False, None return False, f"Symlink collision could not be resolved for {source.name}" except Exception as e: return False, f"Error creating symlink for {Path(source_path).name}: {str(e)}" def create_symlinks_parallel(source: Union[str, Path, List[str], List[Path]], target_dir: Union[str, Path] = "Docs_for_DB") -> Tuple[int, list]: target_dir = Path(target_dir) if not target_dir.exists(): print(f"Target directory does not exist: {target_dir}") return 0, [] try: if isinstance(source, (str, Path)) and not isinstance(source, list): source_dir = Path(source) if not source_dir.exists(): raise ValueError(f"Source directory does not exist: {source_dir}") files = [(str(p), str(target_dir)) for p in source_dir.iterdir() if p.is_file()] elif isinstance(source, list): files = [(str(Path(p)), str(target_dir)) for p in source] else: raise ValueError("Source must be either a directory path or a list of file paths") file_count = len(files) if file_count <= 1000: results = [_create_single_symlink(file) for file in files] else: if file_count <= 10000: processes = 1 else: processes = min((file_count // 10000) + 1, cpu_count()) 
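# Added commentary (not in the original source): the worker-count heuristic above
# scales roughly one process per 10,000 files, capped at the machine's CPU count.
# For example, 25,000 files -> (25000 // 10000) + 1 = 3 processes, while
# 500,000 files on an 8-core machine -> min(51, 8) = 8 processes.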
print(f"Processing {file_count} files using {processes} processes") with Pool(processes=processes) as pool: results = pool.map(_create_single_symlink, files) count = sum(1 for success, _ in results if success) errors = [error for _, error in results if error is not None] print(f"\nComplete! Created {count} symbolic links") if errors: print("\nErrors occurred:") for error in errors: print(error) return count, errors except Exception as e: raise RuntimeError(f"An error occurred: {str(e)}") ================================================ FILE: db/cuda_manager.py ================================================ import threading import logging import torch from contextlib import contextmanager logger = logging.getLogger(__name__) class CUDAManager: _instance = None _lock = threading.Lock() def __new__(cls): if cls._instance is None: with cls._lock: if cls._instance is None: cls._instance = super().__new__(cls) cls._instance._initialized = False return cls._instance def __init__(self): if self._initialized: return self.active_operations = 0 self.operation_lock = threading.Lock() self._initialized = True @contextmanager def cuda_operation(self): with self.operation_lock: self.active_operations += 1 try: yield finally: with self.operation_lock: self.active_operations -= 1 def safe_empty_cache(self): if not torch.cuda.is_available(): return with self.operation_lock: if self.active_operations > 0: logger.debug(f"Skipping cache clear: {self.active_operations} active operations") return try: torch.cuda.empty_cache() logger.debug("CUDA cache cleared successfully") except Exception as e: logger.warning(f"Failed to clear CUDA cache: {e}") def force_empty_cache(self): if not torch.cuda.is_available(): return try: torch.cuda.empty_cache() logger.debug("CUDA cache forcibly cleared") except Exception as e: logger.warning(f"Failed to force clear CUDA cache: {e}") _cuda_manager_instance = None def get_cuda_manager() -> CUDAManager: global _cuda_manager_instance if _cuda_manager_instance is None: _cuda_manager_instance = CUDAManager() return _cuda_manager_instance ================================================ FILE: db/database_interactions.py ================================================ import faulthandler faulthandler.enable() # Module-level TileDB DLL preload. Mirrors the approach in VectorDB-Light's # vector_db_query.py (lines 1-31). Critical for subprocesses spawned via # multiprocessing.Process (e.g. the chunks-only query path): when the # fresh interpreter imports this module, the DLLs load immediately — # before any other code can accidentally trigger a tiledb.vector_search # import without DLL registration, which on Windows causes the # _tiledbvspy native module to fail with # ImportError: DLL load failed while importing _tiledbvspy # or an even worse silent hang. The standalone _setup_tiledb_dlls() # function below is kept for the creation subprocess path, where DLL # setup has to happen after configure_logging() / set_cuda_paths(). 
import os import sys import ctypes try: import tiledb as _tiledb_bootstrap # noqa: F401 _venv_root = os.path.dirname(os.path.dirname(sys.executable)) _site_packages = os.path.join(_venv_root, "Lib", "site-packages") _tiledb_libs = os.path.join(_site_packages, "tiledb.libs") _vector_search_lib = os.path.join(_site_packages, "tiledb", "vector_search", "lib") for _directory in (_tiledb_libs, _vector_search_lib): if os.path.isdir(_directory): try: os.add_dll_directory(_directory) except OSError: pass if os.path.isdir(_tiledb_libs): for _filename in sorted(os.listdir(_tiledb_libs)): if _filename.endswith(".dll"): try: ctypes.CDLL(os.path.join(_tiledb_libs, _filename)) except Exception: pass if os.path.isdir(_vector_search_lib): _tiledb_dll = os.path.join(_vector_search_lib, "tiledb.dll") if os.path.exists(_tiledb_dll): try: ctypes.CDLL(_tiledb_dll) except Exception: pass except ImportError: # tiledb not installed — will fail later at actual use. Don't block # the import itself in case this module is loaded for non-TileDB work # (e.g. tests that only exercise pure helpers). pass import gc import json import logging import pickle import random import re import shutil import subprocess import tempfile import threading import time import traceback from pathlib import Path from typing import Optional import numpy as np import torch # orjson is a Rust-based JSON encoder that's ~10x faster than stdlib json # and avoids the heap fragmentation that triggers OverflowError + access # violation when serializing millions of metadata dicts in tight loops. try: import orjson def _json_dumps(obj) -> str: return orjson.dumps(obj).decode("utf-8") except ImportError: def _json_dumps(obj) -> str: return json.dumps(obj) from db.document_processor import Document from db.embedding_models import load_embedding_model from db.sqlite_operations import create_metadata_db from db.cuda_manager import get_cuda_manager from core.config import get_config from core.constants import PROJECT_ROOT, PIPELINE_PRESETS from core.utilities import my_cprint, set_cuda_paths, configure_logging logger = logging.getLogger(__name__) os.environ.setdefault("TOKENIZERS_PARALLELISM", "false") os.environ.setdefault("RUST_BACKTRACE", "1") STAGE_EXTRACT_PATH = PROJECT_ROOT / "db" / "stage_extract.py" STAGE_SPLIT_PATH = PROJECT_ROOT / "db" / "stage_split.py" EXTRACT_MAX_RETRIES = 3 SPLIT_MAX_WORKER_RETRIES = 3 SPLIT_MAX_RETRIES = 5 TILEDB_WRITE_BATCH_SIZE = 100000 MAX_UINT64_SENTINEL = np.iinfo(np.uint64).max def _get_split_params(): try: preset_name = get_config().database.pipeline_preset except Exception: preset_name = "normal" preset = PIPELINE_PRESETS.get(preset_name, PIPELINE_PRESETS["normal"]) return preset["split_max_parallel_workers"], preset["split_worker_batch_size"] def _run_subprocess_stage(name, cmd, timeout=3600): logger.info(f"Starting subprocess stage: {name}") process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1, cwd=str(PROJECT_ROOT), env={**os.environ, "PYTHONUNBUFFERED": "1"}, ) output_lines = [] for line in process.stdout: line = line.rstrip("\n") if line.strip(): logger.info(f" [{name}] {line}") output_lines.append(line) process.wait(timeout=timeout) if process.returncode != 0: for line in output_lines[-10:]: logger.error(f" {line}") return process.returncode, output_lines def _run_extract_with_retry(source_dir, output_pkl): python = sys.executable cmd = [python, str(STAGE_EXTRACT_PATH), str(source_dir), str(output_pkl)] for attempt in range(1, EXTRACT_MAX_RETRIES + 1): 
logger.info(f"Extract attempt {attempt}/{EXTRACT_MAX_RETRIES}") exit_code, _ = _run_subprocess_stage(f"Extract (attempt {attempt})", cmd) if exit_code == 0 and output_pkl.exists(): logger.info(f"Extract stage completed on attempt {attempt}") return logger.error(f"Extract attempt {attempt} failed (exit code {exit_code})") if attempt < EXTRACT_MAX_RETRIES: logger.info("Waiting 3 seconds before retry...") time.sleep(3) gc.collect() raise RuntimeError(f"Extract stage failed after {EXTRACT_MAX_RETRIES} attempts") def _run_split_with_retry(extracted_pkl, chunks_pkl, chunk_size, chunk_overlap, checkpoint_dir): python = sys.executable split_parallel, split_batch = _get_split_params() for attempt in range(1, SPLIT_MAX_RETRIES + 1): logger.info(f"Split attempt {attempt}/{SPLIT_MAX_RETRIES}") split_cmd = [ python, str(STAGE_SPLIT_PATH), str(extracted_pkl), str(chunks_pkl), str(chunk_size), str(chunk_overlap), "--worker-batch-size", str(split_batch), "--max-worker-retries", str(SPLIT_MAX_WORKER_RETRIES), "--max-parallel-workers", str(split_parallel), "--checkpoint-dir", str(checkpoint_dir), "--checkpoint-interval", "5", ] exit_code, _ = _run_subprocess_stage(f"Split (attempt {attempt})", split_cmd) if exit_code == 0 and chunks_pkl.exists(): logger.info(f"Split stage completed on attempt {attempt}") return logger.error(f"Split attempt {attempt} failed (exit code {exit_code})") if attempt < SPLIT_MAX_RETRIES: logger.info("Waiting 3 seconds before retry...") time.sleep(3) gc.collect() raise RuntimeError(f"Split stage failed after {SPLIT_MAX_RETRIES} attempts") def _setup_tiledb_dlls(): import ctypes import tiledb venv_root = os.path.dirname(os.path.dirname(sys.executable)) site_packages = os.path.join(venv_root, 'Lib', 'site-packages') tiledb_libs = os.path.join(site_packages, 'tiledb.libs') vector_search_lib = os.path.join(site_packages, 'tiledb', 'vector_search', 'lib') for directory in [tiledb_libs, vector_search_lib]: if os.path.isdir(directory): try: os.add_dll_directory(directory) except OSError: pass if os.path.isdir(tiledb_libs): for filename in sorted(os.listdir(tiledb_libs)): if filename.endswith('.dll'): try: ctypes.CDLL(os.path.join(tiledb_libs, filename)) except Exception: pass if os.path.isdir(vector_search_lib): tiledb_dll = os.path.join(vector_search_lib, 'tiledb.dll') if os.path.exists(tiledb_dll): try: ctypes.CDLL(tiledb_dll) except Exception: pass def create_vector_db_in_process(database_name): faulthandler.enable() configure_logging("INFO") set_cuda_paths() _setup_tiledb_dlls() os.environ["TOKENIZERS_PARALLELISM"] = "false" os.environ["RUST_BACKTRACE"] = "1" create_vector_db = None try: create_vector_db = CreateVectorDB(database_name=database_name) create_vector_db.run() except Exception: traceback.print_exc() raise finally: if create_vector_db: del create_vector_db gc.collect() if torch.cuda.is_available(): try: torch.cuda.empty_cache() torch.cuda.synchronize() except Exception: pass time.sleep(0.1) def process_chunks_only_query(database_name, query, result_queue): configure_logging("INFO") try: query_db = QueryVectorDB(database_name) try: contexts, metadata_list = query_db.search(query) if not contexts: result_queue.put( "No chunks passed the similarity threshold.\n\n" "Try lowering the 'Similarity' setting in the Database Query " "settings tab (e.g. from 0.7 to 0.4) and run the query again." 
) return formatted_contexts = [] for index, (context, metadata) in enumerate(zip(contexts, metadata_list), start=1): file_name = metadata.get('file_name', 'Unknown') cleaned_context = re.sub(r'\n[ \t]+\n', '\n\n', context) cleaned_context = re.sub(r'\n\s*\n\s*\n*', '\n\n', cleaned_context.strip()) formatted_context = ( f"{'-'*80}\n" f"CONTEXT {index} | {file_name}\n" f"{'-'*80}\n" f"{cleaned_context}\n" ) formatted_contexts.append(formatted_context) result_queue.put("\n".join(formatted_contexts)) finally: query_db.close() except Exception as e: result_queue.put(f"Error querying database: {str(e)}") class CreateVectorDB: def __init__(self, database_name): self.config = get_config() self.SOURCE_DIRECTORY = self.config.docs_dir self.PERSIST_DIRECTORY = self.config.vector_db_dir / database_name @torch.inference_mode() def initialize_vector_model(self, embedding_model_name, config_data): return load_embedding_model( model_path=embedding_model_name, compute_device=config_data.Compute_Device.database_creation, use_half=config_data.database.half, is_query=False, verbose=True, ) def _create_tiledb_array(self, texts, vectors_array, metadatas): _setup_tiledb_dlls() import tiledb import tiledb.vector_search as vs from tiledb.vector_search import _tiledbvspy as vspy embedding_dim = vectors_array.shape[1] num_vectors = vectors_array.shape[0] MAX_UINT64 = 18446744073709551615 logger.info(f"Creating TileDB array: {num_vectors:,} vectors of dimension {embedding_dim}") array_uri = str(self.PERSIST_DIRECTORY / "vectors") dom = tiledb.Domain( tiledb.Dim(name="id", domain=(0, np.iinfo(np.uint64).max - 20000), tile=10000, dtype=np.uint64) ) attrs = [ tiledb.Attr(name="vector", dtype=np.dtype([("", np.float32)] * embedding_dim)), tiledb.Attr(name="text", dtype=str, var=True), tiledb.Attr(name="metadata", dtype=str, var=True), ] schema = tiledb.ArraySchema( domain=dom, attrs=attrs, sparse=True, cell_order='row-major', tile_order='row-major' ) tiledb.Array.create(array_uri, schema) num_batches = (num_vectors + TILEDB_WRITE_BATCH_SIZE - 1) // TILEDB_WRITE_BATCH_SIZE logger.info(f"Writing TileDB array in {num_batches} batch(es)") all_ids = np.empty(num_vectors, dtype=np.uint64) hash_id_mappings = [] rng = np.random.default_rng() for batch_idx in range(num_batches): start = batch_idx * TILEDB_WRITE_BATCH_SIZE end = min(start + TILEDB_WRITE_BATCH_SIZE, num_vectors) # Use numpy's vectorized generator instead of a Python list # comprehension over random.randint. The list-comprehension # approach allocated end-start Python int objects per batch # (~7+ GB total at the Caselaw scale), which triggered an # OverflowError + access violation inside random.randint on # Python 3.12. numpy's integers() runs entirely in C and # returns a uint64 array directly. batch_ids = rng.integers( low=0, high=np.iinfo(np.uint64).max, size=end - start, dtype=np.uint64, endpoint=False, ) all_ids[start:end] = batch_ids for i in range(start, end): file_hash = metadatas[i].get('hash', '') hash_id_mappings.append((str(batch_ids[i - start]), file_hash)) batch_vectors = vectors_array[start:end] batch_texts = np.array(texts[start:end], dtype=object) # _json_dumps uses orjson when available (Rust-based, ~10x faster # than stdlib json). The stdlib json.dumps loop here triggered an # OverflowError + access violation at the Caselaw scale due to # heap fragmentation from millions of small string allocations. 
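# Added note (commentary, not in the original): each TileDB cell written below
# stores the embedding as a structured value with embedding_dim anonymous
# float32 fields (dtype [("", np.float32)] * embedding_dim) alongside the chunk
# text and a JSON metadata string. The metadata dicts serialized here typically
# carry the fields produced by extract_document_metadata(), such as file_name,
# file_path, file_type, hash, creation/modification dates, and document_type,
# plus page_number for PDF-derived chunks.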
batch_metadata = np.array( [_json_dumps(metadatas[i]) for i in range(start, end)], dtype=object ) batch_structured = np.array( [tuple(vec) for vec in batch_vectors], dtype=[("", np.float32)] * embedding_dim ) with tiledb.open(array_uri, mode='w') as A: A[batch_ids] = { "vector": batch_structured, "text": batch_texts, "metadata": batch_metadata, } del batch_structured, batch_texts, batch_metadata, batch_vectors gc.collect() tiledb.consolidate(array_uri) tiledb.vacuum(array_uri) index_uri = str(self.PERSIST_DIRECTORY / "vector_index") vs.ingest( index_type="FLAT", index_uri=index_uri, input_vectors=vectors_array, external_ids=all_ids, dimensions=embedding_dim, distance_metric=vspy.DistanceMetric.COSINE ) metadata_file = self.PERSIST_DIRECTORY / "index_metadata.json" with open(metadata_file, 'w') as f: json.dump({ 'distance_metric': 'cosine', 'dimensions': embedding_dim, 'vector_type': 'float32', 'index_type': 'FLAT', 'num_vectors': num_vectors }, f) logger.info(f"FLAT index created at: {index_uri}") return hash_id_mappings def load_audio_documents(self, source_dir=None): if source_dir is None: source_dir = self.SOURCE_DIRECTORY json_paths = [f for f in source_dir.iterdir() if f.suffix.lower() == '.json'] docs = [] for json_path in json_paths: try: with open(json_path, 'r', encoding='utf-8') as json_file: data = json.loads(json_file.read()) doc = Document( page_content=data.get('page_content', ''), metadata=data.get('metadata', {}) ) docs.append(doc) except Exception as e: my_cprint(f"Error loading {json_path}: {e}", "red") return docs def clear_docs_for_db_folder(self): for item in self.SOURCE_DIRECTORY.iterdir(): if item.is_file() or item.is_symlink(): try: item.unlink() except Exception as e: logger.warning(f"Failed to delete {item}: {e}") @torch.inference_mode() def run(self): cuda_mgr = get_cuda_manager() pipeline_t0 = time.time() config_data = get_config() EMBEDDING_MODEL_NAME = config_data.EMBEDDING_MODEL_NAME chunk_size = config_data.database.chunk_size chunk_overlap = config_data.database.chunk_overlap tmp_dir = tempfile.mkdtemp(prefix="vectordb_create_") tmp_path = Path(tmp_dir) extracted_pkl = tmp_path / "extracted.pkl" chunks_pkl = tmp_path / "chunks.pkl" checkpoint_dir = tmp_path / "checkpoints" checkpoint_dir.mkdir(exist_ok=True) try: # Stage 1: Extract documents via subprocess my_cprint("Extracting documents (subprocess)...", "yellow") extract_t0 = time.time() _run_extract_with_retry(self.SOURCE_DIRECTORY, extracted_pkl) logger.info(f"Extract stage: {time.time() - extract_t0:.1f}s") with open(extracted_pkl, "rb") as f: doc_data = pickle.load(f) logger.info(f"Extracted {len(doc_data)} documents") json_docs_to_save = [] for content, metadata in doc_data: json_docs_to_save.append(Document(page_content=content, metadata=metadata)) print("Processing any audio transcripts...") audio_documents = self.load_audio_documents() if audio_documents: for doc in audio_documents: doc_data.append((doc.page_content, doc.metadata)) json_docs_to_save.append(doc) print("Processing any images...") try: from modules.process_images import choose_image_loader image_documents = choose_image_loader() if isinstance(image_documents, list) and image_documents: for doc in image_documents: content = doc.page_content if hasattr(doc, 'page_content') else str(doc) metadata = doc.metadata if hasattr(doc, 'metadata') else {} doc_data.append((content, metadata)) json_docs_to_save.append(Document(page_content=content, metadata=metadata)) except Exception as e: logger.warning(f"Image processing skipped: {e}") if not 
doc_data: my_cprint("No documents, audio transcripts, or images found to process.", "red") raise RuntimeError("No content found to ingest into the database.") # Re-write extracted.pkl with audio+image docs included with open(extracted_pkl, "wb") as f: pickle.dump(doc_data, f, protocol=pickle.HIGHEST_PROTOCOL) del doc_data gc.collect() # Stage 2: Split documents via subprocess my_cprint("Splitting documents into chunks (subprocess)...", "yellow") split_t0 = time.time() _run_split_with_retry(extracted_pkl, chunks_pkl, chunk_size, chunk_overlap, checkpoint_dir) logger.info(f"Split stage: {time.time() - split_t0:.1f}s") try: extracted_pkl.unlink() except Exception: pass with open(chunks_pkl, "rb") as f: split_output = pickle.load(f) if isinstance(split_output, dict): chunk_texts = split_output["texts"] chunks_with_meta = split_output.get("chunks", []) del split_output else: chunk_texts = split_output chunks_with_meta = [] del split_output gc.collect() logger.info(f"Split into {len(chunk_texts):,} chunks") if not chunk_texts: my_cprint("No chunks produced after splitting.", "red") return # Extract metadata dicts from chunks_with_meta, then free it all_metadatas = [] for idx in range(len(chunk_texts)): if idx < len(chunks_with_meta): _, meta = chunks_with_meta[idx] else: meta = {} all_metadatas.append(meta) del chunks_with_meta gc.collect() # Stage 3+4: Tokenize + Embed via subprocess pipeline with cuda_mgr.cuda_operation(): embeddings = self.initialize_vector_model(EMBEDDING_MODEL_NAME, config_data) my_cprint("\nComputing vectors...", "yellow") embed_t0 = time.time() try: self.PERSIST_DIRECTORY.mkdir(parents=True, exist_ok=False) my_cprint(f"Created directory: {self.PERSIST_DIRECTORY}", "green") except FileExistsError: raise FileExistsError( f"Vector database '{self.PERSIST_DIRECTORY.name}' already exists. " "Choose a different name or delete the existing DB first." ) with cuda_mgr.cuda_operation(): vectors = embeddings.embed_documents(chunk_texts) embed_elapsed = time.time() - embed_t0 my_cprint(f"Embedding computation completed in {embed_elapsed:.2f} seconds.", "cyan") del embeddings gc.collect() cuda_mgr.force_empty_cache() vectors_array = np.ascontiguousarray(vectors, dtype=np.float32) del vectors gc.collect() # Stage 5: Write TileDB array + FLAT index (IDs generated per-batch) try: hash_id_mappings = self._create_tiledb_array(chunk_texts, vectors_array, all_metadatas) except Exception as e: logger.error(f"Error creating TileDB database: {e}") traceback.print_exc() if self.PERSIST_DIRECTORY.exists(): try: shutil.rmtree(self.PERSIST_DIRECTORY) except Exception: pass raise my_cprint("Processed all chunks", "yellow") pipeline_elapsed = time.time() - pipeline_t0 my_cprint(f"Database created. Total time: {pipeline_elapsed:.2f} seconds.", "green") # Stage 6: Write SQLite metadata DB del chunk_texts, vectors_array, all_metadatas gc.collect() create_metadata_db(self.PERSIST_DIRECTORY, json_docs_to_save, hash_id_mappings) del json_docs_to_save, hash_id_mappings gc.collect() self.clear_docs_for_db_folder() except Exception: traceback.print_exc() raise finally: try: shutil.rmtree(tmp_dir, ignore_errors=True) except Exception: pass _thread_local = threading.local() def get_query_db(database_name: str) -> "QueryVectorDB": """Return a thread-local QueryVectorDB instance, creating it if needed. Each thread gets its own cache of database name → QueryVectorDB, so concurrent queries against different databases don't thrash singleton state. 
""" if not hasattr(_thread_local, "query_db_cache"): _thread_local.query_db_cache = {} if database_name in _thread_local.query_db_cache: return _thread_local.query_db_cache[database_name] instance = QueryVectorDB(database_name) _thread_local.query_db_cache[database_name] = instance return instance def clear_query_cache(database_name: Optional[str] = None) -> None: """Clear the thread-local QueryVectorDB cache for the current thread.""" if not hasattr(_thread_local, "query_db_cache"): return if database_name: if database_name in _thread_local.query_db_cache: _thread_local.query_db_cache[database_name].close() del _thread_local.query_db_cache[database_name] else: for db_instance in _thread_local.query_db_cache.values(): db_instance.close() _thread_local.query_db_cache.clear() class QueryVectorDB: def __init__(self, selected_database: str): self.config = self.load_configuration() if not selected_database: raise ValueError("No vector database selected.") if selected_database not in self.config.created_databases: raise ValueError(f'Database "{selected_database}" not found in config.') db_path = self.config.vector_db_dir / selected_database if not db_path.exists(): raise FileNotFoundError(f'Database folder "{selected_database}" is missing on disk.') self.selected_database = selected_database self.db_path = db_path self.index_uri = str(db_path / "vector_index") self.array_uri = str(db_path / "vectors") self.embeddings = None self.index = None self.model_name = None self._debug_id = id(self) self.distance_metric = "cosine" self.index_type = "FLAT" try: metadata_file = db_path / "index_metadata.json" if metadata_file.exists(): with open(metadata_file, 'r') as f: metadata = json.load(f) self.distance_metric = metadata.get('distance_metric', 'cosine') self.index_type = metadata.get('index_type', 'FLAT') except Exception as e: logger.warning(f"Could not load index metadata, using defaults: {e}") def load_configuration(self): try: return get_config() except Exception as e: logger.error(f"Error loading configuration: {e}") raise @torch.inference_mode() def initialize_vector_model(self): model_path = self.config.created_databases[self.selected_database].model self.model_name = os.path.basename(model_path) return load_embedding_model( model_path=model_path, compute_device=self.config.Compute_Device.database_query, use_half=self.config.database.half, is_query=True, ) @torch.inference_mode() def search(self, query, k: Optional[int] = None, score_threshold: Optional[float] = None): _setup_tiledb_dlls() import tiledb import tiledb.vector_search as vs cuda_mgr = get_cuda_manager() if not self.embeddings: logger.info(f"Initializing embedding model for database {self.selected_database}") self.embeddings = self.initialize_vector_model() if not self.index: logger.info(f"Loading TileDB FLAT index for {self.selected_database}") self.index = vs.FlatIndex(uri=self.index_uri) self.config = self.load_configuration() k = k if k is not None else self.config.database.contexts score_threshold = score_threshold if score_threshold is not None else self.config.database.similarity with cuda_mgr.cuda_operation(): query_vector = self.embeddings.embed_query(query) query_vector_np = np.array([query_vector], dtype=np.float32) logger.info(f"Querying TileDB index: {self.index_uri}") result_distances, result_ids = self.index.query(query_vector_np, k=k) if len(result_distances) == 0 or len(result_distances[0]) == 0: logger.warning("No results returned from vector search") return [], [] distances = result_distances[0] ids = result_ids[0] 
if len(ids) > 0 and ids[0] == MAX_UINT64_SENTINEL: logger.warning("TileDB returned sentinel value - no matches found in index") return [], [] valid_mask = ids != MAX_UINT64_SENTINEL distances = distances[valid_mask] ids = ids[valid_mask] if len(ids) == 0: logger.warning("All results were sentinel values - no valid matches") return [], [] logger.info(f"Raw distances - min: {distances.min():.4f}, max: {distances.max():.4f}, mean: {distances.mean():.4f}") if self.distance_metric == "cosine": similarities = np.clip(1.0 - distances, 0.0, 1.0) else: logger.warning(f"Unknown distance metric '{self.distance_metric}', assuming cosine") similarities = np.clip(1.0 - distances, 0.0, 1.0) logger.info(f"Similarities - min: {similarities.min():.4f}, max: {similarities.max():.4f}") logger.info(f"Score threshold: {score_threshold}, Results before filtering: {len(similarities)}") results = [] valid_indices = similarities >= score_threshold num_passing = np.sum(valid_indices) logger.info(f"Results passing threshold: {num_passing}") if not np.any(valid_indices): logger.warning(f"No results passed the similarity threshold of {score_threshold}") return [], [] filtered_distances = distances[valid_indices] filtered_ids = ids[valid_indices] filtered_similarities = similarities[valid_indices] with tiledb.open(self.array_uri, mode='r') as A: data = A.multi_index[filtered_ids.astype(np.uint64)] returned_ids = data['id'] texts_raw = data['text'] metadatas_raw = data['metadata'] id_to_idx = {int(rid): idx for idx, rid in enumerate(returned_ids)} for distance, vec_id, similarity in zip(filtered_distances, filtered_ids, filtered_similarities): try: idx = id_to_idx.get(int(vec_id)) if idx is None: logger.warning(f"Vector ID {vec_id} not found in TileDB result; skipping") continue text_raw = texts_raw[idx] if isinstance(text_raw, np.ndarray): text = text_raw.item() if text_raw.size == 1 else str(text_raw[0]) else: text = str(text_raw) metadata_raw = metadatas_raw[idx] if isinstance(metadata_raw, np.ndarray): metadata_str = metadata_raw.item() if metadata_raw.size == 1 else str(metadata_raw[0]) else: metadata_str = str(metadata_raw) metadata = json.loads(metadata_str) metadata['similarity_score'] = float(similarity) metadata['distance'] = float(distance) results.append((text, metadata)) except json.JSONDecodeError as je: logger.warning(f"Failed to parse JSON for vector ID {vec_id}: {je}") continue except Exception as e: logger.warning(f"Failed to retrieve data for vector ID {vec_id}: {e}") continue search_term = self.config.database.search_term.lower() if search_term: filtered_results = [ (text, metadata) for text, metadata in results if search_term in text.lower() ] else: filtered_results = results document_types = self.config.database.document_types if document_types: filtered_results = [ (text, metadata) for text, metadata in filtered_results if metadata.get('document_type') == document_types ] contexts = [text for text, _ in filtered_results] metadata_list = [metadata for _, metadata in filtered_results] logger.info(f"Final results returned: {len(contexts)}") return contexts, metadata_list def cleanup(self): if self.embeddings: del self.embeddings self.embeddings = None if self.index: del self.index self.index = None get_cuda_manager().safe_empty_cache() gc.collect() def close(self): self.cleanup() ================================================ FILE: db/document_processor.py ================================================ import os import csv import logging import warnings import datetime import hashlib import re 
from pathlib import Path from dataclasses import dataclass, field from typing import List, Tuple, Optional from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed import fitz from bs4 import BeautifulSoup from core.utilities import normalize_text from core.constants import SUPPORTED_EXTENSIONS, PIPELINE_PRESETS warnings.filterwarnings("ignore", category=FutureWarning) warnings.filterwarnings("ignore", category=UserWarning) THREADS_PER_PROCESS = 4 def _get_ingest_params(): try: from core.config import get_config preset_name = get_config().database.pipeline_preset except Exception: preset_name = "normal" preset = PIPELINE_PRESETS.get(preset_name, PIPELINE_PRESETS["normal"]) return preset["ingest_threads"], preset["ingest_processes"] logger = logging.getLogger(__name__) @dataclass class Document: page_content: str = "" metadata: dict = field(default_factory=dict) def compute_content_hash(content: str) -> str: return hashlib.sha256(content.encode('utf-8')).hexdigest() def compute_file_hash(file_path): hash_sha256 = hashlib.sha256() with open(file_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_sha256.update(chunk) return hash_sha256.hexdigest() def extract_document_metadata(file_path, content_hash=None): file_path = os.path.realpath(file_path) file_name = os.path.basename(file_path) file_type = os.path.splitext(file_path)[1] creation_date = datetime.datetime.fromtimestamp(os.path.getctime(file_path)).isoformat() modification_date = datetime.datetime.fromtimestamp(os.path.getmtime(file_path)).isoformat() file_hash = content_hash if content_hash else compute_file_hash(file_path) return { "file_path": file_path, "file_type": file_type, "file_name": file_name, "creation_date": creation_date, "modification_date": modification_date, "hash": file_hash, "document_type": "document", } def _load_pdf(file_path: Path) -> Optional[str]: full_content = [] with fitz.open(str(file_path)) as doc: for page in doc: text = page.get_text() if text.strip(): full_content.append(f"[[page{page.number + 1}]]{text}") return "".join(full_content) if full_content else None def _load_docx(file_path: Path) -> Optional[str]: import docx2txt text = docx2txt.process(str(file_path)) return text if text and text.strip() else None def _load_txt(file_path: Path) -> Optional[str]: encodings = ["utf-8", "utf-8-sig", "latin-1", "cp1252"] for enc in encodings: try: with open(file_path, "r", encoding=enc) as f: text = f.read() return text if text and text.strip() else None except UnicodeDecodeError: continue return None def _load_csv(file_path: Path) -> Optional[str]: rows = [] encodings = ["utf-8", "utf-8-sig", "latin-1", "cp1252"] for enc in encodings: try: with open(file_path, "r", encoding=enc, newline="") as f: reader = csv.reader(f) for row in reader: rows.append(" ".join(row)) break except UnicodeDecodeError: continue return "\n".join(rows) if rows else None def _load_html(file_path: Path) -> Optional[str]: encodings = ["utf-8", "utf-8-sig", "latin-1", "cp1252"] for enc in encodings: try: with open(file_path, "r", encoding=enc) as f: soup = BeautifulSoup(f, "lxml") text = soup.get_text(separator=" ") return text if text and text.strip() else None except UnicodeDecodeError: continue return None def _load_eml(file_path: Path) -> Optional[str]: import email from email import policy with open(file_path, "rb") as f: msg = email.message_from_binary_file(f, policy=policy.default) parts = [] subject = msg.get("Subject", "") if subject: parts.append(f"Subject: {subject}") if 
msg.is_multipart(): for part in msg.walk(): content_type = part.get_content_type() if content_type == "text/plain": payload = part.get_content() if isinstance(payload, str) and payload.strip(): parts.append(payload) elif content_type == "text/html": payload = part.get_content() if isinstance(payload, str): soup = BeautifulSoup(payload, "lxml") text = soup.get_text(separator=" ") if text.strip(): parts.append(text) else: payload = msg.get_content() if isinstance(payload, str) and payload.strip(): parts.append(payload) return "\n".join(parts) if parts else None def _load_msg(file_path: Path) -> Optional[str]: import extract_msg msg = extract_msg.Message(str(file_path)) parts = [] if msg.subject: parts.append(f"Subject: {msg.subject}") if msg.body: parts.append(msg.body) msg.close() return "\n".join(parts) if parts else None def _load_xls(file_path: Path) -> Optional[str]: import xlrd workbook = xlrd.open_workbook(str(file_path)) parts = [] for sheet in workbook.sheets(): for row_idx in range(sheet.nrows): row_values = [] for col_idx in range(sheet.ncols): cell = sheet.cell(row_idx, col_idx) if cell.value is not None and str(cell.value).strip(): row_values.append(str(cell.value)) if row_values: parts.append(" ".join(row_values)) return "\n".join(parts) if parts else None def _load_xlsx(file_path: Path) -> Optional[str]: from openpyxl import load_workbook wb = load_workbook(str(file_path), data_only=True, read_only=True) parts = [] for sheet in wb.sheetnames: ws = wb[sheet] for row in ws.iter_rows(): row_values = [] for cell in row: if cell.value is not None and str(cell.value).strip(): row_values.append(str(cell.value)) if row_values: parts.append(" ".join(row_values)) wb.close() return "\n".join(parts) if parts else None def _load_rtf(file_path: Path) -> Optional[str]: from striprtf.striprtf import rtf_to_text encodings = ["utf-8", "utf-8-sig", "latin-1", "cp1252"] for enc in encodings: try: with open(file_path, "r", encoding=enc) as f: rtf_content = f.read() text = rtf_to_text(rtf_content) return text if text and text.strip() else None except UnicodeDecodeError: continue return None def _load_md(file_path: Path) -> Optional[str]: encodings = ["utf-8", "utf-8-sig", "latin-1", "cp1252"] for enc in encodings: try: with open(file_path, "r", encoding=enc) as f: text = f.read() return text if text and text.strip() else None except UnicodeDecodeError: continue return None LOADER_MAP = { ".pdf": _load_pdf, ".docx": _load_docx, ".txt": _load_txt, ".csv": _load_csv, ".html": _load_html, ".htm": _load_html, ".eml": _load_eml, ".msg": _load_msg, ".xls": _load_xls, ".xlsx": _load_xlsx, ".xlsm": _load_xlsx, ".rtf": _load_rtf, ".md": _load_md, } def load_single_document(file_path: Path) -> Optional[Document]: file_extension = file_path.suffix.lower() loader_fn = LOADER_MAP.get(file_extension) if not loader_fn: print(f"\033[91mFailed---> {file_path.name} (extension: {file_extension})\033[0m") logger.error(f"Unsupported file type: {file_path.name} (extension: {file_extension})") return None try: content = loader_fn(file_path) if not content: print(f"\033[91mFailed---> {file_path.name} (No content extracted)\033[0m") logger.error(f"No content extracted: {file_path.name}") return None content_hash = compute_content_hash(content) metadata = extract_document_metadata(file_path, content_hash) print(f"Loaded---> {file_path.name}") return Document(page_content=content, metadata=metadata) except (OSError, UnicodeDecodeError) as e: print(f"\033[91mFailed---> {file_path.name} (Access/encoding error)\033[0m") 
logger.error(f"File access/encoding error - File: {file_path.name} - Error: {str(e)}") return None except Exception as e: print(f"\033[91mFailed---> {file_path.name} (Unexpected error)\033[0m") logger.error(f"Unexpected error processing file: {file_path.name} - Error: {type(e).__name__}: {str(e)}") logging.exception("Full traceback:") return None def _extraction_worker_batch(file_paths): results = [] def _process_one(file_path): return load_single_document(file_path) n_threads = min(THREADS_PER_PROCESS, len(file_paths)) with ThreadPoolExecutor(n_threads) as pool: futures = {pool.submit(_process_one, p): p for p in file_paths} for future in as_completed(futures): try: doc = future.result() if doc is not None: results.append((doc.page_content, doc.metadata)) except Exception as e: path = futures[future] logger.error(f"Error processing document {path}: {e}") return results def load_documents(source_dir: Path) -> list: valid_extensions = set(SUPPORTED_EXTENSIONS) doc_paths = [f for f in source_dir.iterdir() if f.suffix.lower() in valid_extensions] docs = [] if not doc_paths: return docs ingest_threads, ingest_processes = _get_ingest_params() if len(doc_paths) <= ingest_processes: n_workers = min(ingest_threads, max(len(doc_paths), 1)) executor = None try: executor = ThreadPoolExecutor(n_workers) futures = [executor.submit(load_single_document, path) for path in doc_paths] for future in as_completed(futures): try: result = future.result() if result is not None: docs.append(result) except Exception as e: logger.error(f"Error processing document: {e}") except Exception as e: logger.error(f"Error in document loading executor: {e}") raise finally: if executor: executor.shutdown(wait=True, cancel_futures=True) else: n_procs = min(ingest_processes, len(doc_paths)) logger.info(f"Loading {len(doc_paths)} documents with {n_procs} processes \u00b7 {THREADS_PER_PROCESS} threads each") chunks = [[] for _ in range(n_procs)] for i, chunk in enumerate(doc_paths): chunks[i % n_procs].append(chunk) try: with ProcessPoolExecutor(n_procs) as executor: futures = [executor.submit(_extraction_worker_batch, chunk) for chunk in chunks] for future in as_completed(futures): try: batch_results = future.result() for content, metadata in batch_results: docs.append(Document(page_content=content, metadata=metadata)) except Exception as e: logger.error(f"Error in extraction worker: {e}") except Exception as e: logger.error(f"Error in multi-process document loading: {e}") raise return docs class FixedSizeTextSplitter: def __init__(self, chunk_size: int, chunk_overlap: int = 0): self.chunk_size = chunk_size self.chunk_overlap = chunk_overlap def split_documents(self, docs: List[Document]) -> List[Document]: chunks: List[Document] = [] step = self.chunk_size - self.chunk_overlap if step <= 0: step = 1 for doc in docs: text = doc.page_content if text is None: logger.warning("Skipping document with None page_content") continue if isinstance(text, (list, tuple)): text = " ".join(str(item) for item in text if item) logger.warning("Flattened list/tuple page_content to string") if not isinstance(text, str): text = str(text) text = text.strip() if not text: logger.warning("Skipping document with empty page_content") continue for start in range(0, len(text), step): piece = text[start:start + self.chunk_size].strip() if not piece: continue metadata = doc.metadata if doc.metadata else {} chunks.append(Document(page_content=piece, metadata=dict(metadata))) return chunks def add_pymupdf_page_metadata(doc: Document, chunk_size: int = 1200, 
chunk_overlap: int = 600) -> List[Document]: def split_text(text: str, chunk_size: int, chunk_overlap: int) -> List[Tuple[str, int]]: if text is None: return [] if isinstance(text, (list, tuple)): text = " ".join(str(item) for item in text if item) if not isinstance(text, str): text = str(text) page_markers = [] offset = 0 for m in re.finditer(r'\[\[page(\d+)\]\]', text): marker_len = len(m.group(0)) page_markers.append((m.start() - offset, int(m.group(1)))) offset += marker_len clean_text = re.sub(r'\[\[page\d+\]\]', '', text) chunks = [] start = 0 while start < len(clean_text): end = start + chunk_size if end > len(clean_text): end = len(clean_text) chunk = clean_text[start:end].strip() page_num = None for marker_pos, page in reversed(page_markers): if marker_pos <= start: page_num = page break if chunk and page_num is not None: chunks.append((chunk, page_num)) elif chunk and page_num is None: chunks.append((chunk, 1)) start += chunk_size - chunk_overlap return chunks text = doc.page_content if text is None: logger.warning("Skipping PDF document with None page_content") return [] chunks = split_text(text, chunk_size, chunk_overlap) if not chunks: logger.warning("No chunks created from PDF document") return [] new_docs = [] for chunk, page_num in chunks: if not chunk or not chunk.strip(): continue new_metadata = doc.metadata.copy() if doc.metadata else {} new_metadata['page_number'] = page_num new_doc = Document(page_content=chunk, metadata=new_metadata) new_docs.append(new_doc) return new_docs def split_documents(documents=None, text_documents_pdf=None, chunk_size=None, chunk_overlap=None): try: print("\nSplitting documents into chunks.") if chunk_size is None or chunk_overlap is None: from core.config import get_config config = get_config() chunk_size = chunk_size if chunk_size is not None else config.database.chunk_size chunk_overlap = chunk_overlap if chunk_overlap is not None else config.database.chunk_overlap text_splitter = FixedSizeTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) texts = [] if documents: texts = text_splitter.split_documents(documents) if text_documents_pdf: processed_pdf_docs = [] for doc in text_documents_pdf: chunked_docs = add_pymupdf_page_metadata( doc, chunk_size=chunk_size, chunk_overlap=chunk_overlap, ) processed_pdf_docs.extend(chunked_docs) texts.extend(processed_pdf_docs) normalized = [] for doc in texts: cleaned = normalize_text(doc.page_content, preserve_whitespace=True) if cleaned is None: logger.warning(f"Dropping chunk with empty content after normalization " f"(source: {doc.metadata.get('file_name', 'unknown')})") continue doc.page_content = cleaned normalized.append(doc) texts = normalized print(f"Total chunks after splitting and normalization: {len(texts)}") return texts except Exception as e: logging.exception("Error during document splitting") logger.error(f"Error type: {type(e)}") raise ================================================ FILE: db/embedding_models.py ================================================ import gc import logging import os import pickle import subprocess import sys import tempfile import time import unicodedata from pathlib import Path import numpy as np import torch from sentence_transformers import SentenceTransformer from sentence_transformers.util import batch_to_device from core.config import get_config from core.utilities import ( supports_flash_attention, get_embedding_dtype_and_batch, get_model_native_precision, ) logger = logging.getLogger(__name__) 
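# Added overview comment (not in the original source): this module embeds
# documents in two phases. DirectEmbeddingModel.embed_documents() pickles the
# chunk texts and runs db/stage_tokenize.py as a retried, checkpointed
# subprocess (_run_tokenize_with_retry), then performs the forward pass
# in-process over the pre-padded batches and restores the original chunk order
# through each batch's seq_indices. embed_query() skips the subprocess path and
# encodes a single prompted query directly.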
os.environ.setdefault("TOKENIZERS_PARALLELISM", "false") PROJECT_ROOT = Path(__file__).resolve().parent.parent STAGE_TOKENIZE_PATH = PROJECT_ROOT / "db" / "stage_tokenize.py" from core.constants import PIPELINE_PRESETS TOKENIZE_BATCH_SIZE = 100 WORKER_BATCH_SIZE = 60000 MAX_WORKER_RETRIES = 3 TOKENIZE_MAX_RETRIES = 5 TOKENIZE_CHECKPOINT_INTERVAL = 5 def _get_tokenize_parallel_workers(): try: preset_name = get_config().database.pipeline_preset except Exception: preset_name = "normal" preset = PIPELINE_PRESETS.get(preset_name, PIPELINE_PRESETS["normal"]) return preset["tokenize_max_parallel_workers"] def _get_model_family(model_path: str) -> str: model_path_lower = model_path.lower() if "qwen" in model_path_lower or "qwen3-embedding" in model_path_lower: return "qwen" if "bge" in model_path_lower: return "bge" return "generic" def _get_prompt_for_family(family: str, is_query: bool = False) -> str: if family == "qwen" and is_query: return "Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery:" if family == "bge" and is_query: return "Represent this sentence for searching relevant passages: " return "" def _normalize_text(text: str) -> str: text = unicodedata.normalize("NFKC", text) cleaned = [] for char in text: if char in "\n\t\r": cleaned.append(" ") elif ord(char) < 32: continue elif ord(char) == 127: continue elif ord(char) > 65535: continue else: cleaned.append(char) result = "".join(cleaned) result = " ".join(result.split()) return result.strip() or " " ENCODE_BATCH_SIZE_BY_MODEL = { "bge-small-en-v1.5": 100, "bge-base-en-v1.5": 80, "bge-large-en-v1.5": 50, "Qwen3-Embedding-0.6B": 10, "Qwen3-Embedding-4B": 5, } def _get_encode_batch_size(device: str, model_path: str = "") -> int: model_name = os.path.basename(model_path).lower() if model_path else "" for key, batch_size in ENCODE_BATCH_SIZE_BY_MODEL.items(): if key.lower() in model_name: logger.info(f" ENCODE_BATCH_SIZE: {batch_size} (model-aware default for {key})") return batch_size if device.startswith("cuda"): try: gpu_props = torch.cuda.get_device_properties(0) vram_gb = gpu_props.total_memory / (1024 ** 3) batch_size = max(10, min(256, int(vram_gb * 4))) logger.info(f" ENCODE_BATCH_SIZE: {batch_size} (VRAM fallback, " f"GPU: {gpu_props.name}, {vram_gb:.1f} GB)") return batch_size except Exception as e: logger.warning(f" Could not query GPU: {e}, defaulting to 10") return 10 else: logger.info(f" ENCODE_BATCH_SIZE: 10 (CPU mode)") return 10 def _run_subprocess_stage(name, cmd, cwd, timeout=3600): logger.info(f"Starting subprocess stage: {name}") process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1, cwd=str(cwd), env={**os.environ, "PYTHONUNBUFFERED": "1"}, ) output_lines = [] for line in process.stdout: line = line.rstrip("\n") if line.strip(): logger.info(f" [{name}] {line}") output_lines.append(line) process.wait(timeout=timeout) if process.returncode != 0: for line in output_lines[-10:]: logger.error(f" {line}") return process.returncode, output_lines def _run_tokenize_with_retry( python_exe, model_path, texts_pkl, tokenized_pkl, checkpoint_dir, max_seq_length, encode_batch_size, use_fast=True, length_sort=True, ): all_batches = [] all_errors = [] total_real_tokens = 0 total_pad_tokens = 0 current_start_index = 0 total_texts = None attempt = 0 while attempt < TOKENIZE_MAX_RETRIES: attempt += 1 attempt_output = checkpoint_dir / f"tokenized_attempt_{attempt}.pkl" logger.info(f"Tokenize attempt {attempt}/{TOKENIZE_MAX_RETRIES} " f"(starting 
from text index {current_start_index})") tokenize_cmd = [ python_exe, str(STAGE_TOKENIZE_PATH), str(texts_pkl), str(attempt_output), model_path, str(TOKENIZE_BATCH_SIZE), str(max_seq_length), "--checkpoint-dir", str(checkpoint_dir), "--checkpoint-interval", str(TOKENIZE_CHECKPOINT_INTERVAL), "--start-text-index", str(current_start_index), "--worker-batch-size", str(WORKER_BATCH_SIZE), "--max-worker-retries", str(MAX_WORKER_RETRIES), "--max-parallel-workers", str(_get_tokenize_parallel_workers()), "--encode-batch-size", str(encode_batch_size), ] if use_fast: tokenize_cmd.append("--use-fast") else: tokenize_cmd.append("--no-use-fast") if length_sort: tokenize_cmd.append("--length-sort") else: tokenize_cmd.append("--no-length-sort") exit_code, _ = _run_subprocess_stage( f"Tokenize (attempt {attempt})", tokenize_cmd, cwd=PROJECT_ROOT) attempt_data = None checkpoint_path = checkpoint_dir / "tokenize_checkpoint.pkl" if exit_code == 0 and attempt_output.exists(): logger.info(f"Attempt {attempt} completed successfully") with open(attempt_output, "rb") as f: attempt_data = pickle.load(f) try: attempt_output.unlink() except Exception: pass elif checkpoint_path.exists(): logger.warning(f"Attempt {attempt} crashed (exit code {exit_code}), " f"loading checkpoint...") try: with open(checkpoint_path, "rb") as f: attempt_data = pickle.load(f) try: checkpoint_path.unlink() except Exception: pass except Exception as e: logger.error(f"Failed to read checkpoint: {e}") if attempt_output.exists(): try: attempt_output.unlink() except Exception: pass else: logger.error(f"Attempt {attempt} crashed with no recoverable data") if attempt_data is not None: if total_texts is None: total_texts = attempt_data.get("total_texts", 0) new_batches = attempt_data.get("batches", []) new_errors = attempt_data.get("errors", []) texts_processed = attempt_data.get("texts_processed", 0) all_batches.extend(new_batches) all_errors.extend(new_errors) ps = attempt_data.get("padding_stats", {}) total_real_tokens += ps.get("total_real_tokens", 0) total_pad_tokens += ps.get("total_pad_tokens", 0) if "next_text_index" in attempt_data: next_index = attempt_data["next_text_index"] else: next_index = attempt_data.get("start_text_index", current_start_index) + texts_processed current_start_index = next_index if total_texts is not None and current_start_index >= total_texts: break if exit_code == 0: break else: logger.warning(f"No data recovered from attempt {attempt}") if attempt >= TOKENIZE_MAX_RETRIES: logger.error(f"Exhausted all {TOKENIZE_MAX_RETRIES} retries!") break logger.info("Waiting 3 seconds before retry...") time.sleep(3) gc.collect() total_tokens = total_real_tokens + total_pad_tokens efficiency_pct = (total_real_tokens / total_tokens * 100) if total_tokens > 0 else 100.0 logger.info(f"Tokenization complete: {len(all_batches)} batches, " f"{len(all_errors)} errors, {efficiency_pct:.1f}% padding efficiency") return { "total_texts": total_texts or 0, "batches": all_batches, "errors": all_errors, "padding_stats": { "total_real_tokens": total_real_tokens, "total_pad_tokens": total_pad_tokens, "efficiency_pct": efficiency_pct, }, } class DirectEmbeddingModel: def __init__( self, model_path: str, device: str = "cpu", dtype: torch.dtype = None, batch_size: int = 8, max_seq_length: int = 512, prompt: str = "", ): self.model_path = model_path self.device = device self.dtype = dtype self.batch_size = batch_size self.max_seq_length = max_seq_length self.prompt = prompt self.model = None self.tokenizer = None self._initialize_model() def 
_initialize_model(self): family = _get_model_family(self.model_path) model_kwargs = { "torch_dtype": self.dtype if self.dtype else torch.float32, } is_cuda = self.device.lower().startswith("cuda") if family == "qwen": if is_cuda and supports_flash_attention(): model_kwargs["attn_implementation"] = "flash_attention_2" else: model_kwargs["attn_implementation"] = "sdpa" else: model_kwargs["attn_implementation"] = "sdpa" tokenizer_kwargs = { "model_max_length": self.max_seq_length, } if family == "qwen": tokenizer_kwargs["padding_side"] = "left" self.model = SentenceTransformer( model_name_or_path=self.model_path, device=self.device, trust_remote_code=True, model_kwargs=model_kwargs, tokenizer_kwargs=tokenizer_kwargs, ) self.model.max_seq_length = self.max_seq_length if hasattr(self.model, "tokenizer") and self.model.tokenizer is not None: self.tokenizer = self.model.tokenizer if self.tokenizer.pad_token is None: if self.tokenizer.eos_token is not None: self.tokenizer.pad_token = self.tokenizer.eos_token self.tokenizer.pad_token_id = self.tokenizer.eos_token_id else: self.tokenizer.add_special_tokens({"pad_token": "[PAD]"}) self.model.to(self.device) def _safe_encode(self, texts: list) -> np.ndarray: bs = self.batch_size if self.batch_size else len(texts) embeddings = self.model.encode( texts, batch_size=bs, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False, ) if isinstance(embeddings, torch.Tensor): return embeddings.float().cpu().numpy() return np.asarray(embeddings, dtype=np.float32) @torch.inference_mode() def embed_documents(self, texts: list) -> np.ndarray: if not texts: return np.array([], dtype=np.float32) total = len(texts) logger.info(f"Embedding {total} texts via subprocess tokenization pipeline") encode_batch_size = _get_encode_batch_size(self.device, self.model_path) tmp_dir = tempfile.mkdtemp(prefix="vectordb_embed_") tmp_path = Path(tmp_dir) texts_pkl = tmp_path / "texts.pkl" tokenized_pkl = tmp_path / "tokenized.pkl" checkpoint_dir = tmp_path / "checkpoints" checkpoint_dir.mkdir(exist_ok=True) try: logger.info(f"Writing {total} texts to temp pickle...") with open(texts_pkl, "wb") as f: pickle.dump(texts, f, protocol=pickle.HIGHEST_PROTOCOL) tokenized_data = _run_tokenize_with_retry( python_exe=sys.executable, model_path=self.model_path, texts_pkl=texts_pkl, tokenized_pkl=tokenized_pkl, checkpoint_dir=checkpoint_dir, max_seq_length=self.max_seq_length, encode_batch_size=encode_batch_size, use_fast=True, length_sort=True, ) batches = tokenized_data["batches"] errors = tokenized_data["errors"] if errors: logger.warning(f"{len(errors)} tokenization errors occurred") logger.info(f"Running forward pass on {len(batches)} pre-padded batches...") self.model.eval() all_embeddings = [] all_seq_indices = [] batch_count = 0 for batch_info in batches: batch_count += 1 features_raw = batch_info["features"] if "seq_indices" not in batch_info: raise ValueError( f"Batch {batch_count} missing required 'seq_indices' field. " f"The tokenize stage must emit seq_indices for every batch so " f"the embed stage can restore original chunk order." 
) seq_indices = batch_info["seq_indices"] features = {} for key, padded in features_raw.items(): if isinstance(padded, np.ndarray): features[key] = torch.from_numpy(padded).long() else: features[key] = torch.tensor(padded, dtype=torch.long) features = batch_to_device(features, self.model.device) with torch.no_grad(): out_features = self.model.forward(features) embeddings = out_features["sentence_embedding"].detach() embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) embeddings = embeddings.float().cpu().numpy() all_embeddings.append(embeddings) all_seq_indices.append(seq_indices) del out_features del features if batch_count % 50 == 0: gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() if batch_count % 500 == 0: logger.info(f" Forward pass: {batch_count}/{len(batches)} batches") logger.info(f"Forward pass complete: {batch_count} batches processed") if not all_embeddings: return np.array([], dtype=np.float32) sorted_embeddings = np.concatenate(all_embeddings, axis=0) indices = np.concatenate(all_seq_indices, axis=0) result = np.empty_like(sorted_embeddings) result[indices] = sorted_embeddings logger.info(f"Unsorting embeddings: restored original order via seq_indices") return result finally: import shutil try: shutil.rmtree(tmp_dir, ignore_errors=True) except Exception: pass def embed_query(self, text: str) -> list: if self.prompt: text = self.prompt + text if not isinstance(text, str): text = str(text) text = _normalize_text(text) embeddings = self._safe_encode([text]) return embeddings[0].tolist() if len(embeddings) else [] def __del__(self): if self.model is not None: del self.model self.model = None if self.tokenizer is not None: del self.tokenizer self.tokenizer = None def create_embedding_model( model_path: str, compute_device: str = "cpu", dtype: torch.dtype = None, batch_size: int = None, is_query: bool = False, ) -> DirectEmbeddingModel: config = get_config() model_name = os.path.basename(model_path) family = _get_model_family(model_path) model_native_precision = get_model_native_precision(model_name) use_half = config.database.half _dtype, _batch_size = get_embedding_dtype_and_batch( compute_device=compute_device, use_half=use_half, model_native_precision=model_native_precision, model_name=model_name, is_query=is_query, ) final_dtype = dtype if dtype is not None else _dtype final_batch_size = batch_size if batch_size is not None else _batch_size if family == "qwen": max_seq_length = 8192 else: max_seq_length = 512 prompt = _get_prompt_for_family(family, is_query) return DirectEmbeddingModel( model_path=model_path, device=compute_device, dtype=final_dtype, batch_size=final_batch_size, max_seq_length=max_seq_length, prompt=prompt, ) def load_embedding_model( model_path: str, compute_device: str, use_half: bool, is_query: bool = False, verbose: bool = False, ) -> DirectEmbeddingModel: model_name = os.path.basename(model_path) model_native_precision = get_model_native_precision(model_name) dtype, batch_size = get_embedding_dtype_and_batch( compute_device=compute_device, use_half=use_half, model_native_precision=model_native_precision, model_name=model_name, is_query=is_query, ) model = create_embedding_model( model_path=model_path, compute_device=compute_device, dtype=dtype, batch_size=batch_size, is_query=is_query, ) if verbose: from core.utilities import my_cprint precision = "float32" if dtype is None else str(dtype).split(".")[-1] my_cprint(f"{model_name} ({precision}) loaded using a batch size of {batch_size}.", "green") return model 
================================================ FILE: db/process_manager.py ================================================ import logging import threading import multiprocessing from typing import List logger = logging.getLogger(__name__) class ProcessManager: """Singleton that tracks every multiprocessing.Process spawned by the app and provides a graceful, time-bounded cleanup path on shutdown. Cleanup escalates: terminate (with timeout) -> kill (with timeout) -> close. Without this, an unclean GUI exit can leak Python child processes that hold open TileDB arrays, model weights, or CUDA contexts. """ _instance = None _lock = threading.Lock() def __new__(cls): if cls._instance is None: with cls._lock: if cls._instance is None: cls._instance = super().__new__(cls) cls._instance._initialized = False return cls._instance def __init__(self): if self._initialized: return self.processes: List[multiprocessing.Process] = [] self.lock = threading.Lock() self._initialized = True def register(self, process: multiprocessing.Process): with self.lock: self.processes.append(process) logger.debug(f"Registered process {process.pid if process.pid else 'pending'}") def unregister(self, process: multiprocessing.Process): with self.lock: if process in self.processes: self.processes.remove(process) logger.debug(f"Unregistered process {process.pid if process.pid else 'unknown'}") def cleanup_one(self, process: multiprocessing.Process, timeout: float = 5.0) -> bool: if not process or not process.is_alive(): return True try: logger.debug(f"Terminating process {process.pid}") process.terminate() process.join(timeout=timeout) if process.is_alive(): logger.warning(f"Process {process.pid} did not terminate, killing") process.kill() process.join(timeout=1.0) if hasattr(process, 'close'): try: process.close() except Exception: pass self.unregister(process) return not process.is_alive() except Exception as e: logger.error(f"Error cleaning up process: {e}") return False def cleanup_all(self, timeout: float = 5.0): with self.lock: processes_copy = self.processes[:] for process in processes_copy: self.cleanup_one(process, timeout) with self.lock: remaining = len(self.processes) if remaining > 0: logger.warning(f"{remaining} processes could not be cleaned up") self.processes.clear() def get_active_count(self) -> int: with self.lock: return sum(1 for p in self.processes if p.is_alive()) _manager_instance = None def get_process_manager() -> ProcessManager: global _manager_instance if _manager_instance is None: _manager_instance = ProcessManager() return _manager_instance ================================================ FILE: db/sqlite_operations.py ================================================ import sqlite3 from pathlib import Path def create_metadata_db(persist_directory, documents, hash_id_mappings): if not persist_directory.exists(): persist_directory.mkdir(parents=True, exist_ok=True) sqlite_db_path = persist_directory / "metadata.db" conn = sqlite3.connect(sqlite_db_path) cursor = conn.cursor() cursor.execute(''' CREATE TABLE IF NOT EXISTS document_metadata ( id INTEGER PRIMARY KEY AUTOINCREMENT, file_name TEXT, hash TEXT, file_path TEXT, page_content TEXT ) ''') cursor.execute(''' CREATE TABLE IF NOT EXISTS hash_chunk_ids ( tiledb_id TEXT PRIMARY KEY, hash TEXT ) ''') try: doc_rows = [ ( doc.metadata.get("file_name", ""), doc.metadata.get("hash", ""), doc.metadata.get("file_path", ""), doc.page_content ) for doc in documents ] cursor.executemany(''' INSERT INTO document_metadata (file_name, hash, file_path, 
page_content) VALUES (?, ?, ?, ?) ''', doc_rows) cursor.executemany(''' INSERT INTO hash_chunk_ids (tiledb_id, hash) VALUES (?, ?) ''', hash_id_mappings) conn.commit() finally: conn.close() ================================================ FILE: db/stage_extract.py ================================================ import logging import os import pickle import sys import time from pathlib import Path # Ensure project root is on sys.path for imports sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) logging.basicConfig( level=logging.INFO, format="%(asctime)s %(levelname)s [%(name)s] %(message)s", ) logger = logging.getLogger("stage_extract") def main(): if len(sys.argv) != 3: print(f"Usage: {sys.argv[0]} ", file=sys.stderr) sys.exit(1) source_dir = Path(sys.argv[1]) output_path = Path(sys.argv[2]) if not source_dir.is_dir(): print(f"ERROR: Source directory does not exist: {source_dir}", file=sys.stderr) sys.exit(1) logger.info(f"Stage 1: Extracting documents from {source_dir}") t0 = time.time() from db.document_processor import load_documents docs = load_documents(source_dir) doc_data = [] for doc in docs: clean_meta = {str(k): v for k, v in doc.metadata.items()} doc_data.append((doc.page_content, clean_meta)) elapsed = time.time() - t0 logger.info(f"Extracted {len(doc_data)} documents in {elapsed:.1f}s") with open(output_path, "wb") as f: pickle.dump(doc_data, f, protocol=pickle.HIGHEST_PROTOCOL) file_size_mb = output_path.stat().st_size / (1024 * 1024) logger.info(f"Wrote {output_path} ({file_size_mb:.1f} MB)") if __name__ == "__main__": main() ================================================ FILE: db/stage_split.py ================================================ import argparse import concurrent.futures import gc import logging import os import pickle import subprocess import sys import tempfile import time from pathlib import Path # Ensure project root is on sys.path for imports sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) logging.basicConfig( level=logging.INFO, format="%(asctime)s %(levelname)s [%(name)s] %(message)s", ) logger = logging.getLogger("stage_split") WORKER_SCRIPT = r''' import faulthandler faulthandler.enable() import gc import os import pickle import sys # Ensure project root is on sys.path sys.path.insert(0, os.environ.get("VECTORDB_PROJECT_ROOT", "")) def main(): input_pkl = sys.argv[1] output_pkl = sys.argv[2] chunk_size = int(sys.argv[3]) chunk_overlap = int(sys.argv[4]) with open(input_pkl, "rb") as f: doc_data = pickle.load(f) from db.document_processor import FixedSizeTextSplitter, add_pymupdf_page_metadata, Document from core.utilities import normalize_text splitter = FixedSizeTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) chunks_with_meta = [] errors = [] for i, (content, metadata) in enumerate(doc_data): try: doc = Document(page_content=content, metadata=metadata) if metadata.get("file_type") == ".pdf": chunks = add_pymupdf_page_metadata( doc, chunk_size=chunk_size, chunk_overlap=chunk_overlap, ) else: chunks = splitter.split_documents([doc]) for chunk in chunks: cleaned = normalize_text(chunk.page_content, preserve_whitespace=True) if cleaned is not None: chunk_meta = chunk.metadata if chunk.metadata else {} chunks_with_meta.append((cleaned, chunk_meta)) except Exception as e: file_name = metadata.get("file_name", "unknown") errors.append({ "doc_index": i, "file_name": file_name, "error": f"{type(e).__name__}: {e}", }) valid = [] valid_with_meta = [] skipped = 0 for text, meta in chunks_with_meta: if 
isinstance(text, str) and text.strip(): valid.append(text) valid_with_meta.append((text, meta)) else: skipped += 1 output = { "texts": valid, "chunks": valid_with_meta, "errors": errors, "docs_processed": len(doc_data), "skipped": skipped, } with open(output_pkl, "wb") as f: pickle.dump(output, f, protocol=pickle.HIGHEST_PROTOCOL) if __name__ == "__main__": main() ''' def save_checkpoint(checkpoint_path, data): tmp_path = checkpoint_path.with_suffix(".tmp") with open(tmp_path, "wb") as f: pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL) for attempt in range(5): try: os.replace(tmp_path, checkpoint_path) return except PermissionError: if attempt == 4: raise time.sleep(0.2) def run_worker(python_exe: str, worker_script_path: str, docs_pkl: str, output_pkl: str, chunk_size: int, chunk_overlap: int, timeout: int = 600) -> tuple: cmd = [ python_exe, worker_script_path, docs_pkl, output_pkl, str(chunk_size), str(chunk_overlap), ] project_root = str(Path(__file__).resolve().parent.parent) t0 = time.time() with subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1, env={**os.environ, "PYTHONUNBUFFERED": "1", "VECTORDB_PROJECT_ROOT": project_root}, ) as process: output_lines = [] for line in process.stdout: line = line.rstrip("\n") if line.strip(): logger.warning(f" [worker] {line}") output_lines.append(line) process.wait(timeout=timeout) elapsed = time.time() - t0 returncode = process.returncode return returncode, elapsed def get_physical_core_count() -> int: try: import psutil count = psutil.cpu_count(logical=False) if count is not None and count > 0: return count except ImportError: pass logical = os.cpu_count() or 4 return max(1, logical // 2) def run_worker_with_retries(worker_id: int, total_workers: int, python_exe: str, worker_script_path: str, chunk_docs: list, worker_dir: Path, chunk_size: int, chunk_overlap: int, max_retries: int) -> dict: num_docs = len(chunk_docs) docs_pkl = worker_dir / f"_split_worker_input_{worker_id}.pkl" result_pkl = worker_dir / f"_split_worker_output_{worker_id}.pkl" with open(docs_pkl, "wb") as f: pickle.dump(chunk_docs, f, protocol=pickle.HIGHEST_PROTOCOL) worker_t0 = time.time() worker_success = False texts = [] chunks = [] errors = [] skipped = 0 for retry in range(max_retries): exit_code, elapsed = run_worker( python_exe, str(worker_script_path), str(docs_pkl), str(result_pkl), chunk_size, chunk_overlap, timeout=600, ) if exit_code == 0 and result_pkl.exists(): try: with open(result_pkl, "rb") as f: worker_data = pickle.load(f) texts = worker_data.get("texts", []) chunks = worker_data.get("chunks", []) errors = worker_data.get("errors", []) skipped = worker_data.get("skipped", 0) worker_success = True logger.info(f" Worker {worker_id}/{total_workers} completed in {elapsed:.1f}s " f"({len(texts)} chunks, {len(errors)} errors, {skipped} skipped)") break except Exception as e: logger.error(f" Worker {worker_id}: failed to read output: {e}") else: logger.warning(f" Worker {worker_id} crashed (exit code {exit_code}, " f"{elapsed:.1f}s), retry {retry + 1}/{max_retries}") time.sleep(2) try: if result_pkl.exists(): result_pkl.unlink() except Exception: pass try: docs_pkl.unlink() except Exception: pass try: if result_pkl.exists(): result_pkl.unlink() except Exception: pass if not worker_success: logger.error(f" Worker {worker_id} FAILED after {max_retries} retries, " f"skipping {num_docs} documents") errors.append({ "doc_index": -1, "file_name": "BATCH_FAILURE", "error": f"Worker crashed {max_retries} times", }) 
worker_elapsed = time.time() - worker_t0 return { "worker_id": worker_id, "num_docs": num_docs, "success": worker_success, "texts": texts, "chunks": chunks, "errors": errors, "skipped": skipped, "elapsed": worker_elapsed, } def main(): import faulthandler faulthandler.enable() parser = argparse.ArgumentParser(description="Stage 2: Text Splitting (subprocess-per-chunk)") parser.add_argument("input_pickle", type=Path) parser.add_argument("output_pickle", type=Path) parser.add_argument("chunk_size", type=int) parser.add_argument("chunk_overlap", type=int) parser.add_argument("--worker-batch-size", type=int, default=2000) parser.add_argument("--max-worker-retries", type=int, default=3) parser.add_argument("--max-parallel-workers", type=int, default=0) parser.add_argument("--checkpoint-dir", type=Path, default=None) parser.add_argument("--checkpoint-interval", type=int, default=5) args = parser.parse_args() if not args.input_pickle.exists(): print(f"ERROR: Input file does not exist: {args.input_pickle}", file=sys.stderr) sys.exit(1) python_exe = sys.executable checkpoint_dir = args.checkpoint_dir checkpoint_path = None if checkpoint_dir is not None: checkpoint_dir.mkdir(parents=True, exist_ok=True) checkpoint_path = checkpoint_dir / "split_checkpoint.pkl" worker_dir = checkpoint_dir if checkpoint_dir else Path(tempfile.gettempdir()) worker_dir.mkdir(parents=True, exist_ok=True) worker_script_path = worker_dir / "_split_worker.py" with open(worker_script_path, "w", encoding="utf-8") as f: f.write(WORKER_SCRIPT) with open(args.input_pickle, "rb") as f: doc_data = pickle.load(f) total_docs = len(doc_data) MIN_DOCS_FOR_PARALLEL = 5000 if args.max_parallel_workers > 0: max_parallel = args.max_parallel_workers else: physical_cores = get_physical_core_count() max_parallel = max(1, physical_cores - 2) if total_docs <= MIN_DOCS_FOR_PARALLEL: effective_parallel = 1 else: effective_parallel = max_parallel logger.info(f"Stage 2: Splitting documents (subprocess-per-chunk isolation)") logger.info(f" chunk_size={args.chunk_size}, chunk_overlap={args.chunk_overlap}") logger.info(f" worker_batch_size={args.worker_batch_size}") logger.info(f" parallel_workers={effective_parallel}") t0 = time.time() logger.info(f"Loaded {total_docs} documents from {args.input_pickle}") if total_docs == 0: logger.info("No documents to process") with open(args.output_pickle, "wb") as f: pickle.dump({"texts": [], "chunks": []}, f, protocol=pickle.HIGHEST_PROTOCOL) return worker_batch_size = args.worker_batch_size worker_jobs = [] offset = 0 worker_id = 0 while offset < total_docs: worker_id += 1 chunk_end = min(offset + worker_batch_size, total_docs) chunk_docs = doc_data[offset:chunk_end] worker_jobs.append((worker_id, chunk_docs)) offset = chunk_end total_workers = len(worker_jobs) logger.info(f"Processing {total_docs} documents in {total_workers} worker subprocess(es)") all_texts = [] all_chunks = [] all_errors = [] total_skipped = 0 workers_completed = 0 workers_since_checkpoint = 0 if effective_parallel <= 1: for wid, chunk_docs in worker_jobs: logger.info(f"Worker {wid}/{total_workers}: {len(chunk_docs)} documents") result = run_worker_with_retries( wid, total_workers, python_exe, str(worker_script_path), chunk_docs, worker_dir, args.chunk_size, args.chunk_overlap, args.max_worker_retries, ) all_texts.extend(result["texts"]) all_chunks.extend(result.get("chunks", [])) all_errors.extend(result["errors"]) total_skipped += result["skipped"] workers_completed += 1 workers_since_checkpoint += 1 if checkpoint_path is not None and 
workers_since_checkpoint >= args.checkpoint_interval: save_checkpoint(checkpoint_path, { "texts": all_texts, "chunks": all_chunks, "errors": all_errors, "skipped": total_skipped, "workers_completed": workers_completed, }) workers_since_checkpoint = 0 gc.collect() else: for wave_start in range(0, total_workers, effective_parallel): wave_end = min(wave_start + effective_parallel, total_workers) wave_jobs = worker_jobs[wave_start:wave_end] logger.info(f"Launching parallel wave: workers {wave_jobs[0][0]}-{wave_jobs[-1][0]}") wave_results = {} with concurrent.futures.ThreadPoolExecutor(max_workers=len(wave_jobs)) as executor: future_to_wid = {} for wid, chunk_docs in wave_jobs: future = executor.submit( run_worker_with_retries, wid, total_workers, python_exe, str(worker_script_path), chunk_docs, worker_dir, args.chunk_size, args.chunk_overlap, args.max_worker_retries, ) future_to_wid[future] = wid for future in concurrent.futures.as_completed(future_to_wid): wid = future_to_wid[future] try: result = future.result() wave_results[wid] = result except Exception as e: logger.error(f" Worker {wid} thread raised exception: {e}") wave_results[wid] = { "texts": [], "chunks": [], "errors": [{"doc_index": -1, "file_name": "THREAD_EXCEPTION", "error": str(e)}], "skipped": 0, } for wid, chunk_docs in wave_jobs: result = wave_results.get(wid, {"texts": [], "chunks": [], "errors": [], "skipped": 0}) all_texts.extend(result["texts"]) all_chunks.extend(result.get("chunks", [])) all_errors.extend(result["errors"]) total_skipped += result.get("skipped", 0) workers_completed += 1 workers_since_checkpoint += 1 if checkpoint_path is not None and workers_since_checkpoint >= args.checkpoint_interval: save_checkpoint(checkpoint_path, { "texts": all_texts, "chunks": all_chunks, "errors": all_errors, "skipped": total_skipped, "workers_completed": workers_completed, }) workers_since_checkpoint = 0 gc.collect() elapsed = time.time() - t0 logger.info(f"Split {total_docs} documents into {len(all_texts)} chunks in {elapsed:.1f}s " f"({len(all_errors)} errors, {total_skipped} skipped)") output_data = {"texts": all_texts, "chunks": all_chunks} with open(args.output_pickle, "wb") as f: pickle.dump(output_data, f, protocol=pickle.HIGHEST_PROTOCOL) try: worker_script_path.unlink() except Exception: pass if checkpoint_path is not None and checkpoint_path.exists(): try: checkpoint_path.unlink() except Exception: pass if __name__ == "__main__": main() ================================================ FILE: db/stage_tokenize.py ================================================ import argparse import concurrent.futures import gc import logging import os import pickle import subprocess import sys import tempfile import time from pathlib import Path # Ensure project root is on sys.path for imports sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) logging.basicConfig( level=logging.INFO, format="%(asctime)s %(levelname)s [%(name)s] %(message)s", ) logger = logging.getLogger("stage_tokenize") WORKER_SCRIPT = r''' import faulthandler faulthandler.enable() import gc import os import pickle import sys import numpy as np os.environ["TOKENIZERS_PARALLELISM"] = "false" os.environ["RUST_BACKTRACE"] = "1" def main(): input_pkl = sys.argv[1] output_pkl = sys.argv[2] model_path = sys.argv[3] batch_size = int(sys.argv[4]) max_seq_length = int(sys.argv[5]) use_fast = sys.argv[6] == "True" global_start_index = int(sys.argv[7]) encode_batch_size = int(sys.argv[8]) length_sort = sys.argv[9] == "True" with open(input_pkl, "rb") as f: texts = 
pickle.load(f) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( model_path, trust_remote_code=True, model_max_length=max_seq_length, use_fast=use_fast, ) if tokenizer.pad_token is None: if tokenizer.eos_token is not None: tokenizer.pad_token = tokenizer.eos_token tokenizer.pad_token_id = tokenizer.eos_token_id else: tokenizer.add_special_tokens({"pad_token": "[PAD]"}) pad_token_id = tokenizer.pad_token_id or 0 padding_side = getattr(tokenizer, "padding_side", "right") all_sequences = [] errors_result = [] for start in range(0, len(texts), batch_size): end = min(start + batch_size, len(texts)) batch_texts = texts[start:end] batch_id = start // batch_size + 1 global_start = global_start_index + start try: batch_raw = tokenizer( batch_texts, padding=False, truncation=True, max_length=max_seq_length, return_tensors=None, return_attention_mask=True, ) num_texts_in_batch = len(batch_raw["input_ids"]) keys = list(batch_raw.keys()) for i in range(num_texts_in_batch): seq_dict = {"seq_index": global_start + i} for key in keys: val = batch_raw[key][i] if not isinstance(val, list): seq_dict[key] = list(val) else: seq_dict[key] = val all_sequences.append(seq_dict) except Exception as e: error_msg = f"{type(e).__name__}: {e}" errors_result.append({ "batch_id": batch_id, "start_index": global_start, "error": error_msg, }) if length_sort and all_sequences: all_sequences.sort(key=lambda s: len(s["input_ids"]), reverse=True) feature_keys = [k for k in all_sequences[0].keys() if k != "seq_index"] if all_sequences else [] batches_result = [] total_real_tokens = 0 total_pad_tokens = 0 for b_start in range(0, len(all_sequences), encode_batch_size): b_end = min(b_start + encode_batch_size, len(all_sequences)) batch_seqs = all_sequences[b_start:b_end] batch_id = b_start // encode_batch_size + 1 start_index = batch_seqs[0]["seq_index"] batch_size_actual = len(batch_seqs) max_len = max(len(s["input_ids"]) for s in batch_seqs) result = {} for key in feature_keys: pad_val = pad_token_id if key == "input_ids" else 0 padded = np.full((batch_size_actual, max_len), pad_val, dtype=np.int64) for i, seq in enumerate(batch_seqs): seq_data = seq[key] seq_len = len(seq_data) if padding_side == "left": padded[i, max_len - seq_len:] = seq_data else: padded[i, :seq_len] = seq_data if key == "input_ids": total_real_tokens += seq_len total_pad_tokens += (max_len - seq_len) result[key] = padded seq_indices = np.array([s["seq_index"] for s in batch_seqs], dtype=np.int64) batches_result.append({ "batch_id": batch_id, "start_index": start_index, "seq_indices": seq_indices, "features": result, }) del all_sequences gc.collect() total_tokens = total_real_tokens + total_pad_tokens efficiency_pct = (total_real_tokens / total_tokens * 100) if total_tokens > 0 else 100.0 output = { "batches": batches_result, "errors": errors_result, "texts_processed": len(texts), "padding_stats": { "total_real_tokens": total_real_tokens, "total_pad_tokens": total_pad_tokens, "efficiency_pct": efficiency_pct, }, } with open(output_pkl, "wb") as f: pickle.dump(output, f, protocol=pickle.HIGHEST_PROTOCOL) if __name__ == "__main__": main() ''' def save_checkpoint(checkpoint_path, data): tmp_path = checkpoint_path.with_suffix(".tmp") with open(tmp_path, "wb") as f: pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL) for attempt in range(5): try: os.replace(tmp_path, checkpoint_path) return except PermissionError: if attempt == 4: raise time.sleep(0.2) def run_worker(python_exe: str, worker_script_path: str, texts_pkl: str, 
output_pkl: str, model_path: str, batch_size: int, max_seq_length: int, use_fast: bool, global_start_index: int, encode_batch_size: int, length_sort: bool, timeout: int = 600) -> tuple: cmd = [ python_exe, worker_script_path, texts_pkl, output_pkl, model_path, str(batch_size), str(max_seq_length), str(use_fast), str(global_start_index), str(encode_batch_size), str(length_sort), ] t0 = time.time() with subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1, env={**os.environ, "PYTHONUNBUFFERED": "1"}, ) as process: output_lines = [] for line in process.stdout: line = line.rstrip("\n") if line.strip(): logger.warning(f" [worker] {line}") output_lines.append(line) process.wait(timeout=timeout) elapsed = time.time() - t0 returncode = process.returncode return returncode, elapsed def get_physical_core_count() -> int: try: import psutil count = psutil.cpu_count(logical=False) if count is not None and count > 0: return count except ImportError: pass logical = os.cpu_count() or 4 return max(1, logical // 2) def run_worker_with_retries(worker_id, total_workers, python_exe, worker_script_path, chunk_texts, global_start, worker_dir, model_path, batch_size, max_seq_length, use_fast, max_retries, encode_batch_size, length_sort) -> dict: num_texts = len(chunk_texts) chunk_pkl = worker_dir / f"_worker_input_{worker_id}.pkl" result_pkl = worker_dir / f"_worker_output_{worker_id}.pkl" with open(chunk_pkl, "wb") as f: pickle.dump(chunk_texts, f, protocol=pickle.HIGHEST_PROTOCOL) worker_t0 = time.time() worker_success = False batches = [] errors = [] padding_stats = {} for retry in range(max_retries): exit_code, elapsed = run_worker( python_exe, str(worker_script_path), str(chunk_pkl), str(result_pkl), model_path, batch_size, max_seq_length, use_fast, global_start, encode_batch_size, length_sort, timeout=600, ) if exit_code == 0 and result_pkl.exists(): try: with open(result_pkl, "rb") as f: worker_data = pickle.load(f) batches = worker_data.get("batches", []) errors = worker_data.get("errors", []) padding_stats = worker_data.get("padding_stats", {}) worker_success = True eff = padding_stats.get("efficiency_pct", 0) logger.info(f" Worker {worker_id}/{total_workers} completed in {elapsed:.1f}s " f"({len(batches)} batches, {eff:.1f}% pad efficiency)") break except Exception as e: logger.error(f" Worker {worker_id}: failed to read output: {e}") else: logger.warning(f" Worker {worker_id} crashed (exit code {exit_code}, " f"{elapsed:.1f}s), retry {retry + 1}/{max_retries}") time.sleep(2) try: if result_pkl.exists(): result_pkl.unlink() except Exception: pass try: chunk_pkl.unlink() except Exception: pass try: if result_pkl.exists(): result_pkl.unlink() except Exception: pass if not worker_success: logger.error(f" Worker {worker_id} FAILED after {max_retries} retries") for batch_start in range(0, num_texts, batch_size): errors.append({ "batch_id": -1, "start_index": global_start + batch_start, "error": f"Worker crashed {max_retries} times", }) worker_elapsed = time.time() - worker_t0 return { "worker_id": worker_id, "global_start": global_start, "num_texts": num_texts, "success": worker_success, "batches": batches, "errors": errors, "padding_stats": padding_stats, "elapsed": worker_elapsed, } def main(): import faulthandler faulthandler.enable() parser = argparse.ArgumentParser(description="Stage 3: Tokenization (subprocess-per-chunk)") parser.add_argument("input_pickle", type=Path) parser.add_argument("output_pickle", type=Path) parser.add_argument("model_path", type=str) 
parser.add_argument("batch_size", type=int) parser.add_argument("max_seq_length", type=int) parser.add_argument("--use-fast", action="store_true", default=True) parser.add_argument("--no-use-fast", dest="use_fast", action="store_false") parser.add_argument("--worker-batch-size", type=int, default=20000) parser.add_argument("--checkpoint-dir", type=Path, default=None) parser.add_argument("--checkpoint-interval", type=int, default=5) parser.add_argument("--start-text-index", type=int, default=0) parser.add_argument("--max-worker-retries", type=int, default=3) parser.add_argument("--max-parallel-workers", type=int, default=0) parser.add_argument("--encode-batch-size", type=int, required=True) parser.add_argument("--length-sort", action="store_true", default=False) parser.add_argument("--no-length-sort", dest="length_sort", action="store_false") args = parser.parse_args() if not args.input_pickle.exists(): print(f"ERROR: Input file does not exist: {args.input_pickle}", file=sys.stderr) sys.exit(1) python_exe = sys.executable start_text_index = args.start_text_index checkpoint_dir = args.checkpoint_dir checkpoint_interval = args.checkpoint_interval checkpoint_path = None if checkpoint_dir is not None: checkpoint_dir.mkdir(parents=True, exist_ok=True) checkpoint_path = checkpoint_dir / "tokenize_checkpoint.pkl" worker_dir = checkpoint_dir if checkpoint_dir else Path(tempfile.gettempdir()) worker_dir.mkdir(parents=True, exist_ok=True) worker_script_path = worker_dir / "_tokenize_worker.py" with open(worker_script_path, "w", encoding="utf-8") as f: f.write(WORKER_SCRIPT) with open(args.input_pickle, "rb") as f: _loaded = pickle.load(f) if isinstance(_loaded, dict): all_texts = _loaded["texts"] else: all_texts = _loaded del _loaded total_all = len(all_texts) if start_text_index > 0: logger.info(f"Resuming from text index {start_text_index}") texts = all_texts[start_text_index:] total = len(texts) MIN_TEXTS_FOR_PARALLEL = 5000 if args.max_parallel_workers > 0: max_parallel = args.max_parallel_workers else: physical_cores = get_physical_core_count() max_parallel = max(1, physical_cores - 4) if total <= MIN_TEXTS_FOR_PARALLEL: effective_parallel = 1 else: effective_parallel = max_parallel logger.info(f"Stage 3: Tokenizing (subprocess-per-chunk isolation)") logger.info(f" batch_size={args.batch_size}, max_seq_length={args.max_seq_length}") logger.info(f" encode_batch_size={args.encode_batch_size}, length_sort={args.length_sort}") logger.info(f" parallel_workers={effective_parallel}") t0 = time.time() if total == 0: logger.info("No texts to process") output = { "total_texts": total_all, "batch_size": args.batch_size, "encode_batch_size": args.encode_batch_size, "start_text_index": start_text_index, "texts_processed": 0, "batches": [], "errors": [], "padding_stats": {"total_real_tokens": 0, "total_pad_tokens": 0, "efficiency_pct": 100.0}, } with open(args.output_pickle, "wb") as f: pickle.dump(output, f, protocol=pickle.HIGHEST_PROTOCOL) return worker_batch_size = args.worker_batch_size worker_jobs = [] offset = 0 worker_id = 0 while offset < total: worker_id += 1 chunk_end = min(offset + worker_batch_size, total) chunk_texts = texts[offset:chunk_end] global_start = start_text_index + offset worker_jobs.append((worker_id, global_start, chunk_texts)) offset = chunk_end total_workers = len(worker_jobs) logger.info(f"Processing {total} texts in {total_workers} worker subprocess(es)") all_batches = [] all_errors = [] total_real_tokens = 0 total_pad_tokens = 0 workers_completed = 0 workers_since_checkpoint = 
0 if effective_parallel <= 1: for wid, gstart, chunk in worker_jobs: logger.info(f"Worker {wid}/{total_workers}: {len(chunk)} texts") result = run_worker_with_retries( wid, total_workers, python_exe, str(worker_script_path), chunk, gstart, worker_dir, args.model_path, args.batch_size, args.max_seq_length, args.use_fast, args.max_worker_retries, args.encode_batch_size, args.length_sort, ) all_batches.extend(result["batches"]) all_errors.extend(result["errors"]) ps = result.get("padding_stats", {}) total_real_tokens += ps.get("total_real_tokens", 0) total_pad_tokens += ps.get("total_pad_tokens", 0) workers_completed += 1 workers_since_checkpoint += 1 if checkpoint_path is not None and workers_since_checkpoint >= checkpoint_interval: current_offset = gstart + result["num_texts"] - start_text_index save_checkpoint(checkpoint_path, { "total_texts": total_all, "start_text_index": start_text_index, "texts_processed": current_offset, "batches": all_batches, "errors": all_errors, "next_text_index": start_text_index + current_offset, "padding_stats": {"total_real_tokens": total_real_tokens, "total_pad_tokens": total_pad_tokens}, }) workers_since_checkpoint = 0 gc.collect() else: for wave_start in range(0, total_workers, effective_parallel): wave_end = min(wave_start + effective_parallel, total_workers) wave_jobs = worker_jobs[wave_start:wave_end] logger.info(f"Launching parallel wave: workers {wave_jobs[0][0]}-{wave_jobs[-1][0]}") wave_results = {} with concurrent.futures.ThreadPoolExecutor(max_workers=len(wave_jobs)) as executor: future_to_wid = {} for wid, gstart, chunk in wave_jobs: future = executor.submit( run_worker_with_retries, wid, total_workers, python_exe, str(worker_script_path), chunk, gstart, worker_dir, args.model_path, args.batch_size, args.max_seq_length, args.use_fast, args.max_worker_retries, args.encode_batch_size, args.length_sort, ) future_to_wid[future] = wid for future in concurrent.futures.as_completed(future_to_wid): wid = future_to_wid[future] try: result = future.result() wave_results[wid] = result except Exception as e: logger.error(f" Worker {wid} thread raised exception: {e}") wave_results[wid] = { "batches": [], "errors": [{"batch_id": -1, "start_index": -1, "error": str(e)}], "padding_stats": {}, } for wid, gstart, chunk in wave_jobs: result = wave_results.get(wid, {"batches": [], "errors": [], "num_texts": len(chunk), "padding_stats": {}}) all_batches.extend(result["batches"]) all_errors.extend(result["errors"]) ps = result.get("padding_stats", {}) total_real_tokens += ps.get("total_real_tokens", 0) total_pad_tokens += ps.get("total_pad_tokens", 0) workers_completed += 1 workers_since_checkpoint += 1 if checkpoint_path is not None and workers_since_checkpoint >= checkpoint_interval: last_wid, last_gstart, last_chunk = wave_jobs[-1] current_offset = last_gstart + len(last_chunk) - start_text_index save_checkpoint(checkpoint_path, { "total_texts": total_all, "start_text_index": start_text_index, "texts_processed": current_offset, "batches": all_batches, "errors": all_errors, "next_text_index": start_text_index + current_offset, "padding_stats": {"total_real_tokens": total_real_tokens, "total_pad_tokens": total_pad_tokens}, }) workers_since_checkpoint = 0 gc.collect() elapsed = time.time() - t0 total_tokens = total_real_tokens + total_pad_tokens efficiency_pct = (total_real_tokens / total_tokens * 100) if total_tokens > 0 else 100.0 logger.info(f"Tokenized {len(all_batches)} batches in {elapsed:.1f}s " f"({len(all_errors)} errors, {efficiency_pct:.1f}% pad 
efficiency)") output = { "total_texts": total_all, "batch_size": args.batch_size, "encode_batch_size": args.encode_batch_size, "start_text_index": start_text_index, "texts_processed": total, "batches": all_batches, "errors": all_errors, "padding_stats": { "total_real_tokens": total_real_tokens, "total_pad_tokens": total_pad_tokens, "efficiency_pct": efficiency_pct, }, } with open(args.output_pickle, "wb") as f: pickle.dump(output, f, protocol=pickle.HIGHEST_PROTOCOL) try: worker_script_path.unlink() except Exception: pass if checkpoint_path is not None and checkpoint_path.exists(): try: checkpoint_path.unlink() except Exception: pass if __name__ == "__main__": main() ================================================ FILE: gui/__init__.py ================================================ ================================================ FILE: gui/credentials.py ================================================ from pathlib import Path from PySide6.QtWidgets import (QDialog, QDialogButtonBox, QVBoxLayout, QLabel, QLineEdit, QPushButton, QMessageBox) import yaml import logging import traceback from core.utilities import my_cprint from core.constants import PROJECT_ROOT from abc import ABC, abstractmethod from typing import Optional, Dict, Any class CredentialManager(ABC): def __init__(self, parent_widget): self.parent_widget = parent_widget self.config_file_path = PROJECT_ROOT / 'config.yaml' self.config = self._load_config() def _load_config(self) -> dict: if self.config_file_path.exists(): with open(self.config_file_path, 'r', encoding='utf-8') as file: return yaml.safe_load(file) or {} return {} def _save_config(self) -> None: with open(self.config_file_path, 'w', encoding='utf-8') as file: yaml.safe_dump(self.config, file, allow_unicode=True) @property @abstractmethod def dialog_title(self) -> str: pass @property @abstractmethod def dialog_label(self) -> str: pass @property @abstractmethod def clear_button_text(self) -> str: pass @property @abstractmethod def credential_name(self) -> str: pass @abstractmethod def get_current_credential(self) -> Optional[str]: pass @abstractmethod def update_credential(self, value: Optional[str]) -> None: pass def show_dialog(self) -> None: try: dialog = QDialog(self.parent_widget) dialog.setWindowTitle(self.dialog_title) layout = QVBoxLayout(dialog) label = QLabel(self.dialog_label, dialog) layout.addWidget(label) credential_input = QLineEdit(dialog) current_value = self.get_current_credential() if current_value: credential_input.setText(current_value) layout.addWidget(credential_input) button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) clear_button = QPushButton(self.clear_button_text) button_box.addButton(clear_button, QDialogButtonBox.ActionRole) layout.addWidget(button_box) def save_credential(): if credential := credential_input.text(): self.update_credential(credential) self._save_config() QMessageBox.information(self.parent_widget, "Success", f"{self.credential_name} saved successfully.") my_cprint(f"{self.credential_name} updated successfully.", "green") dialog.accept() def clear_credential(): self.update_credential(None) self._save_config() QMessageBox.information(self.parent_widget, "Success", f"{self.credential_name} cleared successfully.") my_cprint(f"{self.credential_name} cleared.", "green") dialog.accept() button_box.accepted.connect(save_credential) button_box.rejected.connect(dialog.reject) clear_button.clicked.connect(clear_credential) dialog.exec() except Exception as e: logging.error(f"Error managing 
{self.credential_name}: {str(e)}") logging.debug(traceback.format_exc()) QMessageBox.critical(self.parent_widget, "Error", f"Failed to manage {self.credential_name}: {str(e)}") class HuggingFaceCredentialManager(CredentialManager): @property def dialog_title(self) -> str: return "Hugging Face Access Token" @property def dialog_label(self) -> str: return "Enter a new Hugging Face access token or clear the current one:" @property def clear_button_text(self) -> str: return "Clear Token" @property def credential_name(self) -> str: return "Hugging Face access token" def get_current_credential(self) -> Optional[str]: return self.config.get('hf_access_token') def update_credential(self, value: Optional[str]) -> None: self.config['hf_access_token'] = value class MiniMaxCredentialManager(CredentialManager): @property def dialog_title(self) -> str: return "MiniMax API Key" @property def dialog_label(self) -> str: return "Enter a new MiniMax API key or clear the current one:" @property def clear_button_text(self) -> str: return "Clear Key" @property def credential_name(self) -> str: return "MiniMax API key" def get_current_credential(self) -> Optional[str]: return self.config.get('minimax', {}).get('api_key') def update_credential(self, value: Optional[str]) -> None: if 'minimax' not in self.config: self.config['minimax'] = {} self.config['minimax']['api_key'] = value def manage_credentials(parent_widget, credential_type: str) -> None: managers = { 'hf': HuggingFaceCredentialManager, 'minimax': MiniMaxCredentialManager, } if manager_class := managers.get(credential_type): manager = manager_class(parent_widget) manager.show_dialog() else: raise ValueError(f"Unknown credential type: {credential_type}") ================================================ FILE: gui/dialogs/__init__.py ================================================ ================================================ FILE: gui/dialogs/ai_backends_dialog.py ================================================ from pathlib import Path import yaml from PySide6.QtWidgets import ( QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QTabWidget, QMessageBox, ) from core.constants import PROJECT_ROOT from gui.dialogs.chatgpt_tab import ChatGPTTab from gui.dialogs.lm_studio_tab import LMStudioTab from gui.dialogs.minimax_tab import MiniMaxTab from gui.dialogs.kobold_tab import KoboldTab class AIBackendsDialog(QDialog): TAB_REGISTRY = [ ("ChatGPT", ChatGPTTab), ("LM Studio", LMStudioTab), ("MiniMax", MiniMaxTab), ("Kobold", KoboldTab), ] def __init__(self, parent=None, initial_tab=0): super().__init__(parent) self.setWindowTitle("Chat Backend Settings") self.resize(620, 540) self.config_path = PROJECT_ROOT / "config.yaml" config = self._load_config() self.tab_widget = QTabWidget() self.tabs = [] for label, tab_class in self.TAB_REGISTRY: tab = tab_class() tab.load_from_config(config) self.tab_widget.addTab(tab, label) self.tabs.append(tab) if 0 <= initial_tab < self.tab_widget.count(): self.tab_widget.setCurrentIndex(initial_tab) button_row = QHBoxLayout() button_row.addStretch(1) ok_btn = QPushButton("OK") cancel_btn = QPushButton("Cancel") ok_btn.clicked.connect(self._on_accept) cancel_btn.clicked.connect(self.reject) button_row.addWidget(ok_btn) button_row.addWidget(cancel_btn) layout = QVBoxLayout(self) layout.addWidget(self.tab_widget) layout.addLayout(button_row) def _load_config(self) -> dict: if not self.config_path.exists(): return {} try: with open(self.config_path, "r", encoding="utf-8") as f: return yaml.safe_load(f) or {} except Exception as e: 
QMessageBox.critical(self, "Error", f"Failed to load config.yaml: {e}") return {} def _save_config(self, config: dict) -> bool: try: with open(self.config_path, "w", encoding="utf-8") as f: yaml.safe_dump(config, f, allow_unicode=True) return True except Exception as e: QMessageBox.critical(self, "Error", f"Failed to save config.yaml: {e}") return False def _on_accept(self) -> None: for idx, tab in enumerate(self.tabs): ok, error = tab.validate() if not ok: self.tab_widget.setCurrentIndex(idx) QMessageBox.warning(self, "Invalid Setting", error or "Validation failed.") return config = self._load_config() for tab in self.tabs: tab.save_to_config(config) if self._save_config(config): self.accept() ================================================ FILE: gui/dialogs/chatgpt_tab.py ================================================ from PySide6.QtWidgets import ( QWidget, QVBoxLayout, QHBoxLayout, QLabel, QComboBox, QPushButton, QLineEdit, QGroupBox, QFrame, ) from core.chatgpt_settings import ( AVAILABLE_OPENAI_MODELS, REASONING_EFFORT_OPTIONS, VERBOSITY_OPTIONS, DEFAULT_OPENAI_MODEL, DEFAULT_VERBOSITY, DEFAULT_REASONING_EFFORT, get_display_name, get_model_pricing, supports_reasoning_effort, supports_verbosity, migrate_legacy_model, ) class CostPanel(QFrame): def __init__(self, parent=None): super().__init__(parent) self.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken) self.setStyleSheet(""" CostPanel { background-color: #2D2D2D; border: 1px solid #404040; border-radius: 4px; padding: 8px; } QLabel { color: #E8E8E8; } QLabel#costHeader { font-weight: bold; color: #2196F3; } QLabel#costValue { font-family: monospace; color: #4CAF50; } """) layout = QVBoxLayout(self) layout.setContentsMargins(10, 8, 10, 8) layout.setSpacing(4) header = QLabel("API Cost (per million tokens)") header.setObjectName("costHeader") layout.addWidget(header) cost_row = QHBoxLayout() cost_row.setSpacing(20) self.input_value = self._build_cost_column(cost_row, "Input:") self.cached_value = self._build_cost_column(cost_row, "Cached:") self.output_value = self._build_cost_column(cost_row, "Output:") cost_row.addStretch() layout.addLayout(cost_row) def _build_cost_column(self, parent_layout: QHBoxLayout, label_text: str) -> QLabel: column = QVBoxLayout() label = QLabel(label_text) label.setStyleSheet("font-size: 9pt; color: #B0B0B0;") value = QLabel("$0.00") value.setObjectName("costValue") value.setStyleSheet("font-size: 11pt; font-weight: bold;") column.addWidget(label) column.addWidget(value) parent_layout.addLayout(column) return value def update_for_model(self, model_name: str) -> None: input_cost, cached_cost, output_cost = get_model_pricing(model_name) paid_style = "font-size: 11pt; font-weight: bold; color: #FFA726;" muted_style = "font-size: 11pt; font-weight: bold; color: #B0B0B0;" self.input_value.setText(f"${input_cost:.2f}") self.input_value.setStyleSheet(paid_style) self.output_value.setText(f"${output_cost:.2f}") self.output_value.setStyleSheet(paid_style) if cached_cost > 0: self.cached_value.setText(f"${cached_cost:.3f}") self.cached_value.setStyleSheet(paid_style) else: self.cached_value.setText("—") self.cached_value.setStyleSheet(muted_style) class ChatGPTTab(QWidget): def __init__(self, parent=None): super().__init__(parent) layout = QVBoxLayout(self) api_group = QGroupBox("API Key") api_layout = QVBoxLayout() api_help = QLabel( "Required for ChatGPT. Get a key from " "platform.openai.com/api-keys." 
) api_help.setOpenExternalLinks(True) api_help.setStyleSheet("color: gray;") api_layout.addWidget(api_help) api_row = QHBoxLayout() self.api_key_edit = QLineEdit() self.api_key_edit.setEchoMode(QLineEdit.Password) self.api_key_edit.setPlaceholderText("sk-proj-...") self.show_key_btn = QPushButton("Show / Hide") self.show_key_btn.setMaximumWidth(110) self.show_key_btn.clicked.connect(self._toggle_api_key_visibility) api_row.addWidget(self.api_key_edit) api_row.addWidget(self.show_key_btn) api_layout.addLayout(api_row) api_group.setLayout(api_layout) layout.addWidget(api_group) model_group = QGroupBox("Model") model_layout = QVBoxLayout() model_row = QHBoxLayout() model_row.addWidget(QLabel("Model:")) self.model_combo = QComboBox() for model in AVAILABLE_OPENAI_MODELS: self.model_combo.addItem(get_display_name(model), model) model_row.addWidget(self.model_combo, 1) model_layout.addLayout(model_row) self.cost_panel = CostPanel() model_layout.addWidget(self.cost_panel) verbosity_row = QHBoxLayout() self.verbosity_label = QLabel("Verbosity:") self.verbosity_combo = QComboBox() self.verbosity_combo.addItems(VERBOSITY_OPTIONS) verbosity_row.addWidget(self.verbosity_label) verbosity_row.addWidget(self.verbosity_combo, 1) model_layout.addLayout(verbosity_row) reasoning_row = QHBoxLayout() self.reasoning_label = QLabel("Reasoning Effort:") self.reasoning_combo = QComboBox() self.reasoning_combo.addItems(REASONING_EFFORT_OPTIONS) reasoning_row.addWidget(self.reasoning_label) reasoning_row.addWidget(self.reasoning_combo, 1) model_layout.addLayout(reasoning_row) model_group.setLayout(model_layout) layout.addWidget(model_group) layout.addStretch(1) self.model_combo.currentIndexChanged.connect(self._on_model_changed) def _toggle_api_key_visibility(self) -> None: if self.api_key_edit.echoMode() == QLineEdit.Password: self.api_key_edit.setEchoMode(QLineEdit.Normal) else: self.api_key_edit.setEchoMode(QLineEdit.Password) def _on_model_changed(self) -> None: model = self.model_combo.currentData() or self.model_combo.currentText() self.cost_panel.update_for_model(model) self._update_capability_visibility(model) def _update_capability_visibility(self, model: str) -> None: show_v = supports_verbosity(model) show_r = supports_reasoning_effort(model) self.verbosity_label.setVisible(show_v) self.verbosity_combo.setVisible(show_v) self.reasoning_label.setVisible(show_r) self.reasoning_combo.setVisible(show_r) def _set_combo_to_model(self, model: str) -> None: for i in range(self.model_combo.count()): if self.model_combo.itemData(i) == model: self.model_combo.setCurrentIndex(i) return self.model_combo.setCurrentIndex(0) def load_from_config(self, config: dict) -> None: openai_cfg = (config.get("openai") or {}) api_key = openai_cfg.get("api_key") or "" self.api_key_edit.setText(api_key) model = migrate_legacy_model(openai_cfg.get("model") or DEFAULT_OPENAI_MODEL) self._set_combo_to_model(model) verbosity = openai_cfg.get("verbosity") or DEFAULT_VERBOSITY if verbosity in VERBOSITY_OPTIONS: self.verbosity_combo.setCurrentText(verbosity) else: self.verbosity_combo.setCurrentText(DEFAULT_VERBOSITY) reasoning = openai_cfg.get("reasoning_effort") or DEFAULT_REASONING_EFFORT if reasoning in REASONING_EFFORT_OPTIONS: self.reasoning_combo.setCurrentText(reasoning) else: self.reasoning_combo.setCurrentText(DEFAULT_REASONING_EFFORT) self.cost_panel.update_for_model(model) self._update_capability_visibility(model) def save_to_config(self, config: dict) -> None: openai_cfg = config.setdefault("openai", {}) openai_cfg["api_key"] = 
self.api_key_edit.text().strip() or None openai_cfg["model"] = self.model_combo.currentData() or self.model_combo.currentText() openai_cfg["verbosity"] = self.verbosity_combo.currentText() openai_cfg["reasoning_effort"] = self.reasoning_combo.currentText() def validate(self) -> tuple[bool, str | None]: return True, None ================================================ FILE: gui/dialogs/kobold_tab.py ================================================ from PySide6.QtCore import Qt from PySide6.QtWidgets import QWidget, QVBoxLayout, QLabel class KoboldTab(QWidget): def __init__(self, parent=None): super().__init__(parent) layout = QVBoxLayout(self) notice = QLabel( "Kobold settings will appear here in a future release.\n\n" "Kobold currently uses its default connection settings." ) notice.setAlignment(Qt.AlignCenter) notice.setWordWrap(True) notice.setStyleSheet("color: #B0B0B0; font-style: italic; padding: 24px;") layout.addStretch(1) layout.addWidget(notice) layout.addStretch(2) def load_from_config(self, config: dict) -> None: return def save_to_config(self, config: dict) -> None: return def validate(self) -> tuple[bool, str | None]: return True, None ================================================ FILE: gui/dialogs/lm_studio_tab.py ================================================ import re from PySide6.QtGui import QIntValidator from PySide6.QtWidgets import ( QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QCheckBox, QGroupBox, ) from core.constants import TOOLTIPS DEFAULT_CONNECTION_STR = "http://localhost:1234/v1" PORT_RE = re.compile(r":(\d{1,5})(?=/)") class LMStudioTab(QWidget): def __init__(self, parent=None): super().__init__(parent) self._original_connection_str = DEFAULT_CONNECTION_STR self._original_port = "" layout = QVBoxLayout(self) server_group = QGroupBox("LM Studio Server") server_layout = QVBoxLayout() port_row = QHBoxLayout() self.port_label = QLabel("Port:") self.port_label.setToolTip(TOOLTIPS.get("PORT", "")) self.port_edit = QLineEdit() self.port_edit.setPlaceholderText("Port") self.port_edit.setValidator(QIntValidator(1, 65535)) self.port_edit.setToolTip(TOOLTIPS.get("PORT", "")) port_row.addWidget(self.port_label) port_row.addWidget(self.port_edit, 1) server_layout.addLayout(port_row) self.thinking_checkbox = QCheckBox("Show thinking process?") self.thinking_checkbox.setToolTip(TOOLTIPS.get("SHOW_THINKING_CHECKBOX", "")) server_layout.addWidget(self.thinking_checkbox) server_group.setLayout(server_layout) layout.addWidget(server_group) layout.addStretch(1) def load_from_config(self, config: dict) -> None: server_cfg = config.get("server") or {} self._original_connection_str = server_cfg.get("connection_str") or DEFAULT_CONNECTION_STR match = PORT_RE.search(self._original_connection_str) self._original_port = match.group(1) if match else "" self.port_label.setText(f"Port: {self._original_port}" if self._original_port else "Port:") self.port_edit.setText(self._original_port) self.thinking_checkbox.setChecked(bool(server_cfg.get("show_thinking", False))) def save_to_config(self, config: dict) -> None: server_cfg = config.setdefault("server", {}) new_port_text = self.port_edit.text().strip() if new_port_text: new_connection_str = self._update_port_in_connection_str( self._original_connection_str, new_port_text ) server_cfg["connection_str"] = new_connection_str else: server_cfg.setdefault("connection_str", self._original_connection_str) server_cfg["show_thinking"] = bool(self.thinking_checkbox.isChecked()) def validate(self) -> tuple[bool, str | None]: 
new_port_text = self.port_edit.text().strip() if not new_port_text: return True, None try: port = int(new_port_text) except ValueError: return False, "Port must be a number between 1 and 65535." if not (1 <= port <= 65535): return False, "Port must be between 1 and 65535." if not PORT_RE.search(self._original_connection_str): return False, ( "Existing LM Studio connection string is malformed and the port " "cannot be replaced. Edit config.yaml directly to fix it." ) return True, None @staticmethod def _update_port_in_connection_str(connection_str: str, port: str) -> str: match = PORT_RE.search(connection_str) if not match: return connection_str return connection_str[: match.start(1)] + str(port) + connection_str[match.end(1):] ================================================ FILE: gui/dialogs/minimax_tab.py ================================================ from PySide6.QtCore import Qt from PySide6.QtWidgets import QWidget, QVBoxLayout, QLabel class MiniMaxTab(QWidget): def __init__(self, parent=None): super().__init__(parent) layout = QVBoxLayout(self) notice = QLabel( "MiniMax settings will appear here in a future release.\n\n" "For now, the MiniMax API key is managed via the File menu's " "MiniMax API Key entry." ) notice.setAlignment(Qt.AlignCenter) notice.setWordWrap(True) notice.setStyleSheet("color: #B0B0B0; font-style: italic; padding: 24px;") layout.addStretch(1) layout.addWidget(notice) layout.addStretch(2) def load_from_config(self, config: dict) -> None: return def save_to_config(self, config: dict) -> None: return def validate(self) -> tuple[bool, str | None]: return True, None ================================================ FILE: gui/download_model.py ================================================ from pathlib import Path from huggingface_hub import snapshot_download, HfApi from huggingface_hub.utils import disable_progress_bars, RepositoryNotFoundError, GatedRepoError from huggingface_hub.hf_api import RepoFile from PySide6.QtCore import QObject, Signal import fnmatch import humanfriendly import atexit import yaml class ModelDownloadedSignal(QObject): downloaded = Signal(str, str) failed = Signal(str) model_downloaded_signal = ModelDownloadedSignal() MODEL_DIRECTORIES = { "vector": "vector", "chat": "chat", "tts": "tts", "jeeves": "jeeves", "ocr": "ocr" } def get_hf_token(): config_path = Path("config.yaml") if config_path.exists(): try: with open(config_path, "r", encoding="utf-8") as f: data = yaml.safe_load(f) or {} token = (data.get("hf_access_token") or "").strip() return token or None except Exception: return None return None class ModelDownloader(QObject): def __init__(self, model_info, model_type): super().__init__() self.model_info = model_info self.model_type = model_type self._model_directory = None self.hf_token = get_hf_token() self.api = HfApi(token=False) self.api.timeout = 60 disable_progress_bars() self.local_dir = self.get_model_directory() def cleanup_incomplete_download(self): try: if hasattr(self, "local_dir") and self.local_dir and self.local_dir.exists(): if not any(self.local_dir.iterdir()): import shutil shutil.rmtree(self.local_dir) except Exception: pass def get_model_directory_name(self): repo_id = self.get_model_url() if isinstance(repo_id, str): return repo_id.replace("/", "--") return str(repo_id) def get_model_directory(self): base = Path("Models") sub = MODEL_DIRECTORIES.get(self.model_type, self.model_type) return base / sub / self.get_model_directory_name() def get_model_url(self): if isinstance(self.model_info, dict): return 
self.model_info.get("repo_id") or self.model_info.get("url") or self.model_info.get("name") return self.model_info def check_repo_type(self, repo_id): try: repo_info = self.api.repo_info(repo_id, timeout=60, token=False) if getattr(repo_info, "private", False): return "private" if getattr(repo_info, "gated", False): return "gated" return "public" except RepositoryNotFoundError: return "not_found" except GatedRepoError: return "gated" except Exception as e: msg = str(e).lower() if "401" in msg or "403" in msg or "gated" in msg: try: api_with_token = HfApi(token=self.hf_token or False) _ = api_with_token.repo_info(repo_id, timeout=60) return "public" except GatedRepoError: return "gated" except Exception: return "gated" if not self.hf_token else "error" return "error" def _list_repo_files(self, repo_id, use_token): api = self.api if not use_token else HfApi(token=self.hf_token) return list(api.list_repo_tree(repo_id, recursive=True)) def _select_patterns(self, repo_files, allow_patterns, ignore_patterns): final_ignore = [ "*.ckpt", "*.onnx", "*.h5", "*.tflite", "*.pb", "*.msgpack", "*.safetensors.index.json", "*.bin.index.json", "*.flax", "*.npz", "*.tar", "*.tar.gz", "*.zip", "*.rar", "*.7z", "*.gz", "*.bz2", "*.xz", "*.md", "README*", "LICENSE*", ".*", ".gitattributes", ".git*", ] if ignore_patterns: final_ignore.extend(ignore_patterns) safetensors_files = [f.rfilename for f in repo_files if isinstance(f, RepoFile) and f.rfilename.endswith(".safetensors")] bin_files = [f.rfilename for f in repo_files if isinstance(f, RepoFile) and f.rfilename.endswith(".bin")] if safetensors_files and bin_files: final_ignore.append("*.bin") if safetensors_files or bin_files: final_ignore.append("*consolidated*") if allow_patterns is None: allow_patterns = ["*.json", "*.safetensors", "*.bin", "*.model", "tokenizer*", "vocab*", "merges.txt", "config.yaml", "modules.json", "1_Pooling/*", "sentencepiece.*", "spiece.*"] return allow_patterns, final_ignore def _filter_and_size(self, repo_files, allow_patterns, ignore_patterns): included_files = [] ignored_files = [] total_size = 0 for file in repo_files: if not isinstance(file, RepoFile): continue path = file.rfilename if any(fnmatch.fnmatch(path, pat) for pat in ignore_patterns): ignored_files.append(path) continue if allow_patterns and not any(fnmatch.fnmatch(path, pat) for pat in allow_patterns): ignored_files.append(path) continue included_files.append(path) try: if file.size is not None: total_size += int(file.size) except Exception: pass return included_files, ignored_files, total_size def download(self, allow_patterns=None, ignore_patterns=None): repo_id = self.get_model_url() repo_type = self.check_repo_type(repo_id) if repo_type not in ["public", "gated"]: if repo_type == "private": msg = f"Repository {repo_id} is private and requires a token." if not self.hf_token: msg += "\n\nNo Hugging Face token found. Set one via the File menu." print(msg) model_downloaded_signal.failed.emit(msg) return if repo_type == "not_found": msg = f"Repository {repo_id} not found." print(msg) model_downloaded_signal.failed.emit(msg) return msg = f"Error checking repository {repo_id}." print(msg) model_downloaded_signal.failed.emit(msg) return if repo_type == "gated" and not self.hf_token: msg = ( f"Repository {repo_id} is gated and requires access and a token.\n\n" f"Visit https://huggingface.co/{repo_id} to request access, then set your " f"Hugging Face token via the File menu." 
) print(msg) model_downloaded_signal.failed.emit(msg) return local_dir = self.get_model_directory() local_dir.mkdir(parents=True, exist_ok=True) atexit.register(self.cleanup_incomplete_download) try: repo_files = self._list_repo_files(repo_id, use_token=(repo_type == "gated")) allow_patterns, final_ignore_patterns = self._select_patterns(repo_files, allow_patterns, ignore_patterns) included_files, ignored_files, total_size = self._filter_and_size(repo_files, allow_patterns, final_ignore_patterns) readable_total_size = humanfriendly.format_size(total_size, binary=True) print(f"\nTotal size to be downloaded: {readable_total_size}") print("\nFiles to be downloaded:") for f in included_files: print(f"- {f}") print(f"\nDownloading to {local_dir}...") download_kwargs = { "repo_id": repo_id, "local_dir": str(local_dir), "max_workers": 8, "local_dir_use_symlinks": False, "ignore_patterns": final_ignore_patterns, "allow_patterns": allow_patterns, "etag_timeout": 60 } if repo_type == "gated" and self.hf_token: download_kwargs["token"] = self.hf_token else: download_kwargs["token"] = False snapshot_download(**download_kwargs) print("\033[92mModel downloaded and ready to use.\033[0m") atexit.unregister(self.cleanup_incomplete_download) model_downloaded_signal.downloaded.emit(self.get_model_directory_name(), self.model_type) except Exception as e: msg = f"An error occurred during download: {str(e)}" print(msg) if local_dir.exists(): import shutil shutil.rmtree(local_dir) model_downloaded_signal.failed.emit(msg) def download_embedding_model(repo_id, local_dir=None): info = {"repo_id": repo_id} downloader = ModelDownloader(info, "vector") if local_dir: downloader._model_directory = Path(local_dir) downloader.local_dir = downloader.get_model_directory() downloader.download() def download_chat_model(repo_id, local_dir=None): info = {"repo_id": repo_id} downloader = ModelDownloader(info, "chat") if local_dir: downloader._model_directory = Path(local_dir) downloader.local_dir = downloader.get_model_directory() downloader.download() ================================================ FILE: gui/main_window.py ================================================ import sys from ctypes import windll, byref, sizeof, c_int from ctypes.wintypes import BOOL, HWND, DWORD from PySide6.QtCore import QTimer from PySide6.QtWidgets import ( QApplication, QWidget, QVBoxLayout, QTabWidget, QMenuBar, QHBoxLayout, QMessageBox ) from core.initialize import main as initialize_system from gui.metrics_bar import MetricsWidget as MetricsBar from gui.tabs import create_tabs from core.utilities import ( list_theme_files, load_stylesheet, ensure_theme_config, update_theme_in_config, make_theme_changer, download_kokoro_tts, download_with_threadpool, ) from gui.credentials import manage_credentials from chat.jeeves import launch_jeeves_process from core.constants import PROJECT_ROOT script_dir = PROJECT_ROOT class DocQA_GUI(QWidget): def __init__(self): super().__init__() initialize_system() self.metrics_bar = MetricsBar() self.tab_widget = create_tabs() self.init_ui() self.init_menu() self.jeeves_process = None self.set_dark_titlebar() def set_dark_titlebar(self): DWMWA_USE_IMMERSIVE_DARK_MODE = DWORD(20) set_window_attribute = windll.dwmapi.DwmSetWindowAttribute hwnd = HWND(int(self.winId())) rendering_policy = BOOL(True) set_window_attribute( hwnd, DWMWA_USE_IMMERSIVE_DARK_MODE, byref(rendering_policy), sizeof(rendering_policy) ) DWMWA_BORDER_COLOR = DWORD(34) black_color = c_int(0xFF000000) set_window_attribute( hwnd, DWMWA_BORDER_COLOR, 
byref(black_color), sizeof(black_color) ) def init_ui(self): self.setWindowTitle('VectorDB Plugin') self.setGeometry(300, 300, 820, 1000) self.setMinimumSize(350, 410) main_layout = QVBoxLayout(self) main_layout.addWidget(self.tab_widget) metrics_layout = QHBoxLayout() metrics_layout.addWidget(self.metrics_bar) self.metrics_bar.setMaximumHeight(80) main_layout.addLayout(metrics_layout) def init_menu(self): self.menu_bar = QMenuBar(self) self.layout().setMenuBar(self.menu_bar) self.file_menu = self.menu_bar.addMenu('File') self.theme_menu = self.file_menu.addMenu('Themes') for theme in list_theme_files(): self.theme_menu.addAction(theme).triggered.connect(make_theme_changer(theme)) self.hf_token_menu = self.file_menu.addAction('Hugging Face Access Token') self.hf_token_menu.triggered.connect(lambda: manage_credentials(self, 'hf')) self.minimax_key_menu = self.file_menu.addAction('MiniMax API Key') self.minimax_key_menu.triggered.connect(lambda: manage_credentials(self, 'minimax')) self.file_menu.addSeparator() self.chat_backends_menu = self.file_menu.addAction('Chat Backend Settings…') self.chat_backends_menu.triggered.connect(self.open_chat_backends_dialog) self.jeeves_action = self.menu_bar.addAction('Jeeves') self.jeeves_action.triggered.connect(self.open_chat_window) def open_chat_backends_dialog(self): from gui.dialogs.ai_backends_dialog import AIBackendsDialog AIBackendsDialog(self).exec() def open_chat_window(self): import multiprocessing self.jeeves_action.setEnabled(False) QTimer.singleShot(5000, lambda: self.jeeves_action.setEnabled(True)) required_folder = script_dir / 'Models' / 'vector' / 'BAAI--bge-small-en-v1.5' if not required_folder.exists() or not required_folder.is_dir(): QMessageBox.warning( self, "Ask Jeeves", "Before using Jeeves you must download the bge-small-en-v1.5 embedding model, which you can do from the Models tab. Jeeves is waiting." ) return tts_path = script_dir / "Models" / "tts" / "ctranslate2-4you--Kokoro-82M-light" if not tts_path.exists() or not tts_path.is_dir(): ret = QMessageBox.question( self, "Kokoro TTS Model Not Found", "The Kokoro TTS model is missing!\n\nWould you like to download it now?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes ) if ret == QMessageBox.Yes: def on_kokoro_download_complete(success, message): if success: QMessageBox.information( self, "Download Complete", "Kokoro TTS model has been downloaded successfully." 
) else: QMessageBox.critical( self, "Download Error", f"Failed to download Kokoro TTS model: {message}" ) download_with_threadpool(download_kokoro_tts, callback=on_kokoro_download_complete) return if self.jeeves_process and self.jeeves_process.is_alive(): self.jeeves_process.terminate() self.jeeves_process.join(timeout=3) if self.jeeves_process.is_alive(): self.jeeves_process.kill() self.jeeves_process.join() self.jeeves_process.close() if sys.platform == 'win32': multiprocessing.freeze_support() self.jeeves_process = multiprocessing.Process(target=launch_jeeves_process) self.jeeves_process.start() def closeEvent(self, event): if self.jeeves_process and self.jeeves_process.is_alive(): self.jeeves_process.terminate() self.jeeves_process.join(timeout=3) if self.jeeves_process.is_alive(): self.jeeves_process.kill() self.jeeves_process.join() self.jeeves_process.close() try: from db.process_manager import get_process_manager get_process_manager().cleanup_all(timeout=5.0) except Exception: pass docs_dir = PROJECT_ROOT / 'Docs_for_DB' for item in docs_dir.glob('*'): if item.is_file(): item.unlink() self.metrics_bar.stop_metrics_collector() for i in range(self.tab_widget.count()): tab = self.tab_widget.widget(i) if hasattr(tab, 'cleanup') and callable(tab.cleanup): tab.cleanup() super().closeEvent(event) def main(): from PySide6.QtCore import Qt if hasattr(QApplication, 'setHighDpiScaleFactorRoundingPolicy'): QApplication.setHighDpiScaleFactorRoundingPolicy(Qt.HighDpiScaleFactorRoundingPolicy.PassThrough) QApplication.setAttribute(Qt.AA_EnableHighDpiScaling) QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps) app = QApplication(sys.argv) theme = ensure_theme_config() app.setStyleSheet(load_stylesheet(theme)) ex = DocQA_GUI() ex.show() sys.exit(app.exec()) ================================================ FILE: gui/metrics_bar.py ================================================ from dataclasses import dataclass from collections import deque from datetime import datetime from enum import IntEnum from functools import lru_cache from typing import Optional, List import csv import subprocess import psutil from PySide6.QtCore import Qt, QObject, QPointF, QTimer, QThread, Signal from PySide6.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QProgressBar, QMenu from PySide6.QtGui import QPainter, QColor, QPolygon, QPainterPath, QPen, QPixmap, QLinearGradient from math import sin, cos, pi PALETTE = { "CPU": "#FF4136", "RAM": "#B10DC9", "GPU": "#0074D9", "VRAM": "#2ECC40", "GPU_POWER": "#FFD700" } @dataclass class SystemMetrics: timestamp: datetime cpu_usage: float ram_usage_percent: float gpu_utilization: Optional[float] = None vram_usage_percent: Optional[float] = None power_usage_percent: Optional[float] = None power_limit_percent: Optional[float] = None def is_nvidia_gpu_available(): try: subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT) return True except (FileNotFoundError, subprocess.CalledProcessError): return False HAS_NVIDIA_GPU = is_nvidia_gpu_available() if HAS_NVIDIA_GPU: import pynvml pynvml.nvmlInit() HANDLE = pynvml.nvmlDeviceGetHandleByIndex(0) def _shutdown_nvml(): try: pynvml.nvmlShutdown() except Exception: pass else: HANDLE = None class MetricsStore(QObject): metrics_added = Signal(object) def __init__(self, buffer_size: int = 100): super().__init__() self._history: deque[SystemMetrics] = deque(maxlen=buffer_size) def add_metrics(self, metrics: SystemMetrics) -> None: self._history.append(metrics) self.metrics_added.emit(metrics) def subscribe(self, callback): 
self.metrics_added.connect(callback) def unsubscribe(self, callback): try: self.metrics_added.disconnect(callback) except Exception: pass @property def history(self) -> List[SystemMetrics]: return list(self._history) class BatchCSVLogger(QObject): def __init__(self, filepath: str, flush_interval: int = 5000): super().__init__() self.filepath = filepath self.flush_interval = flush_interval self.buffer = [] self.file = open(self.filepath, 'w', newline='') self.writer = csv.writer(self.file) self.writer.writerow(['timestamp', 'cpu_usage', 'ram_usage_percent', 'gpu_utilization', 'vram_usage_percent', 'power_usage_percent']) self.timer = QTimer(self) self.timer.setInterval(self.flush_interval) self.timer.timeout.connect(self.flush) self.timer.start() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() def log(self, metrics): self.buffer.append(metrics) def flush(self): if not self.buffer: return for m in self.buffer: self.writer.writerow([m.timestamp.isoformat(), m.cpu_usage, m.ram_usage_percent, m.gpu_utilization if m.gpu_utilization is not None else '', m.vram_usage_percent if m.vram_usage_percent is not None else '', m.power_usage_percent if m.power_usage_percent is not None else '']) self.file.flush() self.buffer.clear() def close(self): self.timer.stop() self.flush() self.file.close() def __del__(self): try: self.close() except Exception: pass def collect_cpu_metrics(): cpu_times = psutil.cpu_times_percent(interval=None, percpu=True) cpu_percentages = [] for cpu in cpu_times: total_active = sum(v for f, v in cpu._asdict().items() if f not in ('idle', 'iowait')) cpu_percentages.append(total_active) return sum(cpu_percentages) / len(cpu_percentages) def collect_ram_metrics(): ram = psutil.virtual_memory() return ram.percent, ram.used def collect_gpu_metrics(handle): if handle is None: return None, None memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle) gpu_utilization = pynvml.nvmlDeviceGetUtilizationRates(handle).gpu vram_usage_percent = (memory_info.used / memory_info.total) * 100 if memory_info.total else 0 return gpu_utilization, vram_usage_percent def collect_power_metrics(handle): if handle is None: return None, None try: power_usage = pynvml.nvmlDeviceGetPowerUsage(handle) / 1000.0 except pynvml.NVMLError: return None, None try: power_limit = pynvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000.0 except pynvml.NVMLError_NotSupported: try: power_limit = pynvml.nvmlDeviceGetEnforcedPowerLimit(handle) / 1000.0 except pynvml.NVMLError: power_limit = None if power_limit and power_limit > 0: power_percentage = (power_usage / power_limit) * 100 else: power_percentage = 0 return power_percentage, power_limit class MetricsCollectorThread(QThread): metrics_updated = Signal(object) def __init__(self, interval: int = 200): super().__init__() self.interval = interval self.gpu_available = HAS_NVIDIA_GPU def _collect_once(self): try: cpu_usage = collect_cpu_metrics() ram_usage_percent, _ = collect_ram_metrics() if self.gpu_available: gpu_util, vram_usage = collect_gpu_metrics(HANDLE) power_usage, power_limit = collect_power_metrics(HANDLE) else: gpu_util = vram_usage = power_usage = power_limit = None metrics = SystemMetrics(timestamp=datetime.now(), cpu_usage=cpu_usage, ram_usage_percent=ram_usage_percent, gpu_utilization=gpu_util, vram_usage_percent=vram_usage, power_usage_percent=power_usage, power_limit_percent=power_limit) self.metrics_updated.emit(metrics) except Exception as e: print(f"Error collecting metrics: {e}") def run(self): timer = 
QTimer() timer.setInterval(self.interval) timer.timeout.connect(self._collect_once) timer.start() self.exec() def stop(self): self.quit() self.wait() class BaseVisualization(QWidget): _metric_mappings = [] _gpu_metric_mappings = [] def __init__(self, metrics_store: MetricsStore): super().__init__() self.metrics_store = metrics_store self.metrics_store.subscribe(self.update_metrics) self.has_nvidia_gpu = HAS_NVIDIA_GPU def _update_widget(self, widget, value): raise NotImplementedError def _format_label(self, prefix, value): return f"{prefix} {value:.1f}%" def update_metrics(self, m: SystemMetrics): for attr_name, widget_attr, label_attr, prefix in self._metric_mappings: value = getattr(m, attr_name) self._update_widget(getattr(self, widget_attr), value) getattr(self, label_attr).setText(self._format_label(prefix, value)) if self.has_nvidia_gpu: for attr_name, widget_attr, label_attr, prefix in self._gpu_metric_mappings: value = getattr(m, attr_name) if value is not None: self._update_widget(getattr(self, widget_attr), value) getattr(self, label_attr).setText(self._format_label(prefix, value)) def cleanup(self): self.metrics_store.unsubscribe(self.update_metrics) def color_for(name: str) -> str: return PALETTE[name] class BarVisualization(BaseVisualization): def __init__(self, metrics_store: MetricsStore): super().__init__(metrics_store) self.initUI() self._metric_mappings = [ ("cpu_usage", "cpu_bar", "cpu_percent_label", "CPU"), ("ram_usage_percent", "ram_bar", "ram_percent_label", "RAM"), ] if self.has_nvidia_gpu: self._gpu_metric_mappings = [ ("gpu_utilization", "gpu_bar", "gpu_percent_label", "GPU"), ("vram_usage_percent", "vram_bar", "vram_percent_label", "VRAM"), ("power_usage_percent", "power_bar", "power_percent_label", "GPU Power"), ] def _update_widget(self, widget, value): widget.setValue(int(value)) def _format_label(self, prefix, value): return f"{int(value)}%" def initUI(self): grid_layout = QGridLayout(self) grid_layout.setSpacing(0) grid_layout.setContentsMargins(0, 0, 0, 0) self.cpu_bar, self.cpu_percent_label = self.add_metric_to_grid("CPU Usage:", color_for("CPU"), grid_layout, 0) self.ram_bar, self.ram_percent_label = self.add_metric_to_grid("RAM Usage:", color_for("RAM"), grid_layout, 1) if self.has_nvidia_gpu: self.gpu_bar, self.gpu_percent_label = self.add_metric_to_grid("GPU Usage:", color_for("GPU"), grid_layout, 2) self.vram_bar, self.vram_percent_label = self.add_metric_to_grid("VRAM Usage:", color_for("VRAM"), grid_layout, 3) self.power_bar, self.power_percent_label = self.add_metric_to_grid("GPU Power:", color_for("GPU_POWER"), grid_layout, 4) def add_metric_to_grid(self, label_text, color, grid_layout, row): label = QLabel(label_text) grid_layout.addWidget(label, row, 0) percent_label = QLabel("0%") grid_layout.addWidget(percent_label, row, 1) progress_bar = self.create_progress_bar(color) grid_layout.addWidget(progress_bar, row, 2) return progress_bar, percent_label def create_progress_bar(self, color): bar = QProgressBar() bar.setMaximum(100) bar.setMaximumHeight(11) bar.setStyleSheet(f"QProgressBar {{ background-color: #1e2126; border: none; }}QProgressBar::chunk {{ background-color: {color}; }}") bar.setTextVisible(False) return bar @lru_cache(maxsize=8) def gradient_pixmap(color: str, height: int) -> QPixmap: pixmap = QPixmap(1, height) pixmap.fill(Qt.transparent) painter = QPainter(pixmap) gradient = QLinearGradient(0, 0, 0, height) fill_color = QColor(color) fill_color.setAlpha(60) gradient.setColorAt(0, fill_color) gradient.setColorAt(1, QColor(0, 0, 0, 
0)) painter.fillRect(pixmap.rect(), gradient) painter.end() return pixmap class Sparkline(QWidget): def __init__(self, max_values=125, color="#0074D9"): super().__init__() self.values = deque(maxlen=max_values) self.setFixedSize(125, 65) self.color = QColor(color) def add_value(self, value): self.values.append(value) self.update() def paintEvent(self, event): if not self.values: return painter = QPainter(self) painter.setRenderHint(QPainter.Antialiasing) width = self.width() height = self.height() margin = 5 min_value = 0 max_value = 100 value_range = max_value - min_value path = QPainterPath() x_step = (width - 2 * margin) / (len(self.values) - 1) if len(self.values) > 1 else 0 points = [] for i, value in enumerate(self.values): x = margin + i * x_step y = height - margin - (value / value_range) * (height - 2 * margin) points.append(QPointF(x, y)) if i == 0: path.moveTo(x, y) else: path.lineTo(x, y) fill_path = QPainterPath(path) fill_path.lineTo(points[-1].x(), height - margin) fill_path.lineTo(points[0].x(), height - margin) fill_path.closeSubpath() painter.save() painter.setClipPath(fill_path) grad_pm = gradient_pixmap(self.color.name(), height) for x in range(0, width, grad_pm.width()): painter.drawPixmap(x, 0, grad_pm) painter.restore() painter.setPen(QPen(self.color, 1)) painter.setBrush(Qt.NoBrush) painter.drawPath(path) class SparklineVisualization(BaseVisualization): def __init__(self, metrics_store: MetricsStore): super().__init__(metrics_store) self.initUI() self._metric_mappings = [ ("cpu_usage", "cpu_spark", "cpu_lbl", "CPU"), ("ram_usage_percent", "ram_spark", "ram_lbl", "RAM"), ] if self.has_nvidia_gpu: self._gpu_metric_mappings = [ ("gpu_utilization", "gpu_spark", "gpu_lbl", "GPU"), ("vram_usage_percent", "vram_spark", "vram_lbl", "VRAM"), ("power_usage_percent", "power_spark", "power_lbl", "GPU Power"), ] def _update_widget(self, widget, value): widget.add_value(value) def initUI(self): main_layout = QGridLayout(self) main_layout.setSpacing(1) main_layout.setContentsMargins(1, 1, 1, 1) def create_group(name, color_key): w = QWidget() l = QVBoxLayout(w) l.setSpacing(1) l.setContentsMargins(0, 0, 0, 0) s = Sparkline(color=color_for(color_key)) l.addWidget(s, alignment=Qt.AlignCenter) lbl = QLabel(f"{name} 0.0%") lbl.setAlignment(Qt.AlignCenter) l.addWidget(lbl, alignment=Qt.AlignCenter) return w, s, lbl cpu_group, self.cpu_spark, self.cpu_lbl = create_group("CPU", "CPU") main_layout.addWidget(cpu_group, 0, 0) ram_group, self.ram_spark, self.ram_lbl = create_group("RAM", "RAM") main_layout.addWidget(ram_group, 0, 1) if self.has_nvidia_gpu: gpu_group, self.gpu_spark, self.gpu_lbl = create_group("GPU", "GPU") main_layout.addWidget(gpu_group, 0, 2) vram_group, self.vram_spark, self.vram_lbl = create_group("VRAM", "VRAM") main_layout.addWidget(vram_group, 0, 3) power_group, self.power_spark, self.power_lbl = create_group("GPU Power", "GPU_POWER") main_layout.addWidget(power_group, 0, 4) for i in range(main_layout.columnCount()): main_layout.setColumnStretch(i, 1) class Speedometer(QWidget): def __init__(self, min_value=0, max_value=100, colors=None): super().__init__() self.min_value = min_value self.max_value = max_value self.current_value = 0 self.colors = colors or ["#00FF00", "#FFFF00", "#FF0000"] self.setFixedSize(105, 105) def set_value(self, value): self.current_value = max(self.min_value, min(self.max_value, value)) self.update() def get_color_at_angle(self, angle): t = angle / 180 if t <= 0: return QColor(self.colors[0]) if t >= 1: return QColor(self.colors[-1]) segment 
= t * (len(self.colors) - 1) idx = int(segment) t = segment - idx idx = min(idx, len(self.colors) - 2) c1 = QColor(self.colors[idx]) c2 = QColor(self.colors[idx + 1]) r = int(c1.red() * (1 - t) + c2.red() * t) g = int(c1.green() * (1 - t) + c2.green() * t) b = int(c1.blue() * (1 - t) + c2.blue() * t) return QColor(r, g, b) def paintEvent(self, event): painter = QPainter(self) painter.setRenderHint(QPainter.Antialiasing) w = self.width() h = self.height() cx = w / 2 cy = h / 2 r = min(w, h) / 2 * 0.7 start_angle = 180 * 16 for i in range(180): painter.setPen(self.get_color_at_angle(i)) painter.drawArc(cx - r, cy - r, r * 2, r * 2, start_angle - i * 16, -16) angle = 180 - (self.current_value - self.min_value) / (self.max_value - self.min_value) * 180 n_len = r * 0.9 n_w = 5 rad = angle * (pi / 180) tip_x = cx + n_len * cos(rad) tip_y = cy - n_len * sin(rad) perp = rad + pi / 2 hw = n_w / 2 p1 = QPointF(cx + hw * cos(perp), cy - hw * sin(perp)) p2 = QPointF(cx - hw * cos(perp), cy + hw * sin(perp)) needle = QPolygon([p1.toPoint(), p2.toPoint(), QPointF(tip_x, tip_y).toPoint()]) painter.setPen(Qt.NoPen) painter.setBrush(Qt.white) painter.drawPolygon(needle) class SpeedometerVisualization(BaseVisualization): def __init__(self, metrics_store: MetricsStore): super().__init__(metrics_store) self.initUI() self._metric_mappings = [ ("cpu_usage", "cpu_sm", "cpu_lbl", "CPU"), ("ram_usage_percent", "ram_sm", "ram_lbl", "RAM"), ] if self.has_nvidia_gpu: self._gpu_metric_mappings = [ ("gpu_utilization", "gpu_sm", "gpu_lbl", "GPU"), ("vram_usage_percent", "vram_sm", "vram_lbl", "VRAM"), ("power_usage_percent", "power_sm", "power_lbl", "GPU Power"), ] def _update_widget(self, widget, value): widget.set_value(value) def initUI(self): main_layout = QGridLayout(self) main_layout.setSpacing(1) main_layout.setContentsMargins(1, 1, 1, 1) def create_group(name, color_key=None): l = QVBoxLayout() l.setSpacing(2) sm = Speedometer(colors=["#00FF00", "#FFFF00", "#FF0000"]) sm.setFixedSize(105, 105) l.addWidget(sm, alignment=Qt.AlignCenter) lbl = QLabel(f"{name} 0.0%") lbl.setAlignment(Qt.AlignCenter) l.addWidget(lbl, alignment=Qt.AlignCenter) return l, sm, lbl cpu_group, self.cpu_sm, self.cpu_lbl = create_group("CPU") main_layout.addLayout(cpu_group, 0, 0) ram_group, self.ram_sm, self.ram_lbl = create_group("RAM") main_layout.addLayout(ram_group, 0, 1) if self.has_nvidia_gpu: gpu_group, self.gpu_sm, self.gpu_lbl = create_group("GPU") main_layout.addLayout(gpu_group, 0, 2) vram_group, self.vram_sm, self.vram_lbl = create_group("VRAM") main_layout.addLayout(vram_group, 0, 3) power_group, self.power_sm, self.power_lbl = create_group("GPU Power") main_layout.addLayout(power_group, 0, 4) for i in range(main_layout.columnCount()): main_layout.setColumnStretch(i, 1) @lru_cache(maxsize=8) def arc_background(w: int, h: int) -> QPixmap: pm = QPixmap(w, h) pm.fill(Qt.transparent) painter = QPainter(pm) painter.setRenderHint(QPainter.Antialiasing) r = min(w, h) / 2 - 10 c = QPointF(w / 2, h / 2) painter.setPen(QPen(QColor("#1e2126"), 8)) painter.drawArc(int(c.x() - r), int(c.y() - r), int(r * 2), int(r * 2), 180 * 16, -180 * 16) painter.end() return pm class ArcGraph(QWidget): def __init__(self, color="#0074D9"): super().__init__() self.color = QColor(color) self.value = 0 self.setFixedSize(100, 100) def set_value(self, value): self.value = min(100, max(0, value)) self.update() def paintEvent(self, event): bg = arc_background(self.width(), self.height()) painter = QPainter(self) painter.drawPixmap(0, 0, bg) 
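# Aside: arc_background() above, like gradient_pixmap() earlier in this file, memoizes a fully
# rendered QPixmap with functools.lru_cache so the static part of the widget is rasterized once
# and only blitted on later paints.  A minimal sketch of that caching idea in isolation --
# StaticBackdrop and checkerboard() are illustrative names, not part of this repository:
from functools import lru_cache
from PySide6.QtGui import QColor, QPainter, QPixmap
from PySide6.QtWidgets import QWidget

@lru_cache(maxsize=8)
def checkerboard(w: int, h: int, cell: int = 10) -> QPixmap:
    pm = QPixmap(w, h)
    pm.fill(QColor("#1e2126"))
    painter = QPainter(pm)
    for y in range(0, h, cell):
        for x in range(0, w, cell):
            if (x // cell + y // cell) % 2 == 0:
                painter.fillRect(x, y, cell, cell, QColor("#2a2e35"))
    painter.end()
    return pm  # cached per (w, h, cell); cheap to reuse on every repaint

class StaticBackdrop(QWidget):
    def paintEvent(self, event):
        # blit the cached pixmap instead of re-rendering the static background each frame
        QPainter(self).drawPixmap(0, 0, checkerboard(self.width(), self.height()))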
painter.setRenderHint(QPainter.Antialiasing) w = self.width() h = self.height() r = min(w, h) / 2 - 10 c = QPointF(w / 2, h / 2) painter.setPen(QPen(self.color, 8)) span = -(self.value / 100.0) * 180 painter.drawArc(int(c.x() - r), int(c.y() - r), int(r * 2), int(r * 2), 180 * 16, span * 16) painter.setPen(Qt.white) f = painter.font() f.setPointSize(14) painter.setFont(f) painter.drawText(self.rect(), Qt.AlignCenter, f"{int(self.value)}%") class ArcGraphVisualization(BaseVisualization): def __init__(self, metrics_store: MetricsStore): super().__init__(metrics_store) self.initUI() self._metric_mappings = [ ("cpu_usage", "cpu_arc", "cpu_lbl", "CPU"), ("ram_usage_percent", "ram_arc", "ram_lbl", "RAM"), ] if self.has_nvidia_gpu: self._gpu_metric_mappings = [ ("gpu_utilization", "gpu_arc", "gpu_lbl", "GPU"), ("vram_usage_percent", "vram_arc", "vram_lbl", "VRAM"), ("power_usage_percent", "power_arc", "power_lbl", "GPU Power"), ] def _update_widget(self, widget, value): widget.set_value(value) def initUI(self): main_layout = QGridLayout(self) main_layout.setSpacing(1) main_layout.setContentsMargins(1, 1, 1, 1) def create_group(name, color_key): l = QVBoxLayout() l.setSpacing(2) arc = ArcGraph(color=color_for(color_key)) l.addWidget(arc, alignment=Qt.AlignCenter) lbl = QLabel(name) lbl.setAlignment(Qt.AlignCenter) l.addWidget(lbl, alignment=Qt.AlignCenter) return l, arc, lbl cpu_group, self.cpu_arc, self.cpu_lbl = create_group("CPU", "CPU") main_layout.addLayout(cpu_group, 0, 0) ram_group, self.ram_arc, self.ram_lbl = create_group("RAM", "RAM") main_layout.addLayout(ram_group, 0, 1) if self.has_nvidia_gpu: gpu_group, self.gpu_arc, self.gpu_lbl = create_group("GPU", "GPU") main_layout.addLayout(gpu_group, 0, 2) vram_group, self.vram_arc, self.vram_lbl = create_group("VRAM", "VRAM") main_layout.addLayout(vram_group, 0, 3) power_group, self.power_arc, self.power_lbl = create_group("GPU Power", "GPU_POWER") main_layout.addLayout(power_group, 0, 4) for i in range(main_layout.columnCount()): main_layout.setColumnStretch(i, 1) class VizType(IntEnum): BAR = 0 SPARKLINE = 1 SPEEDO = 2 ARC = 3 VIZ_FACTORY = { VizType.BAR: BarVisualization, VizType.SPARKLINE: SparklineVisualization, VizType.SPEEDO: SpeedometerVisualization, VizType.ARC: ArcGraphVisualization } class MetricsWidget(QWidget): def __init__(self, parent=None): super().__init__(parent) self.metrics_store = MetricsStore(buffer_size=100) self.init_ui() self.current_visualization_type = VizType.SPARKLINE self.setToolTip("Right click for display options") self.collector_thread = MetricsCollectorThread() self.collector_thread.metrics_updated.connect(self.metrics_store.add_metrics) self.start_metrics_collector() def init_ui(self): self.layout = QVBoxLayout(self) self.layout.setContentsMargins(0, 0, 0, 0) self.current_visualization = VIZ_FACTORY[VizType.SPARKLINE](self.metrics_store) self.layout.addWidget(self.current_visualization) def contextMenuEvent(self, event): menu = QMenu(self) visual_menu = menu.addMenu("Visualization") bar_action = visual_menu.addAction("Bar") spark_action = visual_menu.addAction("Sparkline") speed_action = visual_menu.addAction("Speedometer") arc_action = visual_menu.addAction("Arc") actions_map = {bar_action: VizType.BAR, spark_action: VizType.SPARKLINE, speed_action: VizType.SPEEDO, arc_action: VizType.ARC} actions_map_inv = {v: k for k, v in actions_map.items()} actions_map_inv[self.current_visualization_type].setCheckable(True) actions_map_inv[self.current_visualization_type].setChecked(True) menu.addSeparator() running = 
self.collector_thread and self.collector_thread.isRunning() control_action = menu.addAction("Stop Monitoring" if running else "Start Monitoring") action = menu.exec_(event.globalPos()) if action in actions_map: self.change_visualization(actions_map[action]) elif action == control_action: if running: self.stop_metrics_collector() else: self.start_metrics_collector() def change_visualization(self, kind: VizType): if kind == self.current_visualization_type: return self.current_visualization_type = kind self.current_visualization.cleanup() self.layout.removeWidget(self.current_visualization) self.current_visualization.deleteLater() self.current_visualization = VIZ_FACTORY[kind](self.metrics_store) self.current_visualization.setToolTip("Right click for display options") self.layout.addWidget(self.current_visualization) def start_metrics_collector(self): if not self.collector_thread.isRunning(): self.collector_thread.start() def stop_metrics_collector(self): if self.collector_thread.isRunning(): self.collector_thread.stop() def cleanup(self): if self.collector_thread.isRunning(): self.collector_thread.stop() self.current_visualization.cleanup() if HAS_NVIDIA_GPU: try: import pynvml pynvml.nvmlShutdown() except Exception: pass def closeEvent(self, event): self.cleanup() super().closeEvent(event) ================================================ FILE: gui/tabs.py ================================================ from PySide6.QtWidgets import QTabWidget from gui.tabs_settings.settings import GuiSettingsTab from gui.tabs_tools.tools import GuiSettingsTab as ToolsSettingsTab from gui.tabs_databases.create import DatabasesTab from gui.tabs_models.models import VectorModelsTab from gui.tabs_databases.query import DatabaseQueryTab from gui.tabs_databases.manage import ManageDatabasesTab def create_tabs(): tab_widget = QTabWidget() tab_widget.setTabPosition(QTabWidget.South) tab_font = tab_widget.font() tab_font.setPointSize(13) tab_widget.setFont(tab_font) tabs = [ (GuiSettingsTab(), 'Settings'), (VectorModelsTab(), 'Models'), (ToolsSettingsTab(), 'Tools'), (DatabasesTab(), 'Create Database'), (ManageDatabasesTab(), 'Manage Databases'), (DatabaseQueryTab(), 'Query Database') ] for tab, name in tabs: tab_widget.addTab(tab, name) return tab_widget ================================================ FILE: gui/tabs_databases/__init__.py ================================================ ================================================ FILE: gui/tabs_databases/create.py ================================================ import os import sys import time import gc import json import shutil import subprocess from pathlib import Path import yaml from PySide6.QtCore import QDir, QRegularExpression, QThread, QTimer, Qt, Signal from PySide6.QtGui import QAction, QRegularExpressionValidator from PySide6.QtWidgets import QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QMessageBox, QTreeView, QFileSystemModel, QMenu, QGroupBox, QLabel, QLineEdit, QGridLayout, QSizePolicy, QComboBox from db.database_interactions import create_vector_db_in_process from db.choose_documents import choose_documents_directory from core.utilities import check_preconditions_for_db_creation, open_file, delete_file, backup_database, my_cprint from gui.download_model import model_downloaded_signal from core.constants import TOOLTIPS, PROJECT_ROOT class VectorDBWorker(QThread): """Runs DB creation in a completely separate Python interpreter via subprocess.Popen, with stdout drained inside the thread's run() and progress emitted via Qt signals. 
subprocess.Popen (as opposed to multiprocessing.Process) is critical on Windows with PySide6: multiprocessing's 'spawn' inherits DLL state from the GUI process (TileDB, CUDA, torch) which causes access violations (0xC0000005) in the child. See dev/production_integration_log.md (Phase 6). """ progress = Signal(str) finished = Signal(bool, int, str) def __init__(self, database_name, parent=None): super().__init__(parent) self.database_name = database_name self._process = None self._cancelled = False def run(self): try: cmd = [ sys.executable, "-c", "from db.database_interactions import create_vector_db_in_process; " f"create_vector_db_in_process({self.database_name!r})" ] env = {**os.environ, "PYTHONUNBUFFERED": "1"} self.progress.emit("Initializing database creation...") self._process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1, cwd=str(PROJECT_ROOT), env=env, ) for line in self._process.stdout: line = line.rstrip("\n") if line.strip(): print(f" [DB Creation] {line}", flush=True) self.progress.emit(line) self._process.wait() exit_code = self._process.returncode if self._cancelled: self.finished.emit(False, exit_code, "Cancelled by user.") elif exit_code == 0: self.finished.emit(True, exit_code, "Database created successfully!") else: self.finished.emit( False, exit_code, f"Database build failed (exit code {exit_code}). " "Check the log window for details." ) except Exception as e: import traceback traceback.print_exc() self.finished.emit(False, -1, f"Database creation failed: {e}") def cancel(self): self._cancelled = True if self._process and self._process.poll() is None: self._process.terminate() class CustomFileSystemModel(QFileSystemModel): def __init__(self, parent=None): super().__init__(parent) self.setFilter(QDir.Files) class DatabasesTab(QWidget): CREATE_DB_BUTTON_LABEL = "Create Vector Database" CREATE_DB_BUTTON_BUSY_LABEL = "Creating..." 
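# Condensed, standalone sketch of the child-interpreter pattern that VectorDBWorker.run() above
# relies on (same assumptions as its docstring, not a second implementation): launch
# `python -c ...` with unbuffered output, drain stdout line by line so progress can be forwarded,
# then inspect the exit code.  run_in_child_interpreter and on_line are illustrative names only.
import os
import subprocess
import sys

def run_in_child_interpreter(snippet: str, on_line) -> int:
    cmd = [sys.executable, "-c", snippet]
    env = {**os.environ, "PYTHONUNBUFFERED": "1"}  # make prints in the child flush immediately
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merge stderr so one reader sees everything
        text=True,
        bufsize=1,
        env=env,
    )
    for line in proc.stdout:       # drained in the caller's thread, e.g. a QThread.run()
        on_line(line.rstrip("\n"))
    proc.wait()
    return proc.returncode

# usage (hypothetical): exit_code = run_in_child_interpreter("print('hello')", print)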
def __init__(self): super().__init__() model_downloaded_signal.downloaded.connect(self.update_model_combobox) self.layout = QVBoxLayout(self) self.documents_group_box = self.create_group_box("Files To Add to Database", "Docs_for_DB") self.groups = {self.documents_group_box: 1} self.info_label = QLabel() self.info_label.setTextFormat(Qt.RichText) self.info_label.setAlignment(Qt.AlignLeft | Qt.AlignVCenter) self.info_label.setStyleSheet("padding: 4px 6px;") self.layout.addWidget(self.info_label) grid_layout_top_buttons = QGridLayout() self.choose_docs_button = QPushButton("Choose Files") self.choose_docs_button.setToolTip(TOOLTIPS["CHOOSE_FILES"]) self.choose_docs_button.clicked.connect(choose_documents_directory) self.model_combobox = QComboBox() self.model_combobox.setToolTip(TOOLTIPS["SELECT_VECTOR_MODEL"]) self.populate_model_combobox() self.model_combobox.currentIndexChanged.connect(self.on_model_selected) self.model_combobox.activated.connect(self.refresh_model_combobox) self.create_db_button = QPushButton(self.CREATE_DB_BUTTON_LABEL) self.create_db_button.setToolTip(TOOLTIPS["CREATE_VECTOR_DB"]) self.create_db_button.clicked.connect(self.on_create_db_clicked) self.create_db_button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) self.cancel_db_button = QPushButton("Cancel") self.cancel_db_button.setToolTip("Cancel an in-progress database creation and remove any partial files.") self.cancel_db_button.clicked.connect(self.on_cancel_db_clicked) self.cancel_db_button.setEnabled(False) create_cancel_box = QHBoxLayout() create_cancel_box.addWidget(self.create_db_button) create_cancel_box.addWidget(self.cancel_db_button) grid_layout_top_buttons.addWidget(self.choose_docs_button, 0, 0) grid_layout_top_buttons.addWidget(self.model_combobox, 0, 1) grid_layout_top_buttons.addLayout(create_cancel_box, 0, 2) number_of_columns = 3 for column_index in range(number_of_columns): grid_layout_top_buttons.setColumnStretch(column_index, 1) hbox2 = QHBoxLayout() self.database_name_input = QLineEdit() self.database_name_input.setToolTip(TOOLTIPS["DATABASE_NAME_INPUT"]) self.database_name_input.setPlaceholderText("Enter database name") self.database_name_input.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) regex = QRegularExpression("^[a-z0-9_-]*$") validator = QRegularExpressionValidator(regex, self.database_name_input) self.database_name_input.setValidator(validator) hbox2.addWidget(self.database_name_input) self.layout.addLayout(grid_layout_top_buttons) self.layout.addLayout(hbox2) self.sync_combobox_with_config() self.db_worker = None self.current_model_name = None self.current_database_name = None self._refresh_info_label() self.info_refresh_timer = QTimer(self) self.info_refresh_timer.setInterval(1000) self.info_refresh_timer.timeout.connect(self._refresh_info_label) self.info_refresh_timer.start() def _validation_failed(self, message: str): QMessageBox.warning(self, "Validation Failed", message) self.reenable_create_db_button() def refresh_model_combobox(self, index): current_text = self.model_combobox.currentText() self.populate_model_combobox() idx = self.model_combobox.findText(current_text) if idx >= 0: self.model_combobox.setCurrentIndex(idx) def update_model_combobox(self, model_name, model_type): if model_type == "vector": self.populate_model_combobox() self.sync_combobox_with_config() def populate_model_combobox(self): self.model_combobox.clear() self.model_combobox.addItem("Select a model", None) script_dir = PROJECT_ROOT vector_dir = script_dir / "Models" / "vector" if not 
vector_dir.exists(): return for folder in vector_dir.iterdir(): if folder.is_dir(): display_name = folder.name full_path = str(folder) self.model_combobox.addItem(display_name, full_path) def sync_combobox_with_config(self): config_path = PROJECT_ROOT / "config.yaml" if config_path.exists(): with open(config_path, 'r', encoding='utf-8') as file: config_data = yaml.safe_load(file) or {} current_model = config_data.get("EMBEDDING_MODEL_NAME") if current_model: model_index = self.model_combobox.findData(current_model) if model_index != -1: self.model_combobox.setCurrentIndex(model_index) else: self.model_combobox.setCurrentIndex(0) else: self.model_combobox.setCurrentIndex(0) else: self.model_combobox.setCurrentIndex(0) def on_model_selected(self, index): selected_path = self.model_combobox.itemData(index) config_path = PROJECT_ROOT / "config.yaml" config_data = {} if config_path.exists(): with open(config_path, 'r', encoding='utf-8') as file: config_data = yaml.safe_load(file) or {} if selected_path: config_data["EMBEDDING_MODEL_NAME"] = selected_path if "stella" in selected_path.lower() or "static-retrieval" in selected_path.lower(): config_data["EMBEDDING_MODEL_DIMENSIONS"] = 1024 else: config_json_path = Path(selected_path) / "config.json" if config_json_path.exists(): with open(config_json_path, 'r', encoding='utf-8') as json_file: model_config = json.load(json_file) embedding_dimensions = model_config.get("hidden_size") or model_config.get("d_model") if embedding_dimensions and isinstance(embedding_dimensions, int): config_data["EMBEDDING_MODEL_DIMENSIONS"] = embedding_dimensions else: config_data.pop("EMBEDDING_MODEL_NAME", None) config_data.pop("EMBEDDING_MODEL_DIMENSIONS", None) with open(config_path, 'w', encoding='utf-8') as file: yaml.safe_dump(config_data, file, allow_unicode=True) def create_group_box(self, title, directory_name): group_box = QGroupBox(title) layout = QVBoxLayout() tree_view = self.setup_directory_view(directory_name) layout.addWidget(tree_view) group_box.setLayout(layout) self.layout.addWidget(group_box) group_box.toggled.connect(lambda checked, gb=group_box: self.toggle_group_box(gb, checked)) return group_box def _refresh_docs_model(self): if hasattr(self.docs_model, 'refresh'): self.docs_model.refresh() elif hasattr(self.docs_model, 'reindex'): self.docs_model.reindex() def _refresh_info_label(self): script_dir = PROJECT_ROOT docs_dir = script_dir / "Docs_for_DB" try: file_count = sum(1 for p in docs_dir.iterdir() if p.is_file()) if docs_dir.exists() else 0 except OSError: file_count = 0 config_path = script_dir / "config.yaml" config = {} if config_path.exists(): try: with open(config_path, "r", encoding="utf-8") as f: config = yaml.safe_load(f) or {} except Exception: config = {} db_cfg = (config.get("database") or {}) chunk_size = db_cfg.get("chunk_size", "—") chunk_overlap = db_cfg.get("chunk_overlap", "—") use_half = bool(db_cfg.get("half", False)) precision_str = self._compute_precision_str(config, use_half) text = ( f"Files queued: {file_count}" f"  |  Chunk size: {chunk_size}" f"  |  Overlap: {chunk_overlap}" f"  |  Embedding precision: {precision_str}" ) self.info_label.setText(text) def _compute_precision_str(self, config, use_half): from core.constants import VECTOR_MODELS model_path = config.get("EMBEDDING_MODEL_NAME") if not model_path: return "—" cache_dir_name = Path(model_path).name native_precision = None for vendor_models in VECTOR_MODELS.values(): for model_info in vendor_models: if model_info.get("cache_dir") == cache_dir_name: 
native_precision = model_info.get("precision", "float32") break if native_precision: break if not native_precision: return "unknown" try: import torch device = "cuda" if torch.cuda.is_available() else "cpu" except Exception: device = "cpu" try: from core.utilities import get_appropriate_dtype dtype = get_appropriate_dtype(device, use_half, native_precision) return str(dtype).split(".")[-1] except Exception: return native_precision def setup_directory_view(self, directory_name): tree_view = QTreeView() model = CustomFileSystemModel() tree_view.setModel(model) tree_view.setSelectionMode(QTreeView.ExtendedSelection) script_dir = PROJECT_ROOT directory_path = script_dir / directory_name model.setRootPath(str(directory_path)) tree_view.setRootIndex(model.index(str(directory_path))) tree_view.hideColumn(1) tree_view.hideColumn(2) tree_view.hideColumn(3) tree_view.doubleClicked.connect(self.on_double_click) tree_view.setContextMenuPolicy(Qt.CustomContextMenu) tree_view.customContextMenuRequested.connect(self.on_context_menu) if directory_name == "Docs_for_DB": self.docs_model = model self.docs_refresh = QTimer(self) self.docs_refresh.setInterval(500) self.docs_refresh.timeout.connect(self._refresh_docs_model) return tree_view def on_double_click(self, index): tree_view = self.sender() model = tree_view.model() file_path = model.filePath(index) open_file(file_path) def on_context_menu(self, point): tree_view = self.sender() context_menu = QMenu(self) delete_action = QAction("Delete File", self) context_menu.addAction(delete_action) delete_action.triggered.connect(lambda: self.on_delete_file(tree_view)) context_menu.exec_(tree_view.viewport().mapToGlobal(point)) def on_delete_file(self, tree_view): selected_indexes = tree_view.selectedIndexes() model = tree_view.model() for index in selected_indexes: if index.column() == 0: file_path = model.filePath(index) delete_file(file_path) def on_create_db_clicked(self): if self.model_combobox.currentIndex() == 0: QMessageBox.warning(self, "No Model Selected", "Please select a model before creating a database.") return database_name = self.database_name_input.text().strip() if not database_name: QMessageBox.warning(self, "Database Name Required", "Please enter a database name before creating a database.") return docs_dir = PROJECT_ROOT / "Docs_for_DB" if not docs_dir.exists() or not any(p for p in docs_dir.iterdir() if p.is_file()): QMessageBox.warning( self, "No Files To Add", "The Docs_for_DB folder is empty. Add at least one file before creating a database." ) return self.create_db_button.setDisabled(True) self.create_db_button.setText(self.CREATE_DB_BUTTON_BUSY_LABEL) self.choose_docs_button.setDisabled(True) self.model_combobox.setDisabled(True) self.database_name_input.setDisabled(True) self.cancel_db_button.setEnabled(True) model_name = self.model_combobox.currentText() self.current_database_name = database_name self.current_model_name = model_name docs_dir = PROJECT_ROOT / "Docs_for_DB" has_pdfs = any(p.suffix.lower() == ".pdf" for p in docs_dir.iterdir() if p.is_file()) skip_ocr = False if has_pdfs: reply = QMessageBox.question(self, "OCR Check", "PDF files detected. Do you want to check if any of the PDFs need OCR? 
" "If there are a lot of PDFs, it is time-consuming but strongly recommended.", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes) skip_ocr = (reply == QMessageBox.No) self.start_database_creation(database_name, model_name, skip_ocr) def start_database_creation(self, database_name, model_name, skip_ocr): try: script_dir = PROJECT_ROOT ok, msg = check_preconditions_for_db_creation(script_dir, database_name, skip_ocr=skip_ocr) if not ok: self._validation_failed(msg) return self.db_worker = VectorDBWorker(database_name, parent=self) self.db_worker.finished.connect(self.on_worker_finished) self.db_worker.start() my_cprint(f"Started database creation for: {database_name}", "green") except Exception as e: self._validation_failed(f"Failed to start database creation: {str(e)}") def on_cancel_db_clicked(self): if self.db_worker is None or not self.db_worker.isRunning(): return self.cancel_db_button.setEnabled(False) self.cancel_db_button.setText("Cancelling...") self.db_worker.cancel() def on_worker_finished(self, success: bool, exit_code: int, message: str): was_cancelled = (not success) and message == "Cancelled by user." try: if was_cancelled: if self.current_database_name: partial_dir = PROJECT_ROOT / "Vector_DB" / self.current_database_name if partial_dir.exists(): shutil.rmtree(partial_dir, ignore_errors=True) QMessageBox.information( self, "Cancelled", "Database creation was cancelled and any partial files were removed." ) elif success: my_cprint(f"{self.current_model_name} removed from memory.", "red") self.update_config_with_database_name() backup_database(self.current_database_name) QMessageBox.information(self, "Success", message) else: QMessageBox.critical(self, "Error", message) except Exception as e: QMessageBox.critical(self, "Error", f"Error handling completion: {e}") finally: if self.db_worker is not None: self.db_worker.deleteLater() self.db_worker = None self.reenable_create_db_button() def update_config_with_database_name(self): config_path = PROJECT_ROOT / "config.yaml" if config_path.exists(): with open(config_path, 'r', encoding='utf-8') as file: config = yaml.safe_load(file) or {} model = config.get('EMBEDDING_MODEL_NAME') chunk_size = config.get('database', {}).get('chunk_size') chunk_overlap = config.get('database', {}).get('chunk_overlap') if 'created_databases' not in config or not isinstance(config['created_databases'], dict): config['created_databases'] = {} config['created_databases'][self.current_database_name] = { 'model': model, 'chunk_size': chunk_size, 'chunk_overlap': chunk_overlap } with open(config_path, 'w', encoding='utf-8') as file: yaml.safe_dump(config, file, allow_unicode=True) def reenable_create_db_button(self): self.create_db_button.setDisabled(False) self.create_db_button.setText(self.CREATE_DB_BUTTON_LABEL) self.choose_docs_button.setDisabled(False) self.model_combobox.setDisabled(False) self.database_name_input.setDisabled(False) self.cancel_db_button.setEnabled(False) self.cancel_db_button.setText("Cancel") self.current_database_name = None self.current_model_name = None gc.collect() def closeEvent(self, event): if self.db_worker is not None and self.db_worker.isRunning(): self.db_worker.cancel() self.db_worker.wait(5000) event.accept() def toggle_group_box(self, group_box, checked): self.groups[group_box] = 1 if checked else 0 self.adjust_stretch() def adjust_stretch(self): for group, stretch in self.groups.items(): self.layout.setStretchFactor(group, stretch if group.isChecked() else 0) ================================================ FILE: 
gui/tabs_databases/manage.py ================================================ import shutil import sqlite3 from pathlib import Path import yaml from PySide6.QtCore import Qt, QAbstractTableModel from PySide6.QtGui import QAction, QColor from PySide6.QtWidgets import ( QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QTableView, QMenu, QGroupBox, QLabel, QComboBox, QMessageBox, QHeaderView ) from core.utilities import open_file from core.constants import PROJECT_ROOT class SQLiteTableModel(QAbstractTableModel): def __init__(self, data=None): super().__init__() self._data = data or [] self._headers = ["File Name"] def data(self, index, role): if role == Qt.DisplayRole: return self._data[index.row()][0] elif role == Qt.ForegroundRole: return QColor('white') return None def rowCount(self, index): return len(self._data) def columnCount(self, index): return 1 def headerData(self, section, orientation, role): if role == Qt.DisplayRole and orientation == Qt.Horizontal: return self._headers[section] return None class RefreshingComboBox(QComboBox): def __init__(self, parent=None): super().__init__(parent) self.addItem("Select a database...") self.setItemData(0, QColor('gray'), Qt.ForegroundRole) self.setCurrentIndex(0) def showPopup(self): current_text = self.currentText() self.blockSignals(True) self.clear() self.addItem("Select a database...") self.setItemData(0, QColor('gray'), Qt.ForegroundRole) databases = self.parent().load_created_databases() self.addItems(databases) if current_text and current_text in databases: index = self.findText(current_text) if index >= 0: self.setCurrentIndex(index) else: self.setCurrentIndex(0) else: self.setCurrentIndex(0) self.blockSignals(False) super().showPopup() class ManageDatabasesTab(QWidget): def __init__(self): super().__init__() self.config_path = PROJECT_ROOT / "config.yaml" self.created_databases = self.load_created_databases() self.layout = QVBoxLayout(self) self.documents_group_box = self.create_group_box_with_table_view("Files in Selected Database") self.layout.addWidget(self.documents_group_box) self.database_info_layout = QHBoxLayout() self.database_info_label = QLabel("No database selected.") self.database_info_label.setTextFormat(Qt.RichText) self.database_info_layout.addWidget(self.database_info_label) self.layout.addLayout(self.database_info_layout) self.buttons_layout = QHBoxLayout() self.pull_down_menu = RefreshingComboBox(self) self.pull_down_menu.activated.connect(self.update_table_view_and_info_label) self.buttons_layout.addWidget(self.pull_down_menu) self.create_buttons() self.layout.addLayout(self.buttons_layout) def load_created_databases(self): if self.config_path.exists(): with open(self.config_path, 'r', encoding='utf-8') as file: config = yaml.safe_load(file) databases = list(config.get('created_databases', {}).keys()) return [db for db in databases if db != "user_manual"] return [] def display_no_databases_message(self): self.model._data = [] self.model.layoutChanged.emit() self.documents_group_box.hide() self.database_info_label.setText("No database selected.") def create_group_box_with_table_view(self, title): group_box = QGroupBox(title) layout = QVBoxLayout() self.table_view = QTableView() self.model = SQLiteTableModel() self.table_view.setModel(self.model) self.table_view.setSelectionMode(QTableView.SingleSelection) self.table_view.setSelectionBehavior(QTableView.SelectRows) self.table_view.doubleClicked.connect(self.on_double_click) self.table_view.setContextMenuPolicy(Qt.CustomContextMenu) 
self.table_view.customContextMenuRequested.connect(self.show_context_menu) self.table_view.horizontalHeader().setStretchLastSection(True) self.table_view.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) layout.addWidget(self.table_view) group_box.setLayout(layout) return group_box def update_table_view_and_info_label(self, index): selected_database = self.pull_down_menu.currentText() if selected_database == "Select a database...": self.display_no_databases_message() return if selected_database: self.documents_group_box.show() db_path = PROJECT_ROOT / "Vector_DB" / selected_database / "metadata.db" if db_path.exists(): try: conn = sqlite3.connect(str(db_path)) cursor = conn.cursor() cursor.execute("SELECT file_name, file_path FROM document_metadata") data = cursor.fetchall() conn.close() self.model._data = [(row[0], row[1]) for row in data] self.model.layoutChanged.emit() if self.config_path.exists(): with open(self.config_path, 'r', encoding='utf-8') as file: config = yaml.safe_load(file) db_config = config.get('created_databases', {}).get(selected_database, {}) model_path = db_config.get('model', '') model_name = Path(model_path).name chunk_size = db_config.get('chunk_size', '') chunk_overlap = db_config.get('chunk_overlap', '') info_text = ( f'Name: "{selected_database}" ' f'| ' f'Model: "{model_name}" ' f'| ' f'Chunk size/overlap: {chunk_size} / {chunk_overlap}' ) self.database_info_label.setText(info_text) else: self.database_info_label.setText("Configuration missing.") except sqlite3.Error as e: QMessageBox.warning(self, "Database Error", f"An error occurred while accessing the database: {e}") self.display_no_databases_message() else: self.display_no_databases_message() else: self.display_no_databases_message() def on_double_click(self, index): selected_database = self.pull_down_menu.currentText() if selected_database and selected_database != "Select a database...": file_path = self.model._data[index.row()][1] if Path(file_path).exists(): open_file(file_path) else: QMessageBox.warning(self, "Error", f"File not found at the specified path: {file_path}") else: QMessageBox.warning(self, "Error", "No database selected.") def create_buttons(self): self.delete_database_button = QPushButton("Delete Database") self.buttons_layout.addWidget(self.delete_database_button) self.delete_database_button.clicked.connect(self.delete_selected_database) def delete_selected_database(self): selected_database = self.pull_down_menu.currentText() if not selected_database or selected_database == "Select a database...": QMessageBox.warning(self, "Delete Database", "No database selected.") return reply = QMessageBox.question( self, 'Delete Database', "This cannot be undone.\nClick OK to proceed or Cancel to back out.", QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Cancel ) if reply == QMessageBox.Ok: self.model.beginResetModel() self.model._data = [] self.model.endResetModel() if self.config_path.exists(): try: with open(self.config_path, 'r', encoding='utf-8') as file: config = yaml.safe_load(file) if 'created_databases' in config and selected_database in config['created_databases']: del config['created_databases'][selected_database] config.setdefault('database', {})['database_to_search'] = '' with open(self.config_path, 'w', encoding='utf-8') as file: yaml.safe_dump(config, file) base_dir = PROJECT_ROOT deletion_failed = False for folder_name in ["Vector_DB", "Vector_DB_Backup"]: dir_path = base_dir / folder_name / selected_database if dir_path.exists(): shutil.rmtree(dir_path, 
ignore_errors=True) if dir_path.exists(): deletion_failed = True print(f"Failed to delete: {dir_path}") if deletion_failed: QMessageBox.warning( self, "Delete Database", "Some files/folders could not be deleted. Please check manually." ) else: QMessageBox.information( self, "Delete Database", f"Database '{selected_database}' and associated files have been deleted." ) self.refresh_pull_down_menu() self.update_table_view_and_info_label(-1) except Exception as e: QMessageBox.warning(self, "Delete Database", f"An error occurred: {e}") else: QMessageBox.warning(self, "Delete Database", "Configuration file missing or corrupted.") def refresh_pull_down_menu(self): self.created_databases = self.load_created_databases() self.pull_down_menu.blockSignals(True) self.pull_down_menu.clear() self.pull_down_menu.addItem("Select a database...") self.pull_down_menu.setItemData(0, QColor('gray'), Qt.ForegroundRole) self.pull_down_menu.addItems(self.created_databases) if self.created_databases: self.pull_down_menu.setCurrentIndex(0) else: self.display_no_databases_message() self.pull_down_menu.blockSignals(False) def show_context_menu(self, position): context_menu = QMenu(self) delete_action = QAction("Delete File", self) delete_action.triggered.connect(self.delete_selected_file) context_menu.addAction(delete_action) context_menu.exec_(self.table_view.viewport().mapToGlobal(position)) def delete_selected_file(self): print("Delete file functionality will be implemented here.") ================================================ FILE: gui/tabs_databases/query.py ================================================ import logging import queue import threading from pathlib import Path import multiprocessing import re import html import torch import yaml from PySide6.QtCore import QThread, Signal, QObject, Qt, QUrl from PySide6.QtGui import QDesktopServices from PySide6.QtWidgets import (QWidget, QVBoxLayout, QTextEdit, QPushButton, QCheckBox, QHBoxLayout, QMessageBox, QApplication, QComboBox, QLabel, QTextBrowser, QProgressBar, QSizePolicy) from abc import ABC, abstractmethod from chat.lm_studio import LMStudioChatThread from chat.local_model import LocalModelChat from chat.openai import ChatGPTThread from chat.minimax import MiniMaxThread from chat.kobold import KoboldThread from core.constants import CHAT_MODELS, CustomButtonStyles from modules.voice_recorder import VoiceRecorder from core.utilities import my_cprint, normalize_chat_text from core.constants import TOOLTIPS, PROJECT_ROOT from db.database_interactions import process_chunks_only_query from db.process_manager import get_process_manager logger = logging.getLogger(__name__) current_dir = PROJECT_ROOT input_text_file = str(current_dir / 'chat_history.txt') class SubmitStrategy(ABC): def __init__(self, tab): self.tab = tab @abstractmethod def submit(self, question: str, db_name: str) -> None: ... 
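# Each concrete strategy below adapts one chat backend to this interface: it builds
# the backend's thread, wires that thread's signals to the tab's slots, and starts it.
# As an illustrative sketch only (not part of the original file), a minimal extra
# backend would just need to implement submit(), e.g.:
class EchoStrategy(SubmitStrategy):
    """Hypothetical example: echoes the question back without calling any backend."""

    def submit(self, question, db_name):
        # Reuse the tab's existing slots to display text and re-enable the Submit button.
        self.tab.update_response_lm_studio(f"[{db_name}] {question}")
        self.tab.on_submission_finished()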
class LocalModelStrategy(SubmitStrategy): def submit(self, question, db_name): selected_model = self.tab.model_combo_box.currentText() lm = self.tab.local_model_chat if selected_model != lm.current_model: if lm.is_model_loaded(): lm.terminate_current_process() lm.start_model_process(selected_model) lm.start_chat(question, selected_model, db_name) class LMStudioStrategy(SubmitStrategy): def submit(self, question, db_name): t = self.tab.lm_studio_chat_thread = LMStudioChatThread(question, db_name) s = t.lm_studio_chat.signals s.response_signal.connect(self.tab.update_response_lm_studio) s.error_signal.connect(self.tab.show_error_message) s.finished_signal.connect(self.tab.on_submission_finished) s.citations_signal.connect(self.tab.display_citations_in_widget) t.start() class ChatGPTStrategy(SubmitStrategy): def submit(self, question, db_name): t = self.tab.chatgpt_thread = ChatGPTThread(question, db_name) t.response_signal.connect(self.tab.update_response_lm_studio) t.error_signal.connect(self.tab.show_error_message) t.finished_signal.connect(self.tab.on_submission_finished) t.citations_signal.connect(self.tab.display_citations_in_widget) t.start() class MiniMaxStrategy(SubmitStrategy): def submit(self, question, db_name): model_name = self.tab.model_source_combo.currentText() t = self.tab.minimax_thread = MiniMaxThread(question, db_name, model_name=model_name) t.response_signal.connect(self.tab.update_response_lm_studio) t.error_signal.connect(self.tab.show_error_message) t.finished_signal.connect(self.tab.on_submission_finished) t.citations_signal.connect(self.tab.display_citations_in_widget) t.start() class KoboldStrategy(SubmitStrategy): def submit(self, question, db_name): t = self.tab.kobold_thread = KoboldThread(question, db_name) t.response_signal.connect(self.tab.update_response_lm_studio) t.error_signal.connect(self.tab.show_error_message) t.finished_signal.connect(self.tab.on_submission_finished) t.citations_signal.connect(self.tab.display_citations_in_widget) t.start() class ChunksOnlyStrategy(SubmitStrategy): def submit(self, question, db_name): t = self.tab.database_query_thread = ChunksOnlyThread(question, db_name) t.chunks_ready.connect(self.tab.display_chunks) t.finished.connect(self.tab.on_database_query_finished) t.start() class ThinkingIndicator(QProgressBar): def __init__(self, parent=None): super().__init__(parent) self.setRange(0, 0) self.setTextVisible(False) self.setFixedHeight(12) self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) class ChunksOnlyThread(QThread): chunks_ready = Signal(str) def __init__(self, query, database_name): super().__init__() self.query = query self.database_name = database_name self.process = None self.process_lock = threading.Lock() def run(self): ctx = multiprocessing.get_context('spawn') result_queue = ctx.Queue() try: with self.process_lock: self.process = ctx.Process( target=process_chunks_only_query, args=(self.database_name, self.query, result_queue) ) get_process_manager().register(self.process) self.process.start() try: result = result_queue.get(timeout=120) self.chunks_ready.emit(result) except queue.Empty: logger.error("Query timed out after 120 seconds") self.chunks_ready.emit( "Error: Query timed out after 120 seconds. " "Please try a simpler query or check your database." 
) except Exception as e: logger.error(f"Error getting result from queue: {e}") self.chunks_ready.emit(f"Error: Failed to retrieve database response - {e}") with self.process_lock: if self.process and self.process.is_alive(): self.process.join(timeout=2) if self.process.is_alive(): self.process.terminate() self.process.join(timeout=1) if self.process.is_alive(): try: self.process.kill() self.process.join(timeout=1) except Exception as e: logger.error(f"Failed to kill process: {e}") if self.process: get_process_manager().unregister(self.process) self.process = None except Exception as e: logger.exception(f"Error in chunks only thread: {e}") self.chunks_ready.emit(f"Error querying database: {e}") with self.process_lock: if self.process: try: if self.process.is_alive(): self.process.terminate() self.process.join(timeout=1) if self.process.is_alive(): self.process.kill() self.process.join(timeout=1) get_process_manager().unregister(self.process) except Exception as cleanup_error: logger.error(f"Error during cleanup: {cleanup_error}") finally: self.process = None def stop(self): with self.process_lock: if self.process: try: if self.process.is_alive(): self.process.terminate() self.process.join(timeout=2) if self.process.is_alive(): self.process.kill() self.process.join(timeout=1) get_process_manager().unregister(self.process) except Exception as e: logger.warning(f"Error stopping process: {e}") finally: self.process = None def run_tts_in_process(config_path, input_text_file): from modules.tts import run_tts run_tts(config_path, input_text_file) my_cprint("TTS models removed from memory.", "red") class RefreshingComboBox(QComboBox): def __init__(self, parent=None): super(RefreshingComboBox, self).__init__(parent) def showPopup(self): new_items = self.parent().load_created_databases() current_items = [self.itemText(i) for i in range(self.count())] if new_items != current_items: current_text = self.currentText() self.clear() self.addItems(new_items) idx = self.findText(current_text) if idx >= 0: self.setCurrentIndex(idx) super(RefreshingComboBox, self).showPopup() class GuiSignals(QObject): response_signal = Signal(str) citations_signal = Signal(str) error_signal = Signal(str) finished_signal = Signal() class CustomTextBrowser(QTextBrowser): def __init__(self, parent=None): super().__init__(parent) self.setOpenExternalLinks(False) def doSetSource(self, name, type): if name.scheme() == 'file': QDesktopServices.openUrl(QUrl.fromLocalFile(name.toLocalFile())) elif name.scheme() in ['http', 'https']: QDesktopServices.openUrl(name) else: super().doSetSource(name, type) class DatabaseQueryTab(QWidget): def __init__(self): super(DatabaseQueryTab, self).__init__() self.config_path = PROJECT_ROOT / 'config.yaml' self.lm_studio_chat_thread = None self.local_model_chat = LocalModelChat() self.chatgpt_thread = None self.kobold_thread = None self.minimax_thread = None self.gui_signals = GuiSignals() self.current_model_name = None self.database_query_thread = None self.raw_response = "" self.citations_block = "" self.in_think_block = False self.initWidgets() self.setup_signals() def initWidgets(self): layout = QVBoxLayout(self) self.response_widget = CustomTextBrowser() self.response_widget.setOpenExternalLinks(True) layout.addWidget(self.response_widget, 5) self.token_count_label = QLabel("") layout.addWidget(self.token_count_label) self.thinking_indicator = ThinkingIndicator() self.thinking_label = QLabel("Thinking…") self.thinking_label.setAlignment(Qt.AlignLeft) indicator_layout = QHBoxLayout() 
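# thinking_indicator is the ThinkingIndicator defined above (a QProgressBar with
# setRange(0, 0), i.e. an indeterminate "busy" bar). It and thinking_label start
# hidden and are only shown while update_response_local_model() detects an
# unfinished think block with "Show Thinking" unchecked.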
indicator_layout.setContentsMargins(0, 0, 0, 0) indicator_layout.addWidget(self.thinking_label) indicator_layout.addWidget(self.thinking_indicator) self.thinking_label.hide() self.thinking_indicator.hide() layout.addLayout(indicator_layout) hbox1_layout = QHBoxLayout() self.database_pulldown = RefreshingComboBox(self) self.database_pulldown.setToolTip(TOOLTIPS["DATABASE_SELECT"]) self.database_pulldown.addItems(self.load_created_databases()) hbox1_layout.addWidget(self.database_pulldown) self.model_source_combo = QComboBox() self.model_source_combo.setToolTip(TOOLTIPS["MODEL_BACKEND_SELECT"]) self.model_source_combo.addItems([ "Local Model", "Kobold", "LM Studio", "ChatGPT", "MiniMax-M2.7", "MiniMax-M2.7-highspeed", ]) chatgpt_idx = self.model_source_combo.findText("ChatGPT") if chatgpt_idx >= 0: self.model_source_combo.setItemData( chatgpt_idx, "Configure model, API key, verbosity, and reasoning effort via File → Chat Backend Settings…", Qt.ToolTipRole, ) self.model_source_combo.setCurrentText("Local Model") self.model_source_combo.currentTextChanged.connect(self.on_model_source_changed) hbox1_layout.addWidget(self.model_source_combo) self.model_combo_box = QComboBox() self.model_combo_box.setToolTip(TOOLTIPS["LOCAL_MODEL_SELECT"]) if torch.cuda.is_available(): for model_info in CHAT_MODELS.values(): idx = self.model_combo_box.count() self.model_combo_box.addItem(model_info["model"]) gb = round(model_info["vram"] / 1024, 1) self.model_combo_box.setItemData(idx, f"Uses ~{gb} GB memory", Qt.ToolTipRole) self.model_combo_box.setEnabled(True) else: for key in [ "LiquidAI - .35b", "Qwen 3 - 0.6b (Thinking)", "LiquidAI - 1.2b", "Qwen 3 - 1.7b (Thinking)", "Granite - 2b", ]: self.model_combo_box.addItem(CHAT_MODELS[key]["model"]) self.model_combo_box.setToolTip("Choose a local model. It will be downloaded.") if self.model_combo_box.count() > 0: self.model_combo_box.setCurrentIndex(0) hbox1_layout.addWidget(self.model_combo_box) self.eject_button = QPushButton("Eject Local Model") self.eject_button.setToolTip(TOOLTIPS["EJECT_LOCAL_MODEL"]) self.eject_button.clicked.connect(self.eject_model) self.eject_button.setEnabled(False) hbox1_layout.addWidget(self.eject_button) if not torch.cuda.is_available(): self.model_source_combo.setItemData(0, 0, Qt.UserRole - 1) tooltip = "The Local Model option requires GPU-acceleration." 
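# Note: setItemData(0, 0, Qt.UserRole - 1) above is a commonly used Qt trick for
# disabling a single QComboBox entry: the combo's default QStandardItemModel stores
# an item's flags under the role just below Qt.UserRole, so writing 0 there clears
# ItemIsEnabled/ItemIsSelectable and leaves "Local Model" visible but greyed out on
# CPU-only machines.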
self.model_source_combo.setItemData(0, tooltip, Qt.ToolTipRole) self.model_combo_box.setEnabled(False) self.model_combo_box.setToolTip(tooltip) self.model_combo_box.setStyleSheet("QComboBox:disabled { color: #707070; }") layout.addLayout(hbox1_layout) self.text_input = QTextEdit() self.text_input.setToolTip(TOOLTIPS["QUESTION_INPUT"]) self.text_input.setMaximumHeight(80) layout.addWidget(self.text_input, 1) toggles_row = QHBoxLayout() self.show_thinking_checkbox = QCheckBox("Show Thinking") self.show_thinking_checkbox.setChecked(False) self.show_thinking_checkbox.stateChanged.connect(self.toggle_thinking_visibility) toggles_row.addWidget(self.show_thinking_checkbox) self.chunks_only_checkbox = QCheckBox("Chunks Only") self.chunks_only_checkbox.setToolTip(TOOLTIPS["CHUNKS_ONLY"]) toggles_row.addWidget(self.chunks_only_checkbox) toggles_row.addStretch(1) layout.addLayout(toggles_row) actions_row = QHBoxLayout() self.copy_response_button = QPushButton("Copy Response") self.copy_response_button.setToolTip(TOOLTIPS["COPY_RESPONSE"]) self.copy_response_button.clicked.connect(self.on_copy_response_clicked) actions_row.addWidget(self.copy_response_button) self.bark_button = QPushButton("Speak Response") self.bark_button.setToolTip(TOOLTIPS["SPEAK_RESPONSE"]) self.bark_button.clicked.connect(self.on_bark_button_clicked) actions_row.addWidget(self.bark_button) self.record_button = QPushButton("Voice Recorder") self.record_button.setToolTip(TOOLTIPS["VOICE_RECORDER"]) self.record_button.clicked.connect(self.toggle_recording) actions_row.addWidget(self.record_button) self.submit_button = QPushButton("Submit Question") self.submit_button.clicked.connect(self.on_submit_button_clicked) self.submit_button.setStyleSheet(CustomButtonStyles.GREEN_BUTTON_STYLE) self.submit_button.setDefault(True) actions_row.addWidget(self.submit_button) layout.addLayout(actions_row) self.is_recording = False self.voice_recorder = VoiceRecorder(self) def _strategy_for_source(self, source: str) -> SubmitStrategy: STRATEGIES = { "Local Model": LocalModelStrategy(self), "LM Studio": LMStudioStrategy(self), "Kobold": KoboldStrategy(self), "ChatGPT": ChatGPTStrategy(self), "MiniMax-M2.7": MiniMaxStrategy(self), "MiniMax-M2.7-highspeed": MiniMaxStrategy(self), } try: return STRATEGIES[source] except KeyError: raise ValueError(f"Unknown model source: {source}") def setup_signals(self): self.local_model_chat.signals.response_signal.connect(self.update_response_local_model) self.local_model_chat.signals.citations_signal.connect(self.display_citations_in_widget) self.local_model_chat.signals.error_signal.connect(self.show_error_message) self.local_model_chat.signals.finished_signal.connect(self.on_submission_finished) self.local_model_chat.signals.model_loaded_signal.connect(self.on_model_loaded) self.local_model_chat.signals.model_unloaded_signal.connect(self.on_model_unloaded) self.local_model_chat.signals.token_count_signal.connect(self.update_token_count_label) def _render_html(self): if self.show_thinking_checkbox.isChecked(): visible_text = self.raw_response else: txt = self.raw_response txt = re.sub(r"<think>.*?</think>", "", txt, flags=re.DOTALL | re.IGNORECASE) txt = re.sub(r"<think>.*$", "", txt, flags=re.DOTALL | re.IGNORECASE) txt = re.sub(r"\n\s*\n", "\n", txt).lstrip() visible_text = txt body = html.escape(visible_text).replace("\n", "<br>
    ") body += self.citations_block self.response_widget.setHtml(body) self.response_widget.verticalScrollBar().setValue( self.response_widget.verticalScrollBar().maximum()) def toggle_thinking_visibility(self): self._render_html() def update_token_count_label(self, token_count_string): self.token_count_label.setText(token_count_string) def on_model_source_changed(self, text): is_local = text == "Local Model" self.model_combo_box.setVisible(is_local) self.eject_button.setVisible(is_local) if is_local: self.model_combo_box.setEnabled(torch.cuda.is_available()) self.eject_button.setEnabled(self.local_model_chat.is_model_loaded()) else: self.model_combo_box.setEnabled(False) self.eject_button.setEnabled(False) def load_created_databases(self): if self.config_path.exists(): with open(self.config_path, 'r', encoding='utf-8') as file: config = yaml.safe_load(file) databases = list(config.get('created_databases', {}).keys()) return [db for db in databases if db != "user_manual"] return [] def on_submit_button_clicked(self): script_dir = PROJECT_ROOT selected_database = self.database_pulldown.currentText() if not selected_database or not (script_dir / "Vector_DB" / selected_database).exists(): QMessageBox.warning(self, "No Database Selected", "Select a vector database to query first.") return self.response_widget.clear() self.token_count_label.clear() cursor = self.response_widget.textCursor() cursor.clearSelection() self.response_widget.setTextCursor(cursor) self.raw_response = "" self.citations_block = "" self.submit_button.setDisabled(True) user_question = self.text_input.toPlainText() if self.chunks_only_checkbox.isChecked(): strategy = ChunksOnlyStrategy(self) else: strategy = self._strategy_for_source(self.model_source_combo.currentText()) try: strategy.submit(user_question, selected_database) except Exception as e: logging.exception("Submission failed: %s", e) self.show_error_message(str(e)) self.submit_button.setDisabled(False) def display_chunks(self, chunks): self.response_widget.setPlainText(chunks) def on_database_query_finished(self): self.submit_button.setDisabled(False) def eject_model(self): if self.local_model_chat.is_model_loaded(): try: self.local_model_chat.eject_model() except Exception as e: logging.exception(f"Error during model ejection: {e}") finally: self.eject_button.setEnabled(False) self.model_combo_box.setEnabled(True) else: logging.warning("No model is currently loaded.") def on_model_loaded(self): self.eject_button.setEnabled(True) self.eject_button.setText(f"Eject {self.local_model_chat.current_model}") def on_model_unloaded(self): self.eject_button.setEnabled(False) self.eject_button.setText("Eject Local Model") def display_citations_in_widget(self, citations): if citations: self.citations_block = f"

    Citation Links:{citations}" else: self.citations_block = "

    No citations found." self._render_html() def on_copy_response_clicked(self): clipboard = QApplication.clipboard() response_text = self.response_widget.toPlainText() if response_text: clipboard.setText(response_text) QMessageBox.information(self, "Information", "Response copied to clipboard.") else: QMessageBox.warning(self, "Warning", "No response to copy.") def on_bark_button_clicked(self): script_dir = PROJECT_ROOT config_path = script_dir / 'config.yaml' with open(config_path, 'r', encoding='utf-8') as config_file: config = yaml.safe_load(config_file) tts_config = config.get('tts', {}) tts_model = tts_config.get('model', '').lower() if tts_model not in ['googletts', 'chattts', 'kyutaipocket', 'chatterbox'] and not torch.cuda.is_available(): QMessageBox.warning(self, "Error", "The Text to Speech backend you selected requires GPU-acceleration.") return from core.utilities import check_backend_dependencies, install_packages from core.constants import BACKEND_DEPENDENCIES if not check_backend_dependencies(tts_model, interactive=False): required_packages = BACKEND_DEPENDENCIES.get(tts_model, {}) if required_packages: packages_str = ", ".join([f"{pkg}=={ver}" for pkg, ver in required_packages.items()]) reply = QMessageBox.question( self, "Missing Dependencies", f"{tts_model.title()} backend requires additional packages:\n\n{packages_str}\n\nInstall now?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes ) if reply == QMessageBox.Yes: missing_packages = [(pkg, ver) for pkg, ver in required_packages.items()] if install_packages(missing_packages): QMessageBox.information(self, "Success", "Dependencies installed successfully!") else: QMessageBox.warning(self, "Installation Failed", "Failed to install dependencies. Please install manually.") return else: return if not (script_dir / 'chat_history.txt').exists(): QMessageBox.warning(self, "Error", "No response to play.") return self.run_tts_module() def run_tts_module(self): process = multiprocessing.Process(target=run_tts_in_process, args=(str(self.config_path), input_text_file)) process.start() def toggle_recording(self): if self.is_recording: self.voice_recorder.stop_recording() self.record_button.setText("Voice Recorder") else: self.voice_recorder.start_recording() self.record_button.setText("Stop Recording") self.is_recording = not self.is_recording def update_response_lm_studio(self, response_chunk): self.raw_response += response_chunk self._render_html() self.response_widget.verticalScrollBar().setValue( self.response_widget.verticalScrollBar().maximum() ) def update_response_local_model(self, chunk: str): chunk_lower = chunk.lower() open_pos = chunk_lower.rfind("") close_pos = chunk_lower.rfind("") if open_pos != -1 or close_pos != -1: self.in_think_block = open_pos > close_pos visible = self.in_think_block and not self.show_thinking_checkbox.isChecked() self.thinking_indicator.setVisible(visible) self.thinking_label.setVisible(visible) self.raw_response += chunk self._render_html() def show_error_message(self, error_message): if "exceed the chat model's context limit" in error_message: msg_box = QMessageBox() msg_box.setIcon(QMessageBox.Warning) msg_box.setText(error_message) msg_box.setWindowTitle("Context Limit Exceeded") msg_box.setStandardButtons(QMessageBox.Ok) msg_box.exec() else: QMessageBox.warning(self, "Error", error_message) self.submit_button.setDisabled(False) def on_submission_finished(self): self.submit_button.setDisabled(False) ix = self.raw_response.lower().rfind("
    ") answer_only = self.raw_response[ix + len(""):] if ix != -1 else self.raw_response answer_only = answer_only.lstrip("\n") try: with open(input_text_file, "w", encoding="utf-8") as f: f.write(normalize_chat_text(answer_only)) except OSError as e: logging.exception(f"Could not write chat_history.txt: {e}") def update_transcription(self, transcription_text): self.text_input.setPlainText(transcription_text) def cleanup(self): if self.local_model_chat.is_model_loaded(): self.local_model_chat.eject_model() if self.database_query_thread and self.database_query_thread.isRunning(): self.database_query_thread.stop() self.database_query_thread.wait() if self.chatgpt_thread and self.chatgpt_thread.isRunning(): self.chatgpt_thread.wait() if self.minimax_thread and self.minimax_thread.isRunning(): self.minimax_thread.wait() if self.kobold_thread and self.kobold_thread.isRunning(): self.kobold_thread.stop() self.kobold_thread.wait(timeout=5000) print("Cleanup completed") ================================================ FILE: gui/tabs_models/__init__.py ================================================ ================================================ FILE: gui/tabs_models/models.py ================================================ import threading from pathlib import Path from PySide6.QtCore import Qt, QUrl from PySide6.QtGui import QDesktopServices from PySide6.QtWidgets import ( QWidget, QLabel, QGridLayout, QVBoxLayout, QGroupBox, QPushButton, QRadioButton, QButtonGroup, QMessageBox ) from core.constants import VECTOR_MODELS, TOOLTIPS from gui.download_model import ModelDownloader, model_downloaded_signal class VectorModelsTab(QWidget): DOWNLOAD_BUTTON_LABEL = "Download Selected Model" DOWNLOAD_BUTTON_BUSY_LABEL = "Downloading..." def __init__(self, parent=None): super().__init__(parent) self.main_layout = QVBoxLayout() self.setLayout(self.main_layout) self.group_boxes = {} self.downloaded_labels = {} self.model_radiobuttons = QButtonGroup(self) self.model_radiobuttons.setExclusive(True) self.stretch_factors = { 'BAAI': 4, 'intfloat': 4, 'IBM': 3, 'infly': 3, 'Snowflake': 3, 'Qwen': 4, 'Google': 2, } models_dir = Path('Models') if not models_dir.exists(): models_dir.mkdir(parents=True) vector_models_dir = models_dir / "vector" if not vector_models_dir.exists(): vector_models_dir.mkdir(parents=True) existing_vector_directories = {d.name for d in vector_models_dir.iterdir() if d.is_dir()} headers = ["Select", "Model Name", "Original Precision", "Parameters", "Dimensions", "Max Sequence", "Size (MB)", "Downloaded"] column_stretch_factors = [1, 2, 2, 1, 1, 1, 1, 1] def add_centered_widget(grid, widget, row, col): grid.addWidget(widget, row, col, alignment=Qt.AlignCenter) row_counter = 1 for vendor, models in VECTOR_MODELS.items(): group_box = QGroupBox(vendor) group_box.setStyleSheet(""" QGroupBox::title { subcontrol-origin: margin; padding: 0 5px; font-weight: bold; color: #00bf9e; } """) group_layout = QGridLayout() group_layout.setVerticalSpacing(0) group_layout.setHorizontalSpacing(0) group_box.setLayout(group_layout) group_layout.setContentsMargins(0, 10, 0, 0) size_policy = group_box.sizePolicy() size_policy.setVerticalStretch(self.stretch_factors.get(vendor, 1)) group_box.setSizePolicy(size_policy) self.group_boxes[vendor] = group_box for col, header in enumerate(headers): header_label = QLabel(header) header_label.setAlignment(Qt.AlignCenter) header_label.setStyleSheet("text-decoration: underline;") header_label.setToolTip(TOOLTIPS.get(f"VECTOR_MODEL_{header.upper().replace(' ', '_')}", "")) 
group_layout.addWidget(header_label, 0, col) for col, stretch_factor in enumerate(column_stretch_factors): group_layout.setColumnStretch(col, stretch_factor) for model in models: model_info = model grid = group_box.layout() row = grid.rowCount() radiobutton = QRadioButton() radiobutton.setToolTip(TOOLTIPS.get("VECTOR_MODEL_SELECT", "")) radiobutton.setProperty("model_info", model_info) radiobutton.setProperty("downloaded_key", f"{vendor}/{model['name']}") self.model_radiobuttons.addButton(radiobutton, row_counter) add_centered_widget(grid, radiobutton, row, 0) model_name_label = QLabel() model_name_label.setTextFormat(Qt.RichText) model_name_label.setText(f'{model["name"]}') model_name_label.setOpenExternalLinks(False) model_name_label.linkActivated.connect(self.open_link) model_name_label.setToolTip(TOOLTIPS.get("VECTOR_MODEL_NAME", "")) add_centered_widget(grid, model_name_label, row, 1) precision_label = QLabel(str(model.get('precision', 'N/A'))) precision_label.setToolTip(TOOLTIPS.get("VECTOR_MODEL_PRECISION", "")) add_centered_widget(grid, precision_label, row, 2) parameters_label = QLabel(str(model.get('parameters', 'N/A'))) parameters_label.setToolTip(TOOLTIPS.get("VECTOR_MODEL_PARAMETERS", "")) add_centered_widget(grid, parameters_label, row, 3) dimensions_label = QLabel(str(model['dimensions'])) dimensions_label.setToolTip(TOOLTIPS.get("VECTOR_MODEL_DIMENSIONS", "")) add_centered_widget(grid, dimensions_label, row, 4) max_sequence_label = QLabel(str(model['max_sequence'])) max_sequence_label.setToolTip(TOOLTIPS.get("VECTOR_MODEL_MAX_SEQUENCE", "")) add_centered_widget(grid, max_sequence_label, row, 5) size_label = QLabel(str(model['size_mb'])) size_label.setToolTip(TOOLTIPS.get("VECTOR_MODEL_SIZE", "")) add_centered_widget(grid, size_label, row, 6) if 'cache_dir' in model: expected_dir_name = model['cache_dir'] else: expected_dir_name = ModelDownloader(model_info, model['type']).get_model_directory_name() is_downloaded = expected_dir_name in existing_vector_directories downloaded_label = QLabel('Yes' if is_downloaded else 'No') downloaded_label.setToolTip(TOOLTIPS.get("VECTOR_MODEL_DOWNLOADED", "")) add_centered_widget(grid, downloaded_label, row, 7) self.downloaded_labels[f"{vendor}/{model['name']}"] = (downloaded_label, model_info, radiobutton) row_counter += 1 for vendor, group_box in self.group_boxes.items(): self.main_layout.addWidget(group_box) self.download_button = QPushButton(self.DOWNLOAD_BUTTON_LABEL) self.download_button.setToolTip(TOOLTIPS.get("DOWNLOAD_MODEL", "")) self.download_button.clicked.connect(self.initiate_model_download) self.main_layout.addWidget(self.download_button) model_downloaded_signal.downloaded.connect(self.update_model_downloaded_status) model_downloaded_signal.failed.connect(self._on_download_failed) def initiate_model_download(self): selected_button = self.model_radiobuttons.checkedButton() if selected_button is None: return model_info = selected_button.property("model_info") downloaded_key = selected_button.property("downloaded_key") downloaded_label = self.downloaded_labels[downloaded_key][0] if downloaded_label.text() == 'Yes': reply = QMessageBox.question( self, "Model Already Downloaded", f"'{model_info['name']}' is already downloaded.\n\nRe-download it?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if reply != QMessageBox.Yes: return self.download_button.setEnabled(False) self.download_button.setText(self.DOWNLOAD_BUTTON_BUSY_LABEL) model_downloader = ModelDownloader(model_info, model_info['type']) 
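# The download runs on a daemon thread so the GUI stays responsive; completion or
# failure is reported back through model_downloaded_signal (connected in __init__),
# whose handlers re-enable the button and refresh the "Downloaded" column.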
threading.Thread(target=model_downloader.download, daemon=True).start() def _reset_download_button(self): self.download_button.setEnabled(True) self.download_button.setText(self.DOWNLOAD_BUTTON_LABEL) def _on_download_failed(self, message): self._reset_download_button() QMessageBox.critical(self, "Download Failed", message) def update_model_downloaded_status(self, model_name, model_type): self._reset_download_button() models_dir = Path('Models') vector_models_dir = models_dir / "vector" existing_vector_directories = {d.name for d in vector_models_dir.iterdir() if d.is_dir()} for vendor, models in VECTOR_MODELS.items(): for model in models: cache_dir = model.get('cache_dir', '') generated_dir = model['repo_id'].replace('/', '--') if cache_dir == model_name or generated_dir == model_name: key = f"{vendor}/{model['name']}" if key in self.downloaded_labels: downloaded_label, _, _ = self.downloaded_labels[key] downloaded_label.setText('Yes') self.refresh_gui() return print(f"Model {model_name} not found in VECTOR_MODELS") def refresh_gui(self): for group_box in self.group_boxes.values(): group_box.repaint() self.repaint() def open_link(self, url): QDesktopServices.openUrl(QUrl(url)) if __name__ == "__main__": from PySide6.QtWidgets import QApplication app = QApplication([]) window = VectorModelsTab() window.show() app.exec() ================================================ FILE: gui/tabs_settings/__init__.py ================================================ ================================================ FILE: gui/tabs_settings/database_create.py ================================================ import yaml from PySide6.QtGui import QIntValidator from PySide6.QtWidgets import QWidget, QLabel, QLineEdit, QGridLayout, QHBoxLayout, QComboBox, QCheckBox, QMessageBox from core.constants import TOOLTIPS class ChunkSettingsTab(QWidget): def __init__(self): super(ChunkSettingsTab, self).__init__() with open("config.yaml", "r", encoding="utf-8") as f: config_data = yaml.safe_load(f) self.database_config = config_data["database"] self.compute_device_options = config_data["Compute_Device"]["available"] self.database_creation_device = config_data["Compute_Device"]["database_creation"] preset_tooltip = ( "Controls CPU parallelism during database creation.\n" "Minimal: sequential processing (1 thread/process)\n" "Low: light parallelism (2-4 workers)\n" "Normal: moderate parallelism (default)\n" "High: aggressive parallelism\n" "Maximum: all available CPU cores" ) current_size = self.database_config.get("chunk_size", "") current_overlap = self.database_config.get("chunk_overlap", "") current_preset = self.database_config.get("pipeline_preset", "normal") self.device_label = QLabel("Device:") self.device_label.setToolTip(TOOLTIPS["CREATE_DEVICE_DB"]) self.device_combo = QComboBox() self.device_combo.addItems(self.compute_device_options) self.device_combo.setToolTip(TOOLTIPS["CREATE_DEVICE_DB"]) if self.database_creation_device in self.compute_device_options: self.device_combo.setCurrentIndex( self.compute_device_options.index(self.database_creation_device) ) self.device_combo.setMinimumWidth(100) self.half_precision_label = QLabel("Half-Precision (2x speedup - GPU only):") self.half_precision_label.setToolTip(TOOLTIPS["HALF_PRECISION"]) self.half_precision_checkbox = QCheckBox() self.half_precision_checkbox.setChecked(self.database_config.get("half", False)) self.half_precision_checkbox.setToolTip(TOOLTIPS["HALF_PRECISION"]) self.preset_label = QLabel("Pipeline Performance:") 
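# For reference, the keys this tab reads and writes correspond to a config.yaml
# shaped roughly like the sketch below (values here are placeholders, not the
# project's shipped defaults):
#
#   Compute_Device:
#     available: ["cpu", "cuda"]
#     database_creation: "cuda"
#   database:
#     chunk_size: 1200
#     chunk_overlap: 600
#     half: false
#     pipeline_preset: "normal"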
self.preset_label.setToolTip(preset_tooltip) self.preset_combo = QComboBox() self.preset_combo.addItems(["minimal", "low", "normal", "high", "maximum"]) self.preset_combo.setCurrentText(current_preset) self.preset_combo.setToolTip(preset_tooltip) self.preset_combo.setMinimumWidth(100) self.chunk_size_label = QLabel("Chunk Size (# characters):") self.chunk_size_label.setToolTip(TOOLTIPS["CHUNK_SIZE"]) self.current_size_label = QLabel(f"{current_size}") self.current_size_label.setToolTip(TOOLTIPS["CHUNK_SIZE"]) self.chunk_size_edit = QLineEdit() self.chunk_size_edit.setPlaceholderText("Enter new chunk_size...") self.chunk_size_edit.setValidator(QIntValidator(1, 1000000)) self.chunk_size_edit.setToolTip(TOOLTIPS["CHUNK_SIZE"]) self.chunk_overlap_label = QLabel("Chunk Overlap (# characters):") self.chunk_overlap_label.setToolTip(TOOLTIPS["CHUNK_OVERLAP"]) self.current_overlap_label = QLabel(f"{current_overlap}") self.current_overlap_label.setToolTip(TOOLTIPS["CHUNK_OVERLAP"]) self.chunk_overlap_edit = QLineEdit() self.chunk_overlap_edit.setPlaceholderText("Enter new chunk_overlap...") self.chunk_overlap_edit.setValidator(QIntValidator(0, 1000000)) self.chunk_overlap_edit.setToolTip(TOOLTIPS["CHUNK_OVERLAP"]) def labeled(label, current, editor, editor_stretch=1): box = QHBoxLayout() box.addWidget(label) box.addWidget(current) box.addWidget(editor, editor_stretch) return box device_cell = QHBoxLayout() device_cell.addWidget(self.device_label) device_cell.addWidget(self.device_combo, 1) half_cell = QHBoxLayout() half_cell.addWidget(self.half_precision_label) half_cell.addWidget(self.half_precision_checkbox) half_cell.addStretch(1) preset_cell = QHBoxLayout() preset_cell.addWidget(self.preset_label) preset_cell.addWidget(self.preset_combo, 1) size_cell = labeled(self.chunk_size_label, self.current_size_label, self.chunk_size_edit) overlap_cell = labeled(self.chunk_overlap_label, self.current_overlap_label, self.chunk_overlap_edit) grid_layout = QGridLayout() for col in range(6): grid_layout.setColumnStretch(col, 1) grid_layout.addLayout(device_cell, 0, 0, 1, 2) grid_layout.addLayout(preset_cell, 0, 2, 1, 2) grid_layout.addLayout(half_cell, 0, 4, 1, 2) grid_layout.addLayout(size_cell, 1, 0, 1, 3) grid_layout.addLayout(overlap_cell, 1, 3, 1, 3) self.setLayout(grid_layout) def update_config(self): try: with open("config.yaml", "r", encoding="utf-8") as f: config_data = yaml.safe_load(f) except Exception as e: QMessageBox.critical( self, "Error Loading Configuration", f"An error occurred while loading the configuration: {e}", ) return False settings_changed = False errors = [] new_device = self.device_combo.currentText() device_changed = new_device != self.database_creation_device new_chunk_size_text = self.chunk_size_edit.text().strip() if new_chunk_size_text: try: new_chunk_size = int(new_chunk_size_text) if new_chunk_size <= 0: raise ValueError("Chunk size must be a positive integer.") except ValueError as ve: errors.append(f"Chunk size must be a positive integer: {str(ve)}") else: new_chunk_size = self.database_config.get("chunk_size", 0) new_chunk_overlap_text = self.chunk_overlap_edit.text().strip() if new_chunk_overlap_text: try: new_chunk_overlap = int(new_chunk_overlap_text) if new_chunk_overlap < 0: raise ValueError("Chunk overlap cannot be negative.") except ValueError as ve: errors.append( f"Chunk overlap must be a non-negative integer: {str(ve)}" ) else: new_chunk_overlap = self.database_config.get("chunk_overlap", 0) if new_chunk_size and new_chunk_overlap >= new_chunk_size: 
errors.append("Chunk overlap must be less than chunk size.") if errors: error_message = "\n".join(errors) QMessageBox.warning( self, "Invalid Input", f"The following errors occurred:\n{error_message}" ) return False if device_changed: config_data["Compute_Device"]["database_creation"] = new_device self.database_creation_device = new_device settings_changed = True if new_chunk_size_text and new_chunk_size != self.database_config.get( "chunk_size", 0 ): config_data["database"]["chunk_size"] = new_chunk_size self.current_size_label.setText(f"{new_chunk_size}") settings_changed = True if new_chunk_overlap_text and new_chunk_overlap != self.database_config.get( "chunk_overlap", 0 ): config_data["database"]["chunk_overlap"] = new_chunk_overlap self.current_overlap_label.setText(f"{new_chunk_overlap}") settings_changed = True new_half_precision = self.half_precision_checkbox.isChecked() if new_half_precision != self.database_config.get("half", False): config_data["database"]["half"] = new_half_precision settings_changed = True new_preset = self.preset_combo.currentText() if new_preset != self.database_config.get("pipeline_preset", "normal"): config_data["database"]["pipeline_preset"] = new_preset settings_changed = True if settings_changed: try: with open("config.yaml", "w", encoding="utf-8") as f: yaml.safe_dump(config_data, f) self.database_config["chunk_size"] = config_data["database"]["chunk_size"] self.database_config["chunk_overlap"] = config_data["database"]["chunk_overlap"] self.database_config["half"] = config_data["database"]["half"] self.database_config["pipeline_preset"] = config_data["database"].get("pipeline_preset", "normal") self.database_creation_device = config_data["Compute_Device"][ "database_creation" ] self.chunk_overlap_edit.clear() self.chunk_size_edit.clear() except Exception as e: QMessageBox.critical( self, "Error Saving Configuration", f"An error occurred while saving the configuration: {e}", ) return False else: return False return settings_changed ================================================ FILE: gui/tabs_settings/database_query.py ================================================ import yaml from PySide6.QtGui import QIntValidator, QDoubleValidator from PySide6.QtWidgets import ( QWidget, QLabel, QLineEdit, QGridLayout, QHBoxLayout, QSizePolicy, QComboBox, QPushButton, QMessageBox, ) from core.constants import TOOLTIPS class DatabaseSettingsTab(QWidget): def __init__(self): super(DatabaseSettingsTab, self).__init__() try: with open("config.yaml", "r", encoding="utf-8") as f: config_data = yaml.safe_load(f) self.database_config = config_data["database"] self.compute_device_options = config_data["Compute_Device"]["available"] self.database_query_device = config_data["Compute_Device"]["database_query"] self.search_term = self.database_config.get("search_term", "") self.document_type = self.database_config.get("document_types", "") except Exception as e: QMessageBox.critical( self, "Error Loading Configuration", f"An error occurred while loading the configuration: {e}", ) self.database_config = {} self.compute_device_options = [] self.database_query_device = "" self.search_term = "" self.document_type = "" self.field_data = {} self.label_data = {} self.query_device_label = QLabel("Device:") self.query_device_label.setToolTip(TOOLTIPS["CREATE_DEVICE_QUERY"]) self.query_device_combo = QComboBox() self.query_device_combo.addItems(self.compute_device_options) self.query_device_combo.setToolTip(TOOLTIPS["CREATE_DEVICE_QUERY"]) if self.database_query_device in 
self.compute_device_options: self.query_device_combo.setCurrentIndex( self.compute_device_options.index(self.database_query_device) ) similarity_value = self.database_config.get("similarity", "") self.similarity_edit = QLineEdit() self.similarity_edit.setPlaceholderText("Similarity (0.0 - 1.0)...") self.similarity_edit.setValidator(QDoubleValidator(0.0, 1.0, 4)) self.similarity_edit.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) self.similarity_edit.setToolTip(TOOLTIPS["SIMILARITY"]) self.similarity_label = QLabel(f"Similarity: {similarity_value}") self.similarity_label.setToolTip(TOOLTIPS["SIMILARITY"]) self.field_data["similarity"] = self.similarity_edit self.label_data["similarity"] = self.similarity_label contexts_value = self.database_config.get("contexts", "") self.contexts_edit = QLineEdit() self.contexts_edit.setPlaceholderText("# Contexts to return...") self.contexts_edit.setValidator(QIntValidator(1, 1000000)) self.contexts_edit.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) self.contexts_edit.setToolTip(TOOLTIPS["CONTEXTS"]) self.contexts_label = QLabel(f"Contexts: {contexts_value}") self.contexts_label.setToolTip(TOOLTIPS["CONTEXTS"]) self.field_data["contexts"] = self.contexts_edit self.label_data["contexts"] = self.contexts_label self.search_term_edit = QLineEdit() self.search_term_edit.setPlaceholderText("Term to require...") self.search_term_edit.setText(self.search_term) self.search_term_edit.setToolTip(TOOLTIPS["SEARCH_TERM_FILTER"]) self.search_term_label = QLabel(f"Search Term Filter: {self.search_term}") self.search_term_label.setToolTip(TOOLTIPS["SEARCH_TERM_FILTER"]) self.filter_button = QPushButton("Clear Filter") self.filter_button.clicked.connect(self.reset_search_term) self.file_type_combo = QComboBox() file_type_items = ["All Files", "Images Only", "Documents Only", "Audio Only"] self.file_type_combo.addItems(file_type_items) self.file_type_combo.setToolTip(TOOLTIPS["FILE_TYPE_FILTER"]) if self.document_type == "image": default_index = file_type_items.index("Images Only") elif self.document_type == "document": default_index = file_type_items.index("Documents Only") elif self.document_type == "audio": default_index = file_type_items.index("Audio Only") else: default_index = file_type_items.index("All Files") self.file_type_combo.setCurrentIndex(default_index) self.file_type_label = QLabel("File Type:") self.file_type_label.setToolTip(TOOLTIPS["FILE_TYPE_FILTER"]) device_cell = QHBoxLayout() device_cell.addWidget(self.query_device_label) device_cell.addWidget(self.query_device_combo, 1) similarity_cell = QHBoxLayout() similarity_cell.addWidget(self.similarity_label) similarity_cell.addWidget(self.similarity_edit, 1) contexts_cell = QHBoxLayout() contexts_cell.addWidget(self.contexts_label) contexts_cell.addWidget(self.contexts_edit, 1) search_cell = QHBoxLayout() search_cell.addWidget(self.search_term_label) search_cell.addWidget(self.search_term_edit, 1) search_cell.addWidget(self.filter_button) file_type_cell = QHBoxLayout() file_type_cell.addWidget(self.file_type_label) file_type_cell.addWidget(self.file_type_combo, 1) grid_layout = QGridLayout() for col in range(6): grid_layout.setColumnStretch(col, 1) grid_layout.addLayout(device_cell, 0, 0, 1, 2) grid_layout.addLayout(similarity_cell, 0, 2, 1, 2) grid_layout.addLayout(contexts_cell, 0, 4, 1, 2) grid_layout.addLayout(search_cell, 1, 0, 1, 4) grid_layout.addLayout(file_type_cell, 1, 4, 1, 2) self.setLayout(grid_layout) def update_config(self): try: with open("config.yaml", "r", 
encoding="utf-8") as f: config_data = yaml.safe_load(f) except Exception as e: QMessageBox.critical( self, "Error Loading Configuration", f"An error occurred while loading the configuration: {e}", ) return False settings_changed = False errors = [] new_query_device = self.query_device_combo.currentText() device_changed = new_query_device != config_data["Compute_Device"].get( "database_query", "" ) new_similarity_text = self.similarity_edit.text().strip() if new_similarity_text: try: new_similarity = float(new_similarity_text) if not (0.0 <= new_similarity <= 1.0): raise ValueError("Similarity must be between 0.0 and 1.0.") except ValueError: errors.append("Similarity must be a number between 0.0 and 1.0.") else: new_similarity = self.database_config.get("similarity", 0.0) new_contexts_text = self.contexts_edit.text().strip() if new_contexts_text: try: new_contexts = int(new_contexts_text) if new_contexts < 1: raise ValueError("Contexts must be a positive integer.") except ValueError: errors.append("Contexts must be a positive integer.") else: new_contexts = self.database_config.get("contexts", 1) new_search_term = self.search_term_edit.text().strip() file_type_map = { "All Files": "", "Images Only": "image", "Documents Only": "document", "Audio Only": "audio", } file_type_selection = self.file_type_combo.currentText() document_type_value = file_type_map.get(file_type_selection, "") if errors: error_message = "\n".join(errors) QMessageBox.warning( self, "Invalid Input", f"The following errors occurred:\n{error_message}" ) return False if device_changed: config_data["Compute_Device"]["database_query"] = new_query_device settings_changed = True if new_similarity_text and new_similarity != config_data["database"].get( "similarity", 0.0 ): config_data["database"]["similarity"] = new_similarity settings_changed = True if new_contexts_text and new_contexts != config_data["database"].get( "contexts", 1 ): config_data["database"]["contexts"] = new_contexts settings_changed = True if new_search_term and new_search_term != config_data["database"].get( "search_term", "" ): config_data["database"]["search_term"] = new_search_term settings_changed = True if document_type_value != config_data["database"].get("document_types", ""): config_data["database"]["document_types"] = document_type_value settings_changed = True if settings_changed: try: with open("config.yaml", "w", encoding="utf-8") as f: yaml.safe_dump(config_data, f) except Exception as e: QMessageBox.critical( self, "Error Saving Configuration", f"An error occurred while saving the configuration: {e}", ) return False if device_changed: self.database_query_device = new_query_device if new_similarity_text: self.database_config["similarity"] = new_similarity self.similarity_label.setText(f"Similarity: {new_similarity}") if new_contexts_text: self.database_config["contexts"] = new_contexts self.contexts_label.setText(f"Contexts: {new_contexts}") if new_search_term: self.search_term = new_search_term self.database_config["search_term"] = new_search_term self.search_term_label.setText(f"Search Term Filter: {new_search_term}") self.document_type = document_type_value self.database_config["document_types"] = document_type_value self.similarity_edit.clear() self.contexts_edit.clear() self.search_term_edit.clear() return settings_changed def reset_search_term(self): try: with open("config.yaml", "r", encoding="utf-8") as f: config_data = yaml.safe_load(f) except Exception as e: QMessageBox.critical( self, "Error Loading Configuration", f"An error 
occurred while loading the configuration: {e}", ) return config_data["database"]["search_term"] = "" try: with open("config.yaml", "w", encoding="utf-8") as f: yaml.safe_dump(config_data, f) except Exception as e: QMessageBox.critical( self, "Error Saving Configuration", f"An error occurred while saving the configuration: {e}", ) return self.search_term = "" self.database_config["search_term"] = "" self.search_term_label.setText("Search Term Filter: ") self.search_term_edit.clear() ================================================ FILE: gui/tabs_settings/settings.py ================================================ import logging from functools import partial from PySide6.QtWidgets import ( QVBoxLayout, QGroupBox, QPushButton, QHBoxLayout, QWidget, QMessageBox, ) from gui.tabs_settings.database_create import ChunkSettingsTab from gui.tabs_settings.database_query import DatabaseSettingsTab from gui.tabs_settings.tts import TTSSettingsTab from gui.tabs_settings.vision import VisionSettingsTab def update_all_configs(configs): updated = False for config in configs.values(): updated = config.update_config() or updated if updated: logging.info("config.yaml file updated") message = "Settings Updated" if updated else "No Updates" details = ( "One or more settings have been updated." if updated else "No new settings were entered." ) QMessageBox.information(None, message, details) def adjust_stretch(groups, layout): for group, factor in groups.items(): layout.setStretchFactor(group, factor if group.isChecked() else 0) class GuiSettingsTab(QWidget): def __init__(self): super(GuiSettingsTab, self).__init__() self.layout = QVBoxLayout() classes = { "Database Query": (DatabaseSettingsTab, 4), "Database Creation": (ChunkSettingsTab, 3), } self.groups = {} self.configs = {} for title, (TabClass, stretch) in classes.items(): settings = TabClass() group = QGroupBox(title) layout = QVBoxLayout() layout.addWidget(settings) group.setLayout(layout) group.setCheckable(True) group.setChecked(True) self.groups[group] = stretch self.configs[title] = settings self.layout.addWidget(group, stretch) group.toggled.connect(partial(self.toggle_group, group)) ttsSettings = TTSSettingsTab() ttsGroup = QGroupBox("Text to Speech") ttsLayout = QVBoxLayout() ttsLayout.addWidget(ttsSettings) ttsGroup.setLayout(ttsLayout) ttsGroup.setCheckable(True) ttsGroup.setChecked(True) self.layout.addWidget(ttsGroup, 3) self.groups[ttsGroup] = 3 ttsGroup.toggled.connect(partial(self.toggle_tts_group, ttsSettings)) visionSettings = VisionSettingsTab() visionGroup = QGroupBox("Vision Models") visionLayout = QVBoxLayout() visionLayout.addWidget(visionSettings) visionGroup.setLayout(visionLayout) visionGroup.setCheckable(True) visionGroup.setChecked(True) self.layout.addWidget(visionGroup, 2) self.groups[visionGroup] = 2 visionGroup.toggled.connect(partial(self.toggle_vision_group, visionSettings)) self.update_all_button = QPushButton("Update Settings") self.update_all_button.setStyleSheet("min-width: 200px;") self.update_all_button.clicked.connect(self.update_all_settings) center_button_layout = QHBoxLayout() center_button_layout.addStretch(1) center_button_layout.addWidget(self.update_all_button) center_button_layout.addStretch(1) self.layout.addLayout(center_button_layout) self.setLayout(self.layout) adjust_stretch(self.groups, self.layout) def toggle_group(self, group, checked): if group.title() in self.configs: self.configs[group.title()].setVisible(checked) adjust_stretch(self.groups, self.layout) def toggle_tts_group(self, ttsSettings, 
checked): ttsSettings.setVisible(checked) adjust_stretch(self.groups, self.layout) def toggle_vision_group(self, visionSettings, checked): visionSettings.setVisible(checked) adjust_stretch(self.groups, self.layout) def update_all_settings(self): update_all_configs(self.configs) ================================================ FILE: gui/tabs_settings/tts.py ================================================ import yaml from pathlib import Path from PySide6.QtCore import Qt from PySide6.QtWidgets import ( QLabel, QComboBox, QWidget, QGridLayout, QMessageBox, QHBoxLayout, QCheckBox ) from core.constants import WHISPER_SPEECH_MODELS WHISPER_SPEECH_SPEAKERS = ["default", "classic", "voice_b"] WHISPER_SPEECH_VOICE_CLONING_LABEL = "Voice Cloning (Coming Soon)" KYUTAI_POCKET_VOICES = [ "alba", "anna", "azelma", "bill_boerst", "caro_davy", "charles", "cosette", "eponine", "eve", "fantine", "george", "jane", "javert", "jean", "marius", "mary", "michael", "paul", "peter_yearsley", "stuart_bell", "vera", ] KYUTAI_POCKET_QUANTIZE_TOOLTIP = ( "Apply int8 quantization. The developers claim no loss of quality " "(WER unchanged) with ~48% less RAM and ~27% faster inference. " "Feel free to test and decide for yourself." ) class TTSSettingsTab(QWidget): BACKENDS = { "bark": { "label": "Bark (GPU)", "extras": { "size": { "label": "Model", "options": ["normal", "small"], "default": "small", }, "speaker": { "label": "Speaker", "options": [ "v2/en_speaker_0", "v2/en_speaker_1", "v2/en_speaker_2", "v2/en_speaker_3", "v2/en_speaker_4", "v2/en_speaker_5", "v2/en_speaker_6", "v2/en_speaker_7", "v2/en_speaker_8", "v2/en_speaker_9", ], "default": "v2/en_speaker_6", }, }, }, "whisperspeech": { "label": "WhisperSpeech (GPU)", "extras": { "s2a": { "label": "S2A Model", "options": list(WHISPER_SPEECH_MODELS["s2a"].keys()), "default": list(WHISPER_SPEECH_MODELS["s2a"].keys())[0], }, "t2s": { "label": "T2S Model", "options": list(WHISPER_SPEECH_MODELS["t2s"].keys()), "default": list(WHISPER_SPEECH_MODELS["t2s"].keys())[0], }, "speaker": { "label": "Speaker", "options": WHISPER_SPEECH_SPEAKERS + [WHISPER_SPEECH_VOICE_CLONING_LABEL], "default": WHISPER_SPEECH_SPEAKERS[0], }, }, }, "chattts": { "label": "ChatTTS (CPU/CPU)", "extras": {}, }, "chatterbox": { "label": "Chatterbox (CPU/GPU)", "extras": {}, }, "googletts": { "label": "Google TTS (CPU)", "extras": {}, }, "kyutaipocket": { "label": "Kyutai Pocket (CPU)", "extras": { "voice": { "label": "Voice", "options": KYUTAI_POCKET_VOICES, "default": "alba", }, }, }, "kyutai": { "label": "Kyutai (GPU)", "extras": { "model": { "label": "Model", "options": ["1.6B (EN+FR, ~4.2GB VRAM)", "0.75B (EN, ~2GB VRAM)"], "default": "1.6B (EN+FR, ~4.2GB VRAM)", }, "voice": { "label": "Voice", "options": [ "Default Male", "Fast Male 1", "Fast Female", "Fast Male 2", "Happy Male", "Happy Female 1", "Happy Female 2", "Enunciated Female" ], "default": "Happy Male", }, }, }, } def __init__(self): super().__init__() self.widgets_for_backend: dict[str, dict[str, QWidget]] = {} self._build_ui() self._load_from_yaml() self._update_visible_extras() def _build_ui(self): layout = QGridLayout(self) layout.setColumnStretch(0, 0) layout.setColumnStretch(1, 0) layout.setColumnStretch(2, 1) layout.addWidget(QLabel("TTS Backend:"), 0, 0) self.backend_combo = QComboBox() for key, spec in self.BACKENDS.items(): self.backend_combo.addItem(spec["label"], userData=key) layout.addWidget(self.backend_combo, 0, 1) self._extras_box = QWidget() self._extras_layout = QHBoxLayout(self._extras_box) 
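# All per-backend extra controls are built up front from the BACKENDS spec (one
# QLabel/QComboBox pair per "extras" entry) and stored in widgets_for_backend;
# _update_visible_extras() then simply re-parents the pairs for the currently
# selected backend into this horizontal box whenever the combo changes.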
self._extras_layout.setContentsMargins(0, 0, 0, 0) self._extras_layout.setSpacing(10) layout.addWidget(self._extras_box, 0, 2) self.widgets_for_backend: dict[str, dict[str, tuple[QLabel, QComboBox]]] = {} for key, spec in self.BACKENDS.items(): wdict = {} for extra_key, meta in spec["extras"].items(): lbl = QLabel(meta["label"]) cmb = QComboBox() cmb.setObjectName(extra_key) cmb.addItems(meta["options"]) if key == "whisperspeech" and extra_key == "speaker": self._disable_voice_cloning_item(cmb) cmb.currentTextChanged.connect(self._save_to_yaml) wdict[extra_key] = (lbl, cmb) self.widgets_for_backend[key] = wdict self.backend_combo.currentIndexChanged.connect(self._update_visible_extras) self.widgets_for_backend["kyutai"]["model"][1].currentTextChanged.connect( self._update_kyutai_voice_visibility ) self._pocket_quantize_checkbox = QCheckBox("Quantize (int8)") self._pocket_quantize_checkbox.setChecked(True) self._pocket_quantize_checkbox.setToolTip(KYUTAI_POCKET_QUANTIZE_TOOLTIP) self._pocket_quantize_checkbox.toggled.connect(self._save_to_yaml) def _config_path(self) -> Path: return Path("config.yaml") def _load_from_yaml(self): cfg = self._try_read_yaml() tts_cfg = cfg.get("tts", {}) if cfg else {} backend = tts_cfg.get("model", "whisperspeech") idx = self.backend_combo.findData(backend) self.backend_combo.setCurrentIndex(idx if idx != -1 else 0) bark_cfg = cfg.get("bark", {}) if cfg else {} for (lbl, cmb) in self.widgets_for_backend["bark"].values(): if cmb.objectName() == "size": cmb.setCurrentText(bark_cfg.get("size", "small")) else: cmb.setCurrentText(bark_cfg.get("speaker", "v2/en_speaker_6")) if tts_cfg.get("model") == "whisperspeech": self.widgets_for_backend["whisperspeech"]["s2a"][1].setCurrentText( self._find_key_by_value( WHISPER_SPEECH_MODELS["s2a"], tts_cfg.get("s2a") ) ) self.widgets_for_backend["whisperspeech"]["t2s"][1].setCurrentText( self._find_key_by_value( WHISPER_SPEECH_MODELS["t2s"], tts_cfg.get("t2s") ) ) speaker = tts_cfg.get("speaker", WHISPER_SPEECH_SPEAKERS[0]) if speaker not in WHISPER_SPEECH_SPEAKERS: speaker = WHISPER_SPEECH_SPEAKERS[0] self.widgets_for_backend["whisperspeech"]["speaker"][1].setCurrentText(speaker) pocket_cfg = cfg.get("kyutaipocket", {}) if cfg else {} for extra_key, (lbl, cmb) in self.widgets_for_backend["kyutaipocket"].items(): if extra_key == "voice": voice = pocket_cfg.get("voice", "alba") if voice not in KYUTAI_POCKET_VOICES: voice = "alba" cmb.setCurrentText(voice) self._pocket_quantize_checkbox.setChecked( bool(pocket_cfg.get("quantize", True)) ) kyutai_cfg = cfg.get("kyutai", {}) if cfg else {} for extra_key, (lbl, cmb) in self.widgets_for_backend["kyutai"].items(): if extra_key == "model": cmb.setCurrentText(kyutai_cfg.get("model_display_name", "1.6B (EN+FR, ~4.2GB VRAM)")) elif extra_key == "voice": cmb.setCurrentText(kyutai_cfg.get("voice_display_name", "Happy Male")) def _save_to_yaml(self): cfg = self._try_read_yaml() backend_key = self.backend_combo.currentData() tts_cfg = cfg.setdefault("tts", {}) tts_cfg["model"] = backend_key if backend_key == "bark": bark = cfg.setdefault("bark", {}) bark["size"] = self.widgets_for_backend["bark"]["size"][1].currentText() bark["speaker"] = ( self.widgets_for_backend["bark"]["speaker"][1].currentText() ) elif backend_key == "whisperspeech": tts_cfg["s2a"] = WHISPER_SPEECH_MODELS["s2a"][ self.widgets_for_backend["whisperspeech"]["s2a"][1].currentText() ][0] tts_cfg["t2s"] = WHISPER_SPEECH_MODELS["t2s"][ self.widgets_for_backend["whisperspeech"]["t2s"][1].currentText() ][0] speaker_choice = 
self.widgets_for_backend["whisperspeech"]["speaker"][1].currentText() if speaker_choice in WHISPER_SPEECH_SPEAKERS: tts_cfg["speaker"] = speaker_choice elif backend_key == "kyutaipocket": pocket = cfg.setdefault("kyutaipocket", {}) pocket["language"] = "english" pocket["voice"] = self.widgets_for_backend["kyutaipocket"]["voice"][1].currentText() pocket["quantize"] = bool(self._pocket_quantize_checkbox.isChecked()) pocket["temp"] = 0.7 elif backend_key == "kyutai": kyutai = cfg.setdefault("kyutai", {}) model_mapping = { "1.6B (EN+FR, ~4.2GB VRAM)": ("kyutai/tts-1.6b-en_fr", 32), "0.75B (EN, ~2GB VRAM)": ("kyutai/tts-0.75b-en-public", 16), } selected_model_display = self.widgets_for_backend["kyutai"]["model"][1].currentText() hf_repo, n_q = model_mapping[selected_model_display] kyutai["model_display_name"] = selected_model_display kyutai["hf_repo"] = hf_repo kyutai["n_q"] = n_q voice_mapping = { "Default Male": "expresso/ex04-ex03_default_002_channel2_239s.wav", "Fast Male 1": "expresso/ex01-ex02_fast_001_channel1_104s.wav", "Fast Female": "expresso/ex01-ex02_fast_001_channel2_73s.wav", "Fast Male 2": "expresso/ex04-ex03_fast_001_channel2_25s.wav", "Happy Male": "expresso/ex03-ex01_happy_001_channel1_334s.wav", "Happy Female 1": "expresso/ex04-ex02_happy_001_channel1_118s.wav", "Happy Female 2": "expresso/ex04-ex02_happy_001_channel2_140s.wav", "Enunciated Female": "expresso/ex04-ex03_enunciated_001_channel2_342s.wav", } selected_voice_display = self.widgets_for_backend["kyutai"]["voice"][1].currentText() kyutai["voice"] = voice_mapping[selected_voice_display] kyutai["voice_display_name"] = selected_voice_display kyutai["temp"] = 0.6 kyutai["cfg_coef"] = 2.0 with self._config_path().open("w") as f: yaml.dump(cfg, f, sort_keys=False) def _try_read_yaml(self): try: with self._config_path().open() as f: return yaml.safe_load(f) or {} except FileNotFoundError: return {} except Exception as e: QMessageBox.warning(self, "Configuration Error", str(e)) return {} def _update_visible_extras(self): while self._extras_layout.count(): item = self._extras_layout.takeAt(0) if (w := item.widget()): w.setParent(None) chosen = self.backend_combo.currentData() for lbl, cmb in self.widgets_for_backend[chosen].values(): self._extras_layout.addWidget(lbl) self._extras_layout.addWidget(cmb) lbl.show() cmb.show() if chosen == "kyutai": self._update_kyutai_voice_visibility() elif chosen == "kyutaipocket": self._extras_layout.addWidget(self._pocket_quantize_checkbox) self._pocket_quantize_checkbox.show() self._save_to_yaml() def _update_kyutai_voice_visibility(self): model_text = self.widgets_for_backend["kyutai"]["model"][1].currentText() voice_lbl, voice_cmb = self.widgets_for_backend["kyutai"]["voice"] supports_voices = model_text.startswith("1.6B") voice_lbl.setVisible(supports_voices) voice_cmb.setVisible(supports_voices) @staticmethod def _find_key_by_value(d: dict, value: str | None): for k, v in d.items(): if v[0] == value: return k return next(iter(d)) @staticmethod def _disable_voice_cloning_item(cmb: QComboBox): idx = cmb.findText(WHISPER_SPEECH_VOICE_CLONING_LABEL) if idx == -1: return model = cmb.model() item = model.item(idx) if item is not None: item.setFlags(item.flags() & ~Qt.ItemIsEnabled & ~Qt.ItemIsSelectable) item.setToolTip("Coming soon") ================================================ FILE: gui/tabs_settings/vision.py ================================================ import yaml from pathlib import Path import torch from PySide6.QtCore import Qt from PySide6.QtWidgets import QLabel, 
QGridLayout, QVBoxLayout, QComboBox, QWidget from core.constants import VISION_MODELS CONFIG_FILE = "config.yaml" def _read_cfg() -> dict: p = Path(CONFIG_FILE) if not p.exists(): return {} try: with p.open("r", encoding="utf-8") as f: return yaml.safe_load(f) or {} except Exception: return {} def _write_cfg(cfg: dict) -> None: with Path(CONFIG_FILE).open("w", encoding="utf-8") as f: yaml.safe_dump(cfg, f, sort_keys=True) def is_cuda_available(): return torch.cuda.is_available() def get_cuda_capability(): if is_cuda_available(): return torch.cuda.get_device_capability(0) return (0, 0) class VisionSettingsTab(QWidget): def __init__(self): super().__init__() mainVLayout = QVBoxLayout() self.setLayout(mainVLayout) gridLayout = QGridLayout() for col, stretch in enumerate((3, 1, 1, 4, 2, 1)): gridLayout.setColumnStretch(col, stretch) mainVLayout.addLayout(gridLayout) for col, text in enumerate(("Model", "Size", "VRAM", "Vision Component", "Chat Component", "Avg Length")): header = QLabel(text) header.setAlignment(Qt.AlignCenter) gridLayout.addWidget(header, 0, col) self.modelComboBox = QComboBox() self.populate_model_combobox() self.modelComboBox.setMinimumWidth(175) gridLayout.addWidget(self.modelComboBox, 1, 0) self.sizeLabel = QLabel("—") self.sizeLabel.setAlignment(Qt.AlignCenter) gridLayout.addWidget(self.sizeLabel, 1, 1) self.vramLabel = QLabel("—") self.vramLabel.setAlignment(Qt.AlignCenter) gridLayout.addWidget(self.vramLabel, 1, 2) self.visionComponentLabel = QLabel("—") self.visionComponentLabel.setAlignment(Qt.AlignCenter) self.visionComponentLabel.setWordWrap(True) gridLayout.addWidget(self.visionComponentLabel, 1, 3) self.chatComponentLabel = QLabel("—") self.chatComponentLabel.setAlignment(Qt.AlignCenter) self.chatComponentLabel.setWordWrap(True) gridLayout.addWidget(self.chatComponentLabel, 1, 4) self.avgLenLabel = QLabel("—") self.avgLenLabel.setAlignment(Qt.AlignCenter) gridLayout.addWidget(self.avgLenLabel, 1, 5) cfg = _read_cfg() saved = (cfg.get("vision") or {}).get("chosen_model") if saved and saved in VISION_MODELS: self.modelComboBox.setCurrentText(saved) self.modelComboBox.currentTextChanged.connect(self._apply_model_to_labels) self._apply_model_to_labels(self.modelComboBox.currentText()) def populate_model_combobox(self): self.modelComboBox.clear() self.modelComboBox.addItems(VISION_MODELS.keys()) def _apply_model_to_labels(self, model_name: str): info = VISION_MODELS.get(model_name, {}) or {} size = info.get("size", "—") vram = info.get("vram", "—") vision_component = info.get("vision_component", "—") chat_component = info.get("chat_component", "—") avg_length = info.get("avg_length", "—") self.sizeLabel.setText(str(size)) self.vramLabel.setText(str(vram)) self.visionComponentLabel.setText(str(vision_component)) self.chatComponentLabel.setText(str(chat_component)) self.avgLenLabel.setText(str(avg_length)) cfg = _read_cfg() cfg.setdefault("vision", {}) if cfg["vision"].get("chosen_model") != model_name: cfg["vision"]["chosen_model"] = model_name _write_cfg(cfg) ================================================ FILE: gui/tabs_tools/__init__.py ================================================ ================================================ FILE: gui/tabs_tools/misc.py ================================================ from PySide6.QtWidgets import QVBoxLayout, QHBoxLayout, QPushButton, QWidget, QMessageBox, QSpinBox from PySide6.QtCore import QThread, Signal, QTimer from core.initialize import restore_vector_db_backup from core.utilities import backup_database from 
core.constants import CustomButtonStyles class WorkerThread(QThread): finished = Signal(bool) def __init__(self, function, *args, **kwargs): super().__init__() self.function = function self.args = args self.kwargs = kwargs def run(self): try: self.function(*self.args, **self.kwargs) self.finished.emit(True) except Exception as e: print(f"Error during {self.function.__name__}: {e}") self.finished.emit(False) class MiscTab(QWidget): def __init__(self): super().__init__() self.layout = QVBoxLayout(self) self.backup_all_button = QPushButton("Backup Databases") self.backup_all_button.clicked.connect(self.backup_all_databases) self.backup_all_button.setToolTip("Create a backup of all databases in the Vector_DB folder") self.restore_backup_button = QPushButton("Restore Databases") self.restore_backup_button.clicked.connect(self.restore_backup) self.restore_backup_button.setToolTip("Restore databases from the most recent backup") self.chart_gpus_button = QPushButton("GPUs") self.chart_gpus_button.clicked.connect(self.chart_gpus) self.chart_gpus_button.setToolTip("Compare GPUs by V-RAM") self.chart_chat_models_button = QPushButton("Chat Models") self.chart_chat_models_button.clicked.connect(self.chart_chat_models) self.chart_chat_models_button.setToolTip("Compare various chat models.") self.chart_vision_models_button = QPushButton("Vision Models") self.chart_vision_models_button.clicked.connect(self.chart_vision_models) self.chart_vision_models_button.setToolTip("Compare various vision models.") self.chart_vector_models_button = QPushButton("Vector Models") self.chart_vector_models_button.clicked.connect(self.chart_vector_models) self.chart_vector_models_button.setToolTip("Compare various vector/embedding models.") self.min_vram_spin = QSpinBox() self.min_vram_spin.setRange(1, 128) self.min_vram_spin.setValue(8) self.min_vram_spin.setPrefix("Min ") self.min_vram_spin.setSuffix(" GB") self.min_vram_spin.setToolTip("Minimum GPU V-RAM (in GB)") self.max_vram_spin = QSpinBox() self.max_vram_spin.setRange(1, 128) self.max_vram_spin.setValue(16) self.max_vram_spin.setPrefix("Max ") self.max_vram_spin.setSuffix(" GB") self.max_vram_spin.setToolTip("Maximum GPU V-RAM (in GB)") self.backup_all_button.setStyleSheet(CustomButtonStyles.RED_BUTTON_STYLE) self.restore_backup_button.setStyleSheet(CustomButtonStyles.RED_BUTTON_STYLE) self.chart_gpus_button.setStyleSheet(CustomButtonStyles.GREEN_BUTTON_STYLE) self.chart_chat_models_button.setStyleSheet(CustomButtonStyles.BLUE_BUTTON_STYLE) self.chart_vision_models_button.setStyleSheet(CustomButtonStyles.TEAL_BUTTON_STYLE) self.chart_vector_models_button.setStyleSheet(CustomButtonStyles.PURPLE_BUTTON_STYLE) backup_row = QHBoxLayout() backup_row.addStretch(1) backup_row.addWidget(self.backup_all_button) backup_row.addWidget(self.restore_backup_button) backup_row.addStretch(1) charts_row = QHBoxLayout() charts_row.addStretch(1) charts_row.addWidget(self.chart_gpus_button) charts_row.addWidget(self.min_vram_spin) charts_row.addWidget(self.max_vram_spin) charts_row.addWidget(self.chart_chat_models_button) charts_row.addWidget(self.chart_vision_models_button) charts_row.addWidget(self.chart_vector_models_button) charts_row.addStretch(1) self.layout.addLayout(backup_row) self.layout.addLayout(charts_row) self.backup_thread = None self.restore_thread = None def set_buttons_enabled(self, enabled, buttons): for button in buttons: button.setEnabled(enabled) def set_button_text(self, button: QPushButton, text: str): button.setText(text) def backup_all_databases(self): confirm 
= QMessageBox.question( self, "Confirm Backup", "Warning. This will erase any existing backups and overwrite them with the current state of the \"Vector_DB\" folder.\n\nAre you sure you want to proceed?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if confirm == QMessageBox.Yes: self.set_button_text(self.backup_all_button, "Backing up...") self.set_buttons_enabled(False, [self.backup_all_button, self.restore_backup_button]) self.backup_thread = WorkerThread(backup_database) self.backup_thread.finished.connect(self.on_backup_finished) self.backup_thread.start() else: pass def on_backup_finished(self, success): self.set_buttons_enabled(True, [self.backup_all_button, self.restore_backup_button]) self.set_button_text(self.backup_all_button, "Backup Databases") if success: QMessageBox.information(self, "Backup Complete", "All databases have been successfully backed up.") else: QMessageBox.critical(self, "Backup Failed", "Failed to backup the databases. Check the console for error details.") def restore_backup(self): confirm = QMessageBox.question( self, "Confirm Restoration", "Warning. This will overwrite current databases with the backup. Are you sure you want to proceed?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if confirm == QMessageBox.Yes: self.set_button_text(self.restore_backup_button, "Restoring...") self.set_buttons_enabled(False, [self.restore_backup_button, self.backup_all_button]) self.restore_thread = WorkerThread(restore_vector_db_backup) self.restore_thread.finished.connect(self.on_restore_finished) self.restore_thread.start() else: pass def on_restore_finished(self, success): self.set_buttons_enabled(True, [self.restore_backup_button, self.backup_all_button]) self.set_button_text(self.restore_backup_button, "Restore Databases") if success: QMessageBox.information(self, "Restoration Complete", "The databases have been successfully restored from the backup.") else: QMessageBox.critical(self, "Restoration Failed", "Failed to restore the database backup. 
Check the console for error details.") def chart_gpus(self): import matplotlib matplotlib.use('QtAgg') import matplotlib.pyplot as plt from charts.all_gpus import create_gpu_comparison_plot self.chart_gpus_button.setEnabled(False) self.set_button_text(self.chart_gpus_button, "Charting...") min_vram = self.min_vram_spin.value() max_vram = self.max_vram_spin.value() if min_vram > max_vram: QMessageBox.warning(self, "Invalid Range", "Minimum V-RAM value cannot exceed maximum V-RAM value.") self.reset_chart_button() return fig = create_gpu_comparison_plot(min_vram, max_vram) plt.figure(fig.number) plt.show(block=False) QTimer.singleShot(500, self.reset_chart_button) def reset_chart_button(self): self.set_button_text(self.chart_gpus_button, "GPUs") self.chart_gpus_button.setEnabled(True) def chart_chat_models(self): import matplotlib matplotlib.use('QtAgg') import matplotlib.pyplot as plt from charts.models_chat import create_chat_models_comparison_plot self.chart_chat_models_button.setEnabled(False) self.set_button_text(self.chart_chat_models_button, "Charting...") fig = create_chat_models_comparison_plot() plt.figure(fig.number) plt.show(block=False) QTimer.singleShot(500, self.reset_chart_chat_models_button) def reset_chart_chat_models_button(self): self.set_button_text(self.chart_chat_models_button, "Chat Models") self.chart_chat_models_button.setEnabled(True) def chart_vision_models(self): import matplotlib matplotlib.use('QtAgg') import matplotlib.pyplot as plt from charts.models_vision import create_vision_models_comparison_plot self.chart_vision_models_button.setEnabled(False) self.set_button_text(self.chart_vision_models_button, "Charting...") fig = create_vision_models_comparison_plot() plt.figure(fig.number) plt.show(block=False) QTimer.singleShot(500, self.reset_chart_vision_models_button) def reset_chart_vision_models_button(self): self.set_button_text(self.chart_vision_models_button, "Vision Models") self.chart_vision_models_button.setEnabled(True) def chart_vector_models(self): import matplotlib matplotlib.use('QtAgg') import matplotlib.pyplot as plt from charts.models_vector import create_vector_models_comparison_plot self.chart_vector_models_button.setEnabled(False) self.set_button_text(self.chart_vector_models_button, "Charting...") fig = create_vector_models_comparison_plot() plt.figure(fig.number) plt.show(block=False) QTimer.singleShot(500, self.reset_chart_vector_models_button) def reset_chart_vector_models_button(self): self.set_button_text(self.chart_vector_models_button, "Vector Models") self.chart_vector_models_button.setEnabled(True) ================================================ FILE: gui/tabs_tools/ocr.py ================================================ import time from pathlib import Path import fitz from PySide6.QtWidgets import ( QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QLabel, QComboBox, QFileDialog, QMessageBox ) from PySide6.QtCore import QThread, Signal from modules.ocr import process_documents def get_pdf_page_count(pdf_path): try: with fitz.open(pdf_path) as doc: return doc.page_count except Exception as e: print(f"Error reading PDF: {e}") return 0 def run_ocr_process(pdf_path, backend): try: process_documents( pdf_paths=Path(pdf_path), backend=backend, ) return True, None except Exception as e: return False, str(e) class OcrWorkerThread(QThread): finished_signal = Signal(bool, str, float) def __init__(self, pdf_path, backend, parent=None): super().__init__(parent) self.pdf_path = pdf_path self.backend = backend def run(self): start_time = 
time.time() result = run_ocr_process(self.pdf_path, self.backend) elapsed_time = time.time() - start_time self.finished_signal.emit(*result, elapsed_time) class OCRToolSettingsTab(QWidget): ENGINE_MAPPING = { "Tesseract": "tesseract" } def __init__(self): super().__init__() self.selected_pdf_file = None self.create_layout() self.setButtons(True) self.worker_thread = None def create_layout(self): main_layout = QVBoxLayout() engine_selection_hbox = QHBoxLayout() engine_label = QLabel("OCR Engine") engine_selection_hbox.addWidget(engine_label) self.engine_combo = QComboBox() self.engine_combo.addItems(["Tesseract"]) self.engine_combo.setCurrentText("Tesseract") engine_selection_hbox.addWidget(self.engine_combo) self.select_pdf_button = QPushButton("Choose PDF") self.select_pdf_button.clicked.connect(self.select_pdf_file) engine_selection_hbox.addWidget(self.select_pdf_button) self.process_button = QPushButton("Process") self.process_button.clicked.connect(self.start_ocr_process) engine_selection_hbox.addWidget(self.process_button) engine_selection_hbox.setStretchFactor(engine_label, 1) engine_selection_hbox.setStretchFactor(self.engine_combo, 2) engine_selection_hbox.setStretchFactor(self.select_pdf_button, 1) engine_selection_hbox.setStretchFactor(self.process_button, 1) main_layout.addLayout(engine_selection_hbox) self.file_path_label = QLabel("No PDF file selected") main_layout.addWidget(self.file_path_label) self.status_label = QLabel("") self.status_label.setStyleSheet("color: gray;") main_layout.addWidget(self.status_label) self.setLayout(main_layout) def setButtons(self, enabled): self.select_pdf_button.setEnabled(enabled) self.process_button.setEnabled(enabled) self.engine_combo.setEnabled(enabled) if enabled: self.status_label.setText("") def select_pdf_file(self): current_dir = Path.cwd() file_name, _ = QFileDialog.getOpenFileName( self, "Select PDF File", str(current_dir), "PDF Files (*.pdf)" ) if file_name: file_path = Path(file_name) short_path = f"...{file_path.parent.name}/{file_path.name}" self.file_path_label.setText(short_path) self.file_path_label.setToolTip(str(file_path.absolute())) self.selected_pdf_file = file_name self.status_label.setText("") def show_error_message(self, message): self.status_label.setStyleSheet("color: red;") self.status_label.setText("Error: OCR process failed") QMessageBox.critical(self, "Error", f"OCR process failed:\n{message}") def show_success_message(self): self.status_label.setStyleSheet("color: #4CAF50;") minutes, seconds = divmod(self.elapsed_time, 60) time_str = f"{int(minutes)}m {seconds:.1f}s" if minutes > 0 else f"{seconds:.1f}s" self.status_label.setText(f"Success! Completed in {time_str}") if not self.selected_pdf_file: return original_file = Path(self.selected_pdf_file) processed_file = original_file.with_stem(f"{original_file.stem}_OCR").with_suffix(".pdf") if processed_file.exists(): file_link = f'Open New File' else: file_link = "The processed file could not be found." QMessageBox.information( self, "Success!", f"""Processing completed in {time_str}!

    A new .pdf ending in '_OCR' has been saved in the same directory as the original file.

    {file_link} """ ) def start_ocr_process(self): if not self.selected_pdf_file: QMessageBox.warning(self, "Warning", "Please select a PDF file first.") return selected_engine = self.engine_combo.currentText() backend = self.ENGINE_MAPPING[selected_engine] self.status_label.setStyleSheet("color: #0074D9;") self.status_label.setText(f"Processing with {selected_engine}...") print(f"Starting OCR process for {self.selected_pdf_file}") self.setButtons(False) if self.worker_thread and self.worker_thread.isRunning(): self.worker_thread.wait() self.worker_thread = OcrWorkerThread(self.selected_pdf_file, backend) self.worker_thread.finished_signal.connect(self.ocr_finished) self.worker_thread.start() def ocr_finished(self, success, message, elapsed_time): self.setButtons(True) self.elapsed_time = elapsed_time if self.worker_thread: self.worker_thread.quit() self.worker_thread.wait() self.worker_thread = None from PySide6.QtCore import QTimer QTimer.singleShot(1000, lambda: self._show_completion_message(success, message)) def _show_completion_message(self, success, message): if success: self.show_success_message() else: self.show_error_message(message) ================================================ FILE: gui/tabs_tools/scrape.py ================================================ import os import platform import shutil import subprocess from PySide6.QtCore import Qt, QThread, QSettings from PySide6.QtGui import QColor, QStandardItem, QStandardItemModel from PySide6.QtWidgets import ( QWidget, QVBoxLayout, QHBoxLayout, QLabel, QComboBox, QPushButton, QMessageBox, QListWidget, QListWidgetItem, ) from modules.scraper import ScraperRegistry, ScraperWorker from core.constants import scrape_documentation, PROJECT_ROOT MAX_CONCURRENT_SCRAPES = 6 QSETTINGS_ORG = "VectorDB-Plugin" QSETTINGS_APP = "ScrapeDocumentation" RATE_LIMITED_KEY = "rate_limited_scrapes" def _load_rate_limited_set() -> set[str]: s = QSettings(QSETTINGS_ORG, QSETTINGS_APP) val = s.value(RATE_LIMITED_KEY, []) if isinstance(val, str): val = [val] if val else [] if val is None: val = [] return {str(v) for v in val} def _save_rate_limited_set(names: set[str]) -> None: s = QSettings(QSETTINGS_ORG, QSETTINGS_APP) s.setValue(RATE_LIMITED_KEY, sorted(names)) def _mark_rate_limited_persistent(name: str) -> None: names = _load_rate_limited_set() names.add(name) _save_rate_limited_set(names) def _clear_rate_limited_persistent(name: str) -> None: names = _load_rate_limited_set() names.discard(name) _save_rate_limited_set(names) class ScrapeRowWidget(QWidget): """One row in the active-scrapes list. 
Owns the per-scrape Cancel/Open buttons.""" def __init__(self, doc_name: str, folder_path: str, on_cancel, on_open): super().__init__() self.doc_name = doc_name self.folder_path = folder_path self._on_cancel = on_cancel self._on_open = on_open layout = QHBoxLayout(self) layout.setContentsMargins(4, 2, 4, 2) layout.setSpacing(8) self.label = QLabel() self.label.setTextFormat(Qt.RichText) self._set_label("Starting...", count=0, color="#FF9800") layout.addWidget(self.label, 1) self.cancel_btn = QPushButton("Cancel") self.cancel_btn.clicked.connect(self._cancel_clicked) layout.addWidget(self.cancel_btn) self.open_btn = QPushButton("Open") self.open_btn.clicked.connect(self._open_clicked) layout.addWidget(self.open_btn) def _set_label(self, status_text: str, count: int, color: str): self.label.setText( f'{self.doc_name} ' f'{status_text} ' f'Pages scraped: {count}' ) def update_count(self, count: int): self._set_label("Scraping...", count=count, color="#FF9800") def mark_completed(self, count: int): self._set_label("Completed.", count=count, color="#4CAF50") self.cancel_btn.setEnabled(False) def mark_cancelled(self, count: int): self._set_label("Cancelled.", count=count, color="#9E9E9E") self.cancel_btn.setEnabled(False) def mark_rate_limited(self, count: int): self._set_label( "Rate-limited - partial state saved. Click 'Scrape' again and choose Resume.", count=count, color="#FFC107", ) self.cancel_btn.setEnabled(False) def _cancel_clicked(self): self.cancel_btn.setEnabled(False) self._set_label("Cancelling...", count=self._current_count(), color="#9E9E9E") self._on_cancel(self.doc_name) def _open_clicked(self): self._on_open(self.folder_path) def _current_count(self) -> int: try: if os.path.exists(self.folder_path): return len([f for f in os.listdir(self.folder_path) if f.endswith(".html")]) except Exception: pass return 0 class ScrapeDocumentationTab(QWidget): def __init__(self) -> None: super().__init__() self.setToolTip( "Tab for scraping documentation from the selected source." 
) self.active_workers: dict[str, dict] = {} self.restored_rate_limited_rows: dict[str, QListWidgetItem] = {} self.init_ui() self._restore_rate_limited_rows() def init_ui(self) -> None: main_layout = QVBoxLayout(self) label = QLabel("Select Documentation:") label.setAlignment(Qt.AlignLeft | Qt.AlignVCenter) main_layout.addWidget(label) hbox = QHBoxLayout() self.doc_combo = QComboBox() self.populate_combo_box() hbox.addWidget(self.doc_combo) self.scrape_button = QPushButton("Scrape") self.scrape_button.clicked.connect(self.start_scraping) hbox.addWidget(self.scrape_button) hbox.setStretch(0, 1) hbox.setStretch(1, 1) main_layout.addLayout(hbox) self.summary_label = QLabel() self.summary_label.setAlignment(Qt.AlignLeft | Qt.AlignVCenter) self._refresh_summary() main_layout.addWidget(self.summary_label) self.scrape_list = QListWidget() self.scrape_list.setSelectionMode(QListWidget.NoSelection) main_layout.addWidget(self.scrape_list, 1) def _refresh_summary(self) -> None: n = len(self.active_workers) self.summary_label.setText( f'Active scrapes: ' f'{n} / {MAX_CONCURRENT_SCRAPES}' ) def _restore_rate_limited_rows(self) -> None: persisted = _load_rate_limited_set() if not persisted: return scraped_dir = os.path.join(str(PROJECT_ROOT), "Scraped_Documentation") for doc_name in sorted(persisted): doc_info = scrape_documentation.get(doc_name) if not doc_info or "folder" not in doc_info: _clear_rate_limited_persistent(doc_name) continue folder_path = os.path.join(scraped_dir, doc_info["folder"]) if not os.path.exists(folder_path): _clear_rate_limited_persistent(doc_name) continue count = 0 try: count = len([f for f in os.listdir(folder_path) if f.endswith(".html")]) except Exception: pass row = ScrapeRowWidget( doc_name=doc_name, folder_path=folder_path, on_cancel=lambda _n: None, on_open=self.open_folder, ) row.mark_rate_limited(count) item = QListWidgetItem(self.scrape_list) item.setSizeHint(row.sizeHint()) self.scrape_list.addItem(item) self.scrape_list.setItemWidget(item, row) self.restored_rate_limited_rows[doc_name] = item def populate_combo_box(self) -> None: doc_options = sorted(scrape_documentation.keys(), key=str.lower) model = QStandardItemModel() scraped_dir = os.path.join( str(PROJECT_ROOT), "Scraped_Documentation", ) for doc in doc_options: folder = scrape_documentation[doc]["folder"] folder_path = os.path.join(scraped_dir, folder) item = QStandardItem(doc) if os.path.exists(folder_path): item.setForeground(QColor("#e75959")) item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled) model.appendRow(item) self.doc_combo.setModel(model) def start_scraping(self) -> None: selected_doc = self.doc_combo.currentText() doc_info = scrape_documentation.get(selected_doc) if not doc_info or "URL" not in doc_info or "folder" not in doc_info: self.show_error("Incomplete configuration for the selection.") return if selected_doc in self.active_workers: QMessageBox.information( self, "Already Scraping", f"'{selected_doc}' is already being scraped.", ) return if len(self.active_workers) >= MAX_CONCURRENT_SCRAPES: QMessageBox.warning( self, "Concurrent Scrape Limit Reached", f"You can run at most {MAX_CONCURRENT_SCRAPES} scrapes at the same time. 
" f"Wait for one to finish (or cancel one) before starting another.", ) return url = doc_info["URL"] folder = doc_info["folder"] scraper_name = doc_info.get("scraper_class", "BaseScraper") scraper_class = ScraperRegistry.get_scraper(scraper_name) folder_path = os.path.join( str(PROJECT_ROOT), "Scraped_Documentation", folder, ) resume = False if os.path.exists(folder_path): msg_box = QMessageBox( QMessageBox.Warning, "Existing Folder", f"A scrape folder already exists for {selected_doc}.", QMessageBox.NoButton, self, ) msg_box.setInformativeText( "Resume: pick up where the last run left off (already-saved pages are skipped; " "queued and failed URLs are retried).\n\n" "Start Fresh: delete the existing folder contents and re-scrape from scratch.\n\n" "Cancel: do nothing." ) resume_btn = msg_box.addButton("Resume", QMessageBox.AcceptRole) fresh_btn = msg_box.addButton("Start Fresh", QMessageBox.DestructiveRole) cancel_btn = msg_box.addButton("Cancel", QMessageBox.RejectRole) msg_box.setDefaultButton(resume_btn) msg_box.exec() clicked = msg_box.clickedButton() if clicked is None or clicked == cancel_btn: return resume = (clicked == resume_btn) if not resume: _clear_rate_limited_persistent(selected_doc) for filename in os.listdir(folder_path): file_path = os.path.join(folder_path, filename) try: if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception: pass if selected_doc in self.restored_rate_limited_rows: old_item = self.restored_rate_limited_rows.pop(selected_doc) old_row = self.scrape_list.row(old_item) if old_row >= 0: self.scrape_list.takeItem(old_row) row = ScrapeRowWidget( doc_name=selected_doc, folder_path=folder_path, on_cancel=self.cancel_scrape, on_open=self.open_folder, ) item = QListWidgetItem(self.scrape_list) item.setSizeHint(row.sizeHint()) self.scrape_list.addItem(item) self.scrape_list.setItemWidget(item, row) worker = ScraperWorker(url, folder, scraper_class, name=selected_doc, resume=resume) thread = QThread() worker.moveToThread(thread) thread.started.connect(worker.run) worker.status_updated.connect(self.update_status) # Phase 1: worker emits → update the row UI + ask the thread to quit. # We must NOT drop our Python references to worker/thread here, because # thread.quit() takes a moment to wind down the event loop. worker.scraping_finished.connect(self._on_worker_finished) worker.scraping_finished.connect(thread.quit) # Phase 2: thread truly exited — safe to release references. thread.finished.connect(lambda n=selected_doc: self._on_thread_finished(n)) thread.finished.connect(thread.deleteLater) self.active_workers[selected_doc] = { "worker": worker, "thread": thread, "row": row, "item": item, "folder_path": folder_path, } thread.start() self._refresh_summary() def update_status(self, doc_name: str, status: str) -> None: entry = self.active_workers.get(doc_name) if not entry: return try: count = int(status) except ValueError: count = 0 entry["row"].update_count(count) def _on_worker_finished(self, doc_name: str, was_cancelled: bool, was_rate_limited: bool) -> None: """Phase 1: worker emitted scraping_finished. 
Update the row UI but do NOT drop Python references — thread.quit() still has to wind down.""" entry = self.active_workers.get(doc_name) if not entry: return row = entry["row"] folder_path = entry["folder_path"] count = 0 try: if os.path.exists(folder_path): count = len([f for f in os.listdir(folder_path) if f.endswith(".html")]) except Exception: pass if was_cancelled: row.mark_cancelled(count) elif was_rate_limited: row.mark_rate_limited(count) _mark_rate_limited_persistent(doc_name) else: row.mark_completed(count) _clear_rate_limited_persistent(doc_name) self.populate_combo_box() idx = self.doc_combo.findText(doc_name) if idx >= 0: self.doc_combo.setCurrentIndex(idx) def _on_thread_finished(self, doc_name: str) -> None: """Phase 2: thread event loop has exited. Now safe to release refs.""" self.active_workers.pop(doc_name, None) self._refresh_summary() def cancel_scrape(self, doc_name: str) -> None: entry = self.active_workers.get(doc_name) if not entry: return try: entry["worker"].cancel() except Exception as e: print(f"Error cancelling {doc_name}: {e}") def show_error(self, message: str) -> None: QMessageBox.critical(self, "Error", message) def open_folder(self, folder_path: str) -> None: if not os.path.exists(folder_path): QMessageBox.information( self, "Folder Not Found", "The folder hasn't been created yet (no pages scraped).", ) return system = platform.system() if system == "Windows": os.startfile(folder_path) elif system == "Darwin": subprocess.Popen(["open", folder_path]) else: subprocess.Popen(["xdg-open", folder_path]) ================================================ FILE: gui/tabs_tools/tools.py ================================================ from PySide6.QtWidgets import QVBoxLayout, QGroupBox, QWidget from PySide6.QtCore import QThread, Signal from gui.tabs_tools.transcribe import TranscriberToolSettingsTab from gui.tabs_tools.vision import VisionToolSettingsTab from gui.tabs_tools.scrape import ScrapeDocumentationTab from gui.tabs_tools.ocr import OCRToolSettingsTab from gui.tabs_tools.misc import MiscTab from core.initialize import restore_vector_db_backup from core.utilities import backup_database class RestoreBackupThread(QThread): finished = Signal(bool) def run(self): try: restore_vector_db_backup() self.finished.emit(True) except Exception as e: print(f"Error during backup restoration: {e}") self.finished.emit(False) class BackupDatabaseThread(QThread): finished = Signal(bool) def run(self): try: backup_database() self.finished.emit(True) except Exception as e: print(f"Error during database backup: {e}") self.finished.emit(False) class GuiSettingsTab(QWidget): def __init__(self): super().__init__() self.layout = QVBoxLayout(self) self.groups = {} classes = { "TRANSCRIBE FILE": (TranscriberToolSettingsTab, 3), "SCRAPE DOCUMENTATION": (ScrapeDocumentationTab, 5), "TEST VISION MODELS": (VisionToolSettingsTab, 2), "OPTICAL CHARACTER RECOGNITION": (OCRToolSettingsTab, 2), "MISC": (MiscTab, 3), } for title, (TabClass, stretch) in classes.items(): settings = TabClass() group = QGroupBox(title, checkable=True, checked=True) group.setLayout(QVBoxLayout()) group.layout().addWidget(settings) self.groups[group] = stretch self.layout.addWidget(group, stretch) group.toggled.connect(lambda checked, g=group, s=settings: (s.setVisible(checked), self.adjust_stretch())) def adjust_stretch(self): for group, factor in self.groups.items(): self.layout.setStretchFactor(group, factor if group.isChecked() else 0) ================================================ FILE: 
gui/tabs_tools/transcribe.py ================================================ import threading from pathlib import Path import yaml import torch from PySide6.QtCore import Qt from PySide6.QtWidgets import ( QWidget, QHBoxLayout, QVBoxLayout, QGridLayout, QPushButton, QFileDialog, QLabel, QComboBox, QSlider, QSizePolicy ) from modules.transcribe import WhisperTranscriber from core.utilities import my_cprint, has_bfloat16_support from core.constants import WHISPER_MODELS, TOOLTIPS class TranscriberToolSettingsTab(QWidget): CONFIG_FILE = 'config.yaml' def __init__(self): super().__init__() self.selected_audio_file = None self.create_layout() def set_buttons_enabled(self, enabled): self.transcribe_button.setEnabled(enabled) self.select_file_button.setEnabled(enabled) def create_layout(self): main_layout = QVBoxLayout() grid = QGridLayout() grid.setColumnStretch(0, 2) grid.setColumnStretch(1, 2) grid.setColumnStretch(2, 1) model_row = QHBoxLayout() model_label = QLabel("Model") model_label.setToolTip(TOOLTIPS["WHISPER_MODEL_SELECT"]) model_row.addWidget(model_label) self.model_combo = QComboBox() self.populate_model_combo() self.model_combo.setToolTip(TOOLTIPS["WHISPER_MODEL_SELECT"]) model_row.addWidget(self.model_combo, 1) grid.addLayout(model_row, 0, 0) self.select_file_button = QPushButton("Select File") self.select_file_button.clicked.connect(self.select_audio_file) self.select_file_button.setToolTip(TOOLTIPS["AUDIO_FILE_SELECT"]) grid.addWidget(self.select_file_button, 0, 1) self.transcribe_button = QPushButton("Transcribe") self.transcribe_button.clicked.connect(self.start_transcription) self.transcribe_button.setToolTip(TOOLTIPS["TRANSCRIBE_BUTTON"]) self.transcribe_button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) grid.addWidget(self.transcribe_button, 0, 2, 2, 1) batch_row = QHBoxLayout() batch_label = QLabel("Batch:") batch_label.setToolTip(TOOLTIPS["WHISPER_BATCH_SIZE"]) batch_row.addWidget(batch_label) self.number_slider = QSlider(Qt.Horizontal) self.number_slider.setMinimum(1) self.number_slider.setMaximum(150) self.number_slider.setValue(8) self.number_slider.valueChanged.connect(self.update_slider_label) self.number_slider.setToolTip(TOOLTIPS["WHISPER_BATCH_SIZE"]) batch_row.addWidget(self.number_slider, 1) self.slider_label = QLabel("8") self.slider_label.setToolTip(TOOLTIPS["WHISPER_BATCH_SIZE"]) batch_row.addWidget(self.slider_label) grid.addLayout(batch_row, 1, 0, 1, 2) main_layout.addLayout(grid) self.file_path_label = QLabel("No file currently selected") main_layout.addWidget(self.file_path_label) self.setLayout(main_layout) def populate_model_combo(self): cuda_available = torch.cuda.is_available() bfloat16_supported = has_bfloat16_support() filtered_models = [] for model_name, model_info in WHISPER_MODELS.items(): precision = model_info['precision'] if precision == 'float32': filtered_models.append(model_name) elif precision == 'bfloat16' and bfloat16_supported: filtered_models.append(model_name) elif precision == 'float16' and cuda_available: filtered_models.append(model_name) self.model_combo.addItems(filtered_models) def update_slider_label(self, value): self.slider_label.setText(str(value)) def update_config_file(self): with open(self.CONFIG_FILE, 'w') as file: yaml.dump(self.config, file) def select_audio_file(self): current_dir = Path.cwd() file_name, _ = QFileDialog.getOpenFileName(self, "Select Audio File", str(current_dir)) if file_name: file_path = Path(file_name) short_path = f"...{file_path.parent.name}/{file_path.name}" 
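            # Display a shortened "...parent/filename" path in the label; the full
            # absolute path remains available via the tooltip set just below.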
self.file_path_label.setText(short_path) self.file_path_label.setToolTip(str(file_path.absolute())) self.selected_audio_file = file_name def start_transcription(self): if not self.selected_audio_file: print("Please select an audio file.") return selected_model_key = self.model_combo.currentText() selected_batch_size = int(self.slider_label.text()) def transcription_thread(): self.set_buttons_enabled(False) try: transcriber = WhisperTranscriber( model_key=selected_model_key, batch_size=selected_batch_size ) transcriber.start_transcription_process(self.selected_audio_file) my_cprint("Transcription created and ready to be input into vector database.", 'green') except Exception as e: my_cprint(f"Transcription failed: {e}", 'red') finally: self.set_buttons_enabled(True) threading.Thread(target=transcription_thread, daemon=True).start() ================================================ FILE: gui/tabs_tools/vision.py ================================================ import sys import textwrap import subprocess from pathlib import Path import logging import yaml import tempfile import os import traceback import gc import time from PIL import Image import torch from PySide6.QtCore import QThread, Signal as pyqtSignal, Qt from PySide6.QtWidgets import ( QWidget, QVBoxLayout, QPushButton, QHBoxLayout, QMessageBox, QFileDialog, QProgressDialog, QDialog, QCheckBox, QListWidget, QListWidgetItem, QLabel ) import modules.process_images as module_process_images from modules.process_images import choose_image_loader from core.constants import VISION_MODELS CONFIG_FILE = 'config.yaml' def _load_cfg() -> dict: p = Path(CONFIG_FILE) if not p.exists(): return {} try: with p.open('r', encoding='utf-8') as f: return yaml.safe_load(f) or {} except Exception: return {} class ModelComparisonProgressDialog(QDialog): PENDING = "⏸" # ⏸ RUNNING = "⏳" # ⏳ SUCCESS = "✅" # ✅ FAILED = "❌" # ❌ def __init__(self, model_names, parent=None): super().__init__(parent) self.setWindowTitle("Vision Model Comparison") self.setModal(True) self.setMinimumWidth(420) self._was_cancelled = False self._finished = False layout = QVBoxLayout(self) self.header_label = QLabel( f"Processing image with {len(model_names)} selected model(s)..." ) layout.addWidget(self.header_label) self.list_widget = QListWidget() self.list_widget.setSelectionMode(QListWidget.NoSelection) self.list_widget.setFocusPolicy(Qt.NoFocus) for name in model_names: item = QListWidgetItem(f"{self.PENDING} {name}") self.list_widget.addItem(item) layout.addWidget(self.list_widget) button_row = QHBoxLayout() button_row.addStretch(1) self.action_button = QPushButton("Cancel") self.action_button.clicked.connect(self._on_action_clicked) button_row.addWidget(self.action_button) layout.addLayout(button_row) self._model_names = list(model_names) def _set_row(self, index, icon, suffix=""): if 0 <= index < len(self._model_names): text = f"{icon} {self._model_names[index]}" if suffix: text += f" {suffix}" self.list_widget.item(index).setText(text) def on_model_started(self, index, name): self._set_row(index, self.RUNNING, "processing…") def on_model_completed(self, index, name, elapsed): self._set_row(index, self.SUCCESS, f"({elapsed:.1f} s)") def on_model_failed(self, index, name): self._set_row(index, self.FAILED) def mark_finished(self): self._finished = True self.action_button.setText("Close") self.header_label.setText("Done. 
See the comparison output file for details.") def was_cancelled(self): return self._was_cancelled def _on_action_clicked(self): if not self._finished: self._was_cancelled = True self.close() def closeEvent(self, event): if not self._finished: self._was_cancelled = True super().closeEvent(event) class ModelSelectionDialog(QDialog): def __init__(self, models, parent=None): super().__init__(parent) self.setWindowTitle("Select Vision Models") layout = QVBoxLayout() self.checkboxes = {} for model_name, info in models.items(): vram_text = info.get('vram', '—') checkbox = QCheckBox(f"{model_name} (VRAM: {vram_text})") checkbox.setChecked(True) self.checkboxes[model_name] = checkbox layout.addWidget(checkbox) buttons_layout = QHBoxLayout() ok_button = QPushButton("OK") cancel_button = QPushButton("Cancel") ok_button.clicked.connect(self.accept) cancel_button.clicked.connect(self.reject) buttons_layout.addWidget(ok_button) buttons_layout.addWidget(cancel_button) layout.addLayout(buttons_layout) self.setLayout(layout) def get_selected_models(self): return [model for model, checkbox in self.checkboxes.items() if checkbox.isChecked()] class ImageProcessorThread(QThread): finished = pyqtSignal(list) error = pyqtSignal(str) def run(self): try: cfg = _load_cfg() chosen_model = ((cfg.get('vision') or {}).get('chosen_model') or next(iter(VISION_MODELS.keys()))) print(f"[Tools] Using chosen_model from config: {chosen_model}") documents = None try: documents = choose_image_loader({"vision": {"chosen_model": chosen_model}}) except TypeError: try: module_process_images.DEFAULT_VISION_MODEL_OVERRIDE = chosen_model print("[Tools] Set module_process_images.DEFAULT_VISION_MODEL_OVERRIDE") except Exception: pass documents = choose_image_loader() self.finished.emit(documents) except Exception as e: error_msg = f"Error in image processing: {str(e)}\n{traceback.format_exc()}" self.error.emit(error_msg) class MultiModelProcessorThread(QThread): finished = pyqtSignal(list) error = pyqtSignal(str) progress = pyqtSignal(int) model_started = pyqtSignal(int, str) model_completed = pyqtSignal(int, str, float) model_failed = pyqtSignal(int, str) def __init__(self, image_path, selected_models): super().__init__() self.image_path = image_path self.selected_models = selected_models self.is_cancelled = False def cancel(self): self.is_cancelled = True def run(self): try: results = [] with Image.open(self.image_path) as raw_image: for i, model_name in enumerate(self.selected_models): if self.is_cancelled: print("\nProcessing cancelled by user") torch.cuda.empty_cache() gc.collect() break self.model_started.emit(i, model_name) try: print(f"\nProcessing with {model_name}...") model_config = {"vision": {"chosen_model": model_name}} loader_name = VISION_MODELS[model_name]['loader'] loader_class = getattr(module_process_images, loader_name) loader = loader_class(model_config) loader.model, loader.tokenizer, loader.processor = loader.initialize_model_and_tokenizer() start_time = time.time() description = loader.process_single_image(raw_image) process_time = time.time() - start_time description = textwrap.fill(description, width=10) results.append((model_name, description, process_time)) if hasattr(loader, 'model') and loader.model is not None: loader.model.cpu() del loader.model if hasattr(loader, 'tokenizer') and loader.tokenizer is not None: del loader.tokenizer if hasattr(loader, 'processor') and loader.processor is not None: del loader.processor torch.cuda.empty_cache() gc.collect() print(f"Completed {model_name}") 
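                        # One more model finished: advance the progress counter and
                        # tell the dialog to mark this row completed with its elapsed time.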
self.progress.emit(i + 1) self.model_completed.emit(i, model_name, process_time) except Exception as e: error_msg = f"Error processing with {model_name}: {str(e)}\n{traceback.format_exc()}" results.append((model_name, error_msg, 0.0)) print(error_msg) torch.cuda.empty_cache() gc.collect() self.model_failed.emit(i, model_name) torch.cuda.empty_cache() gc.collect() self.finished.emit(results) except Exception as e: torch.cuda.empty_cache() gc.collect() self.error.emit(str(e)) class VisionToolSettingsTab(QWidget): def __init__(self): super().__init__() mainVLayout = QVBoxLayout() self.setLayout(mainVLayout) hBoxLayout = QHBoxLayout() mainVLayout.addLayout(hBoxLayout) processButton = QPushButton("Multiple Files + One Vision Model") hBoxLayout.addWidget(processButton) processButton.clicked.connect(self.confirmationBeforeProcessing) newButton = QPushButton("Single Image + All Vision Models") hBoxLayout.addWidget(newButton) newButton.clicked.connect(self.selectSingleImage) self.thread = None self.progress = None def confirmationBeforeProcessing(self): msgBox = QMessageBox() msgBox.setIcon(QMessageBox.Information) msgBox.setText( "1. Create Database Tab:\n" "Select files you theoretically want in the vector database.\n\n" "2. Settings Tab:\n" "Select the vision model you want to test.\n\n" "3. Click the 'Process' button.\n\n" "This will test the selected vision model before actually entering the images into the vector database.\n\n" "Do you want to proceed?" ) msgBox.setWindowTitle("Confirm Processing") msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel) returnValue = msgBox.exec() if returnValue == QMessageBox.Ok: self.startProcessing() def startProcessing(self): if self.thread is None: self.thread = ImageProcessorThread() self.thread.finished.connect(self.onProcessingFinished) self.thread.error.connect(self.onProcessingError) self.thread.start() def onProcessingFinished(self, documents): self.thread = None print(f"Processed {len(documents)} documents") contents = self.extract_page_content(documents) self.save_page_contents(contents) def onProcessingError(self, error_msg): self.thread = None logging.error(f"Processing error: {error_msg}") QMessageBox.critical(self, "Processing Error", f"An error occurred during image processing:\n\n{error_msg}") def selectSingleImage(self): file_path, _ = QFileDialog.getOpenFileName( self, "Select Image File", "", "Image Files (*.png *.jpg *.jpeg *.gif *.bmp *.tif *.tiff)" ) if file_path: dialog = ModelSelectionDialog(VISION_MODELS, self) if dialog.exec(): selected_models = dialog.get_selected_models() if not selected_models: QMessageBox.warning(self, "Warning", "Please select at least one model.") return msgBox = QMessageBox() msgBox.setIcon(QMessageBox.Information) msgBox.setText( "Process this image with the selected vision models?\n\n" "This will test each model sequentially and may take several minutes.\n" "Models will be loaded and unloaded to manage memory usage.\n\n" "Do you want to proceed?" 
) msgBox.setWindowTitle("Confirm Processing") msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel) returnValue = msgBox.exec() if returnValue == QMessageBox.Ok: self.progress = ModelComparisonProgressDialog(selected_models, self) self.progress.rejected.connect(self.cancelProcessing) self.thread = MultiModelProcessorThread(file_path, selected_models) self.thread.finished.connect(self.onMultiModelProcessingFinished) self.thread.error.connect(self.onMultiModelProcessingError) self.thread.model_started.connect(self.progress.on_model_started) self.thread.model_completed.connect(self.progress.on_model_completed) self.thread.model_failed.connect(self.progress.on_model_failed) self.thread.start() self.progress.show() def cancelProcessing(self): if self.thread is not None and hasattr(self.thread, "cancel"): self.thread.cancel() def onMultiModelProcessingFinished(self, results): if self.progress: self.progress.mark_finished() try: output_file = self.save_comparison_results(self.thread.image_path, results) self.open_file(output_file) except Exception as e: QMessageBox.critical(self, "Error", f"An error occurred while saving results:\n\n{str(e)}") self.thread = None def onMultiModelProcessingError(self, error_msg): if self.progress: self.progress.mark_finished() self.progress.close() QMessageBox.critical(self, "Error", f"An error occurred during processing:\n\n{error_msg}") self.thread = None def extract_page_content(self, documents): contents = [] total_length = 0 for doc in documents: if hasattr(doc, 'page_content') and hasattr(doc, 'metadata'): content = doc.page_content filepath = doc.metadata.get('source', doc.metadata.get('file_path', doc.metadata.get('file_name', 'Unknown filepath'))) elif isinstance(doc, dict): content = doc.get("page_content", "Document is missing 'page_content'.") filepath = doc.get("metadata", {}).get('source', doc.get("metadata", {}).get('file_path', doc.get("metadata", {}).get('file_name', 'Unknown filepath'))) else: content = "Document is missing 'page_content'." filepath = 'Unknown filepath' content_length = len(content) total_length += content_length wrapped_content = textwrap.fill(content, width=100) contents.append((filepath, wrapped_content, content_length)) avg_length = total_length / len(documents) if documents else 0 return contents, avg_length def save_page_contents(self, contents): contents, avg_length = contents with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', encoding='utf-8', delete=False) as temp_file: temp_file.write(f"Average Summary Length: {avg_length:.2f} characters\n") temp_file.write("="*50 + "\n\n") for filepath, content, length in contents: temp_file.write(f"File Path: {filepath}\n") temp_file.write(f"Summary Length: {length} characters\n") temp_file.write("-"*50 + "\n") temp_file.write(f"{content}\n\n") temp_file.write("="*50 + "\n\n") temp_name = temp_file.name self.open_file(temp_name) def save_comparison_results(self, image_path, results): with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', encoding='utf-8', delete=False) as temp_file: model_col_width = 23 count_col_width = 12 time_col_width = 12 speed_col_width = 12 temp_file.write(f"Image Path: {image_path}\n") temp_file.write(f"Generated: {time.strftime('%Y-%m-%d %H:%M:%S')}\n\n") chunk_advice = ( "Remember to adjust your 'chunk size' setting to exceed the longest image summary that you expect. " "For large bodies of text (e.g. 
from a .pdf) splitting/overlapping chunks of text is fine, but for image " "summaries you want any/all summaries to fit within a single chunk that will be put into the vector database." ) temp_file.write(textwrap.fill(chunk_advice, width=100) + "\n\n") temp_file.write("Model Performance Comparison Table:\n") temp_file.write("+" + "-"*model_col_width + "+" + "-"*count_col_width + "+" + "-"*time_col_width + "+" + "-"*speed_col_width + "+\n") temp_file.write("|" + "Model Name".center(model_col_width) + "|" + "Char Count".center(count_col_width) + "|" + "Time (sec)".center(time_col_width) + "|" + "Char/Sec".center(speed_col_width) + "|\n") temp_file.write("+" + "-"*model_col_width + "+" + "-"*count_col_width + "+" + "-"*time_col_width + "+" + "-"*speed_col_width + "+\n") for model_name, description, process_time in results: char_count = len(description) chars_per_sec = char_count / process_time if process_time > 0 else 0 temp_file.write("|" + model_name.ljust(model_col_width) + "|" + str(char_count).center(count_col_width) + "|" + f"{process_time:.2f}".center(time_col_width) + "|" + f"{chars_per_sec:.1f}".center(speed_col_width) + "|\n") temp_file.write("+" + "-"*model_col_width + "+" + "-"*count_col_width + "+" + "-"*time_col_width + "+" + "-"*speed_col_width + "+\n\n") for model_name, description, process_time in results: char_count = len(description) chars_per_sec = char_count / process_time if process_time > 0 else 0 temp_file.write(f"Model: {model_name}\n") temp_file.write(f"Summary Length: {char_count}\n") temp_file.write(f"Processing Time: {process_time:.2f} seconds\n") temp_file.write(f"Characters per Second: {chars_per_sec:.1f}\n") temp_file.write("="*50 + "\n") if description.strip(): temp_file.write(textwrap.fill(description, width=100) + "\n\n") else: temp_file.write("[No output generated]\n\n") temp_file.write("-"*50 + "\n\n") temp_name = temp_file.name self.open_file(temp_name) return temp_name def open_file(self, file_path): try: os.startfile(file_path) except Exception as e: error_msg = f"Error opening file: {e}" logging.error(error_msg) QMessageBox.warning(self, "Error", error_msg) ================================================ FILE: gui.py ================================================ import faulthandler faulthandler.enable(all_threads=True) import multiprocessing multiprocessing.set_start_method('spawn', force=True) from core.utilities import set_cuda_paths set_cuda_paths() from gui.main_window import main if __name__ == '__main__': main() ================================================ FILE: modules/__init__.py ================================================ ================================================ FILE: modules/kokoro.py ================================================ import sys import os from pathlib import Path import queue import threading import re import torch import sounddevice as sd import warnings import logging from typing import Optional, Union class KokoroTTS: VOICES = [ 'af', 'af_bella', 'af_sarah', 'am_adam', 'am_michael', 'bf_emma', 'bf_isabella', 'bm_george', 'bm_lewis', 'af_nicole', 'af_sky' ] def __init__(self, repo_path: str): self.REPO_PATH = Path(repo_path) sys.path.append(str(self.REPO_PATH)) from models import build_model from kokoro import generate, generate_full, phonemize self.generate = generate self.generate_full = generate_full self.phonemize = phonemize self.sentence_queue = queue.Queue() self.audio_queue = queue.Queue() self.stop_event = threading.Event() self.model = None self.voicepack_cache = {} self.current_voice_name = 
None warnings.filterwarnings("ignore", category=FutureWarning) warnings.filterwarnings("ignore", category=UserWarning) def _load_model_and_voice(self, voice_name: str): device = 'cpu' if self.model is None: model_path = self.REPO_PATH / 'kokoro-v0_19.pth' if not model_path.exists(): raise FileNotFoundError(f"Model file not found at {model_path}") from models import build_model self.model = build_model(str(model_path), device) if voice_name not in self.voicepack_cache: voices_path = self.REPO_PATH / 'voices' if not voices_path.exists(): raise FileNotFoundError(f"Voices directory not found at {voices_path}") voicepack_path = voices_path / f'{voice_name}.pt' if not voicepack_path.exists(): raise FileNotFoundError(f"Voice file not found at {voicepack_path}") self.voicepack_cache[voice_name] = torch.load(str(voicepack_path), weights_only=True).to(device) print(f"Loaded voicepack for {voice_name}") self.current_voice_name = voice_name @staticmethod def _drain_queue(q): while not q.empty(): try: q.get_nowait() except queue.Empty: break def stop(self): self.stop_event.set() self._drain_queue(self.sentence_queue) self._drain_queue(self.audio_queue) def _process_sentences(self, speed: float, force_accent: Optional[str]): while not self.stop_event.is_set(): try: sentence = self.sentence_queue.get(timeout=1) if sentence is None: self.audio_queue.put(None) break if self.stop_event.is_set(): break lang = force_accent if force_accent else self.current_voice_name[0] logging.debug("About to generate phonemes...") ps = self.phonemize(sentence, lang) logging.debug(f"Generated phonemes: {ps}") if self.stop_event.is_set(): break try: voicepack = self.voicepack_cache[self.current_voice_name] audio, phonemes = self.generate_full( self.model, sentence, voicepack, lang=lang, speed=speed, ps=ps ) if audio is not None and not self.stop_event.is_set(): self.audio_queue.put(audio) elif audio is None: print(f"Failed to generate audio for sentence: {sentence}") except Exception as e: if not self.stop_event.is_set(): print(f"Error generating speech for sentence: {str(e)}") print(f"Error type: {type(e)}") import traceback traceback.print_exc() continue except queue.Empty: continue def _play_audio(self): while not self.stop_event.is_set(): try: audio = self.audio_queue.get(timeout=1) if audio is None: break if self.stop_event.is_set(): break try: sd.play(audio, 24000) while sd.get_stream().active and not self.stop_event.is_set(): sd.sleep(50) if self.stop_event.is_set(): try: sd.stop() except Exception as e: print(f"Safe stop error (expected): {e}") break else: sd.wait() except Exception as e: if not self.stop_event.is_set(): print(f"Audio playback error: {e}") try: sd.stop() except: pass break except queue.Empty: continue except Exception as e: if not self.stop_event.is_set(): print(f"Audio queue error: {e}") break def speak(self, text: str, voice: str = 'bm_george', speed: float = 1.3, force_accent: Optional[str] = None) -> None: if voice not in self.VOICES: raise ValueError(f"Invalid voice. 
Choose from: {self.VOICES}") if force_accent and force_accent not in ['a', 'b']: raise ValueError("force_accent must be 'a' for American, 'b' for British, or None") if speed <= 0: raise ValueError("Speed must be positive") self.stop_event.clear() self.sentence_queue = queue.Queue() self.audio_queue = queue.Queue() self._load_model_and_voice(voice) sentences = [s.strip() for s in re.split(r'[.!?;]+\s*', text) if s.strip()] process_thread = threading.Thread( target=self._process_sentences, args=(speed, force_accent) ) playback_thread = threading.Thread(target=self._play_audio) process_thread.daemon = True playback_thread.daemon = True process_thread.start() playback_thread.start() for sentence in sentences: if self.stop_event.is_set(): break self.sentence_queue.put(sentence) if not self.stop_event.is_set(): self.sentence_queue.put(None) process_thread.join() playback_thread.join() ================================================ FILE: modules/ocr.py ================================================ import os import io import tempfile import threading import queue import time from pathlib import Path from io import BytesIO from abc import ABC, abstractmethod from core.constants import PROJECT_ROOT from concurrent.futures import ThreadPoolExecutor, as_completed from multiprocessing import Process, Queue import fitz import psutil from PIL import Image import tesserocr from ocrmypdf.hocrtransform import HocrTransform import tqdm from typing import Union, List, Tuple thread_local = threading.local() class OCRProcessor(ABC): def __init__(self, zoom: int = 2, progress_queue: Queue = None): self.zoom = zoom self.show_progress = False self.progress_queue = progress_queue backend_name = self.__class__.__name__ print(f"\033[92mUsing {backend_name} backend\033[0m") if backend_name == "TesseractOCR": thread_count = self.get_optimal_threads() print(f"\033[92mUsing up to {thread_count} threads\033[0m") def convert_page_to_image(self, page) -> Image.Image: mat = fitz.Matrix(self.zoom, self.zoom) pix = page.get_pixmap(matrix=mat) img_data = pix.tobytes("png") return Image.open(io.BytesIO(img_data)) @abstractmethod def process_page(self, page_num: int, pdf_path: str) -> Tuple[int, str]: pass @abstractmethod def initialize(self): pass @abstractmethod def clean_text(self, text: str) -> str: pass def validate_pdf(self, pdf_path: Path) -> bool: try: with fitz.open(str(pdf_path)) as doc: if doc.page_count == 0: return False _ = doc[0].get_text() return True except Exception: return False def process_document(self, pdf_path: Path, output_path: Path = None): if not self.validate_pdf(pdf_path): raise ValueError(f"Invalid or corrupted PDF file: {pdf_path}") if output_path is None: output_path = pdf_path.with_suffix('.txt') with fitz.open(str(pdf_path)) as pdf_document: total_pages = len(pdf_document) if self.progress_queue: self.progress_queue.put(('total', total_pages)) results = {} with ThreadPoolExecutor(max_workers=self.get_optimal_threads()) as executor: future_to_page = { executor.submit(self.process_page, page_num, str(pdf_path)): page_num for page_num in range(total_pages) } for future in as_completed(future_to_page): page_num, processed_text = future.result() results[page_num] = processed_text if self.progress_queue: self.progress_queue.put(('update', 1)) with open(output_path, 'w', encoding='utf-8') as f: for page_num in range(total_pages): text = results.get(page_num, '').strip() if text: f.write(f"[[page{page_num + 1}]]{text}") if self.progress_queue: self.progress_queue.put(('done', None)) @staticmethod 
def get_optimal_threads() -> int: return max(4, psutil.cpu_count(logical=True) - 3) class TesseractOCR(OCRProcessor): def __init__(self, zoom: int = 2, progress_queue: Queue = None): super().__init__(zoom, progress_queue) self.tessdata_path = None self.temp_dir = None self.show_progress = True def initialize(self): script_dir = PROJECT_ROOT self.temp_dir = script_dir / "temp_ocr" self.temp_dir.mkdir(exist_ok=True) os.environ['TMP'] = str(self.temp_dir) os.environ['TEMP'] = str(self.temp_dir) tempfile.tempdir = str(self.temp_dir) self.tessdata_path = script_dir / 'share' / 'tessdata' os.environ['TESSDATA_PREFIX'] = str(self.tessdata_path) def clean_text(self, text: str) -> str: return text def cleanup(self): self.cleanup_temp_pdfs() if 'TESSDATA_PREFIX' in os.environ: del os.environ['TESSDATA_PREFIX'] def process_document(self, pdf_path: Path, output_path: Path = None): if not self.validate_pdf(pdf_path): raise ValueError(f"Invalid or corrupted PDF file: {pdf_path}") if output_path is None: output_path = pdf_path.with_stem(f"{pdf_path.stem}_OCR") if self.temp_dir is None: self.initialize() self.cleanup_temp_pdfs() with fitz.open(str(pdf_path)) as pdf_document: num_pages = len(pdf_document) if self.progress_queue: self.progress_queue.put(('total', num_pages)) results = [] with ThreadPoolExecutor(max_workers=self.get_optimal_threads()) as executor: futures = {executor.submit(self.process_page, page_num, str(pdf_path)): page_num for page_num in range(num_pages)} for future in as_completed(futures): page_num, temp_pdf_path = future.result() results.append((temp_pdf_path, page_num)) if self.progress_queue: self.progress_queue.put(('update', 1)) results.sort(key=lambda x: x[1]) with fitz.open() as output_pdf: for temp_pdf_path, _ in results: with fitz.open(temp_pdf_path) as src: output_pdf.insert_pdf(src) Path(temp_pdf_path).unlink(missing_ok=True) output_pdf.save(output_path) self.optimize_final_pdf(pdf_path, output_path) self.cleanup_temp_pdfs() if self.progress_queue: self.progress_queue.put(('done', None)) def process_page(self, page_num: int, pdf_path: str) -> Tuple[int, str]: fd, temp_pdf_path = tempfile.mkstemp(suffix=".pdf", dir=self.temp_dir) os.close(fd) with fitz.open(pdf_path) as pdf_document, fitz.open() as out_pdf: page = pdf_document[page_num] api = getattr(thread_local, 'api', None) if api is None: api = tesserocr.PyTessBaseAPI(lang="eng", path=str(self.tessdata_path)) thread_local.api = api page.remove_rotation() pix = page.get_pixmap(matrix=fitz.Matrix(self.zoom, self.zoom)) pil_image = Image.open(BytesIO(pix.tobytes("png"))) api.SetImage(pil_image) hocr_text = api.GetHOCRText(0) hocr_output = f"{self.temp_dir}/page_{page_num}.hocr" Path(hocr_output).write_text(hocr_text, encoding="utf-8") fd, text_pdf = tempfile.mkstemp(suffix=".pdf", dir=self.temp_dir) os.close(fd) pdf_width_pts = page.rect.width pdf_height_pts = page.rect.height dpi_x = (pix.width * 72) / pdf_width_pts dpi_y = (pix.height * 72) / pdf_height_pts dpi = (dpi_x + dpi_y) / 2.0 hocr_transform = HocrTransform(hocr_filename=hocr_output, dpi=dpi) # HocrTransform.to_pdf reads self.width/self.height. __init__ tries to set # them from the hOCR
    coords, but tesserocr's hOCR # may omit that div (or its bbox), leaving the attrs undefined and causing # AttributeError. Force them to the true PDF page dimensions in pts. hocr_transform.width = pdf_width_pts hocr_transform.height = pdf_height_pts hocr_transform.to_pdf(out_filename=text_pdf, invisible_text=True) out_pdf.insert_pdf(page.parent, from_page=page_num, to_page=page_num) with fitz.open(text_pdf) as text_page: out_pdf[0].show_pdf_page(out_pdf[0].rect, text_page, 0, overlay=True) Path(hocr_output).unlink(missing_ok=True) for _ in range(10): try: Path(text_pdf).unlink() break except PermissionError: time.sleep(0.1) out_pdf.save(temp_pdf_path) return page_num, temp_pdf_path def optimize_final_pdf(self, original_pdf_path: Path, ocr_pdf_path: Path) -> None: with fitz.open(original_pdf_path) as original_doc: orig_pages = [] for page in original_doc: orig_pages.append({'width': page.rect.width, 'height': page.rect.height, 'mediabox': page.mediabox, 'cropbox': getattr(page, 'cropbox', None)}) temp_path = str(ocr_pdf_path) + ".optimized" with fitz.open(ocr_pdf_path) as ocr_doc: for i, page in enumerate(ocr_doc): if i < len(orig_pages): orig = orig_pages[i] page.set_mediabox(orig['mediabox']) if orig['cropbox']: try: cropbox = orig['cropbox'] mediabox = orig['mediabox'] if cropbox[0] >= mediabox[0] and cropbox[1] >= mediabox[1] and cropbox[2] <= mediabox[2] and cropbox[3] <= mediabox[3]: page.set_cropbox(cropbox) except ValueError: pass ocr_doc.save(temp_path, garbage=4, deflate=True, clean=True) os.replace(temp_path, ocr_pdf_path) def cleanup_temp_pdfs(self): if self.temp_dir is None: return for temp_file in Path(self.temp_dir).glob("tmp*.pdf"): try: temp_file.unlink() except PermissionError: pass def _process_documents_worker(pdf_paths: List[Path], backend: str, model_path: str, output_dir: Path, progress_queue: Queue): if backend.lower() == 'tesseract': processor = TesseractOCR(progress_queue=progress_queue) else: raise ValueError(f"Unsupported backend: {backend}") processor.initialize() try: for pdf_path in pdf_paths: output_path = None if output_dir: output_path = output_dir / f"{pdf_path.stem}_ocr.txt" processor.process_document(pdf_path, output_path) finally: if hasattr(processor, 'cleanup'): processor.cleanup() def process_documents(pdf_paths: Union[Path, List[Path]], backend: str = 'tesseract', model_path: str = None, output_dir: Path = None): if isinstance(pdf_paths, Path): pdf_paths = [pdf_paths] progress_queue = Queue() process = Process(target=_process_documents_worker, args=(pdf_paths, backend, model_path, output_dir, progress_queue)) process.start() total_pages = None pbar = None try: while True: try: msg = progress_queue.get(timeout=1.0) cmd, data = msg if cmd == 'total': total_pages = data pbar = tqdm.tqdm(total=total_pages, desc="Processing pages") elif cmd == 'update': if pbar: pbar.update(data) elif cmd == 'done': break except queue.Empty: if not process.is_alive(): break finally: if pbar: pbar.close() if process.is_alive(): process.join(timeout=5.0) if process.is_alive(): process.terminate() process.join(timeout=3.0) if process.is_alive(): process.kill() process.join(timeout=1.0) time.sleep(0.5) if process.exitcode is not None and process.exitcode != 0: raise RuntimeError(f"OCR worker exited with code {process.exitcode}") ================================================ FILE: modules/process_images.py ================================================ import os import traceback import inspect import time import warnings from concurrent.futures import 
ProcessPoolExecutor from pathlib import Path import torch import torchvision.transforms as T from torchvision.transforms.functional import InterpolationMode import yaml from PIL import Image from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoModel, AutoTokenizer, AutoProcessor, BitsAndBytesConfig, Qwen2_5_VLForConditionalGeneration, GenerationConfig, AutoConfig, AutoModelForVision2Seq, AutoModelForImageTextToText ) from db.document_processor import Document from core.extract_metadata import extract_typed_metadata from core.utilities import my_cprint, has_bfloat16_support, set_cuda_paths from core.constants import VISION_MODELS, PROJECT_ROOT set_cuda_paths() warnings.filterwarnings("ignore", message=".*Torch was not compiled with flash attention.*") ALLOWED_EXTENSIONS = ['.png', '.jpg', '.jpeg', '.bmp', '.gif', '.tif', '.tiff'] current_directory = PROJECT_ROOT CACHE_DIR = current_directory / "models" / "vision" CACHE_DIR.mkdir(parents=True, exist_ok=True) IMAGE_PROMPT = ( "Describe this image in as much detail as possible but do not repeat yourself. " "Your response should be no more than one paragraph, but the paragraph can be as long as you want." ) def get_best_device(): return 'cuda' if torch.cuda.is_available() else 'cpu' def check_for_images(image_dir: Path) -> bool: try: filenames = os.listdir(str(image_dir)) return any(Path(f).suffix.lower() in ALLOWED_EXTENSIONS for f in filenames) except FileNotFoundError: return False except OSError: return False def run_loader_in_process(loader_func): try: return loader_func() except Exception as e: error_message = f"Error processing images: {e}\n\nTraceback:\n{traceback.format_exc()}" my_cprint(error_message, "red") return [] def choose_image_loader(model_config: dict | None = None): if model_config is None: cfg_path = Path('config.yaml') if not cfg_path.exists(): raise FileNotFoundError("config.yaml not found and no model_config provided") with cfg_path.open('r', encoding='utf-8') as f: model_config = yaml.safe_load(f) or {} vision_cfg = (model_config.get('vision') or {}) chosen_model = vision_cfg.get('chosen_model') if not chosen_model: raise ValueError("vision.chosen_model missing in config/model_config") if chosen_model not in VISION_MODELS: raise KeyError(f"Unknown vision model: {chosen_model}") loader_name = VISION_MODELS[chosen_model]['loader'] loader_class = globals()[loader_name] loader = loader_class(model_config) image_dir = PROJECT_ROOT / "Docs_for_DB" if not check_for_images(image_dir): return [] with ProcessPoolExecutor(1, initializer=set_cuda_paths) as executor: future = executor.submit(run_loader_in_process, loader.process_images) try: processed_docs = future.result() except Exception as e: my_cprint(f"Error occurred during image processing: {e}", "red") return [] return processed_docs or [] class BaseLoader: def __init__(self, config): self.config = config self.device = get_best_device() self.model = None self.tokenizer = None self.processor = None @staticmethod def detect_dtype(): use_bf16 = torch.cuda.get_device_capability()[0] >= 8 return (torch.bfloat16, "bfloat16") if use_bf16 else (torch.float16, "float16") @staticmethod def normalize_response(text): return ' '.join(line.strip() for line in text.split('\n') if line.strip()) def initialize_model_and_tokenizer(self): raise NotImplementedError def process_images(self): image_dir = PROJECT_ROOT / "Docs_for_DB" documents = [] try: image_files = [file for file in image_dir.iterdir() if file.suffix.lower() in ALLOWED_EXTENSIONS] except OSError: 
image_files = [] print(f"Error accessing directory {image_dir}") self.model, self.tokenizer, self.processor = self.initialize_model_and_tokenizer() print("Processing images.") start_time = time.time() with tqdm(total=len(image_files), unit="image") as progress_bar: for full_path in image_files: try: with Image.open(full_path) as raw_image: extracted_text = self.process_single_image(raw_image) extracted_metadata = extract_typed_metadata(full_path, "image") documents.append(Document(page_content=extracted_text, metadata=extracted_metadata)) progress_bar.update(1) except Exception as e: print(f"{full_path.name}: Error processing image - {e}") total_time = time.time() - start_time print(f"Loaded {len(documents)} image(s).") print(f"Total image processing time: {total_time:.2f} seconds") my_cprint("Vision model removed from memory.", "red") return documents def process_single_image(self, raw_image): raise NotImplementedError class loader_internvl(BaseLoader): def initialize_model_and_tokenizer(self): chosen_model = self.config['vision']['chosen_model'] info = VISION_MODELS[chosen_model] cache_dir = CACHE_DIR / info["cache_dir"] cache_dir.mkdir(parents=True, exist_ok=True) if self.device == "cuda": dtype, precision_str = self.detect_dtype() quant_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=dtype, bnb_4bit_quant_type="nf4", llm_int8_skip_modules=[ "vision_model", "language_model.model.norm", "language_model.output", "language_model.model.rotary_emb", "language_model.lm_head", "mlp1" ] ) model = AutoModel.from_pretrained( info['repo_id'], quantization_config=quant_config, torch_dtype=dtype, low_cpu_mem_usage=True, trust_remote_code=True, cache_dir=cache_dir, token=False ).eval() device_str = "CUDA" else: dtype = torch.float32 precision_str = "float32" model = AutoModel.from_pretrained( info['repo_id'], torch_dtype=dtype, low_cpu_mem_usage=True, trust_remote_code=True, cache_dir=cache_dir, token=False, device_map={"": "cpu"} ).eval() device_str = "CPU" self.model_dtype = dtype my_cprint(f"{chosen_model} loaded into memory on {device_str} ({precision_str})", "green") tokenizer = AutoTokenizer.from_pretrained( info['repo_id'], trust_remote_code=True, cache_dir=cache_dir, token=False ) return model, tokenizer, None def find_closest_aspect_ratio(self, aspect_ratio, ratios, w, h, sz): best_diff = float('inf') best = (1, 1) area = w * h for r in ratios: ar = r[0] / r[1] diff = abs(aspect_ratio - ar) if diff < best_diff or (diff == best_diff and area > 0.5 * sz * sz * r[0] * r[1]): best_diff = diff best = r return best def _build_transform(self, size): mean = (0.485, 0.456, 0.406) std = (0.229, 0.224, 0.225) return T.Compose([ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), T.Resize((size, size), interpolation=InterpolationMode.LANCZOS, antialias=True), T.ToTensor(), T.Normalize(mean=mean, std=std) ]) def dynamic_preprocess(self, img, min_num=1, max_num=12, image_size=448, use_thumbnail=False): w, h = img.size ar = w / h ratios = sorted( {(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if i * j <= max_num and i * j >= min_num}, key=lambda x: x[0] * x[1] ) best = self.find_closest_aspect_ratio(ar, ratios, w, h, image_size) tw, th = image_size * best[0], image_size * best[1] resized = img.resize((tw, th)) blocks = best[0] * best[1] cols = tw // image_size parts = [] for i in range(blocks): x = (i % cols) * image_size y = (i // cols) * image_size parts.append(resized.crop((x, y, x + image_size, y + image_size))) 
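# When use_thumbnail is requested and the image was split into more than one tile, a full-image resize is appended as an extra tile so the model also sees global context alongside the crops.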
if use_thumbnail and len(parts) != 1: parts.append(img.resize((image_size, image_size))) return parts def _prepare_image(self, raw_image, input_size=448, max_num=24): imgs = self.dynamic_preprocess(raw_image, image_size=input_size, use_thumbnail=True, max_num=max_num) tf = self._build_transform(input_size) return torch.stack([tf(im) for im in imgs]) @torch.inference_mode() def process_single_image(self, raw_image): pv = self._prepare_image(raw_image).to(self.model_dtype).to(self.device) question = f"\n{IMAGE_PROMPT}" gen_cfg = { 'num_beams': 1, 'max_new_tokens': 512, 'do_sample': False, 'pad_token_id': self.tokenizer.pad_token_id } resp = self.model.chat(self.tokenizer, pv, question, gen_cfg) return self.normalize_response(resp) class loader_granite(BaseLoader): def initialize_model_and_tokenizer(self): chosen_model = self.config['vision']['chosen_model'] model_id = VISION_MODELS[chosen_model]['repo_id'] save_dir = VISION_MODELS[chosen_model]["cache_dir"] cache_dir = CACHE_DIR / save_dir cache_dir.mkdir(parents=True, exist_ok=True) processor = AutoProcessor.from_pretrained( model_id, use_fast=True, cache_dir=cache_dir, token=False ) low_tiling_pinpoints = [[384, 384], [768, 384], [384, 768]] medium_tiling_pinpoints = [ [384, 384], [384, 768], [768, 384], [384, 1152], [1152, 384], [384, 1536], [768, 768], [1536, 384], ] high_tiling_pinpoints = [ [384, 384], [384, 768], [768, 384], [384, 1152], [1152, 384], [384, 1536], [768, 768], [1536, 384], [384, 1920], [1920, 384], [384, 2304], [768, 1152], [1152, 768], [2304, 384], ] all_tiling_pinpoints = [ [384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [384, 2688], [384, 3072], [384, 3456], [384, 3840], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [1152, 384], [1152, 768], [1152, 1152], [1536, 384], [1536, 768], [1920, 384], [1920, 768], [2304, 384], [2688, 384], [3072, 384], [3456, 384], [3840, 384] ] custom_pinpoints = medium_tiling_pinpoints try: processor.image_grid_pinpoints = custom_pinpoints except Exception: pass ip = getattr(processor, "image_processor", None) if ip is not None and hasattr(ip, "image_grid_pinpoints"): ip.image_grid_pinpoints = custom_pinpoints if self.device == "cuda" and torch.cuda.is_available(): dtype, precision_str = self.detect_dtype() quant_cfg = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=dtype, bnb_4bit_quant_type="nf4", llm_int8_skip_modules=[ "vision_tower", "multi_modal_projector", "language_model.embed_tokens", "language_model.norm", "lm_head" ] ) model = AutoModelForVision2Seq.from_pretrained( model_id, quantization_config=quant_cfg, torch_dtype=dtype, low_cpu_mem_usage=True, cache_dir=cache_dir, token=False, device_map="auto" ) my_cprint(f"{chosen_model} loaded into memory on CUDA ({precision_str})", "green") else: model = AutoModelForVision2Seq.from_pretrained( model_id, torch_dtype=torch.float32, low_cpu_mem_usage=True, cache_dir=cache_dir, token=False, device_map={"": "cpu"} ) my_cprint(f"{chosen_model} loaded into memory on CPU (float32)", "green") try: if hasattr(model, "config") and hasattr(model.config, "image_grid_pinpoints"): model.config.image_grid_pinpoints = custom_pinpoints except Exception: pass if hasattr(model, "image_grid_pinpoints"): try: setattr(model, "image_grid_pinpoints", custom_pinpoints) except Exception: pass model.eval() self.model = model self.processor = processor return model, None, processor @torch.inference_mode() def process_single_image(self, raw_image): if raw_image.mode != "RGB": raw_image = 
raw_image.convert("RGB") prompt = f"<|user|>\n\n{IMAGE_PROMPT}\n<|assistant|>\n" inputs = self.processor(images=raw_image, text=prompt, return_tensors="pt").to(self.device) output = self.model.generate( **inputs, max_new_tokens=512, do_sample=False, num_beams=1 ) resp = self.processor.decode(output[0], skip_special_tokens=True).split('<|assistant|>')[-1].strip() return self.normalize_response(resp) class loader_qwenvl(BaseLoader): def initialize_model_and_tokenizer(self): chosen_model = self.config['vision']['chosen_model'] model_info = VISION_MODELS[chosen_model] model_id = model_info['repo_id'] save_dir = model_info['cache_dir'] cache_dir = CACHE_DIR / save_dir cache_dir.mkdir(parents=True, exist_ok=True) dtype, _ = self.detect_dtype() quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=dtype, bnb_4bit_use_double_quant=True, llm_int8_threshold=6.0, llm_int8_skip_modules=[ "lm_head", "merger", "visual.blocks.0.attn", "visual.blocks.0.mlp", "visual.blocks.1.attn", "visual.blocks.1.mlp", "visual.blocks.2.attn", "visual.blocks.2.mlp", "visual.blocks.3.attn", "visual.blocks.3.mlp", "visual.blocks.4.attn", "visual.blocks.5.mlp", "visual.blocks.7.attn", "visual.blocks.7.mlp", "visual.blocks.8.mlp", "visual.blocks.10.mlp", "visual.blocks.12.mlp", "visual.blocks.13.mlp", "visual.blocks.14.attn", "visual.blocks.14.mlp", "visual.blocks.15.attn", "visual.blocks.15.mlp", "visual.blocks.17.mlp", "visual.blocks.31.mlp.down_proj" ] ) processor = AutoProcessor.from_pretrained( model_id, use_fast=True, min_pixels=28*28, max_pixels=1280*28*28, trust_remote_code=True, cache_dir=cache_dir, token=False ) model = AutoModelForImageTextToText.from_pretrained( model_id, quantization_config=quantization_config, torch_dtype=dtype, low_cpu_mem_usage=True, trust_remote_code=True, cache_dir=cache_dir, token=False, device_map="auto", ) model.eval() _, precision_str = self.detect_dtype() device_str = "CUDA" if self.device == "cuda" else "CPU" my_cprint(f"{chosen_model} loaded into memory on {device_str} ({precision_str})", "green") return model, None, processor @torch.inference_mode() def process_single_image(self, raw_image): prompt = ( "<|im_start|>user\n" f"{IMAGE_PROMPT} <|vis_start|><|image_pad|><|vis_end|>\n" "<|im_end|>\n" "<|im_start|>assistant\n" ) inputs = self.processor( images=raw_image, text=prompt, return_tensors="pt" ).to(self.device) output = self.model.generate( **inputs, max_new_tokens=1024, do_sample=False, top_k=None, top_p=None, num_beams=1, temperature=None ) response = self.processor.decode(output[0], skip_special_tokens=True) response = response.split('assistant')[-1].strip() return self.normalize_response(response) class loader_glmv4_thinking(BaseLoader): PIXELS_LOW = 294_912 PIXELS_MEDIUM = 589_824 PIXELS_HIGH = 1_179_648 PIXELS_DEFAULT = 4_816_896 def initialize_model_and_tokenizer(self): chosen_model = self.config['vision']['chosen_model'] info = VISION_MODELS[chosen_model] model_id = info['repo_id'] save_dir = info["cache_dir"] cache_dir = CACHE_DIR / save_dir cache_dir.mkdir(parents=True, exist_ok=True) quant_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type="nf4", ) processor = AutoProcessor.from_pretrained(model_id, use_fast=True, cache_dir=cache_dir) self.pixel_cap = self.PIXELS_HIGH model = AutoModelForImageTextToText.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map="auto", attn_implementation="sdpa", quantization_config=quant_config, cache_dir=cache_dir, 
).eval() self.device = torch.device("cuda") self.model = model self.processor = processor my_cprint(f"{chosen_model} loaded into memory on CUDA (bfloat16)", "green") return model, None, processor def _cap_pixels_for_glm4v(self, pil_img, max_pixels_2d, divisor=28): w, h = pil_img.size area = w * h if area <= max_pixels_2d: new_w = max(divisor, (w // divisor) * divisor) new_h = max(divisor, (h // divisor) * divisor) if new_w == w and new_h == h: return pil_img return pil_img.resize((new_w, new_h), Image.BICUBIC) scale = (max_pixels_2d / float(area)) ** 0.5 new_w = max(divisor, int((w * scale) // divisor * divisor)) new_h = max(divisor, int((h * scale) // divisor * divisor)) if new_w < divisor or new_h < divisor: new_w = new_h = divisor return pil_img.resize((new_w, new_h), Image.BICUBIC) @torch.inference_mode() def process_single_image(self, raw_image): if raw_image.mode != "RGB": raw_image = raw_image.convert("RGB") ip = getattr(self.processor, "image_processor", None) patch_size = getattr(ip, "patch_size", 14) merge_size = getattr(ip, "merge_size", 2) divisor = patch_size * merge_size raw_image = self._cap_pixels_for_glm4v( raw_image, max_pixels_2d=self.pixel_cap, divisor=divisor, ) prompt = ( "[gMASK]<|user|>\n" "<|begin_of_image|><|image|><|end_of_image|>" f"{IMAGE_PROMPT}" "<|assistant|>\n" ) inputs = self.processor( text=prompt, images=raw_image, return_tensors="pt", ).to("cuda") out_ids = self.model.generate( **inputs, max_new_tokens=512, do_sample=False ) torch.cuda.synchronize() generated_ids_trimmed = [out_ids[0][len(inputs.input_ids[0]):]] response = self.processor.batch_decode( generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False, )[0].strip() if '' in response and '' in response: start_idx = response.find('') + len('') end_idx = response.find('') response = response[start_idx:end_idx].strip() return response class loader_liquidvl(BaseLoader): def initialize_model_and_tokenizer(self): chosen_model = self.config['vision']['chosen_model'] info = VISION_MODELS[chosen_model] source = info.get('model_path') or info['repo_id'] cache_dir = CACHE_DIR / info.get('cache_dir', '') cache_dir.mkdir(parents=True, exist_ok=True) if torch.cuda.is_available(): dtype, precision_str = self.detect_dtype() device_map = "auto" else: dtype = torch.float32 precision_str = "float32" device_map = {"": "cpu"} model = AutoModelForImageTextToText.from_pretrained( source, trust_remote_code=True, torch_dtype=dtype, cache_dir=cache_dir, device_map=device_map, ).eval() processor = AutoProcessor.from_pretrained( source, trust_remote_code=True, cache_dir=cache_dir, ) if hasattr(processor, "tokenizer") and hasattr(processor.tokenizer, "add_bos_token"): processor.tokenizer.add_bos_token = False if torch.cuda.is_available(): device_str = "CUDA" else: device_str = "CPU" my_cprint(f"{chosen_model} loaded into memory on {device_str} ({precision_str})", "green") return model, None, processor @torch.inference_mode() def process_single_image(self, raw_image): if raw_image.mode != "RGB": raw_image = raw_image.convert("RGB") system_text = "You are a helpful multimodal assistant." 
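# The ChatML-style prompt below is assembled by hand (system turn, user turn carrying IMAGE_PROMPT, then an open assistant turn); the processor call that follows pairs this text with the PIL image and caps the image token budget.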
chatml = ( "<|startoftext|><|im_start|>system\n" f"{system_text}<|im_end|>\n" "<|im_start|>user\n" f"{IMAGE_PROMPT}<|im_end|>\n" "<|im_start|>assistant\n" ) inputs = self.processor( text=[chatml], images=[[raw_image]], return_tensors="pt", use_image_special_tokens=True, do_image_splitting=True, min_image_tokens=64, max_image_tokens=256, ) move_to = getattr(self.model, "device", None) if move_to is not None: inputs = inputs.to(move_to) input_len = inputs["input_ids"].shape[1] eos_id = self.processor.tokenizer.convert_tokens_to_ids("<|im_end|>") pad_id = self.processor.tokenizer.pad_token_id or eos_id outputs = self.model.generate( **inputs, max_new_tokens=512, do_sample=False, eos_token_id=eos_id, pad_token_id=pad_id, ) new_tokens = outputs[:, input_len:] text = self.processor.batch_decode(new_tokens, skip_special_tokens=True)[0].strip() return self.normalize_response(text) ================================================ FILE: modules/scraper.py ================================================ import os import re import json import asyncio import textwrap import aiofiles import markdown from bs4 import BeautifulSoup from copy import deepcopy import hashlib from urllib.parse import urljoin, urlparse, urlsplit, urlunsplit from PySide6.QtCore import Signal, QObject from curl_cffi.requests import AsyncSession from curl_cffi.requests.errors import RequestsError from core.constants import PROJECT_ROOT _VERSION_SUFFIX_RE = re.compile(r"^v?\d+(\.\d+)*$") # Cruft commonly embedded INSIDE the main article element by Sphinx-style themes. # Stripped post-extraction so the saved HTML is closer to "vector-DB-ready" content. _CRUFT_TAGS = ("script", "style", "nav", "footer", "svg") _CRUFT_CLASSES = ( "toctree-wrapper", # Sphinx project TOC tree, often dumped at bottom of index pages "related", # Sphinx prev/next bar "sphinxsidebar", # classic Sphinx sidebar, when scoped inside the content "footer", # Sphinx classic theme div.footer (copyright/build info) "edit-this-page", # MkDocs Material "Edit this page" "md-source-file", # MkDocs Material source-file metadata "prev-next-area", # Pydata-theme prev/next nav "prev-next-bottom", "prev-next-top", "bd-toc", # Pydata "On this page" sidebar "bd-sidebar-secondary", "feedback-widget", # Pydantic.dev "Was this page helpful?" "pagination-links", # Pydantic.dev / generic prev-next pagination "footer-version", # PyMuPDF "This documentation covers all versions..." "try_examples_button_container", # SciPy "Try it in your browser! / Open in Tab" "try_examples_outer_iframe", # SciPy interactive-examples sandbox iframe "sidemenu", # lxml.de project nav inside div.document "banner", # lxml.de donation banner ("Like the tool? Help making it better!") "sr-only", ) _CRUFT_IDS = ( "indices-and-tables", # Sphinx auto-generated bottom-of-index "Index/ModIndex/Search" stub "search", "footerDisclaimer", # PyMuPDF "This software is provided AS-IS..." ) def _strip_trailing_version(path: str) -> str: """Strip a trailing '-vX.Y.Z' / '-1.2.3' from a URL path. Used by is_valid_url so that, e.g., a seed of /foo-v1.2/ still matches /foo-v1.3/. Does NOT strip non-version suffixes (so /array-api-compat does not become /array-api). """ parts = path.rsplit("-", 1) if len(parts) == 2 and _VERSION_SUFFIX_RE.match(parts[1]): return parts[0] return path _CRUFT_HEADINGS = {"copyright", "copyrights", "license", "licenses"} def _strip_embedded_cruft(content): """Remove TOC trees, nav, scripts, etc. 
that sit INSIDE the extracted main element.""" for tag_name in _CRUFT_TAGS: for el in content.find_all(tag_name): el.decompose() for cls in _CRUFT_CLASSES: for el in content.find_all(class_=cls): el.decompose() for cruft_id in _CRUFT_IDS: for el in content.find_all(id=cruft_id): el.decompose() # Strip
<section> elements whose first heading is a boilerplate heading like "Copyright" or "License". for section in content.find_all("section"): h = section.find(["h1", "h2", "h3", "h4", "h5", "h6"]) if h and h.get_text(strip=True).lower() in _CRUFT_HEADINGS: section.decompose() # Strip
<p> elements whose only meaningful content is Prev/Next pagination anchors # (e.g. ruamel.yaml's "<p><a>Prev</a> <a>Next</a></p>
    " with no class/id to target). _NAV_LABELS = {"prev", "previous", "next"} for p in content.find_all("p"): anchors = p.find_all("a") if not anchors: continue anchor_text = " ".join(a.get_text(strip=True).lower() for a in anchors).split() if anchor_text and all(w in _NAV_LABELS for w in anchor_text): # Confirm there's no extra non-anchor text in the paragraph. non_anchor_text = "".join( str(c) for c in p.contents if getattr(c, "name", None) != "a" ).strip() if not non_anchor_text: p.decompose() return content class BaseScraper: def __init__(self, url, folder): self.url = url self.folder = folder self.save_dir = os.path.join( str(PROJECT_ROOT), "Scraped_Documentation", folder, ) def process_html(self, soup): main_content = self.extract_main_content(soup) if main_content: cleaned = _strip_embedded_cruft(deepcopy(main_content)) new_soup = BeautifulSoup("", "lxml") new_soup.body.append(cleaned) return new_soup # Fallback differs based on whether the user configured a scraper_class: # - BaseScraper directly (no scraper_class set) => preserve full page # - any subclass (selector configured but missed) => save empty stub # so a misconfigured selector is visible as a tiny file instead of # silently saving the full untrimmed page with TOC/nav cruft. if type(self) is BaseScraper: return soup return BeautifulSoup("", "lxml") def extract_main_content(self, soup): return None SCRAPER_SELECTORS = { "HuggingfaceScraper": ("div", {"class_": "prose-doc prose relative mx-auto max-w-4xl break-words"}), "ReadthedocsScraper": ("div", {"class_": "rst-content"}), "PyTorchScraper": ("article", {"id": "pytorch-article"}), "TileDBScraper": ("main", {"id": "content"}), "RstContentScraper": ("div", {"class_": "rst-content"}), "FuroThemeScraper": ("article", {"id": "furo-main-content"}), "PydataThemeScraper": ("article", {"class_": "bd-article"}), "FastcoreScraper": ("main", {"id": "quarto-document-content", "class_": "content"}), "RtdThemeScraper": ("div", {"attrs": {"itemprop": "articleBody"}}), "BodyRoleMainScraper": ("div", {"class_": "body", "attrs": {"role": "main"}}), "ArticleMdContentInnerMdTypesetScraper": ("article", {"class_": "md-content__inner md-typeset"}), "DivClassDocumentScraper": ("div", {"class_": "document"}), "MainIdMainContentRoleMainScraper": ("main", {"id": "main-content", "attrs": {"role": "main"}}), "DivIdMainContentRoleMainScraper": ("div", {"id": "main-content", "attrs": {"role": "main"}}), "MainScraper": ("main", {}), "DivClassThemeDocMarkdownMarkdownScraper": ("div", {"class_": ["theme-doc-markdown", "markdown"]}), "DivIdContentScraper": ("div", {"id": "content"}), "DivClassTdContentScraper": ("div", {"class_": "td-content"}), "BodyScraper": ("body", {}), "ArticleRoleMainScraper": ("article", {"attrs": {"role": "main"}}), "ArticleClassMainContent8zFCHScraper": ("article", {"class_": "main_content__8zFCH"}), } class SelectorScraper(BaseScraper): def __init__(self, url, folder, selector_key): super().__init__(url, folder) tag, kwargs = SCRAPER_SELECTORS[selector_key] self._tag = tag kwargs = dict(kwargs) attrs = kwargs.pop("attrs", None) if attrs: kwargs["attrs"] = attrs self._kwargs = kwargs def extract_main_content(self, soup): return soup.find(self._tag, **self._kwargs) class PymupdfScraper(BaseScraper): def extract_main_content(self, soup): article_container = soup.find("div", class_="article-container") if article_container: return article_container.find("section") return None _MINTLIFY_MDX_COMPONENTS = { "Tabs": "UNWRAP", "Tab": "HEADING", "CodeGroup": "UNWRAP", "CardGroup": 
"UNWRAP", "Card": "HEADING", "Steps": "UNWRAP", "Step": "HEADING", "AccordionGroup": "UNWRAP", "Accordion": "HEADING", "Frame": "UNWRAP", "Tooltip": "UNWRAP", "Note": "QUOTE", "Warning": "QUOTE", "Tip": "QUOTE", "Info": "QUOTE", "Caution": "QUOTE", } _MINTLIFY_DOC_INDEX_RE = re.compile( r"^> ## Documentation Index\n(?:>.*\n)+\n*", flags=re.MULTILINE, ) _MINTLIFY_FENCE_OPEN_RE = re.compile(r"^(\s*)```(\S+)(\s+.*)?$") _MINTLIFY_FENCE_KV_ATTR_RE = re.compile(r"\s+\w+=(?:\{[^}]*\}|\S+)") _MINTLIFY_TITLE_ATTR_RE = re.compile(r'\btitle="([^"]*)"') def _mintlify_unwrap(md, name): pat = re.compile(rf"<{name}(\s[^>]*)?>(.*?)", re.DOTALL) while True: new = pat.sub( lambda m: "\n\n" + textwrap.dedent(m.group(2)).strip("\n") + "\n\n", md, ) if new == md: return new md = new def _mintlify_heading(md, name): pat = re.compile(rf"<{name}(\s[^>]*)?>(.*?)", re.DOTALL) while True: def repl(m): attrs = m.group(1) or "" inner = textwrap.dedent(m.group(2)).strip("\n") tm = _MINTLIFY_TITLE_ATTR_RE.search(attrs) if tm: title = tm.group(1).replace("#", r"\#") return f"\n\n## {title}\n\n{inner}\n\n" return f"\n\n{inner}\n\n" new = pat.sub(repl, md) if new == md: return new md = new def _mintlify_quote(md, name): pat = re.compile(rf"<{name}(\s[^>]*)?>(.*?)", re.DOTALL) while True: def repl(m): inner = textwrap.dedent(m.group(2)).strip("\n") lines = inner.split("\n") return "\n\n" + "\n".join(f"> {ln}" for ln in lines) + "\n\n" new = pat.sub(repl, md) if new == md: return new md = new def _mintlify_normalize_fences(md_text): out = [] for line in md_text.split("\n"): m = _MINTLIFY_FENCE_OPEN_RE.match(line) if not m: out.append(line) continue indent, lang, rest = m.group(1), m.group(2), (m.group(3) or "") label = _MINTLIFY_FENCE_KV_ATTR_RE.sub("", rest).strip() if label: out.append(f"{indent}**{label}**") out.append("") out.append(f"{indent}```{lang}") return "\n".join(out) def render_mintlify_markdown(md_text): md_text = _MINTLIFY_DOC_INDEX_RE.sub("", md_text, count=1) for name, action in _MINTLIFY_MDX_COMPONENTS.items(): if action == "UNWRAP": md_text = _mintlify_unwrap(md_text, name) elif action == "HEADING": md_text = _mintlify_heading(md_text, name) elif action == "QUOTE": md_text = _mintlify_quote(md_text, name) md_text = _mintlify_normalize_fences(md_text) return markdown.markdown( md_text, extensions=["fenced_code", "tables", "attr_list"], output_format="html", ) class MintlifyScraper(BaseScraper): async def collect_seed_urls(self, session): parsed_seed = urlparse(self.url) seed_prefix = parsed_seed.path.rstrip("/") llms_url = f"{parsed_seed.scheme}://{parsed_seed.netloc}/llms.txt" try: resp = await session.get(llms_url, timeout=30, allow_redirects=True) except Exception: return [] if resp.status_code != 200: return [] urls = [] for line in resp.text.split("\n"): m = re.search(r"\((https?://[^)\s]+\.md)\)", line) if not m: continue base_url = m.group(1)[:-3] # strip ".md" p = urlparse(base_url) if p.netloc != parsed_seed.netloc: continue if seed_prefix and not p.path.startswith(seed_prefix): continue urls.append(base_url) return urls def fetch_url_for(self, url): u = url.rstrip("/") if u.endswith(".md"): return u return u + ".md" def transform_response(self, text, url): head = text.lstrip()[:200].lower() if head.startswith("{rendered}" def extract_main_content(self, soup): return soup.body if soup.body else soup class DivIdContentSecondScraper(BaseScraper): def extract_main_content(self, soup): content_divs = soup.find_all("div", id="content") if len(content_divs) >= 2: return content_divs[1] return None class 
PropCacheScraper(BaseScraper): def __init__(self, url, folder): super().__init__(url, folder) if self.url.rstrip("/").endswith("propcache.aio-libs.org"): self.url = urljoin(self.url, "en/latest/") if not self.url.endswith("/"): self.url += "/" self.base_url = self.url def extract_main_content(self, soup): return soup.find("div", class_="body", attrs={"role": "main"}) class FileDownloader(BaseScraper): def extract_main_content(self, soup): return None async def save_file(self, content: bytes, url: str, save_dir: str): from pathlib import Path basename = Path(url).name or "download" filename = os.path.join(save_dir, basename) async with aiofiles.open(filename, "wb") as f: await f.write(content) class ScraperRegistry: _special_scrapers = { "BaseScraper": BaseScraper, "PymupdfScraper": PymupdfScraper, "DivIdContentSecondScraper": DivIdContentSecondScraper, "PropCacheScraper": PropCacheScraper, "MintlifyScraper": MintlifyScraper, "FileDownloader": FileDownloader, } @classmethod def get_scraper(cls, scraper_name): if scraper_name in cls._special_scrapers: return cls._special_scrapers[scraper_name] if scraper_name in SCRAPER_SELECTORS: key = scraper_name return lambda url, folder: SelectorScraper(url, folder, key) return BaseScraper class ScraperWorker(QObject): status_updated = Signal(str, str) scraping_finished = Signal(str, bool, bool) RATE_LIMIT_THRESHOLD = 5 def __init__(self, url, folder, scraper_class=BaseScraper, name="", resume=False): super().__init__() self.url = url self.folder = folder self.name = name self.scraper = scraper_class(url, folder) self.save_dir = self.scraper.save_dir os.makedirs(self.save_dir, exist_ok=True) self.stats = {"scraped": 0} self._loop = None self._task = None self._cancelled = False self._rate_limited = False self._429s_since_last_success = 0 self.resume = resume self._log_lock = None def run(self): self._loop = asyncio.new_event_loop() asyncio.set_event_loop(self._loop) try: self._task = self._loop.create_task(self.crawl_domain()) try: self._loop.run_until_complete(self._task) except asyncio.CancelledError: pass finally: if not self._cancelled and not self._rate_limited: self._finalize_clean_run() self.cleanup() self._loop.close() self.scraping_finished.emit(self.name, self._cancelled, self._rate_limited) def _finalize_clean_run(self): try: for fname in os.listdir(self.save_dir): if fname.endswith(".links.json"): try: os.remove(os.path.join(self.save_dir, fname)) except Exception: pass except Exception: pass log_file = os.path.join(self.save_dir, "failed_urls.log") try: if os.path.exists(log_file) and os.path.getsize(log_file) == 0: os.remove(log_file) except Exception: pass def cancel(self): self._cancelled = True if self._loop and self._task and not self._task.done(): self._loop.call_soon_threadsafe(self._task.cancel) def count_saved_files(self): return len([f for f in os.listdir(self.save_dir) if f.endswith(".html")]) async def crawl_domain( self, max_concurrent_requests: int = 20, batch_size: int = 50, page_limit: int = 5_000, ): parsed_url = urlparse(self.url) acceptable_domain = parsed_url.netloc acceptable_domain_extension = parsed_url.path.rstrip("/") log_file = os.path.join(self.save_dir, "failed_urls.log") semaphore = asyncio.BoundedSemaphore(max_concurrent_requests) visited = set() if self.resume: to_visit = self._build_resume_queue(log_file) else: to_visit = [self.url] async def process_batch(batch_urls, session): pending = [ (u, self.fetch( session, u, acceptable_domain, semaphore, self.save_dir, log_file, acceptable_domain_extension, )) for u 
in batch_urls if u not in visited ] urls_for_tasks = [u for u, _ in pending] tasks = [t for _, t in pending] results = await asyncio.gather(*tasks, return_exceptions=True) visited.update(batch_urls) out = [] for url, r in zip(urls_for_tasks, results): if isinstance(r, set): out.append(r) elif isinstance(r, Exception): print(f"Scrape task for {url} raised {type(r).__name__}: {r}") try: await self.log_failed_url(url, log_file) except Exception: pass return out async with AsyncSession(impersonate="chrome") as session: if not self.resume and hasattr(self.scraper, "collect_seed_urls"): try: extra = await self.scraper.collect_seed_urls(session) if extra: already = set(to_visit) for u in extra: if u not in already: to_visit.append(u) already.add(u) except Exception as e: print(f"collect_seed_urls failed: {type(e).__name__}: {e}") while to_visit: if self._cancelled or self._rate_limited: break current_batch = to_visit[:batch_size] to_visit = to_visit[batch_size:] for new_links in await process_batch(current_batch, session): new_to_visit = new_links - visited to_visit.extend(new_to_visit) if self._rate_limited: break await asyncio.sleep(0.2) if len(visited) >= page_limit: break return visited def _build_resume_queue(self, log_file): candidates = set() try: for fname in os.listdir(self.save_dir): if fname.endswith(".links.json"): try: with open(os.path.join(self.save_dir, fname), "r", encoding="utf-8") as f: for link in json.load(f): if isinstance(link, str): candidates.add(link) except Exception: pass except Exception: pass if os.path.exists(log_file): try: with open(log_file, "r", encoding="utf-8") as f: for line in f: line = line.strip() if line: candidates.add(line) except Exception: pass try: os.remove(log_file) except Exception: pass candidates.add(self.url) return list(candidates) async def fetch( self, session, url, base_domain, semaphore, save_dir, log_file, acceptable_domain_extension, retries: int = 3, ): filename = os.path.join(save_dir, self.sanitize_filename(url) + ".html") if os.path.exists(filename): return set() fetch_url = ( self.scraper.fetch_url_for(url) if hasattr(self.scraper, "fetch_url_for") else url ) has_response_transform = hasattr(self.scraper, "transform_response") async with semaphore: for attempt in range(1, retries + 1): if self._rate_limited or self._cancelled: return set() try: response = await session.get(fetch_url, timeout=30, allow_redirects=True) except (asyncio.TimeoutError, RequestsError, OSError): if attempt == retries: await self.log_failed_url(url, log_file) self.stats["scraped"] = self.count_saved_files() self.status_updated.emit(self.name, str(self.stats["scraped"])) await asyncio.sleep(2) continue if response.status_code == 429: self._429s_since_last_success += 1 if self._429s_since_last_success >= self.RATE_LIMIT_THRESHOLD: self._rate_limited = True await self.log_failed_url(url, log_file) self.stats["scraped"] = self.count_saved_files() self.status_updated.emit(self.name, str(self.stats["scraped"])) return set() if response.status_code != 200: await self.log_failed_url(url, log_file) self.stats["scraped"] = self.count_saved_files() self.status_updated.emit(self.name, str(self.stats["scraped"])) return set() self._429s_since_last_success = 0 content_type = response.headers.get("content-type", "").lower() if not has_response_transform and "text/html" not in content_type: self.stats["scraped"] = self.count_saved_files() self.status_updated.emit(self.name, str(self.stats["scraped"])) return set() html = response.text if has_response_transform: try: html = 
self.scraper.transform_response(html, url) except Exception: await self.log_failed_url(url, log_file) self.stats["scraped"] = self.count_saved_files() self.status_updated.emit(self.name, str(self.stats["scraped"])) return set() try: links = self.extract_links( html, url, base_domain, acceptable_domain_extension ) await self.save_html(html, url, save_dir, links=links) except Exception: await self.log_failed_url(url, log_file) self.stats["scraped"] = self.count_saved_files() self.status_updated.emit(self.name, str(self.stats["scraped"])) return set() self.stats["scraped"] = self.count_saved_files() self.status_updated.emit(self.name, str(self.stats["scraped"])) return links return set() async def save_html(self, content, url, save_dir, links=None): filename = os.path.join(save_dir, self.sanitize_filename(url) + ".html") soup = BeautifulSoup(content, "lxml") processed_soup = self.scraper.process_html(soup) source_link = processed_soup.new_tag("a", href=url) source_link.string = "Original Source" if processed_soup.body: processed_soup.body.insert(0, source_link) elif processed_soup.html: new_body = processed_soup.new_tag("body") new_body.insert(0, source_link) processed_soup.html.insert(0, new_body) else: new_html = processed_soup.new_tag("html") new_body = processed_soup.new_tag("body") new_body.insert(0, source_link) new_html.insert(0, new_body) processed_soup.insert(0, new_html) try: async with aiofiles.open(filename, "x", encoding="utf-8") as f: await f.write(str(processed_soup)) except FileExistsError: pass if links: sidecar = filename[:-5] + ".links.json" tmp = sidecar + ".tmp" try: async with aiofiles.open(tmp, "w", encoding="utf-8") as f: await f.write(json.dumps(sorted(links))) await asyncio.to_thread(os.replace, tmp, sidecar) except Exception: try: await asyncio.to_thread(os.remove, tmp) except Exception: pass def sanitize_filename(self, url: str) -> str: original_url = url base_url = url.split("?", 1)[0].split("#", 1)[0] for open_br, close_br in ("[]", "()"): while open_br in base_url and close_br in base_url: start, end = base_url.find(open_br), base_url.find(close_br) if 0 <= start < end: base_url = base_url[:start] + base_url[end + 1 :] filename = ( base_url.replace("https://", "") .replace("http://", "") .replace("/", "_") .replace("\\", "_") ) for ch in '<>:"|?*': filename = filename.replace(ch, "_") if filename.lower().endswith(".html"): filename = filename[:-5] reserved = {"con", "prn", "aux", "nul"} | {f"com{i}" for i in range(1, 10)} | {f"lpt{i}" for i in range(1, 10)} if filename.strip(" .").lower() in reserved: filename = f"file_{filename}" need_hash = ("?" in original_url or "#" in original_url) MAX_WIN_PATH = 250 full_path = os.path.join(self.save_dir, filename + ".html") if need_hash or len(full_path) > MAX_WIN_PATH: allowed = MAX_WIN_PATH - len(self.save_dir) - len(os.sep) - len(".html") - 9 allowed = max(1, allowed) filename = ( filename[:allowed] + "_" + hashlib.md5(original_url.encode()).hexdigest()[:8] ) return filename.rstrip(". 
") async def log_failed_url(self, url, log_file): if self._log_lock is None: self._log_lock = asyncio.Lock() async with self._log_lock: async with aiofiles.open(log_file, "a", encoding="utf-8") as f: await f.write(url + "\n") def extract_links( self, html, base_url, base_domain, acceptable_domain_extension, ): soup = BeautifulSoup(html, "lxml") links = set() for a_tag in soup.find_all("a", href=True): href = a_tag["href"].replace("&num;", "#") if href.startswith("www."): href = "https://" + href elif href.startswith("https/"): href = "https://" + href[len("https/"):] elif href.startswith("http/"): href = "http://" + href[len("http/"):] url = ( urljoin(f"https://{base_domain}", href) if href.startswith("/") else urljoin(base_url, href) ) p = urlsplit(url) canon = urlunsplit((p.scheme, p.netloc, p.path, "", "")) if self.is_valid_url( canon, base_domain, acceptable_domain_extension ): links.add(canon) return links def is_valid_url(self, url, base_domain, acceptable_domain_extension): def strip_www(netloc: str) -> str: return netloc[4:] if netloc.startswith("www.") else netloc parsed = urlparse(url) if strip_www(parsed.netloc) != strip_www(base_domain): return False if acceptable_domain_extension: base_no_version = _strip_trailing_version(acceptable_domain_extension) return ( parsed.path.startswith(acceptable_domain_extension) or parsed.path.startswith(base_no_version) ) return True def cleanup(self): pass ================================================ FILE: modules/transcribe.py ================================================ from multiprocessing import Process from pathlib import Path import warnings import shutil import json import torch import av import whisper_s2t from whisper_s2t.backends.ctranslate2.hf_utils import download_model from core.extract_metadata import extract_typed_metadata from core.constants import WHISPER_MODELS, PROJECT_ROOT warnings.filterwarnings("ignore") current_directory = PROJECT_ROOT CACHE_DIR = current_directory / "Models" / "whisper" CACHE_DIR.mkdir(parents=True, exist_ok=True) class WhisperTranscriber: def __init__(self, model_key, batch_size): model_info = WHISPER_MODELS[model_key] self.model_identifier = model_info['repo_id'] self.compute_type = model_info['precision'] self.batch_size = batch_size self.cache_dir = str(CACHE_DIR) script_dir = PROJECT_ROOT self.model_dir = script_dir / "Models" / "whisper" self.model_dir.mkdir(parents=True, exist_ok=True) self.model_kwargs = { 'compute_type': self.compute_type, 'asr_options': { "beam_size": 5, "best_of": 1, "patience": 2, "length_penalty": 1, "repetition_penalty": 1.01, "no_repeat_ngram_size": 0, "compression_ratio_threshold": 2.4, "log_prob_threshold": -1.0, "no_speech_threshold": 0.5, "prefix": None, "suppress_blank": True, "suppress_tokens": [-1], "without_timestamps": True, "max_initial_timestamp": 1.0, "word_timestamps": False, "sampling_temperature": 1.0, "return_scores": True, "return_no_speech_prob": True, "word_aligner_model": 'tiny', }, 'model_identifier': self.model_identifier, } if 'large-v3' in self.model_identifier: self.model_kwargs['n_mels'] = 128 def start_transcription_process(self, audio_file): self.audio_file = audio_file process = Process(target=self.transcribe_and_create_document) process.start() process.join() if process.exitcode is not None and process.exitcode != 0: raise RuntimeError(f"Transcription worker exited with code {process.exitcode}") @torch.inference_mode() def transcribe_and_create_document(self): audio_file_str = str(self.audio_file) converted_audio_file = 
self.convert_to_wav(audio_file_str) try: downloaded_path = download_model( size_or_id=self.model_identifier, cache_dir=str(CACHE_DIR) ) model_kwargs = self.model_kwargs.copy() model_kwargs.pop('model_identifier', None) model_kwargs.pop('cache_dir', None) model = whisper_s2t.load_model( model_identifier=downloaded_path, **model_kwargs ) transcription = self.transcribe(model, [str(converted_audio_file)]) self.create_document_object(transcription, audio_file_str) except Exception as e: print(f"Error during transcription: {e}") raise finally: if converted_audio_file != audio_file_str and Path(converted_audio_file).exists(): try: Path(converted_audio_file).unlink() print(f"Deleted temporary file: {converted_audio_file}") except Exception as e: print(f"Error deleting temporary file {converted_audio_file}: {e}") def convert_to_wav(self, audio_file): if self.is_correct_format(audio_file): print("File is already in the correct format. No pre-processing is necessary.") return str(audio_file) ffmpeg_available = shutil.which('ffmpeg') is not None if ffmpeg_available: print("FFmpeg detected. Sending the audio file to WhisperS2T for pre-processing and transcription.") return str(audio_file) else: print("FFmpeg not detected. Pre-processing with the av library then sending to WhisperS2T for transcription.") output_file = f"{Path(audio_file).stem}_temp_converted.wav" output_path = PROJECT_ROOT / output_file return self.convert_with_av(audio_file, output_path) def is_correct_format(self, audio_file): try: with av.open(audio_file) as container: stream = container.streams.audio[0] return stream.sample_rate == 16000 and stream.channels == 1 and container.format.name == 'wav' except Exception as e: print(f"Error checking audio format: {e}") return False def convert_with_av(self, audio_file, output_path): try: with av.open(audio_file) as input_container, \ av.open(str(output_path), mode='w') as output_container: input_stream = input_container.streams.audio[0] output_stream = output_container.add_stream('pcm_s16le', rate=16000) output_stream.channels = 1 resampler = av.AudioResampler(format='s16', layout='mono', rate=16000) for frame in input_container.decode(audio=0): frame.pts = None resampled_frames = resampler.resample(frame) if resampled_frames: for resampled_frame in resampled_frames: for packet in output_stream.encode(resampled_frame): output_container.mux(packet) for packet in output_stream.encode(None): output_container.mux(packet) print("Conversion using av complete.") return str(output_path) except Exception as e: print(f"Error converting file with av library {audio_file}: {e}") raise def transcribe(self, model, files, lang_codes=['en'], tasks=['transcribe'], initial_prompts=[None]): out = model.transcribe_with_vad(files, lang_codes=lang_codes, tasks=tasks, initial_prompts=initial_prompts, batch_size=self.batch_size) transcription = " ".join([_['text'] for _ in out[0]]).strip() return transcription def create_document_object(self, transcription_text, audio_file_path): metadata = extract_typed_metadata(audio_file_path, "audio") script_dir = PROJECT_ROOT docs_dir = script_dir / "Docs_for_DB" docs_dir.mkdir(exist_ok=True) audio_file_name = Path(audio_file_path).stem json_file_path = docs_dir / f"{audio_file_name}.json" doc_dict = { "page_content": transcription_text, "metadata": metadata } json_file_path.write_text(json.dumps(doc_dict, indent=4), encoding='utf-8') ================================================ FILE: modules/tts.py ================================================ import queue import re 
import threading from pathlib import Path import io import numpy as np import sounddevice as sd import torch import yaml from tqdm import tqdm from transformers import AutoProcessor, BarkModel import soundfile as sf from gtts import gTTS from gtts.tokenizer import pre_processors, tokenizer_cases from core.utilities import my_cprint from core.constants import WHISPER_SPEECH_MODELS, PROJECT_ROOT current_directory = PROJECT_ROOT CACHE_DIR = current_directory / "models" / "tts" CACHE_DIR.mkdir(parents=True, exist_ok=True) class BaseAudio: def __init__(self): self.sentence_queue = queue.Queue() self.processing_queue = queue.Queue() self.audio_queue = queue.Queue() self.stop_event = threading.Event() self.lock = threading.Lock() self.config = {} self.processing_thread = None def load_config(self, config_file, section): with open(config_file, 'r', encoding='utf-8') as f: config_data = yaml.safe_load(f) if section in config_data: self.config = config_data[section] else: print(f"Warning: Section '{section}' not found in config file.") self.config = {} def initialize_device(self): if torch.cuda.is_available(): self.device = 'cuda' else: raise RuntimeError("CUDA is not available, but it's required for this program.") def play_audio_from_queue(self): while not self.stop_event.is_set(): try: queue_item = self.audio_queue.get(timeout=5) if queue_item is None or self.stop_event.is_set(): break audio_array, sampling_rate = queue_item try: if len(audio_array.shape) == 1: audio_array = np.expand_dims(audio_array, axis=1) elif len(audio_array.shape) == 2 and audio_array.shape[1] != 1: audio_array = audio_array.T sd.play(audio_array, samplerate=sampling_rate) sd.wait() except Exception as e: print(f"Error playing audio: {e}") except queue.Empty: if self.processing_thread is None or not self.processing_thread.is_alive(): break def run(self, input_text_file): try: with open(input_text_file, 'r', encoding='utf-8') as file: input_text = file.read() sentences = re.split(r'[.!?;]+\s*', input_text) except Exception as e: print(f"Error reading {input_text_file}: {e}") return self.processing_thread = threading.Thread(target=self.process_text_to_audio, args=(sentences,)) playback_thread = threading.Thread(target=self.play_audio_from_queue) self.processing_thread.daemon = True playback_thread.daemon = True self.processing_thread.start() playback_thread.start() self.processing_thread.join() playback_thread.join() def stop(self): self.stop_event.set() self.audio_queue.put(None) class ChatterboxAudio(BaseAudio): PITCH_FACTOR = 0.93 SPEED_FACTOR = 0.93 def __init__(self): super().__init__() import torch, warnings device_pref = getattr(self, "DEVICE", "auto") self.device = self._select_device(device_pref) if self.device != "cpu": _orig_load = torch.load torch.load = lambda *a, **k: _orig_load( *a, **{**k, "map_location": k.get("map_location", self.device)} ) if self.device == "cuda": torch.backends.cudnn.benchmark = True from chatterbox.tts import ChatterboxTTS print(f"Loading Chatterbox TTS on [{self.device}] …") self.model = ChatterboxTTS.from_pretrained(device=self.device) self.sr = self.model.sr print("Model ready!") accent = getattr(self, "ACCENT_PRESET", None) if accent and hasattr(self, "ACCENT_SETTINGS"): style = self.ACCENT_SETTINGS.get(accent, {}) self.exaggeration = style.get("exaggeration", 0.5) self.cfg_weight = style.get("cfg_weight", 0.5) else: self.exaggeration = getattr(self, "EXAGGERATION", 0.5) self.cfg_weight = getattr(self, "CFG_WEIGHT", 0.5) self.pitch_factor = getattr(self, "PITCH_FACTOR", 1.0) 
self.speed_factor = getattr(self, "SPEED_FACTOR", 1.0) self.tone = getattr(self, "TONE", "neutral") self.normalise = getattr(self, "NORMALISE", True) self.int16_output = getattr(self, "INT16_OUTPUT", False) def _select_device(self, pref): import torch, warnings pref = pref.lower() if pref == "cpu": return "cpu" if pref in ("gpu", "cuda"): if torch.cuda.is_available(): return "cuda" if getattr(self, "GPU_STRICT", False): raise RuntimeError("CUDA requested but unavailable.") warnings.warn("CUDA not available – falling back to CPU.", RuntimeWarning) return "cpu" if torch.cuda.is_available(): return "cuda" if torch.backends.mps.is_available(): return "mps" return "cpu" @staticmethod def _apply_voice_modifications(wav, sr, pitch_factor=1.0, speed_factor=1.0, tone="neutral"): try: import torch, torchaudio.functional as F if pitch_factor != 1.0: tgt_sr = int(sr * pitch_factor) wav = F.resample(wav, sr, tgt_sr) wav = F.resample(wav, tgt_sr, sr) if speed_factor != 1.0 and wav.numel(): tgt_len = int(wav.shape[-1] / speed_factor) if tgt_len > 0: wav = torch.nn.functional.interpolate( wav.unsqueeze(0), size=tgt_len, mode="linear", align_corners=False ).squeeze(0) if tone == "happy": wav *= 1.1 elif tone == "serious": wav *= 0.9 elif tone == "calm": wav = torch.tanh(wav * 0.8) elif tone == "excited": wav *= 1.2 return torch.clamp(wav, -1.0, 1.0) except Exception as e: print(f"Voice-mod skipped: {e}") return wav @torch.inference_mode() def process_text_to_audio(self, sentences): import numpy as np, torch for sentence in sentences: if not sentence.strip() or self.stop_event.is_set(): continue try: wav = self.model.generate( sentence, exaggeration=self.exaggeration, cfg_weight=self.cfg_weight, ) except Exception as e: print(f"Generation failed: {e}") self.audio_queue.put(None) break wav = self._apply_voice_modifications( wav, self.sr, pitch_factor=self.pitch_factor, speed_factor=self.speed_factor, tone=self.tone, ) audio = wav.squeeze().cpu().numpy().astype(np.float32) if self.normalise and audio.size: peak = np.max(np.abs(audio)) if peak: audio /= peak if self.int16_output: audio = (audio * 32767).astype(np.int16) self.audio_queue.put((audio, self.sr)) if torch.cuda.is_available(): del wav torch.cuda.empty_cache() self.audio_queue.put(None) class BarkAudio(BaseAudio): def __init__(self): super().__init__() self.load_config('config.yaml', 'bark') self.initialize_device() self.initialize_model_and_processor() def initialize_model_and_processor(self): repository_id = "suno/bark" if self.config['size'] == 'normal' else f"suno/bark-{self.config['size']}" self.processor = AutoProcessor.from_pretrained(repository_id, token=False, cache_dir=CACHE_DIR) self.model = BarkModel.from_pretrained( repository_id, torch_dtype=torch.float16, cache_dir=CACHE_DIR, token=False ).to(self.device) self.model.eval() my_cprint("Bark model loaded (float16)", "green") @torch.inference_mode() def process_text_to_audio(self, sentences): for sentence in tqdm(sentences, desc="Processing Sentences"): if sentence.strip(): print(f"Processing sentence: {sentence}") try: inputs = self.processor(text=sentence, voice_preset=self.config['speaker'], return_tensors="pt") inputs = {k: v.to(self.device) if hasattr(v, 'to') else v for k, v in inputs.items()} speech_output = self.model.generate( **inputs, use_cache=True, do_sample=True, pad_token_id=0, ) audio_array = speech_output[0].cpu().numpy() audio_array = np.int16(audio_array / np.max(np.abs(audio_array)) * 32767) self.audio_queue.put((audio_array, self.model.generation_config.sample_rate)) 
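                    # Bark returns a float waveform; it is peak-normalised to int16 above and queued
                    # together with the model's native sample rate for playback.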
except Exception as e: print(f"Exception during audio generation: {str(e)}") continue self.audio_queue.put(None) class WhisperSpeechAudio(BaseAudio): def __init__(self): super().__init__() self.load_config('config.yaml', 'tts') self.pipe = None self.initialize_model() # Models known to be incompatible with CUDA graph capture; CUDA graphs # are auto-disabled when either the s2a or t2s side picks one of these. CUDA_GRAPH_INCOMPATIBLE_MODELS = { 's2a-v1.95-small-fast-en.model', 't2s-v1.1-small-en+pl.model', } def get_whisper_speech_models(self): s2a_model = self.config.get('s2a', 's2a-q4-hq-fast-en+pl.model') s2a = f"WhisperSpeech/WhisperSpeech:{s2a_model}" t2s_model = self.config.get('t2s', 't2s-base-en+pl.model') t2s = f"WhisperSpeech/WhisperSpeech:{t2s_model}" return s2a, t2s, s2a_model, t2s_model def initialize_model(self): s2a, t2s, s2a_model, t2s_model = self.get_whisper_speech_models() from whisperspeech2.pipeline import Pipeline use_cuda_graph = ( torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0) and s2a_model not in self.CUDA_GRAPH_INCOMPATIBLE_MODELS and t2s_model not in self.CUDA_GRAPH_INCOMPATIBLE_MODELS ) try: self.pipe = Pipeline( s2a_ref=s2a, t2s_ref=t2s, optimize=True, torch_compile=False, use_cuda_graph=use_cuda_graph, ) my_cprint(f"{s2a.split(':')[-1]} loaded\n{t2s.split(':')[-1]} loaded.", "green") except Exception as e: my_cprint(f"Error initializing WhisperSpeech models: {str(e)}", "red") self.pipe = None @torch.inference_mode() def process_text_to_audio(self, sentences): speaker = self.config.get("speaker") or "default" for sentence in tqdm(sentences, desc="Processing Sentences"): if sentence and not self.stop_event.is_set(): try: audio_tensor = self.pipe.generate(sentence, speaker=speaker) audio_np = (audio_tensor.cpu().numpy() * 32767).astype(np.int16) if len(audio_np.shape) == 1: audio_np = np.expand_dims(audio_np, axis=1) else: audio_np = audio_np.T self.audio_queue.put((audio_np, 24000)) except Exception as e: my_cprint(f"Error processing sentence: {str(e)}", "red") self.audio_queue.put(None) def run(self, input_text_file): self.initialize_device() super().run(input_text_file) class ChatTTSAudio(BaseAudio): def __init__(self): super().__init__() global ChatTTS import ChatTTS print("Initializing ChatTTSAudio...") self.initialize_device() self.chat = ChatTTS.Chat() chattts_dir = CACHE_DIR / "2Noise--ChatTTS" chattts_dir.mkdir(parents=True, exist_ok=True) self.chat.load( source="huggingface", device=self.device, compile=False, use_flash_attn=False, ) torch.manual_seed(11) self.rand_spk = self.chat.sample_random_speaker() self.params_infer_code = ChatTTS.Chat.InferCodeParams( spk_emb=self.rand_spk, temperature=0.7, top_P=1, top_K=40, prompt='[speed_5]' ) self.params_refine_text = ChatTTS.Chat.RefineTextParams( prompt='[oral_0][laugh_0][break_0]', temperature=0.7, top_P=0.7, top_K=20 ) @torch.inference_mode() def process_text_to_audio(self, sentences): print(f"Starting text processing... 
({len(sentences)} sentences)") for sentence in sentences: if not sentence or not sentence.strip(): continue print(f"Processing sentence: {sentence}") try: wavs = self.chat.infer( sentence, params_refine_text=self.params_refine_text, params_infer_code=self.params_infer_code, split_text=False ) if wavs is not None and len(wavs) > 0: audio_data = wavs[0] if isinstance(audio_data, torch.Tensor): audio_data = audio_data.cpu().numpy() audio_data = audio_data.squeeze() if np.prod(audio_data.shape) > 0: print(f"Audio data shape: {audio_data.shape}") if np.abs(audio_data).max() > 1.0: audio_data = audio_data / np.abs(audio_data).max() print(f"Audio range: [{audio_data.min():.3f}, {audio_data.max():.3f}]") self.audio_queue.put((audio_data, 24000)) print("Audio data queued") except Exception as e: print(f"Error processing sentence: {str(e)}\n{type(e)}") import traceback print(traceback.format_exc()) continue print("Text processing complete, sending end signal") self.audio_queue.put(None) class GoogleTTSAudio: def __init__(self, lang='en', slow=False, tld='com', silence_threshold=0.01, max_silence_ms=100): self.lang = lang self.slow = slow self.tld = tld self.silence_threshold = silence_threshold self.max_silence_ms = max_silence_ms def run(self, input_text_file): try: with open(input_text_file, 'r', encoding='utf-8') as file: text = file.read() except FileNotFoundError: print(f"Error: File not found at {input_text_file}") return except IOError: print(f"Error: Unable to read file at {input_text_file}") return processed_text = self.preprocess_text(text) tokens = self.tokenize_and_minimize(processed_text) all_audio_data = [] samplerate = None for token in tokens: if token.strip(): print(f"Processing token: '{token}'") fp = io.BytesIO() if token.startswith(""): token = token[10:].strip() tts = gTTS(text=token, lang=self.lang, slow=self.slow, tld=self.tld) tts.write_to_fp(fp) fp.seek(0) data, samplerate = sf.read(fp) all_audio_data.append(data) if all_audio_data: combined_audio = np.concatenate(all_audio_data) processed_audio = self.trim_silence(combined_audio, samplerate) sd.play(processed_audio, samplerate) sd.wait() else: print("No audio data generated.") @staticmethod def preprocess_text(text): text = pre_processors.abbreviations(text) text = pre_processors.end_of_line(text) text = pre_processors.tone_marks(text) return text @staticmethod def tokenize_and_minimize(text): sentences = re.split('(?<=[.!?])\s+', text) minimized_tokens = [] for sentence in sentences: if len(sentence) <= 100: minimized_tokens.append(sentence) else: words = sentence.split() current_chunk = "" for word in words: if len(current_chunk) + len(word) + 1 > 100: if current_chunk: minimized_tokens.append(current_chunk.strip()) current_chunk = " " + word else: minimized_tokens.append(word) else: current_chunk += " " + word if current_chunk: minimized_tokens.append(current_chunk.strip()) return minimized_tokens def trim_silence(self, audio, samplerate): max_silence_samples = int(self.max_silence_ms * samplerate / 1000) is_silent = np.abs(audio) < self.silence_threshold silent_regions = np.where(np.diff(is_silent.astype(int)))[0] if len(silent_regions) < 2: return audio processed_chunks = [] start = 0 for i in range(0, len(silent_regions) - 1, 2): silence_start, silence_end = silent_regions[i], silent_regions[i + 1] chunk_start = max(start, silence_start - max_silence_samples) chunk_end = min(silence_end, silence_start + max_silence_samples) processed_chunks.append(audio[chunk_start:chunk_end]) start = silence_end 
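        # After the loop, keep whatever audio remains past the last silent region that was processed.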
processed_chunks.append(audio[start:]) return np.concatenate(processed_chunks) class KyutaiAudio(BaseAudio): REQUIRED_PACKAGES = { "moshi": "0.2.13", "sphn": "0.2.0" } def __init__(self): super().__init__() from core.utilities import check_and_install_dependencies if not check_and_install_dependencies( self.REQUIRED_PACKAGES, backend_name="Kyutai" ): raise RuntimeError("Kyutai dependencies not available") self.load_config('config.yaml', 'kyutai') self.initialize_device() self.initialize_model() def create_checkpoint_info_from_cache(self, downloaded_paths, raw_config, weight_files): from moshi.models.loaders import CheckpointInfo from pathlib import Path moshi_weights = Path(downloaded_paths[weight_files["moshi_name"]]) mimi_weights = Path(downloaded_paths[weight_files["mimi_name"]]) tokenizer_path = Path(downloaded_paths[weight_files["tokenizer_name"]]) lm_config = dict(raw_config) tts_config = lm_config.pop("tts_config", {}) stt_config = lm_config.pop("stt_config", {}) model_id = lm_config.pop("model_id", {}) lm_gen_config = lm_config.pop("lm_gen_config", {}) model_type = lm_config.pop("model_type", "moshi") lm_config.pop("moshi_name", None) lm_config.pop("mimi_name", None) lm_config.pop("tokenizer_name", None) lm_config.pop("lora_name", None) return CheckpointInfo( moshi_weights=moshi_weights, mimi_weights=mimi_weights, tokenizer=tokenizer_path, lm_config=lm_config, raw_config=raw_config, model_type=model_type, lora_weights=None, lm_gen_config=lm_gen_config, tts_config=tts_config, stt_config=stt_config, model_id=model_id, ) def initialize_model(self): try: torch._dynamo.config.disable = True import json from moshi.models.tts import TTSModel from huggingface_hub import hf_hub_download my_cprint("Loading Kyutai TTS model...", "yellow") hf_repo = self.config.get('hf_repo', 'kyutai/tts-1.6b-en_fr') n_q = self.config.get('n_q', 32) temp = self.config.get('temp', 0.6) config_path = hf_hub_download( repo_id=hf_repo, filename="config.json", cache_dir=CACHE_DIR, token=False, ) with open(config_path, 'r') as f: raw_config = json.load(f) weight_files = { "moshi_name": raw_config["moshi_name"], "mimi_name": raw_config["mimi_name"], "tokenizer_name": raw_config["tokenizer_name"], } required_files = ["config.json", *weight_files.values()] need_download = False for filename in required_files: try: hf_hub_download(repo_id=hf_repo, filename=filename, cache_dir=CACHE_DIR, token=False, local_files_only=True) except Exception: need_download = True break if need_download: my_cprint(f"Downloading Kyutai model files for {hf_repo}...", "yellow") downloaded_paths = {"config.json": config_path} for filename in weight_files.values(): try: file_path = hf_hub_download( repo_id=hf_repo, filename=filename, cache_dir=CACHE_DIR, token=False ) downloaded_paths[filename] = file_path except Exception as e: my_cprint(f"Error downloading {filename}: {e}", "red") raise checkpoint_info = self.create_checkpoint_info_from_cache( downloaded_paths, raw_config, weight_files ) self.tts_model = TTSModel.from_checkpoint_info( checkpoint_info, n_q=n_q, temp=temp, device=torch.device(self.device), ) my_cprint(f"Kyutai model loaded successfully! 
({hf_repo}, n_q: {n_q})", "green") self.setup_voice_conditioning() except Exception as e: my_cprint(f"Error initializing Kyutai model: {str(e)}", "red") raise def setup_voice_conditioning(self): if not self.tts_model.multi_speaker: my_cprint( "This Kyutai model is single-speaker and does not accept voice " "conditioning; voice may drift between sentences.", "yellow" ) self.condition_attributes = self.tts_model.make_condition_attributes([], cfg_coef=None) return try: voice_name = self.config.get('voice', 'expresso/ex03-ex01_happy_001_channel1_334s.wav') cfg_coef = self.config.get('cfg_coef', 2.0) if not self.tts_model.valid_cfg_conditionings: cfg_coef = None voice_path = self.tts_model.get_voice_path(voice_name) self.condition_attributes = self.tts_model.make_condition_attributes([voice_path], cfg_coef=cfg_coef) voice_display = self.config.get('voice_display_name', 'Happy Male') my_cprint(f"Voice conditioning loaded: {voice_display}", "green") except Exception as voice_error: my_cprint(f"Voice loading failed: {voice_error}", "yellow") my_cprint("Using model without voice conditioning", "yellow") self.condition_attributes = self.tts_model.make_condition_attributes([], cfg_coef=None) @torch.inference_mode() def generate_speech_for_sentence(self, sentence): try: entries = self.tts_model.prepare_script([sentence], padding_between=1) pcms = [] def on_frame(frame): if (frame != -1).all(): pcm = self.tts_model.mimi.decode(frame[:, 1:, :]).cpu().numpy() pcms.append(np.clip(pcm[0, 0], -1, 1)) all_entries = [entries] all_condition_attributes = [self.condition_attributes] with self.tts_model.mimi.streaming(len(all_entries)): result = self.tts_model.generate(all_entries, all_condition_attributes, on_frame=on_frame) if pcms: audio = np.concatenate(pcms, axis=-1) return audio else: return None except Exception as e: return None @torch.inference_mode() def process_text_to_audio(self, sentences): for sentence in tqdm(sentences, desc="Processing Sentences"): if not sentence.strip() or self.stop_event.is_set(): continue try: audio = self.generate_speech_for_sentence(sentence.strip()) if audio is not None: self.audio_queue.put((audio, self.tts_model.mimi.sample_rate)) else: print("Failed to generate audio for sentence") except Exception as e: print(f"Error processing sentence: {str(e)}") continue self.audio_queue.put(None) class KyutaiPocketAudio(BaseAudio): REQUIRED_PACKAGES = { "pocket_tts": "2.0.0" } def __init__(self): super().__init__() from core.utilities import check_and_install_dependencies if not check_and_install_dependencies( self.REQUIRED_PACKAGES, backend_name="Kyutai Pocket" ): raise RuntimeError("Kyutai Pocket dependencies not available") self.load_config('config.yaml', 'kyutaipocket') self.device = 'cpu' self.initialize_model() def initialize_model(self): try: from pocket_tts import TTSModel language = self.config.get('language', 'english') voice = self.config.get('voice', 'alba') quantize = self.config.get('quantize', True) temp = self.config.get('temp', 0.7) my_cprint(f"Loading Kyutai Pocket TTS model (language={language}, quantize={quantize})...", "yellow") self.tts_model = TTSModel.load_model( language=language, temp=temp, quantize=quantize, ) self.sample_rate = self.tts_model.sample_rate my_cprint(f"Loading voice: {voice}", "yellow") self.voice_state = self.tts_model.get_state_for_audio_prompt(voice) my_cprint(f"Kyutai Pocket model loaded successfully! 
(voice: {voice})", "green") except Exception as e: my_cprint(f"Error initializing Kyutai Pocket model: {str(e)}", "red") raise # Note: do NOT wrap these in @torch.inference_mode(). Pocket-TTS mutates the # pre-computed voice_state in place on every generate_audio call, and tensors # created outside inference_mode cannot be inplace-modified inside it. # The library handles its own @torch.no_grad internally. def generate_speech_for_sentence(self, sentence): try: audio = self.tts_model.generate_audio(self.voice_state, sentence) if isinstance(audio, torch.Tensor): audio_np = audio.cpu().numpy() else: audio_np = np.array(audio) if audio_np.ndim > 1: audio_np = audio_np.squeeze() return audio_np except Exception as e: print(f"Pocket-TTS generation failed: {e}") return None def process_text_to_audio(self, sentences): for sentence in tqdm(sentences, desc="Processing Sentences"): if not sentence.strip() or self.stop_event.is_set(): continue try: audio = self.generate_speech_for_sentence(sentence.strip()) if audio is not None and len(audio) > 0: self.audio_queue.put((audio, self.sample_rate)) else: print("Failed to generate audio for sentence") except Exception as e: print(f"Error processing sentence: {str(e)}") continue self.audio_queue.put(None) def run_tts(config_path, input_text_file): with open(config_path, 'r', encoding='utf-8') as file: config = yaml.safe_load(file) tts_model = config.get('tts', {}).get('model', 'bark') if tts_model == 'bark': audio_class = BarkAudio() elif tts_model == 'whisperspeech': audio_class = WhisperSpeechAudio() elif tts_model == 'chattts': audio_class = ChatTTSAudio() elif tts_model == 'googletts': audio_class = GoogleTTSAudio() elif tts_model == 'chatterbox': audio_class = ChatterboxAudio() elif tts_model == 'kyutai': audio_class = KyutaiAudio() elif tts_model == 'kyutaipocket': audio_class = KyutaiPocketAudio() else: raise ValueError(f"Invalid TTS model specified in config.yaml: {tts_model}") audio_class.run(input_text_file) ================================================ FILE: modules/voice_recorder.py ================================================ import tempfile from pathlib import Path import psutil import sounddevice as sd import numpy as np import soundfile as sf from PySide6.QtCore import QThread, Signal import whisper_s2t from core.utilities import my_cprint def get_logical_core_count(): return psutil.cpu_count(logical=False) CPU_THREADS = max(4, get_logical_core_count() - 8) DEVICE = "cpu" COMPUTE_TYPE = "float32" MODEL_IDENTIFIER = "ctranslate2-4you/distil-whisper-small.en-ct2-float32" class TranscriptionThread(QThread): transcription_complete = Signal(str) def __init__(self, audio_file, voice_recorder): super().__init__() self.audio_file = audio_file self.voice_recorder = voice_recorder def run(self): transcription_text = "" try: model_kwargs = { 'compute_type': COMPUTE_TYPE, 'model_identifier': MODEL_IDENTIFIER, "device": DEVICE, "cpu_threads": CPU_THREADS, } self.model = whisper_s2t.load_model(**model_kwargs) my_cprint("Whisper model loaded.", 'green') out = self.model.transcribe_with_vad([self.audio_file], lang_codes=['en'], tasks=['transcribe'], initial_prompts=[None], batch_size=4) transcription_text = " ".join(item['text'] for item in out[0]).strip() except Exception as e: my_cprint(f"Transcription error: {e}", 'red') transcription_text = f"[Transcription failed: {e}]" finally: self.transcription_complete.emit(transcription_text) try: Path(self.audio_file).unlink(missing_ok=True) except Exception: pass if hasattr(self, 'model'): del self.model 
class RecordingThread(QThread): def __init__(self, voice_recorder): super().__init__() self.voice_recorder = voice_recorder def run(self): self.voice_recorder.record_audio() class VoiceRecorder: def __init__(self, gui_instance, channels=1, rate=16000, chunk=1024): self.gui_instance = gui_instance self.channels, self.rate, self.chunk = channels, rate, chunk self.is_recording, self.frames = False, [] self.recording_thread = None self.transcription_thread = None def record_audio(self): def callback(indata, frames, time, status): if status: print(status) self.frames.append(indata.copy()) try: with sd.InputStream(samplerate=self.rate, channels=self.channels, callback=callback, blocksize=self.chunk): while self.is_recording: sd.sleep(100) except sd.PortAudioError as e: my_cprint(f"Audio recording error: {str(e)}", 'red') self.is_recording = False self.gui_instance.update_transcription("Error: Failed to access microphone") def save_audio(self): self.is_recording = False if not self.frames: my_cprint("No audio data recorded.", 'yellow') return with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f: temp_file = Path(f.name) audio_data = np.concatenate(self.frames, axis=0) sf.write(str(temp_file), audio_data, self.rate) self.frames.clear() if temp_file.stat().st_size < 1024: my_cprint("Recording too short, discarding.", 'yellow') temp_file.unlink() return self.transcription_thread = TranscriptionThread(str(temp_file), self) self.transcription_thread.transcription_complete.connect(self.gui_instance.update_transcription) self.transcription_thread.start() def start_recording(self): if not self.is_recording: self.is_recording = True self.recording_thread = RecordingThread(self) self.recording_thread.start() def stop_recording(self): self.is_recording = False if self.recording_thread is not None: self.recording_thread.wait() self.save_audio() ================================================ FILE: setup_windows.py ================================================ import os import subprocess import sys cache_dir = os.path.join( os.environ.get("USERPROFILE", os.path.expanduser("~")), ".triton" ) if os.path.isdir(cache_dir): print(f"\nRemoving Triton cache at {cache_dir} via OS command…") subprocess.run(f'rmdir /S /Q "{cache_dir}"', shell=True, check=False) print("Triton cache removed.\n") else: print("\nNo Triton cache found to clean.\n") import subprocess import time import tkinter as tk from tkinter import messagebox from tools.replace_sourcecode import ( replace_sentence_transformer_file, replace_chattts_file, add_cuda_files, setup_vector_db, check_embedding_model_dimensions, ) from core.constants import priority_libs, libs, full_install_libs start_time = time.time() def has_nvidia_gpu(): try: result = subprocess.run( ["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) return result.returncode == 0 except FileNotFoundError: return False python_version = f"cp{sys.version_info.major}{sys.version_info.minor}" hardware_type = "GPU" if has_nvidia_gpu() else "CPU" def tkinter_message_box(title, message, type="info", yes_no=False): root = tk.Tk() root.withdraw() if yes_no: result = messagebox.askyesno(title, message) elif type == "error": messagebox.showerror(title, message) result = False else: messagebox.showinfo(title, message) result = True root.destroy() return result def check_python_version_and_confirm(): major, minor = map(int, sys.version.split()[0].split('.')[:2]) if major == 3 and minor in [11, 12, 13]: return tkinter_message_box( "Confirmation", f"Python version 
{sys.version.split()[0]} was detected, which is compatible.\n\nClick YES to proceed or NO to exit.", yes_no=True ) else: tkinter_message_box( "Python Version Error", "This program requires Python 3.11, 3.12 or 3.13\n\nPython versions prior to 3.11 or after 3.14 are not yet supported.\n\nExiting the installer...", type="error" ) return False def is_nvidia_gpu_installed(): try: subprocess.check_output(["nvidia-smi"]) return True except (FileNotFoundError, subprocess.CalledProcessError): return False def manual_installation_confirmation(): if not tkinter_message_box("Confirmation", "Have you installed Git?\n\nClick YES to confirm or NO to cancel installation.", yes_no=True): return False if not tkinter_message_box("Confirmation", "Have you installed Git Large File Storage?\n\nClick YES to confirm or NO to cancel installation.", yes_no=True): return False if not tkinter_message_box("Confirmation", "Have you installed Pandoc?\n\nClick YES to confirm or NO to cancel installation.", yes_no=True): return False if not tkinter_message_box("Confirmation", "Have you installed Microsoft Build Tools and/or Visual Studio with the necessary libraries to compile code?\n\nClick YES to confirm or NO to cancel installation.", yes_no=True): return False return True if not check_python_version_and_confirm(): sys.exit(1) nvidia_gpu_detected = is_nvidia_gpu_installed() if nvidia_gpu_detected: message = "An NVIDIA GPU has been detected.\n\nDo you want to proceed with the installation?" else: message = "No NVIDIA GPU has been detected. An NVIDIA GPU is required for this script to function properly.\n\nDo you still want to proceed with the installation?" if not tkinter_message_box("GPU Detection", message, yes_no=True): sys.exit(1) if not manual_installation_confirmation(): sys.exit(1) def upgrade_pip_setuptools_wheel(max_retries=5, delay=3): upgrade_commands = [ [sys.executable, "-m", "pip", "install", "--upgrade", "pip", "--no-cache-dir"], [sys.executable, "-m", "pip", "install", "--upgrade", "setuptools", "--no-cache-dir"], [sys.executable, "-m", "pip", "install", "--upgrade", "wheel", "--no-cache-dir"] ] for command in upgrade_commands: package = command[5] for attempt in range(max_retries): try: print(f"\nAttempt {attempt + 1} of {max_retries}: Upgrading {package}...") subprocess.run(command, check=True, capture_output=True, text=True, timeout=480) print(f"\033[92mSuccessfully upgraded {package}\033[0m") break except subprocess.CalledProcessError as e: print(f"Attempt {attempt + 1} failed. 
Error: {e.stderr.strip()}") if attempt < max_retries - 1: print(f"Retrying in {delay} seconds...") time.sleep(delay) else: print(f"Failed to upgrade {package} after {max_retries} attempts.") except Exception as e: print(f"An unexpected error occurred while upgrading {package}: {str(e)}") if attempt < max_retries - 1: print(f"Retrying in {delay} seconds...") time.sleep(delay) else: print(f"Failed to upgrade {package} after {max_retries} attempts due to unexpected errors.") def pip_install(library, with_deps=False, max_retries=5, delay=3): pip_args = ["uv", "pip", "install", library] if not with_deps: pip_args.append("--no-deps") for attempt in range(max_retries): try: print(f"\nAttempt {attempt + 1} of {max_retries}: Installing {library}{' with dependencies' if with_deps else ''}") subprocess.run(pip_args, check=True, capture_output=True, text=True, timeout=600) print(f"\033[92mSuccessfully installed {library}{' with dependencies' if with_deps else ''}\033[0m") return attempt + 1 except subprocess.CalledProcessError as e: print(f"Attempt {attempt + 1} failed. Error: {e.stderr.strip()}") if attempt < max_retries - 1: print(f"Retrying in {delay} seconds...") time.sleep(delay) else: print(f"Failed to install {library} after {max_retries} attempts.") return 0 def install_libraries(libraries): failed_installations = [] multiple_attempts = [] for library in libraries: attempts = pip_install(library) if attempts == 0: failed_installations.append(library) elif attempts > 1: multiple_attempts.append((library, attempts)) time.sleep(0.1) return failed_installations, multiple_attempts def install_libraries_with_deps(libraries): failed_installations = [] multiple_attempts = [] for library in libraries: attempts = pip_install(library, with_deps=True) if attempts == 0: failed_installations.append(library) elif attempts > 1: multiple_attempts.append((library, attempts)) time.sleep(0.1) return failed_installations, multiple_attempts print("Upgrading pip, setuptools, and wheel:") upgrade_pip_setuptools_wheel() print("Installing uv:") subprocess.run(["pip", "install", "uv"], check=True) print("\nInstalling priority libraries:") try: hardware_specific_libs = priority_libs[python_version][hardware_type] try: common_libs = priority_libs[python_version]["COMMON"] except KeyError: common_libs = [] all_priority_libs = hardware_specific_libs + common_libs priority_failed, priority_multiple = install_libraries(all_priority_libs) except KeyError: tkinter_message_box("Version Error", f"No libraries configured for Python {python_version} with {hardware_type} configuration", type="error") sys.exit(1) print("\nInstalling other libraries:") other_failed, other_multiple = install_libraries(libs) print("\nInstalling libraries with dependencies:") full_install_failed, full_install_multiple = install_libraries_with_deps(full_install_libs) print("\n----- Installation Summary -----") all_failed = priority_failed + other_failed + full_install_failed all_multiple = priority_multiple + other_multiple + full_install_multiple if all_failed: print("\033[91m\nThe following libraries failed to install:\033[0m") for lib in all_failed: print(f"\033[91m- {lib}\033[0m") if all_multiple: print("\033[93m\nThe following libraries required multiple attempts to install:\033[0m") for lib, attempts in all_multiple: print(f"\033[93m- {lib} (took {attempts} attempts)\033[0m") if not all_failed and not all_multiple: print("\033[92mAll libraries installed successfully on the first attempt.\033[0m") elif not all_failed: print("\033[92mAll libraries 
were eventually installed successfully.\033[0m") if all_failed: sys.exit(1) from core.utilities import clean_triton_cache clean_triton_cache() replace_sentence_transformer_file() replace_chattts_file() add_cuda_files() setup_vector_db() check_embedding_model_dimensions() def create_directory_structure(): base_dir = os.path.dirname(os.path.abspath(__file__)) models_dir = os.path.join(base_dir, "Models") subdirs = ["chat", "tts", "vector", "vision", "whisper"] if not os.path.exists(models_dir): os.makedirs(models_dir) print(f"Created Models directory: {models_dir}") for subdir in subdirs: subdir_path = os.path.join(models_dir, subdir) os.makedirs(subdir_path, exist_ok=True) print(f"Ensured subdirectory exists: {subdir_path}") create_directory_structure() def update_config_yaml(): import yaml script_dir = os.path.dirname(os.path.abspath(__file__)) config_path = os.path.join(script_dir, 'config.yaml') with open(config_path, 'r', encoding='utf-8') as file: config = yaml.safe_load(file) or {} vector_model_path = os.path.join(script_dir, 'Models', 'vector', 'BAAI--bge-small-en-v1.5') if 'created_databases' not in config: config['created_databases'] = {} config['created_databases']['user_manual'] = { 'chunk_overlap': 599, 'chunk_size': 1200, 'model': vector_model_path } if 'openai' not in config: config['openai'] = {} if 'api_key' not in config['openai']: config['openai']['api_key'] = '' if 'model' not in config['openai']: config['openai']['model'] = 'gpt-4o-mini' if 'reasoning_effort' not in config['openai']: config['openai']['reasoning_effort'] = 'medium' if 'server' not in config: config['server'] = {} if 'api_key' not in config['server']: config['server']['api_key'] = '' if 'connection_str' not in config['server']: config['server']['connection_str'] = 'http://localhost:1234/v1' if 'show_thinking' not in config['server']: config['server']['show_thinking'] = 'medium' server_allowed_keys = {'api_key', 'connection_str', 'show_thinking'} server_keys = list(config['server'].keys()) for key in server_keys: if key not in server_allowed_keys: del config['server'][key] if 'chatterbox' not in config: config['chatterbox'] = { 'device': 'auto' } if 'minimax' not in config: config['minimax'] = {} if 'api_key' not in config['minimax']: config['minimax']['api_key'] = None if 'model' not in config['minimax']: config['minimax']['model'] = 'MiniMax-M2.7' with open(config_path, 'w', encoding='utf-8') as file: yaml.dump(config, file, default_flow_style=False) update_config_yaml() end_time = time.time() total_time = end_time - start_time hours, rem = divmod(total_time, 3600) minutes, seconds = divmod(rem, 60) print(f"\033[92m\nTotal installation time: {int(hours):02d}:{int(minutes):02d}:{seconds:05.2f}\033[0m") ================================================ FILE: tools/__init__.py ================================================ ================================================ FILE: tools/check_packages.py ================================================ import importlib.util import importlib.metadata import sys import urllib.request import json import subprocess from PySide6.QtWidgets import ( QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QLineEdit, QPushButton, QTableWidget, QTableWidgetItem, QLabel, QMessageBox, QProgressBar, QMenu, QFileDialog, QDialog, QHeaderView, QScrollArea, QCheckBox ) from PySide6.QtCore import Qt, QObject, QThread, Signal, QPoint from packaging import version def _get_latest_version(package_name): url = f"https://pypi.org/pypi/{package_name}/json" try: with 
urllib.request.urlopen(url, timeout=10) as response: if response.status != 200: raise Exception(f"PyPI returned status code {response.status}") return json.load(response)['info']['version'] except urllib.error.URLError as e: raise Exception(f"Network error: {str(e)}") except TimeoutError: raise Exception("Connection timed out") except json.JSONDecodeError: raise Exception("Invalid response from PyPI") except Exception as e: raise Exception(f"Error fetching version: {str(e)}") class OutdatedPackagesWorker(QObject): finished = Signal(list) error = Signal(str) def run(self): try: installed_packages = list(importlib.metadata.distributions()) outdated_packages = [] for dist in installed_packages: name = dist.metadata['Name'] version_installed = dist.version latest_version = _get_latest_version(name) if latest_version and version.parse(latest_version) > version.parse(version_installed): outdated_packages.append((name, version_installed, latest_version)) self.finished.emit(outdated_packages) except Exception as e: self.error.emit(str(e)) class VersionsWorker(QObject): finished = Signal(list) error = Signal(str) def __init__(self, package_name): super().__init__() self.package_name = package_name def run(self): try: versions = self.get_all_versions(self.package_name) self.finished.emit(versions) except Exception as e: self.error.emit(str(e)) def get_all_versions(self, package_name): url = f"https://pypi.org/pypi/{package_name}/json" versions = [] try: with urllib.request.urlopen(url, timeout=10) as response: if response.status != 200: raise Exception(f"PyPI returned status code {response.status}") data = json.load(response) for ver, release_info in data['releases'].items(): if release_info: release_date = release_info[0].get('upload_time', 'N/A') versions.append((ver, release_date)) versions = sorted(versions, key=lambda v: version.parse(v[0])) except urllib.error.URLError as e: raise Exception(f"Network error: {str(e)}") except TimeoutError: raise Exception("Connection timed out") except json.JSONDecodeError: raise Exception("Invalid response from PyPI") except Exception as e: raise Exception(f"Error fetching versions: {str(e)}") return versions class PipWorker(QObject): finished = Signal(str) error = Signal(str) def __init__(self, package_name, selected_version): super().__init__() self.package_name = package_name self.selected_version = selected_version def run(self): try: command = [sys.executable, "-m", "pip", "install", f"{self.package_name}=={self.selected_version}", "--no-deps"] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) stdout, stderr = process.communicate() if process.returncode == 0: self.finished.emit(stdout) else: self.error.emit(stderr) except Exception as e: self.error.emit(str(e)) class ImportWorker(QObject): finished = Signal(str) error = Signal(str) def __init__(self, packages): super().__init__() self.packages = packages def run(self): try: for package, version_installed in self.packages: command = [sys.executable, "-m", "pip", "install", f"{package}=={version_installed}", "--no-deps"] process = subprocess.run(command, capture_output=True, text=True) if process.returncode != 0: raise Exception(f"Failed to install {package}=={version_installed}:\n{process.stderr}") self.finished.emit("Import operation completed.") except Exception as e: self.error.emit(str(e)) class LatestVersionWorker(QObject): finished = Signal(str) error = Signal(str) def __init__(self, package_name): super().__init__() self.package_name = package_name def 
run(self): try: latest_version = _get_latest_version(self.package_name) self.finished.emit(latest_version) except Exception as e: self.error.emit(str(e)) class CompareDependenciesDialog(QDialog): def __init__(self, parent, package_name, current_version, latest_version, current_deps, latest_deps): super().__init__(parent) self.setWindowTitle(f"Dependency Comparison - {package_name}") self.setMinimumWidth(600) self.setFixedHeight(600) self.package_name = package_name self.current_version = current_version self.latest_version = latest_version self.current_deps_full = current_deps self.latest_deps_full = latest_deps self.hide_extras = True self.main_layout = QVBoxLayout(self) version_layout = QVBoxLayout() version_layout.addWidget(QLabel(f"Current Version: {self.current_version}")) version_layout.addWidget(QLabel(f"Latest Version: {self.latest_version}")) version_layout.addWidget(QLabel("Dependencies:")) self.main_layout.addLayout(version_layout) self.hide_extras_checkbox = QCheckBox("Hide Extra Dependencies") self.hide_extras_checkbox.stateChanged.connect(self.update_display) self.main_layout.addWidget(self.hide_extras_checkbox) self.deps_layout = QHBoxLayout() self.main_layout.addLayout(self.deps_layout) self.current_scroll_area = QScrollArea() self.current_scroll_area.setWidgetResizable(True) self.current_scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.current_widget = QWidget() self.current_layout = QVBoxLayout(self.current_widget) self.current_label = QLabel() self.current_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.current_label.setStyleSheet("background-color: #2E2E2E; padding: 10px; color: white;") self.current_label.setWordWrap(True) self.current_layout.addWidget(self.current_label) self.current_layout.addStretch() self.current_scroll_area.setWidget(self.current_widget) self.current_scroll_area.setMinimumHeight(200) self.deps_layout.addWidget(self.current_scroll_area) self.latest_scroll_area = QScrollArea() self.latest_scroll_area.setWidgetResizable(True) self.latest_scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.latest_widget = QWidget() self.latest_layout = QVBoxLayout(self.latest_widget) self.latest_label = QLabel() self.latest_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.latest_label.setStyleSheet("background-color: #2E2E2E; padding: 10px; color: white;") self.latest_label.setWordWrap(True) self.latest_layout.addWidget(self.latest_label) self.latest_layout.addStretch() self.latest_scroll_area.setWidget(self.latest_widget) self.latest_scroll_area.setMinimumHeight(200) self.deps_layout.addWidget(self.latest_scroll_area) self.changes_scroll = QScrollArea() self.changes_scroll.setWidgetResizable(True) self.changes_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.changes_widget = QWidget() self.changes_layout = QVBoxLayout(self.changes_widget) self.changes_title = QLabel("Changes:") self.changes_title.setStyleSheet("font-weight: bold;") self.changes_layout.addWidget(self.changes_title) self.added_label = QLabel() self.added_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.added_label.setWordWrap(True) self.added_label.setStyleSheet("padding: 5px;") self.removed_label = QLabel() self.removed_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.removed_label.setWordWrap(True) self.removed_label.setStyleSheet("padding: 5px;") self.changes_layout.addWidget(self.added_label) self.changes_layout.addWidget(self.removed_label) self.changes_layout.addStretch() 
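        # The "Changes" pane below the two dependency lists is filled in update_display(),
        # which diffs the filtered current vs. latest requirement sets.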
self.changes_scroll.setWidget(self.changes_widget) self.main_layout.addWidget(self.changes_scroll) self.update_display() def update_display(self): self.hide_extras = self.hide_extras_checkbox.isChecked() filtered_current_deps = self.filter_extras(self.current_deps_full) filtered_latest_deps = self.filter_extras(self.latest_deps_full) if filtered_current_deps: current_text = "\n".join(filtered_current_deps) else: current_text = "No dependencies found." self.current_label.setText(f"Current:\n{current_text}") if filtered_latest_deps: latest_text = "\n".join(filtered_latest_deps) else: latest_text = "No dependencies found." self.latest_label.setText(f"Latest:\n{latest_text}") added = set(filtered_latest_deps) - set(filtered_current_deps) removed = set(filtered_current_deps) - set(filtered_latest_deps) if added: added_text = "Added: " + ", ".join(sorted(added)) self.added_label.setText(added_text) self.added_label.setVisible(True) else: self.added_label.setText("") self.added_label.setVisible(False) if removed: removed_text = "Removed: " + ", ".join(sorted(removed)) self.removed_label.setText(removed_text) self.removed_label.setVisible(True) else: self.removed_label.setText("") self.removed_label.setVisible(False) if not added and not removed: self.changes_title.setText("Changes:") self.added_label.setText("No changes in dependencies.") self.removed_label.setText("") self.added_label.setVisible(True) else: self.changes_title.setText("Changes:") def filter_extras(self, deps): if not self.hide_extras: return deps filtered = [dep for dep in deps if 'extra' not in dep.lower()] return filtered class PackageChecker(QMainWindow): def __init__(self): super().__init__() self.setWindowTitle("Python Package Checker") self.setMinimumSize(800, 1200) central_widget = QWidget() self.setCentralWidget(central_widget) layout = QVBoxLayout(central_widget) search_layout = QHBoxLayout() self.search_input = QLineEdit() self.search_input.setPlaceholderText("Enter package name...") search_layout.addWidget(self.search_input) self.check_button = QPushButton("Check Package") search_layout.addWidget(self.check_button) self.check_all_button = QPushButton("Check All") search_layout.addWidget(self.check_all_button) self.check_outdated_button = QPushButton("Check Outdated") search_layout.addWidget(self.check_outdated_button) self.export_button = QPushButton("Export Requirements") search_layout.addWidget(self.export_button) self.import_button = QPushButton("Import Requirements") search_layout.addWidget(self.import_button) layout.addLayout(search_layout) self.progress_bar = QProgressBar() self.progress_bar.setRange(0, 0) self.progress_bar.setVisible(False) layout.addWidget(self.progress_bar) self.results_table = QTableWidget() self.results_table.setColumnCount(3) self.results_table.setHorizontalHeaderLabels(["Package", "Current Version", "Latest Version"]) header = self.results_table.horizontalHeader() header.setSectionResizeMode(0, QHeaderView.Stretch) header.setSectionResizeMode(1, QHeaderView.Stretch) header.setSectionResizeMode(2, QHeaderView.Stretch) self.results_table.setEditTriggers(QTableWidget.NoEditTriggers) self.results_table.setSelectionBehavior(QTableWidget.SelectRows) self.results_table.setAlternatingRowColors(True) self.results_table.setContextMenuPolicy(Qt.CustomContextMenu) self.results_table.customContextMenuRequested.connect(self.open_context_menu) layout.addWidget(self.results_table) self.check_button.clicked.connect(self.check_package) self.check_all_button.clicked.connect(self.check_all_packages) 
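        # The remaining toolbar actions are wired below; pressing Enter in the search box
        # triggers the same single-package check as the "Check Package" button.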
self.check_outdated_button.clicked.connect(self.check_outdated_packages) self.export_button.clicked.connect(self.export_requirements) self.import_button.clicked.connect(self.import_requirements) self.search_input.returnPressed.connect(self.check_package) self.current_mode = None self.outdated_packages = [] self.requires_map = {} self.required_by_map = {} self.current_thread = None def verify_installation(self, package_name, expected_version): try: installed_version = importlib.metadata.version(package_name) return installed_version == expected_version except importlib.metadata.PackageNotFoundError: return False def _is_package_available(self, pkg_name: str): package_exists = importlib.util.find_spec(pkg_name) is not None package_version = "N/A" if package_exists: try: package_version = importlib.metadata.version(pkg_name) except importlib.metadata.PackageNotFoundError: if pkg_name == "torch": try: package = importlib.import_module(pkg_name) temp_version = getattr(package, "__version__", "N/A") if "dev" in temp_version: package_version = temp_version package_exists = True else: package_exists = False except ImportError: package_exists = False else: package_exists = False return package_exists, package_version def check_package(self): package_name = self.search_input.text().strip() if not package_name: self.show_message("Input Error", "Please enter a package name.") return exists, version_installed = self._is_package_available(package_name) self.results_table.clearContents() self.results_table.setRowCount(0) if exists: self.results_table.setRowCount(1) self.results_table.setItem(0, 0, QTableWidgetItem(package_name)) self.results_table.setItem(0, 1, QTableWidgetItem(version_installed)) self.results_table.setItem(0, 2, QTableWidgetItem("N/A")) self.set_tooltip_for_package(0, package_name) else: self.show_message("Package Not Found", f"Package '{package_name}' is not installed.") def check_all_packages(self): self.current_mode = 'all' self.results_table.clearContents() self.results_table.setRowCount(0) try: installed_packages = list(importlib.metadata.distributions()) installed_packages.sort(key=lambda x: x.metadata['Name'].lower()) self.requires_map = {} self.required_by_map = {} package_names = {} for dist in installed_packages: name = dist.metadata['Name'] package_names[name.lower()] = name requires = dist.requires or [] self.requires_map[name] = requires for req in requires: req_name = req.split()[0] if req_name in self.required_by_map: self.required_by_map[req_name].append(name) else: self.required_by_map[req_name] = [name] self.results_table.setRowCount(len(installed_packages)) for row, dist in enumerate(installed_packages): name = dist.metadata['Name'] version_installed = dist.version self.results_table.setItem(row, 0, QTableWidgetItem(name)) self.results_table.setItem(row, 1, QTableWidgetItem(version_installed)) self.results_table.setItem(row, 2, QTableWidgetItem("N/A")) self.set_tooltip_for_package(row, name) self.show_message("Check All Complete", f"Total packages installed: {len(installed_packages)}") except Exception as e: self.show_message("Error", f"Error while checking packages: {str(e)}") self.results_table.scrollToTop() def check_outdated_packages(self): self.current_mode = 'outdated' self.results_table.clearContents() self.results_table.setRowCount(0) self.results_table.setSortingEnabled(False) self.progress_bar.setVisible(True) self.check_outdated_button.setEnabled(False) self.thread = QThread() self.worker = OutdatedPackagesWorker() self.worker.moveToThread(self.thread) 
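        # Standard Qt worker-object pattern: the worker runs on the QThread's event loop once the
        # thread starts, and both the finished and error signals quit the thread and schedule
        # deleteLater so neither object outlives the check.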
self.thread.started.connect(self.worker.run) self.worker.finished.connect(self.on_outdated_packages_checked) self.worker.error.connect(self.on_worker_error) self.worker.finished.connect(self.thread.quit) self.worker.finished.connect(self.worker.deleteLater) self.worker.error.connect(self.thread.quit) self.worker.error.connect(self.worker.deleteLater) self.thread.finished.connect(self.thread.deleteLater) self.thread.start() def on_outdated_packages_checked(self, outdated_packages): self.progress_bar.setVisible(False) self.check_outdated_button.setEnabled(True) if not outdated_packages: self.show_message("Up to Date", "All packages are up to date!") return self.outdated_packages = outdated_packages self.requires_map = {} self.required_by_map = {} for pkg in outdated_packages: name = pkg[0] try: dist = importlib.metadata.distribution(name) requires = dist.requires or [] self.requires_map[name] = requires for req in requires: req_name = req.split()[0] if req_name in self.required_by_map: self.required_by_map[req_name].append(name) else: self.required_by_map[req_name] = [name] except importlib.metadata.PackageNotFoundError: pass self.results_table.setRowCount(len(outdated_packages)) for row, (name, current, latest) in enumerate(outdated_packages): self.results_table.setItem(row, 0, QTableWidgetItem(name)) self.results_table.setItem(row, 1, QTableWidgetItem(current)) self.results_table.setItem(row, 2, QTableWidgetItem(latest)) self.set_tooltip_for_package(row, name) self.show_message("Outdated Packages", f"Total outdated packages: {len(outdated_packages)}") self.results_table.setSortingEnabled(True) self.results_table.sortItems(0, Qt.AscendingOrder) self.results_table.scrollToTop() def compare_dependencies(self, package_name): try: current_dist = importlib.metadata.distribution(package_name) current_version = current_dist.version current_requires = current_dist.requires or [] current_deps = sorted([req for req in current_requires]) url = f"https://pypi.org/pypi/{package_name}/json" with urllib.request.urlopen(url, timeout=10) as response: if response.status != 200: raise Exception(f"PyPI returned status code {response.status}") data = json.load(response) latest_version = data['info']['version'] latest_requires = data['info'].get('requires_dist', []) or [] latest_deps = sorted([req for req in latest_requires if req]) dialog = CompareDependenciesDialog( self, package_name, current_version, latest_version, current_deps, latest_deps ) dialog.exec() except Exception as e: self.show_message("Error", f"Error comparing dependencies: {str(e)}") def on_worker_error(self, error_message): self.progress_bar.setVisible(False) self.check_outdated_button.setEnabled(True) self.show_message("Error", f"Error while checking outdated packages: {error_message}") def open_context_menu(self, position: QPoint): selected_row = self.results_table.currentRow() if selected_row < 0: return package_item = self.results_table.item(selected_row, 0) if not package_item: return package_name = package_item.text() menu = QMenu(self) upgrade_action = menu.addAction("Upgrade/Downgrade") upgrade_action.triggered.connect(lambda: self.fetch_versions(package_name, position)) info_action = menu.addAction("View Package Info") info_action.triggered.connect(lambda: self.show_package_info(package_name)) deps_action = menu.addAction("Show Reverse Dependencies") deps_action.triggered.connect(lambda: self.show_reverse_dependencies(package_name)) compare_deps_action = menu.addAction("Compare Dependencies") 
compare_deps_action.triggered.connect(lambda: self.compare_dependencies(package_name)) menu.exec(self.results_table.viewport().mapToGlobal(position)) def show_reverse_dependencies(self, package_name): try: command = ["pipdeptree", "--reverse", "--packages", package_name, "--depth", "1"] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) stdout, stderr = process.communicate() print("\n=== Reverse Dependencies for", package_name, "===") print(stdout) if stderr: print("Errors/Warnings:") print(stderr) print("=====================================\n") except Exception as e: print(f"Error running pipdeptree: {str(e)}") def fetch_versions(self, package_name, position): self.package_name_to_upgrade = package_name self.position_for_menu = position self.thread_versions = QThread() self.worker_versions = VersionsWorker(package_name) self.worker_versions.moveToThread(self.thread_versions) self.thread_versions.started.connect(self.worker_versions.run) self.worker_versions.finished.connect(self.on_versions_fetched) self.worker_versions.error.connect(self.on_versions_error) self.worker_versions.finished.connect(self.thread_versions.quit) self.worker_versions.finished.connect(self.worker_versions.deleteLater) self.worker_versions.error.connect(self.thread_versions.quit) self.worker_versions.error.connect(self.worker_versions.deleteLater) self.thread_versions.finished.connect(self.thread_versions.deleteLater) self.thread_versions.start() def on_versions_fetched(self, versions): self.show_versions_menu(self.package_name_to_upgrade, versions, self.position_for_menu) def show_versions_menu(self, package_name, versions, position): if not versions: self.show_message("No Versions Found", f"No available versions found for '{package_name}'.") return menu = QMenu(self) for ver, release_date in reversed(versions): action_text = f"{ver} ({release_date})" action = menu.addAction(action_text) action.triggered.connect(lambda checked, v=ver: self.upgrade_downgrade_package(package_name, v)) menu.exec(self.results_table.viewport().mapToGlobal(position)) def upgrade_downgrade_package(self, package_name, selected_version): reply = QMessageBox.question( self, "Confirm Upgrade/Downgrade", f"Are you sure you want to install version {selected_version} of '{package_name}'?\n\nThis will not install dependencies.", QMessageBox.Yes | QMessageBox.No, QMessageBox.No ) if reply == QMessageBox.No: return self.progress_bar.setVisible(True) self.results_table.setEnabled(False) self.package_name_being_updated = package_name self.selected_version = selected_version self.thread_pip = QThread() self.worker_pip = PipWorker(package_name, selected_version) self.worker_pip.moveToThread(self.thread_pip) self.thread_pip.started.connect(self.worker_pip.run) self.worker_pip.finished.connect(self.on_pip_finished) self.worker_pip.error.connect(self.on_pip_error) self.worker_pip.finished.connect(self.thread_pip.quit) self.worker_pip.error.connect(self.thread_pip.quit) self.worker_pip.finished.connect(self.worker_pip.deleteLater) self.worker_pip.error.connect(self.worker_pip.deleteLater) self.thread_pip.finished.connect(self.thread_pip.deleteLater) self.thread_pip.start() def on_pip_finished(self, output): self.progress_bar.setVisible(False) self.results_table.setEnabled(True) if not self.verify_installation(self.package_name_being_updated, self.selected_version): self.show_message("Error", f"Package '{self.package_name_being_updated}' installation verification failed.") return self.show_message("Success", 
f"Package '{self.package_name_being_updated}' upgraded/downgraded successfully.\n\nOutput:\n{output}") if self.current_mode == 'outdated': self.thread_latest = QThread() self.worker_latest = LatestVersionWorker(self.package_name_being_updated) self.worker_latest.moveToThread(self.thread_latest) self.thread_latest.started.connect(self.worker_latest.run) self.worker_latest.finished.connect(self.on_latest_version_fetched) self.worker_latest.error.connect(self.on_latest_version_error) self.worker_latest.finished.connect(self.thread_latest.quit) self.worker_latest.finished.connect(self.worker_latest.deleteLater) self.worker_latest.error.connect(self.thread_latest.quit) self.worker_latest.error.connect(self.worker_latest.deleteLater) self.thread_latest.finished.connect(self.thread_latest.deleteLater) self.thread_latest.start() else: self.check_all_packages() def on_pip_error(self, error_message): self.progress_bar.setVisible(False) self.results_table.setEnabled(True) self.show_message("Error", f"Error while upgrading/downgrading '{self.package_name_being_updated}':\n{error_message}") def on_latest_version_fetched(self, latest_version): self.update_outdated_after_upgrade(self.package_name_being_updated, latest_version) def on_latest_version_error(self, error_message): self.show_message("Error", f"Error fetching latest version for '{self.package_name_being_updated}': {error_message}") def update_outdated_after_upgrade(self, package_name, latest_version): try: installed_version = importlib.metadata.version(package_name) if version.parse(installed_version) >= version.parse(latest_version): self.outdated_packages = [pkg for pkg in self.outdated_packages if pkg[0].lower() != package_name.lower()] self.remove_package_from_table(package_name) else: for i, pkg in enumerate(self.outdated_packages): if pkg[0].lower() == package_name.lower(): self.outdated_packages[i] = (pkg[0], installed_version, latest_version) row = self.find_row(package_name) if row is not None: self.results_table.setItem(row, 1, QTableWidgetItem(installed_version)) self.results_table.setItem(row, 2, QTableWidgetItem(latest_version)) self.set_tooltip_for_package(row, package_name) self.show_message("Update Complete", f"Package '{package_name}' has been updated in the outdated list.") except importlib.metadata.PackageNotFoundError: self.show_message("Error", f"Package '{package_name}' not found after installation.") def find_row(self, package_name): for row in range(self.results_table.rowCount()): item = self.results_table.item(row, 0) if item and item.text().lower() == package_name.lower(): return row return None def remove_package_from_table(self, package_name): row = self.find_row(package_name) if row is not None: self.results_table.removeRow(row) def on_versions_error(self, error_message): self.show_message("Error", f"Error while fetching versions: {error_message}") def show_package_info(self, package_name): url = f"https://pypi.org/pypi/{package_name}/json" try: with urllib.request.urlopen(url, timeout=10) as response: if response.status != 200: raise Exception(f"PyPI returned status code {response.status}") data = json.load(response) info = data['info'] description = info.get('summary', 'No description available.') author = info.get('author', 'N/A') homepage = info.get('home_page', 'N/A') package_url = info.get('package_url', f"https://pypi.org/project/{package_name}/") project_urls = info.get('project_urls', {}) documentation = project_urls.get('Documentation', 'N/A') info_dialog = QDialog(self) 
info_dialog.setWindowTitle(f"Package Info: {package_name}") layout = QVBoxLayout(info_dialog) layout.addWidget(QLabel(f"Package: {package_name}")) layout.addWidget(QLabel(f"Author: {author}")) homepage_label = QLabel(f"Homepage: {homepage}") homepage_label.setTextFormat(Qt.RichText) homepage_label.setTextInteractionFlags(Qt.TextBrowserInteraction) homepage_label.setOpenExternalLinks(True) layout.addWidget(homepage_label) pypi_label = QLabel(f"PyPI Page: {package_url}") pypi_label.setTextFormat(Qt.RichText) pypi_label.setTextInteractionFlags(Qt.TextBrowserInteraction) pypi_label.setOpenExternalLinks(True) layout.addWidget(pypi_label) if documentation != 'N/A': doc_label = QLabel(f"Documentation: {documentation}") doc_label.setTextFormat(Qt.RichText) doc_label.setTextInteractionFlags(Qt.TextBrowserInteraction) doc_label.setOpenExternalLinks(True) layout.addWidget(doc_label) description_label = QLabel(f"Description: {description}") description_label.setWordWrap(True) layout.addWidget(description_label) info_dialog.setLayout(layout) info_dialog.exec() except urllib.error.URLError as e: self.show_message("Error", f"Network error: {str(e)}") except TimeoutError: self.show_message("Error", "Connection timed out") except json.JSONDecodeError: self.show_message("Error", "Invalid response from PyPI") except Exception as e: self.show_message("Error", f"Error fetching package info: {str(e)}") def export_requirements(self): filename, _ = QFileDialog.getSaveFileName( self, "Save Requirements", "", "Text Files (*.txt)" ) if filename: try: with open(filename, 'w') as f: for row in range(self.results_table.rowCount()): package_item = self.results_table.item(row, 0) version_item = self.results_table.item(row, 1) if package_item is not None and version_item is not None: package = package_item.text() version_installed = version_item.text() f.write(f"{package}=={version_installed}\n") self.show_message("Export Successful", f"Requirements exported to {filename}.") except Exception as e: self.show_message("Error", f"Error exporting requirements: {str(e)}") def import_requirements(self): filename, _ = QFileDialog.getOpenFileName( self, "Import Requirements", "", "Text Files (*.txt)") if filename: try: with open(filename, 'r') as f: packages = [line.strip().split('==') for line in f if line.strip()] self.progress_bar.setVisible(True) self.results_table.setEnabled(False) self.thread_import = QThread() self.worker_import = ImportWorker(packages) self.worker_import.moveToThread(self.thread_import) self.thread_import.started.connect(self.worker_import.run) self.worker_import.finished.connect(self.on_import_finished) self.worker_import.error.connect(self.on_import_error) self.worker_import.finished.connect(self.thread_import.quit) self.worker_import.error.connect(self.thread_import.quit) self.worker_import.finished.connect(self.worker_import.deleteLater) self.worker_import.error.connect(self.worker_import.deleteLater) self.thread_import.finished.connect(self.thread_import.deleteLater) self.thread_import.start() except Exception as e: self.show_message("Error", f"Error importing requirements: {str(e)}") def on_import_finished(self, output): self.progress_bar.setVisible(False) self.results_table.setEnabled(True) self.show_message("Import Successful", f"Requirements imported successfully.\n\nOutput:\n{output}") self.check_all_packages() def on_import_error(self, error_message): self.progress_bar.setVisible(False) self.results_table.setEnabled(True) self.show_message("Error", f"Error importing 
requirements:\n{error_message}") def show_message(self, title, message): msg_box = QMessageBox() msg_box.setWindowTitle(title) msg_box.setText(message) icon = QMessageBox.Information if title != "Error" else QMessageBox.Critical msg_box.setIcon(icon) msg_box.exec() def set_tooltip_for_package(self, row, package_name): requires = self.requires_map.get(package_name, []) required_by = self.required_by_map.get(package_name, []) requires_text = ", ".join([req.split()[0] for req in requires]) if requires else "None" required_by_text = ", ".join(required_by) if required_by else "None" tooltip_text = f"Requires: {requires_text}\nRequired by: {required_by_text}" package_item = self.results_table.item(row, 0) package_item.setToolTip(tooltip_text) def main(): app = QApplication(sys.argv) app.setStyle('Fusion') window = PackageChecker() window.show() sys.exit(app.exec()) if __name__ == '__main__': main() ================================================ FILE: tools/chunk_userguide.py ================================================ import sys import os import shutil from PySide6.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QWidget, QPushButton, QLabel, QTextEdit, QFileDialog, QMessageBox, QFrame) from PySide6.QtCore import Qt from PySide6.QtGui import QFont, QClipboard class MarkdownChunker(QMainWindow): def __init__(self): super().__init__() self.setWindowTitle("Markdown File Chunker") self.setGeometry(100, 100, 900, 700) self.longest_chunk = "" self.shortest_chunk = "" self.longest_length = 0 self.shortest_length = float('inf') self.selected_file = None self.setup_ui() def setup_ui(self): central_widget = QWidget() self.setCentralWidget(central_widget) layout = QVBoxLayout(central_widget) layout.setContentsMargins(20, 20, 20, 20) layout.setSpacing(15) title_label = QLabel("Markdown File Chunker") title_font = QFont() title_font.setPointSize(16) title_font.setBold(True) title_label.setFont(title_font) title_label.setAlignment(Qt.AlignCenter) layout.addWidget(title_label) file_frame = QFrame() file_layout = QHBoxLayout(file_frame) self.file_label = QLabel("No file selected") self.file_label.setWordWrap(True) file_layout.addWidget(self.file_label, 1) select_button = QPushButton("Select Markdown File") select_button.setStyleSheet("QPushButton { background-color: #4CAF50; color: white; font-weight: bold; padding: 8px; }") select_button.clicked.connect(self.select_file) file_layout.addWidget(select_button) layout.addWidget(file_frame) self.process_button = QPushButton("Process File") self.process_button.setStyleSheet("QPushButton { background-color: #2196F3; color: white; font-weight: bold; padding: 10px; font-size: 12px; }") self.process_button.setEnabled(False) self.process_button.clicked.connect(self.process_file) layout.addWidget(self.process_button) self.stats_label = QLabel("") stats_font = QFont() stats_font.setBold(True) self.stats_label.setFont(stats_font) layout.addWidget(self.stats_label) longest_label = QLabel("Longest Chunk:") longest_font = QFont() longest_font.setBold(True) longest_label.setFont(longest_font) layout.addWidget(longest_label) self.longest_text = QTextEdit() self.longest_text.setMaximumHeight(150) self.longest_text.setReadOnly(True) layout.addWidget(self.longest_text) shortest_label = QLabel("Shortest Chunk:") shortest_font = QFont() shortest_font.setBold(True) shortest_label.setFont(shortest_font) layout.addWidget(shortest_label) self.shortest_text = QTextEdit() self.shortest_text.setMaximumHeight(150) self.shortest_text.setReadOnly(True) 
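Every background task in check_packages.py (fetching versions, running pip, importing requirements) repeats the same QThread/worker wiring. The sketch below isolates that pattern as a minimal, self-contained example; ExampleWorker, its signals, and start_background_task are illustrative names, not classes or functions from this repository.

# Minimal sketch of the QThread + worker pattern used throughout check_packages.py.
# ExampleWorker is a hypothetical stand-in for VersionsWorker / PipWorker / ImportWorker.
from PySide6.QtCore import QObject, QThread, Signal

class ExampleWorker(QObject):
    finished = Signal(str)
    error = Signal(str)

    def run(self):
        try:
            self.finished.emit("done")          # the long-running work would go here
        except Exception as e:
            self.error.emit(str(e))

def start_background_task(owner):
    # Keep references on the owner so the thread and worker are not garbage collected mid-run.
    owner.thread = QThread()
    owner.worker = ExampleWorker()
    owner.worker.moveToThread(owner.thread)
    owner.thread.started.connect(owner.worker.run)
    # Both outcomes stop the thread and schedule cleanup, mirroring the wiring above.
    owner.worker.finished.connect(owner.thread.quit)
    owner.worker.error.connect(owner.thread.quit)
    owner.worker.finished.connect(owner.worker.deleteLater)
    owner.worker.error.connect(owner.worker.deleteLater)
    owner.thread.finished.connect(owner.thread.deleteLater)
    owner.thread.start()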
================================================
FILE: tools/chunk_userguide.py
================================================
import sys
import os
import shutil
from PySide6.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QWidget,
                               QPushButton, QLabel, QTextEdit, QFileDialog, QMessageBox, QFrame)
from PySide6.QtCore import Qt
from PySide6.QtGui import QFont, QClipboard


class MarkdownChunker(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Markdown File Chunker")
        self.setGeometry(100, 100, 900, 700)
        self.longest_chunk = ""
        self.shortest_chunk = ""
        self.longest_length = 0
        self.shortest_length = float('inf')
        self.selected_file = None
        self.setup_ui()

    def setup_ui(self):
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)
        layout.setContentsMargins(20, 20, 20, 20)
        layout.setSpacing(15)
        title_label = QLabel("Markdown File Chunker")
        title_font = QFont()
        title_font.setPointSize(16)
        title_font.setBold(True)
        title_label.setFont(title_font)
        title_label.setAlignment(Qt.AlignCenter)
        layout.addWidget(title_label)
        file_frame = QFrame()
        file_layout = QHBoxLayout(file_frame)
        self.file_label = QLabel("No file selected")
        self.file_label.setWordWrap(True)
        file_layout.addWidget(self.file_label, 1)
        select_button = QPushButton("Select Markdown File")
        select_button.setStyleSheet("QPushButton { background-color: #4CAF50; color: white; font-weight: bold; padding: 8px; }")
        select_button.clicked.connect(self.select_file)
        file_layout.addWidget(select_button)
        layout.addWidget(file_frame)
        self.process_button = QPushButton("Process File")
        self.process_button.setStyleSheet("QPushButton { background-color: #2196F3; color: white; font-weight: bold; padding: 10px; font-size: 12px; }")
        self.process_button.setEnabled(False)
        self.process_button.clicked.connect(self.process_file)
        layout.addWidget(self.process_button)
        self.stats_label = QLabel("")
        stats_font = QFont()
        stats_font.setBold(True)
        self.stats_label.setFont(stats_font)
        layout.addWidget(self.stats_label)
        longest_label = QLabel("Longest Chunk:")
        longest_font = QFont()
        longest_font.setBold(True)
        longest_label.setFont(longest_font)
        layout.addWidget(longest_label)
        self.longest_text = QTextEdit()
        self.longest_text.setMaximumHeight(150)
        self.longest_text.setReadOnly(True)
        layout.addWidget(self.longest_text)
        shortest_label = QLabel("Shortest Chunk:")
        shortest_font = QFont()
        shortest_font.setBold(True)
        shortest_label.setFont(shortest_font)
        layout.addWidget(shortest_label)
        self.shortest_text = QTextEdit()
        self.shortest_text.setMaximumHeight(150)
        self.shortest_text.setReadOnly(True)
        layout.addWidget(self.shortest_text)

    def select_file(self):
        file_path, _ = QFileDialog.getOpenFileName(
            self, "Select Markdown File", "",
            "Markdown files (*.md);;Text files (*.txt);;All files (*.*)"
        )
        if file_path:
            self.selected_file = file_path
            self.file_label.setText(f"Selected: {os.path.basename(file_path)}")
            self.process_button.setEnabled(True)

    def create_output_directory(self):
        current_dir = os.getcwd()
        assets_dir = os.path.join(current_dir, "Assets")
        chunks_dir = os.path.join(assets_dir, "User_Guide_Chunks")
        if not os.path.exists(assets_dir):
            os.makedirs(assets_dir)
        if os.path.exists(chunks_dir):
            for filename in os.listdir(chunks_dir):
                file_path = os.path.join(chunks_dir, filename)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except Exception as e:
                    print(f"Error deleting {file_path}: {e}")
        else:
            os.makedirs(chunks_dir)
        return chunks_dir

    def extract_chunks(self, content):
        chunks = []
        lines = content.split('\n')
        current_chunk = []
        for line in lines:
            if line.strip().startswith('###'):
                if current_chunk:
                    chunk_text = '\n'.join(current_chunk).strip()
                    if chunk_text:
                        chunks.append(chunk_text)
                current_chunk = [line]
            elif current_chunk:
                if line.strip() or len(current_chunk) == 1:
                    current_chunk.append(line)
                else:
                    continue
        if current_chunk:
            chunk_text = '\n'.join(current_chunk).strip()
            if chunk_text:
                chunks.append(chunk_text)
        return chunks

    def save_chunks(self, chunks, output_dir):
        for i, chunk in enumerate(chunks, 1):
            filename = f"chunk_{i:03d}.txt"
            filepath = os.path.join(output_dir, filename)
            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(chunk)
        return len(chunks)

    def analyze_chunks(self, chunks):
        if not chunks:
            return
        self.longest_chunk = chunks[0]
        self.shortest_chunk = chunks[0]
        self.longest_length = len(chunks[0])
        self.shortest_length = len(chunks[0])
        for chunk in chunks:
            chunk_length = len(chunk)
            if chunk_length > self.longest_length:
                self.longest_length = chunk_length
                self.longest_chunk = chunk
            if chunk_length < self.shortest_length:
                self.shortest_length = chunk_length
                self.shortest_chunk = chunk

    def create_master_questions(self, chunks):
        master_questions = []
        for chunk in chunks:
            lines = chunk.split('\n')
            first_line = lines[0].strip()
            if first_line.startswith('###'):
                question = first_line[3:].strip()
                master_questions.append(question)
        dictionary_str = "master_questions = [\n"
        for question in master_questions:
            dictionary_str += f'    "{question}",\n'
        dictionary_str += "]"
        clipboard = QApplication.clipboard()
        clipboard.setText(dictionary_str)

    def update_display(self, num_chunks):
        stats_text = f"Processing complete! Created {num_chunks} chunks.\n"
        stats_text += f"Longest chunk: {self.longest_length} characters\n"
        stats_text += f"Shortest chunk: {self.shortest_length} characters"
        self.stats_label.setText(stats_text)
        self.longest_text.setPlainText(self.longest_chunk)
        self.shortest_text.setPlainText(self.shortest_chunk)

    def process_file(self):
        if not self.selected_file:
            QMessageBox.critical(self, "Error", "Please select a file first.")
            return
        try:
            with open(self.selected_file, 'r', encoding='utf-8') as f:
                content = f.read()
            output_dir = self.create_output_directory()
            chunks = self.extract_chunks(content)
            if not chunks:
                QMessageBox.warning(self, "Warning", "No chunks found in the file.")
                return
            self.analyze_chunks(chunks)
            num_chunks = self.save_chunks(chunks, output_dir)
            self.create_master_questions(chunks)
            self.update_display(num_chunks)
            QMessageBox.information(self, "Success",
                                    f"Successfully processed {num_chunks} chunks!\n"
                                    f"Files saved to: {output_dir}\n"
                                    f"Dictionary copied to clipboard!")
        except Exception as e:
            QMessageBox.critical(self, "Error", f"An error occurred: {str(e)}")


def main():
    app = QApplication(sys.argv)
    window = MarkdownChunker()
    window.show()
    sys.exit(app.exec())


if __name__ == "__main__":
    main()
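For orientation, extract_chunks treats every line that starts with '###' as the beginning of a new chunk and skips blank lines inside a chunk. A minimal sketch of that behavior, assuming the module is importable from the project root as tools.chunk_userguide; the sample text is made up for illustration.

# Illustrative check of MarkdownChunker.extract_chunks (sample text is hypothetical).
from PySide6.QtWidgets import QApplication
from tools.chunk_userguide import MarkdownChunker

app = QApplication([])                      # QMainWindow subclasses need a QApplication first
chunker = MarkdownChunker()
sample = (
    "### How do I create a database?\n"
    "Click the Create Database tab and add your documents.\n"
    "\n"
    "### How do I query a database?\n"
    "Open the Query tab and type a question.\n"
)
chunks = chunker.extract_chunks(sample)
print(len(chunks))                          # 2 -- one chunk per '###' heading
print(chunks[0].splitlines()[0])            # ### How do I create a database?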
"SUCCESS": "\033[92m", "SKIP": "\033[93m", "ERROR": "\033[91m", "INFO": "\033[94m" } reset_color = "\033[0m" print(f"{colors.get(status, reset_color)}[{status}] {message}{reset_color}") @staticmethod def print_ascii_table(title, rows): table_width = max(len(title), max(len(row) for row in rows)) + 4 border = f"+{'-' * (table_width - 2)}+" print(border) print(f"| {title.center(table_width - 4)} |") print(border) for row in rows: print(f"| {row.ljust(table_width - 4)} |") print(border) def replace_sentence_transformer_file(): updater = DependencyUpdater() updater.update_file_in_dependency("Assets", "SentenceTransformer.py", ["sentence_transformers"]) def replace_chattts_file(): updater = DependencyUpdater() updater.update_file_in_dependency("Assets", "core.py", ["ChatTTS"]) def add_cuda_files(): updater = DependencyUpdater() updater.print_ascii_table("CUDA FILES UPDATE", ["Copying ptxas.exe", "Extracting cudart_lib.zip"]) source_path = updater.find_dependency_path(["nvidia", "cuda_nvcc", "bin"]) if source_path is None: updater.print_status("ERROR", "Source path for ptxas.exe not found.") return source_file = source_path / "ptxas.exe" if not source_file.exists(): updater.print_status("ERROR", "ptxas.exe not found in the source directory.") return target_path = updater.find_dependency_path(["nvidia", "cuda_runtime", "bin"]) if target_path is None: updater.print_status("ERROR", "Target path (cuda_runtime) not found.") return target_file = target_path / "ptxas.exe" updater.copy_and_overwrite_if_necessary(source_file, target_file) zip_path = PROJECT_ROOT / "Assets" / "cudart_lib.zip" if not zip_path.exists(): updater.print_status("ERROR", "cudart_lib.zip not found.") return cuda_lib_runtime_path = target_path.parent if target_path is None or not target_path.exists(): updater.print_status("ERROR", "Parent directory of cuda_runtime/bin not found.") return try: with zipfile.ZipFile(zip_path, 'r') as zip_ref: zip_ref.extractall(cuda_lib_runtime_path) updater.print_status("SUCCESS", f"Extracted cudart_lib.zip to {cuda_lib_runtime_path}") except zipfile.BadZipFile: updater.print_status("ERROR", "cudart_lib.zip is corrupted or not a zip file.") except PermissionError: updater.print_status("ERROR", "Permission denied when extracting cudart_lib.zip.") except Exception as e: updater.print_status("ERROR", f"Unexpected error during extraction: {str(e)}") def setup_vector_db(): updater = DependencyUpdater() zip_path = PROJECT_ROOT / "Assets" / "user_manual_db.zip" if not zip_path.exists(): updater.print_status("ERROR", "user_manual_db.zip not found in Assets folder.") return vector_db_path = PROJECT_ROOT / "Vector_DB" vector_db_backup_path = PROJECT_ROOT / "Vector_DB_Backup" try: vector_db_path.mkdir(exist_ok=True) vector_db_backup_path.mkdir(exist_ok=True) except PermissionError: updater.print_status("ERROR", "Insufficient permissions to create directories.") return except Exception as e: updater.print_status("ERROR", f"Error creating directories: {str(e)}") return user_manual_paths = [ vector_db_path / "user_manual", vector_db_backup_path / "user_manual" ] for path in user_manual_paths: if path.exists(): try: shutil.rmtree(path, ignore_errors=False) updater.print_status("INFO", f"Removed existing user_manual folder from {path.parent}") except PermissionError: updater.print_status("ERROR", f"Permission denied when trying to remove {path}") return except Exception as e: updater.print_status("ERROR", f"Error removing {path}: {str(e)}") return try: with zipfile.ZipFile(zip_path, 'r') as zip_ref: if 
zip_ref.testzip() is not None: updater.print_status("ERROR", "Zip file is corrupted.") return zip_ref.extractall(vector_db_path) zip_ref.extractall(vector_db_backup_path) updater.print_status("SUCCESS", f"Successfully extracted user_manual_db.zip to {vector_db_path} and {vector_db_backup_path}") except PermissionError: updater.print_status("ERROR", "Permission denied when extracting zip file.") except Exception as e: updater.print_status("ERROR", f"Error extracting zip file: {str(e)}") def check_embedding_model_dimensions(): import yaml updater = DependencyUpdater() config_path = PROJECT_ROOT / "config.yaml" if not config_path.exists(): updater.print_status("ERROR", "config.yaml not found in current directory.") return try: with open(config_path, 'r') as file: config = yaml.safe_load(file) if config is None: config = {} if 'EMBEDDING_MODEL_DIMENSIONS' not in config: config['EMBEDDING_MODEL_DIMENSIONS'] = None with open(config_path, 'w') as file: yaml.dump(config, file, default_flow_style=False) updater.print_status("SUCCESS", "Added EMBEDDING_MODEL_DIMENSIONS: null to config.yaml") else: updater.print_status("SKIP", "EMBEDDING_MODEL_DIMENSIONS already exists in config.yaml") except yaml.YAMLError as e: updater.print_status("ERROR", f"Error parsing config.yaml: {str(e)}") except Exception as e: updater.print_status("ERROR", f"Unexpected error while processing config.yaml: {str(e)}") if __name__ == "__main__": DependencyUpdater.print_ascii_table("DEPENDENCY UPDATER", [ "Replace Sentence Transformer File", "Replace ChatTTS File", "Add CUDA Files", "Setup Vector DB", "Check Config EMBEDDING_MODEL_DIMENSIONS" ]) replace_sentence_transformer_file() replace_chattts_file() add_cuda_files() setup_vector_db() check_embedding_model_dimensions()
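The __main__ block above runs every maintenance step in sequence. When only one patched file needs refreshing, a single step can also be invoked on its own; a minimal sketch, assuming the interpreter is started from the project root so that core.constants.PROJECT_ROOT and the Assets folder resolve.

# Run only selected steps of the dependency updater (assumes execution from the
# project root so that core.constants and the Assets folder are resolvable).
from tools.replace_sourcecode import (
    replace_sentence_transformer_file,
    check_embedding_model_dimensions,
)

replace_sentence_transformer_file()   # copies Assets/SentenceTransformer.py into site-packages/sentence_transformers
check_embedding_model_dimensions()    # ensures config.yaml contains an EMBEDDING_MODEL_DIMENSIONS key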