[
  {
    "path": ".codecov.yml",
    "content": "ignore:\n  - \"**/main.go\"\n  - \"./internal/readerwriter/*\"\ncoverage:\n  status:\n    project:\n      default:\n        target: 50%\n        threshold: null\n    patch: false\n    changes: false\n  range: 70..95    \n  round: up      \n  precision: 1     \n\n\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: gomod\n    directory: \"/\"\n    schedule:\n      interval: daily\n  - package-ecosystem: github-actions\n    directory: \"/\"\n    schedule:\n      interval: daily"
  },
  {
    "path": ".github/workflows/ci.yaml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n      - master\n  pull_request:\n\njobs:\n\n  test-coverage:\n    name: Test Coverage\n    runs-on: ubuntu-latest\n    env:\n      VERBOSE: 1\n      GOFLAGS: -mod=readonly\n\n    steps:\n    - uses: vahid-sohrabloo/clickhouse-action@v1\n      with:\n        version: '22.9'\n\n        \n    - name: Set up Go\n      uses: actions/setup-go@v3\n      with:\n        go-version: 1.19\n\n    - name: Checkout code\n      uses: actions/checkout@v3.3.0\n\n    - name: Test\n      run: make test-cover\n    - name: Send coverage\n      uses: codecov/codecov-action@v3\n      with:\n        file: coverage.out\n  test:\n    name: Test\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        golang-version: [1.18.5, 1.19]\n        clickhouse-version: ['22.11', '22.10', '22.9', '22.8', '22.7', '22.6', '22.5', '22.4']\n    env:\n      VERBOSE: 1\n      GOFLAGS: -mod=readonly\n\n    steps:\n    - uses: vahid-sohrabloo/clickhouse-action@v1\n      with:\n        version: '${{ matrix.clickhouse-version }}'\n        \n    - name: Set up Go\n      uses: actions/setup-go@v3\n      with:\n        go-version: '${{ matrix.golang-version }}'\n\n    - name: Checkout code\n      uses: actions/checkout@v3.3.0\n\n    - name: Test\n      run: make test"
  },
  {
    "path": ".github/workflows/lint.yaml",
    "content": "name: golangci-lint\non:\n  push:\n    branches:\n      - master\n  pull_request:\n\njobs:\n  lint:\n    name: lint\n    runs-on: ubuntu-latest\n    steps:        \n    - name: Set up Go\n      uses: actions/setup-go@v3\n      with:\n        go-version: 1.19\n    - name: Checkout code\n      uses: actions/checkout@v3.3.0\n    - name: golangci-lint\n      uses: golangci/golangci-lint-action@v3\n      with:\n        version: v1.50\n        args: --timeout=10m "
  },
  {
    "path": ".gitignore",
    "content": ".envrc\nbin/\nvendor/\nbuild/\ncoverage.out\n"
  },
  {
    "path": ".golangci.yml",
    "content": "linters-settings:\n  dupl:\n    threshold: 100\n  funlen:\n    lines: 130\n    statements: 60\n  goconst:\n    min-len: 5\n    min-occurrences: 3\n  gocritic:\n    enabled-tags:\n      - diagnostic\n      - experimental\n      - opinionated\n      - performance\n      - style\n    disabled-checks:\n      - dupImport # https://github.com/go-critic/go-critic/issues/845\n      - ifElseChain\n      - octalLiteral\n      - whyNoLint\n      - wrapperFunc\n  gocyclo:\n    min-complexity: 20\n  goimports:\n    local-prefixes: github.com/golangci/golangci-lint\n  gomnd:\n    settings:\n      mnd:\n        # don't include the \"operation\" and \"assign\"\n        checks: argument,case,condition,return\n        ignored-numbers: 1000000\n\n  govet:\n    check-shadowing: false\n  lll:\n    line-length: 140\n  maligned:\n    suggest-new: true\n  misspell:\n    locale: US\n  nolintlint:\n    allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space)\n    allow-unused: false # report any unused nolint directives\n    require-explanation: false # don't require an explanation for nolint directives\n    require-specific: false # don't require nolint directives to be specific about which linter is being skipped\n\nlinters:\n  disable-all: true\n  enable:\n    # - bodyclose\n    - depguard\n    - dogsled\n    - dupl\n    - errcheck\n    - exportloopref\n    - funlen\n    - gochecknoinits\n    - goconst\n    - gocritic\n    - gocyclo\n    - gofmt\n    - goimports\n    - goprintffuncname\n    - gosec\n    - gosimple\n    - govet\n    - ineffassign\n    - lll\n    - misspell\n    - nakedret\n    # - noctx\n    - nolintlint\n    - staticcheck\n    - stylecheck\n    - typecheck\n    - unconvert\n    # - unparam\n    - unused\n    - whitespace\n\n  # don't enable:\n  # - asciicheck\n  # - scopelint\n  # - gochecknoglobals\n  # - gocognit\n  # - godot\n  # - godox\n  # - goerr113\n  # - interfacer\n  # - maligned\n  # - nestif\n  
# - prealloc\n  # - testpackage\n  # - revive\n  # - wsl\n  # - gomnd\n\n\nissues:\n  # Excluding configuration per-path, per-linter, per-text and per-source\n  exclude-rules:\n    - path: _test\\.go\n      linters:\n        - goconst\n        - dupl\n        - funlen\n        - gocyclo\n        - gosec\n        - goerr113\n        - maligned\n        - errcheck\n    - path: cmd/chgogen\n      linters:\n        - goconst\n        - funlen\n        - gocyclo\n    - path: _unsafe\\.go\n      linters:\n        - dupl\n    - path: main\\.go\n      linters:\n        - goconst\n        - gocritic\n        - dupl # todo fix later\n\nrun:\n  skip-dirs:\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2020 vahid-sohrabloo\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Makefile",
    "content": "# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html\n\nOS = $(shell uname | tr A-Z a-z)\nexport PATH := $(abspath bin/):${PATH}\n\n# Build variables\nBUILD_DIR ?= build\nVERSION ?= $(shell git describe --tags --exact-match 2>/dev/null || git symbolic-ref -q --short HEAD)\nCOMMIT_HASH ?= $(shell git rev-parse --short HEAD 2>/dev/null)\nDATE_FMT = +%FT%T%z\nifdef SOURCE_DATE_EPOCH\n    BUILD_DATE ?= $(shell date -u -d \"@$(SOURCE_DATE_EPOCH)\" \"$(DATE_FMT)\" 2>/dev/null || date -u -r \"$(SOURCE_DATE_EPOCH)\" \"$(DATE_FMT)\" 2>/dev/null || date -u \"$(DATE_FMT)\")\nelse\n    BUILD_DATE ?= $(shell date \"$(DATE_FMT)\")\nendif\nLDFLAGS += -X main.version=${VERSION} -X main.commitHash=${COMMIT_HASH} -X main.buildDate=${BUILD_DATE}\nexport CGO_ENABLED ?= 1\nifeq (${VERBOSE}, 1)\nifeq ($(filter -v,${GOARGS}),)\n\tGOARGS += -v\nendif\nTEST_FORMAT = short-verbose\nendif\n\n# Project variables\n\n# Dependency versions\nGOTESTSUM_VERSION = 1.8.1\nGOLANGCI_VERSION = 1.50.0\n\nGOLANG_VERSION = 1.14\n\n# Add the ability to override some variables\n# Use with care\n-include override.mk\n\n.PHONY: up\nup: start config.toml ## Set up the development environment\n\n.PHONY: down\ndown: clear ## Destroy the development environment\n\tdocker-compose down --volumes --remove-orphans --rmi local\n\trm -rf var/docker/volumes/*\n\n.PHONY: reset\nreset: down up ## Reset the development environment\n\n.PHONY: clear\nclear: ## Clear the working area and the project\n\trm -rf bin/\n\ndocker-compose.override.yml:\n\tcp docker-compose.override.yml.dist docker-compose.override.yml\n\n.PHONY: start\nstart: docker-compose.override.yml ## Start docker development environment\n\t@ if [ docker-compose.override.yml -ot docker-compose.override.yml.dist ]; then diff -u docker-compose.override.yml docker-compose.override.yml.dist || (echo \"!!! The distributed docker-compose.override.yml example changed. 
Please update your file accordingly (or at least touch it). !!!\" && false); fi\n\tdocker-compose up -d\n\n.PHONY: stop\nstop: ## Stop docker development environment\n\tdocker-compose stop\n\nconfig.toml:\n\tsed 's/production/development/g; s/debug = false/debug = true/g; s/shutdownTimeout = \"15s\"/shutdownTimeout = \"0s\"/g; s/format = \"json\"/format = \"logfmt\"/g; s/level = \"info\"/level = \"debug\"/g; s/addr = \":10000\"/addr = \"127.0.0.1:10000\"/g; s/httpAddr = \":8000\"/httpAddr = \"127.0.0.1:8000\"/g; s/grpcAddr = \":8001\"/grpcAddr = \"127.0.0.1:8001\"/g' config.toml.dist > config.toml\n\n.PHONY: run-%\nrun-%: build-%\n\t${BUILD_DIR}/$*\n\n.PHONY: run\nrun: $(patsubst cmd/%,run-%,$(wildcard cmd/*)) ## Build and execute a binary\n\n.PHONY: clean\nclean: ## Clean builds\n\trm -rf ${BUILD_DIR}/\n\trm -rf cmd/*/pkged.go\n\n.PHONY: goversion\ngoversion:\nifneq (${IGNORE_GOLANG_VERSION_REQ}, 1)\n\t@printf \"${GOLANG_VERSION}\\n$$(go version | awk '{sub(/^go/, \"\", $$3);print $$3}')\" | sort -t '.' 
-k 1,1 -k 2,2 -k 3,3 -g | head -1 | grep -q -E \"^${GOLANG_VERSION}$$\" || (printf \"Required Go version is ${GOLANG_VERSION}\\nInstalled: `go version`\" && exit 1)\nendif\n\n.PHONY: build-%\nbuild-%: goversion\nifeq (${VERBOSE}, 1)\n\tgo env\nendif\n\n\tgo build ${GOARGS} -tags \"${GOTAGS}\" -ldflags \"${LDFLAGS}\" -o ${BUILD_DIR}/$* ./cmd/$*\n\n.PHONY: build\nbuild: goversion ## Build all binaries\nifeq (${VERBOSE}, 1)\n\tgo env\nendif\n\n\t@mkdir -p ${BUILD_DIR}\n\tgo build ${GOARGS} -tags \"${GOTAGS}\" -ldflags \"${LDFLAGS}\" -o ${BUILD_DIR}/ ./cmd/...\n\n.PHONY: build-release\nbuild-release:\n\t@${MAKE} LDFLAGS=\"-w ${LDFLAGS}\" GOARGS=\"${GOARGS} -trimpath\" BUILD_DIR=\"${BUILD_DIR}/release\" build\n\n.PHONY: build-debug\nbuild-debug: ## Build all binaries with remote debugging capabilities\n\t@${MAKE} GOARGS=\"${GOARGS} -gcflags \\\"all=-N -l\\\"\" BUILD_DIR=\"${BUILD_DIR}/debug\" build\n\n\n\n.PHONY: check\ncheck: test-all lint ## Run tests and linters\n\nbin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION}\n\t@ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum\nbin/gotestsum-${GOTESTSUM_VERSION}:\n\t@mkdir -p bin\n\tcurl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION}\n\n\nTEST_PKGS ?= ./...\nTEST_REPORT_NAME ?= results.xml\n.PHONY: test\ntest: TEST_REPORT ?= main\ntest: TEST_FORMAT ?= short\ntest: SHELL = /bin/bash\ntest: bin/gotestsum ## Run tests\n\tbin/gotestsum  --format ${TEST_FORMAT} -- $(filter-out -v,${GOARGS}) -coverprofile=coverage.out -race -parallel 1 $(if ${TEST_PKGS},${TEST_PKGS},./...)\n\t@go tool cover -func=coverage.out\n\t@rm coverage.out\n\n\n.PHONY: test-purego\ntest-purego: TEST_REPORT ?= main\ntest-purego: TEST_FORMAT ?= standard-quiet\ntest-purego: SHELL = /bin/bash\ntest-purego: bin/gotestsum ## Run tests\n\tbin/gotestsum  --format 
${TEST_FORMAT} -- $(filter-out -v,${GOARGS}) -coverprofile=coverage.out -race -parallel 1  -tags purego $(if ${TEST_PKGS},${TEST_PKGS},./...)\n\t@go tool cover -func=coverage.out\n\t@rm coverage.out\n\n\nCVPKG = $(shell go list ./... | grep -v 'chgogen\\|generator' | tr '\\n' ',')\n.PHONY: test-cover\ntest-cover: TEST_REPORT ?= main\ntest-cover: TEST_FORMAT ?= standard-quiet\ntest-cover: SHELL = /bin/bash\ntest-cover: bin/gotestsum ## Run tests\n\tbin/gotestsum  --format ${TEST_FORMAT} -- $(filter-out -v,${GOARGS}) -coverpkg=${CVPKG} -coverprofile=coverage.out -covermode=atomic -parallel 1 $(if ${TEST_PKGS},${TEST_PKGS},./...)\n\t@go tool cover -func=coverage.out\n\n\n\n.PHONY: test-all\ntest-all: ## Run all tests\n\t@${MAKE} GOARGS=\"${GOARGS} -run .\\* \" TEST_REPORT=all test\n\n.PHONY: test-integration\ntest-integration: ## Run integration tests\n\t@${MAKE} GOARGS=\"${GOARGS} -run ^TestIntegration\\$$\\$$\" TEST_REPORT=integration test\n\n.PHONY: test-functional\ntest-functional: ## Run functional tests\n\t@${MAKE} GOARGS=\"${GOARGS} -run ^TestFunctional\\$$\\$$\" TEST_REPORT=functional test\n\nbin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION}\n\t@ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint\nbin/golangci-lint-${GOLANGCI_VERSION}:\n\t@mkdir -p bin\n\tcurl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | BINARY=golangci-lint bash -s -- v${GOLANGCI_VERSION}\n\t@mv bin/golangci-lint $@\n\n.PHONY: lint\nlint: bin/golangci-lint ## Run linter\n\tbin/golangci-lint run --deadline=20m --concurrency 1\nlint-fix: bin/golangci-lint ## Run linter\n\tbin/golangci-lint run --deadline=20m --concurrency 1 --fix\n\n\n\n\nrelease-%: TAG_PREFIX = v\nrelease-%:\nifneq (${DRY}, 1)\n\t@sed -e \"s/^## \\[Unreleased\\]$$/## [Unreleased]\\\\\"$$'\\n'\"\\\\\"$$'\\n'\"\\\\\"$$'\\n'\"## [$*] - $$(date +%Y-%m-%d)/g; s|^\\[Unreleased\\]: \\(.*\\/compare\\/\\)\\(.*\\)...HEAD$$|[Unreleased]: \\1${TAG_PREFIX}$*...HEAD\\\\\"$$'\\n'\"[$*]: 
\\1\\2...${TAG_PREFIX}$*|g\" CHANGELOG.md > CHANGELOG.md.new\n\t@mv CHANGELOG.md.new CHANGELOG.md\n\nifeq (${TAG}, 1)\n\tgit add CHANGELOG.md\n\tgit commit -m 'Prepare release $*'\n\tgit tag -m 'Release $*' ${TAG_PREFIX}$*\nifeq (${PUSH}, 1)\n\tgit push; git push origin ${TAG_PREFIX}$*\nendif\nendif\nendif\n\n\t@echo \"Version updated to $*!\"\nifneq (${PUSH}, 1)\n\t@echo\n\t@echo \"Review the changes made by this script then execute the following:\"\nifneq (${TAG}, 1)\n\t@echo\n\t@echo \"git add CHANGELOG.md && git commit -m 'Prepare release $*' && git tag -m 'Release $*' ${TAG_PREFIX}$*\"\n\t@echo\n\t@echo \"Finally, push the changes:\"\nendif\n\t@echo\n\t@echo \"git push; git push origin ${TAG_PREFIX}$*\"\nendif\n\n.PHONY: patch\npatch: ## Release a new patch version\n\t@${MAKE} release-$(shell (git describe --abbrev=0 --tags 2> /dev/null || echo \"0.0.0\") | sed 's/^v//' | awk -F'[ .]' '{print $$1\".\"$$2\".\"$$3+1}')\n\n.PHONY: minor\nminor: ## Release a new minor version\n\t@${MAKE} release-$(shell (git describe --abbrev=0 --tags 2> /dev/null || echo \"0.0.0\") | sed 's/^v//' | awk -F'[ .]' '{print $$1\".\"$$2+1\".0\"}')\n\n.PHONY: major\nmajor: ## Release a new major version\n\t@${MAKE} release-$(shell (git describe --abbrev=0 --tags 2> /dev/null || echo \"0.0.0\") | sed 's/^v//' | awk -F'[ .]' '{print $$1+1\".0.0\"}')\n\n.PHONY: list\nlist: ## List all make targets\n\t@${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ \"^[#.]\") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort\n\n.PHONY: help\n.DEFAULT_GOAL := help\nhelp:\n\t@grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\n# Variable outputting/exporting rules\nvar-%: ; @echo $($*)\nvarexport-%: ; @echo $*=$($*)\n"
  },
  {
    "path": "README.md",
    "content": "[![Go Reference](https://pkg.go.dev/badge/github.com/vahid-sohrabloo/chconn/v2.svg)](https://pkg.go.dev/github.com/vahid-sohrabloo/chconn/v2)\n[![codecov](https://codecov.io/gh/vahid-sohrabloo/chconn/branch/master/graph/badge.svg?token=K3JN6XWFVV)](https://codecov.io/gh/vahid-sohrabloo/chconn)\n[![Go Report Card](https://goreportcard.com/badge/github.com/vahid-sohrabloo/chconn/v2)](https://goreportcard.com/report/github.com/vahid-sohrabloo/chconn/v2)\n[![Actions Status](https://github.com/vahid-sohrabloo/chconn/workflows/CI/badge.svg)](https://github.com/vahid-sohrabloo/chconn/actions)\n[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fvahid-sohrabloo%2Fchconn.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fvahid-sohrabloo%2Fchconn?ref=badge_shield)\n\n# chconn - ClickHouse low level Driver\n\nchconn is a pure generic Go (1.18+) driver for [ClickHouse](https://clickhouse.com/) that use Native protocol\nchconn aims to be low-level, fast, and performant.\n\nFor comparison with other libraries, please see https://github.com/vahid-sohrabloo/go-ch-benchmark and https://github.com/go-faster/ch-bench#benchmarks\n\nIf you have any suggestion or comment, please feel free to open an issue\n\n## Example Usage\n```go\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/chpool\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc main() {\n\tconn, err := chpool.New(os.Getenv(\"DATABASE_URL\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer conn.Close()\n\n\t// to check if the connection is alive\n\terr = conn.Ping(context.Background())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = conn.Exec(context.Background(), `DROP TABLE IF EXISTS example_table`)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = conn.Exec(context.Background(), `CREATE TABLE  example_table (\n\t\tuint64 UInt64,\n\t\tuint64_nullable Nullable(UInt64)\n\t) 
Engine=Memory`)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcol1 := column.New[uint64]()\n\tcol2 := column.New[uint64]().Nullable()\n\trows := 1_000_000 // one million rows per batch - inserted 10 times (10M rows total)\n\tnumInsert := 10\n\tcol1.SetWriteBufferSize(rows)\n\tcol2.SetWriteBufferSize(rows)\n\tstartInsert := time.Now()\n\tfor i := 0; i < numInsert; i++ {\n\t\tfor y := 0; y < rows; y++ {\n\t\t\tcol1.Append(uint64(i))\n\t\t\tif i%2 == 0 {\n\t\t\t\tcol2.Append(uint64(i))\n\t\t\t} else {\n\t\t\t\tcol2.AppendNil()\n\t\t\t}\n\t\t}\n\n\t\tctxInsert, cancelInsert := context.WithTimeout(context.Background(), time.Second*30)\n\t\t// insert data\n\t\terr = conn.Insert(ctxInsert, \"INSERT INTO example_table (uint64,uint64_nullable) VALUES\", col1, col2)\n\t\tif err != nil {\n\t\t\tcancelInsert()\n\t\t\tpanic(err)\n\t\t}\n\t\tcancelInsert()\n\t}\n\tfmt.Println(\"inserted 10M rows in \", time.Since(startInsert))\n\n\t// select data\n\tcol1Read := column.New[uint64]()\n\tcol2Read := column.New[uint64]().Nullable()\n\n\tctxSelect, cancelSelect := context.WithTimeout(context.Background(), time.Second*30)\n\tdefer cancelSelect()\n\n\tstartSelect := time.Now()\n\tselectStmt, err := conn.Select(ctxSelect, \"SELECT uint64,uint64_nullable FROM  example_table\", col1Read, col2Read)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// make sure the stmt close after select. 
but it's not necessary\n\tdefer selectStmt.Close()\n\n\tvar col1Data []uint64\n\tvar col2DataNil []bool\n\tvar col2Data []uint64\n\t// read data block by block\n\t// for more information about block, see: https://clickhouse.com/docs/en/development/architecture/#block\n\tfor selectStmt.Next() {\n\t\tcol1Data = col1Data[:0]\n\t\tcol1Data = col1Read.Read(col1Data)\n\n\t\tcol2DataNil = col2DataNil[:0]\n\t\tcol2DataNil = col2Read.ReadNil(col2DataNil)\n\n\t\tcol2Data = col2Data[:0]\n\t\tcol2Data = col2Read.Read(col2Data)\n\t}\n\n\t// check errors\n\tif selectStmt.Err() != nil {\n\t\tpanic(selectStmt.Err())\n\t}\n\tfmt.Println(\"selected 10M rows in \", time.Since(startSelect))\n}\n```\n```\ninserted 10M rows in  1.206666188s\nselected 10M rows in  880.505004ms\n```\n\n\n**For more information**, please see the [documentation](https://github.com/vahid-sohrabloo/chconn/wiki)\n## Features\n*   Generics (go1.18) for column types\n*   Connection pool with after-connect hook for arbitrary connection setup similar to pgx (thanks @jackc)\n*   Support DSN and Query connection string  (thanks @jackc)\n*   Support All ClickHouse data types\n*   Read and write data in column-oriented (like ClickHouse)\n*   Do not use `interface{}` , `reflect`\n*   Batch select and insert\n*   Full TLS connection control\n*   Read raw binary data\n*   Supports profile and progress \n*   database url connection very like pgx (thanks @jackc)\n*   Code generator for Insert\n*   Support LZ4 and ZSTD compression protocol\n*   Support execution telemetry streaming profiles and progress\n\n## Supported types\n*   UInt8, UInt16, UInt32, UInt64, UInt128, UInt256\n*   Int8, Int16, Int32, Int64, Int128, Int256\n*   Date, Date32, DateTime, DateTime64\n*   Decimal32, Decimal64, Decimal128, Decimal256\n*   IPv4, IPv6\n*   String, FixedString(N)\n*   UUID\n*   Array(T)\n*   Enums\n*   LowCardinality(T)\n*   Map(K, V)\n*   Tuple(T1, T2, ..., Tn)\n*   Nullable(T)\n*   Point, Ring, Polygon, MultiPolygon\n\n\n\n# 
Benchmarks\nthe source code of this benchmark here\nhttps://github.com/vahid-sohrabloo/go-ch-benchmark\n\n```\nname \\ time/op           chconn              chgo       go-clickhouse         uptrace\nTestSelect100MUint64-16   150ms             154ms \t       8019ms \t       3045ms \t\nTestSelect10MString-16    271ms \t    447ms \t        969ms \t        822ms \t\nTestInsert10M-16          198ms \t    514ms \t        561ms \t        304ms \t\n\nname \\ alloc/op          chconn              chgo       go-clickhouse         uptrace\nTestSelect100MUint64-16   111kB \t    262kB \t    3202443kB \t     800941kB \t\nTestSelect10MString-16   1.63MB \t   1.79MB \t    1626.51MB \t     241.03MB \t\nTestInsert10M-16         26.0MB \t  283.7MB \t     1680.4MB \t      240.2MB \t\n\nname \\ allocs/op         chconn              chgo       go-clickhouse         uptrace\nTestSelect100MUint64-16    35.0 \t   6683.0 \t  200030937.0 \t  100006069.0 \t\nTestSelect10MString-16     49.0 \t   1748.0 \t   30011991.0 \t   20001120.0 \t\nTestInsert10M-16           26.0 \t     80.0 \t        224.0 \t         50.0 \t\n```\n\n## License\n[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fvahid-sohrabloo%2Fchconn.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fvahid-sohrabloo%2Fchconn?ref=badge_large)\n"
  },
  {
    "path": "block.go",
    "content": "package chconn\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\n// Column contains details of ClickHouse column\ntype chColumn struct {\n\tChType []byte\n\tName   []byte\n}\n\ntype block struct {\n\tColumns      []chColumn\n\tNumRows      uint64\n\tNumColumns   uint64\n\tinfo         blockInfo\n\theaderWriter *readerwriter.Writer\n}\n\nfunc newBlock() *block {\n\treturn &block{\n\t\theaderWriter: readerwriter.NewWriter(),\n\t}\n}\n\nfunc (block *block) reset() {\n\tblock.headerWriter.Reset()\n\tblock.Columns = block.Columns[:0]\n\tblock.NumRows = 0\n\tblock.NumColumns = 0\n}\n\nfunc (block *block) read(ch *conn) error {\n\tif _, err := ch.reader.ByteString(); err != nil { // temporary table\n\t\treturn &readError{\"block: temporary table\", err}\n\t}\n\tch.reader.SetCompress(ch.compress)\n\tdefer ch.reader.SetCompress(false)\n\n\tvar err error\n\terr = block.info.read(ch.reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tblock.NumColumns, err = ch.reader.Uvarint()\n\tif err != nil {\n\t\treturn &readError{\"block: read NumColumns\", err}\n\t}\n\n\tblock.NumRows, err = ch.reader.Uvarint()\n\tif err != nil {\n\t\treturn &readError{\"block: read NumRows\", err}\n\t}\n\treturn nil\n}\n\nfunc (block *block) readColumns(ch *conn) error {\n\tch.reader.SetCompress(ch.compress)\n\tdefer ch.reader.SetCompress(false)\n\tblock.Columns = make([]chColumn, block.NumColumns)\n\n\tfor i := uint64(0); i < block.NumColumns; i++ {\n\t\tcol, err := block.nextColumn(ch)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tblock.Columns[i] = col\n\t}\n\treturn nil\n}\n\nfunc (block *block) readColumnsData(ch *conn, needValidateData bool, columns ...column.ColumnBasic) error {\n\tch.reader.SetCompress(ch.compress)\n\tdefer ch.reader.SetCompress(false)\n\tfor _, col := range columns {\n\t\terr := 
col.HeaderReader(ch.reader, true, ch.serverInfo.Revision)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"read column header: %w\", err)\n\t\t}\n\t\tif needValidateData {\n\t\t\tif errValidate := col.Validate(); errValidate != nil {\n\t\t\t\treturn fmt.Errorf(\"validate %q: %w\", col.Name(), errValidate)\n\t\t\t}\n\t\t}\n\t\terr = col.ReadRaw(int(block.NumRows), ch.reader)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"read data %q: %w\", col.Name(), err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (block *block) reorderColumns(columns []column.ColumnBasic) ([]column.ColumnBasic, error) {\n\tfor i, c := range block.Columns {\n\t\t// check if already sorted\n\t\tif bytes.Equal(columns[i].Name(), block.Columns[i].Name) {\n\t\t\tcontinue\n\t\t}\n\t\tindex, col := findColumn(columns, c.Name)\n\t\tif col == nil {\n\t\t\treturn nil, &ColumnNotFoundError{\n\t\t\t\tColumn: string(c.Name),\n\t\t\t}\n\t\t}\n\t\tcolumns[index] = columns[i]\n\t\tcolumns[i] = col\n\t}\n\treturn columns, nil\n}\n\nfunc findColumn(columns []column.ColumnBasic, name []byte) (int, column.ColumnBasic) {\n\tfor i, col := range columns {\n\t\tif bytes.Equal(col.Name(), name) {\n\t\t\treturn i, col\n\t\t}\n\t}\n\treturn 0, nil\n}\n\nfunc (block *block) nextColumn(ch *conn) (chColumn, error) {\n\tcol := chColumn{}\n\tvar err error\n\tif col.Name, err = ch.reader.ByteString(); err != nil {\n\t\treturn col, &readError{\"block: read column name\", err}\n\t}\n\tif col.ChType, err = ch.reader.ByteString(); err != nil {\n\t\treturn col, &readError{\"block: read column type\", err}\n\t}\n\tif ch.serverInfo.Revision >= helper.DbmsMinProtocolWithCustomSerialization {\n\t\tcustomSerialization, err := ch.reader.ReadByte()\n\t\tif err != nil {\n\t\t\treturn col, &readError{\"block: read custom serialization\", err}\n\t\t}\n\t\tif customSerialization == 1 {\n\t\t\treturn col, &readError{\"block: custom serialization not supported\", nil}\n\t\t}\n\t}\n\treturn col, nil\n}\n\nfunc (block *block) writeHeader(ch *conn, 
numRows int) error {\n\tblock.info.write(ch.writer)\n\t// NumColumns\n\tch.writer.Uvarint(block.NumColumns)\n\t// NumRows\n\tch.writer.Uvarint(uint64(numRows))\n\t_, err := ch.writer.WriteTo(ch.writerToCompress)\n\tif err != nil {\n\t\treturn &writeError{\"write block info\", err}\n\t}\n\terr = ch.flushCompress()\n\tif err != nil {\n\t\treturn &writeError{\"flush block info\", err}\n\t}\n\n\treturn nil\n}\n\nfunc (block *block) writeColumnsBuffer(ch *conn, columns ...column.ColumnBasic) error {\n\tnumRows := columns[0].NumRow()\n\tfor i, column := range block.Columns {\n\t\tif numRows != columns[i].NumRow() {\n\t\t\treturn &NumberWriteError{\n\t\t\t\tFirstNumRow: numRows,\n\t\t\t\tNumRow:      columns[i].NumRow(),\n\t\t\t\tColumn:      string(column.Name),\n\t\t\t\tFirstColumn: string(block.Columns[0].Name),\n\t\t\t}\n\t\t}\n\t\tblock.headerWriter.Reset()\n\t\tblock.headerWriter.ByteString(column.Name)\n\t\tblock.headerWriter.ByteString(column.ChType)\n\n\t\tif ch.serverInfo.Revision >= helper.DbmsMinProtocolWithCustomSerialization {\n\t\t\tblock.headerWriter.Uint8(0)\n\t\t}\n\n\t\tcolumns[i].HeaderWriter(block.headerWriter)\n\t\tif _, err := block.headerWriter.WriteTo(ch.writerToCompress); err != nil {\n\t\t\treturn &writeError{\"block: write header block data for column \" + string(column.Name), err}\n\t\t}\n\t\tif _, err := columns[i].WriteTo(ch.writerToCompress); err != nil {\n\t\t\treturn &writeError{\"block: write block data for column \" + string(column.Name), err}\n\t\t}\n\t}\n\terr := ch.flushCompress()\n\tif err != nil {\n\t\treturn &writeError{\"block: flush block data\", err}\n\t}\n\treturn nil\n}\n\ntype blockInfo struct {\n\tfield1      uint64\n\tisOverflows uint8\n\tfield2      uint64\n\tbucketNum   int32\n\tnum3        uint64\n}\n\nfunc (info *blockInfo) read(r *readerwriter.Reader) error {\n\tvar err error\n\tif info.field1, err = r.Uvarint(); err != nil {\n\t\treturn &readError{\"blockInfo: read field1\", err}\n\t}\n\tif info.isOverflows, err = 
r.ReadByte(); err != nil {\n\t\treturn &readError{\"blockInfo: read isOverflows\", err}\n\t}\n\tif info.field2, err = r.Uvarint(); err != nil {\n\t\treturn &readError{\"blockInfo: read field2\", err}\n\t}\n\tif info.bucketNum, err = r.Int32(); err != nil {\n\t\treturn &readError{\"blockInfo: read bucketNum\", err}\n\t}\n\tif info.num3, err = r.Uvarint(); err != nil {\n\t\treturn &readError{\"blockInfo: read num3\", err}\n\t}\n\treturn nil\n}\n\nfunc (info *blockInfo) write(w *readerwriter.Writer) {\n\tw.Uvarint(1)\n\tw.Uint8(info.isOverflows)\n\tw.Uvarint(2)\n\n\tif info.bucketNum == 0 {\n\t\tinfo.bucketNum = -1\n\t}\n\tw.Int32(info.bucketNum)\n\tw.Uvarint(0)\n}\n"
  },
  {
    "path": "block_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestBlockReadError(t *testing.T) {\n\tstartValidReader := 15\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"blockInfo: temporary table\",\n\t\t\twantErr:     \"block: temporary table\",\n\t\t\tnumberValid: startValidReader - 1,\n\t\t}, {\n\t\t\tname:        \"blockInfo: read field1\",\n\t\t\twantErr:     \"blockInfo: read field1\",\n\t\t\tnumberValid: startValidReader,\n\t\t}, {\n\t\t\tname:        \"blockInfo: read isOverflows\",\n\t\t\twantErr:     \"blockInfo: read isOverflows\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t}, {\n\t\t\tname:        \"blockInfo: read field2\",\n\t\t\twantErr:     \"blockInfo: read field2\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t}, {\n\t\t\tname:        \"blockInfo: read bucketNum\",\n\t\t\twantErr:     \"blockInfo: read bucketNum\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t}, {\n\t\t\tname:        \"blockInfo: read num3\",\n\t\t\twantErr:     \"blockInfo: read num3\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t}, {\n\t\t\tname:        \"block: read NumColumns\",\n\t\t\twantErr:     \"block: read NumColumns\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t}, {\n\t\t\tname:        \"block: read NumRows\",\n\t\t\twantErr:     \"block: read NumRows\",\n\t\t\tnumberValid: startValidReader + 6,\n\t\t}, {\n\t\t\tname:        \"block: read column name\",\n\t\t\twantErr:     \"block: read column name\",\n\t\t\tnumberValid: startValidReader + 8,\n\t\t}, {\n\t\t\tname:        \"block: read column type\",\n\t\t\twantErr:     \"block: read column type\",\n\t\t\tnumberValid: startValidReader + 10,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := 
ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT * FROM system.numbers LIMIT 5;\")\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Nil(t, stmt)\n\n\t\t\treadErr, ok := err.(*readError)\n\t\t\trequire.True(t, ok)\n\t\t\trequire.Equal(t, readErr.msg, tt.wantErr)\n\t\t\trequire.EqualError(t, readErr.Unwrap(), \"timeout\")\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "chconn.go",
    "content": "package chconn\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/ctxwatch\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\nconst (\n\tconnStatusUninitialized = iota\n\tconnStatusConnecting\n\tconnStatusClosed\n\tconnStatusIdle\n\tconnStatusBusy\n)\n\nconst (\n\t// Name, version, revision, default DB\n\tclientHello = 0\n\t// whether the compression must be used,\n\t// query text (without data for INSERTs).\n\tclientQuery = 1\n\t// A block of data (compressed or not).\n\tclientData = 2\n\t// Check that connection to the server is alive.\n\tclientPing = 4\n)\n\nconst (\n\t// Name, version, revision.\n\tserverHello = 0\n\t// A block of data (compressed or not).\n\tserverData = 1\n\t// The exception during query execution.\n\tserverException = 2\n\t// Query execution progress: rows read, bytes read.\n\tserverProgress = 3\n\t// Ping response\n\tserverPong = 4\n\t// All packets were transmitted\n\tserverEndOfStream = 5\n\t// Packet with profiling info.\n\tserverProfileInfo = 6\n\t// A block with totals (compressed or not).\n\tserverTotals = 7\n\t// A block with minimums and maximums (compressed or not).\n\tserverExtremes = 8\n\t// Columns' description for default values calculation\n\tserverTableColumns = 11\n\t// list of unique parts ids.\n\t//nolint:deadcode,unused,varcheck\n\tserverPartUUIDs = 12\n\t// String (UUID) describes a request for which next task is needed\n\t//nolint:deadcode,unused,varcheck\n\tserverReadTaskRequest = 13\n\t// Packet with profile events from server\n\tserverProfileEvents = 14\n)\n\nconst (\n\tdbmsVersionMajor    = 1\n\tdbmsVersionMinor    = 0\n\tdbmsVersionPatch    = 0\n\tdbmsVersionRevision = 54460\n)\n\ntype queryProcessingStage uint64\n\nconst (\n\n\t// 
queryProcessingStageComplete Completely.\n\tqueryProcessingStageComplete queryProcessingStage = 2\n)\n\n// DialFunc is a function that can be used to connect to a ClickHouse server.\ntype DialFunc func(ctx context.Context, network, addr string) (net.Conn, error)\n\n// LookupFunc is a function that can be used to lookup IPs addrs from host.\ntype LookupFunc func(ctx context.Context, host string) (addrs []string, err error)\n\n// ReaderFunc is a function that can be used get reader for read from server\ntype ReaderFunc func(io.Reader) io.Reader\n\n// WriterFunc is a function that can be used to get writer to writer from server\n// Note: DO NOT use bufio.Writer, chconn doesn't support flush\ntype WriterFunc func(io.Writer) io.Writer\n\n// Conn is a low-level Clickhouse connection handle. It is not safe for concurrent usage.\ntype Conn interface {\n\t// RawConn Get Raw Connection. Do not use unless you know what you want to do\n\tRawConn() net.Conn\n\t// Close the connection to database\n\tClose() error\n\t// IsClosed reports if the connection has been closed.\n\tIsClosed() bool\n\t// IsBusy reports if the connection is busy.\n\tIsBusy() bool\n\t// ServerInfo get Server info\n\tServerInfo() *ServerInfo\n\t// Ping sends a ping to check that the connection to the server is alive.\n\tPing(ctx context.Context) error\n\t// Exec executes a query without returning any rows.\n\t// NOTE: don't use it for insert and select query\n\tExec(ctx context.Context, query string) error\n\t// ExecWithOption executes a query without returning any rows with Query options.\n\t// NOTE: don't use it for insert and select query\n\tExecWithOption(\n\t\tctx context.Context,\n\t\tquery string,\n\t\tqueryOptions *QueryOptions,\n\t) error\n\t// Insert executes a insert query and commit all columns data.\n\t//\n\t// If the query is successful, the columns buffer will be reset.\n\t//\n\t// NOTE: only use for insert query\n\tInsert(ctx context.Context, query string, columns ...column.ColumnBasic) 
error\n\t// InsertWithOption executes a insert query with the query options and commit all columns data.\n\t//\n\t// If the query is successful, the columns buffer will be reset.\n\t//\n\t// NOTE: only use for insert query\n\tInsertWithOption(ctx context.Context, query string, queryOptions *QueryOptions, columns ...column.ColumnBasic) error\n\t// Insert executes a insert query and return a InsertStmt.\n\t//\n\t// NOTE: only use for insert query\n\tInsertStream(ctx context.Context, query string) (InsertStmt, error)\n\t// InsertWithOption executes a insert query with the query options and return a InsertStmt.\n\t//\n\t// If the query is successful, the columns buffer will be reset.\n\t//\n\t// NOTE: only use for insert query\n\tInsertStreamWithOption(\n\t\tctx context.Context,\n\t\tquery string,\n\t\tqueryOptions *QueryOptions) (InsertStmt, error)\n\t// Select executes a query and return select stmt.\n\t//\n\t// NOTE: only use for select query\n\tSelect(ctx context.Context, query string, columns ...column.ColumnBasic) (SelectStmt, error)\n\t// Select executes a query with the the query options and return select stmt.\n\t//\n\t// NOTE: only use for select query\n\tSelectWithOption(\n\t\tctx context.Context,\n\t\tquery string,\n\t\tqueryOptions *QueryOptions,\n\t\tcolumns ...column.ColumnBasic,\n\t) (SelectStmt, error)\n}\n\ntype writeFlusher interface {\n\tio.Writer\n\tFlush() error\n}\n\ntype conn struct {\n\tconn              net.Conn          // the underlying TCP connection\n\tparameterStatuses map[string]string // parameters that have been reported by the server\n\tserverInfo        *ServerInfo\n\tclientInfo        *ClientInfo\n\n\tconfig *Config\n\n\tstatus byte // One of connStatus* constants\n\n\twriter           *readerwriter.Writer\n\twriterTo         io.Writer\n\twriterToCompress io.Writer\n\n\treader   *readerwriter.Reader\n\tcompress bool\n\n\tcontextWatcher *ctxwatch.ContextWatcher\n\tblock          *block\n\n\tprofileEvent *ProfileEvent\n}\n\n// Connect 
establishes a connection to a ClickHouse server using the environment and connString (in URL or DSN format)\n// to provide configuration. See documentation for ParseConfig for details. ctx can be used to cancel a connect attempt.\nfunc Connect(ctx context.Context, connString string) (Conn, error) {\n\tconfig, err := ParseConfig(connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ConnectConfig(ctx, config)\n}\n\n// ConnectConfig establishes a connection to a ClickHouse server using config. config must have been constructed with\n// ParseConfig. ctx can be used to cancel a connect attempt.\n//\n// If config.Fallbacks are present they will sequentially be tried in case of error establishing network connection. An\n// authentication error will terminate the chain of attempts (like libpq:\n// https://www.postgresql.org/docs/12/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS) and be returned as the error. Otherwise,\n// if all attempts fail the last error is returned.\nfunc ConnectConfig(octx context.Context, config *Config) (c Conn, err error) {\n\t// Default values are set in ParseConfig. 
Enforce initial creation by ParseConfig rather than setting defaults from\n\t// zero values.\n\tif !config.createdByParseConfig {\n\t\tpanic(\"config must be created by ParseConfig\")\n\t}\n\n\t// Simplify usage by treating primary config and fallbacks the same.\n\tfallbackConfigs := []*FallbackConfig{\n\t\t{\n\t\t\tHost:      config.Host,\n\t\t\tPort:      config.Port,\n\t\t\tTLSConfig: config.TLSConfig,\n\t\t},\n\t}\n\tfallbackConfigs = append(fallbackConfigs, config.Fallbacks...)\n\tctx := octx\n\tfallbackConfigs, err = expandWithIPs(ctx, config.LookupFunc, fallbackConfigs)\n\tif err != nil {\n\t\treturn nil, &connectError{config: config, msg: \"hostname resolving error\", err: err}\n\t}\n\n\tif len(fallbackConfigs) == 0 {\n\t\treturn nil, &connectError{config: config, msg: \"hostname resolving error\", err: ErrIPNotFound}\n\t}\n\n\tfoundBestServer := false\n\tvar fallbackConfig *FallbackConfig\n\tfor _, fc := range fallbackConfigs {\n\t\t// ConnectTimeout restricts the whole connection process.\n\t\tif config.ConnectTimeout != 0 {\n\t\t\tvar cancel context.CancelFunc\n\t\t\tctx, cancel = context.WithTimeout(octx, config.ConnectTimeout)\n\t\t\t//nolint:gocritic\n\t\t\tdefer cancel()\n\t\t} else {\n\t\t\tctx = octx\n\t\t}\n\t\tc, err = connect(ctx, config, fc)\n\t\tif err == nil {\n\t\t\tfoundBestServer = true\n\t\t\tbreak\n\t\t} else if chErr, ok := err.(*ChError); ok {\n\t\t\treturn nil, &connectError{config: config, msg: \"server error\", err: chErr}\n\t\t}\n\t}\n\n\tif !foundBestServer && fallbackConfig != nil {\n\t\tc, err = connect(ctx, config, fallbackConfig)\n\t\tif cherr, ok := err.(*ChError); ok {\n\t\t\terr = &connectError{config: config, msg: \"server error\", err: cherr}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn nil, err // no need to wrap in connectError because it will already be wrapped in all cases except ChError\n\t}\n\n\tif config.AfterConnect != nil {\n\t\terr := config.AfterConnect(ctx, c)\n\t\tif err != nil 
{\n\t\t\tc.RawConn().Close()\n\t\t\treturn nil, &connectError{config: config, msg: \"AfterConnect error\", err: err}\n\t\t}\n\t}\n\n\treturn c, nil\n}\n\nfunc expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*FallbackConfig) ([]*FallbackConfig, error) {\n\tvar configs []*FallbackConfig\n\n\tfor _, fb := range fallbacks {\n\t\tips, err := lookupFn(ctx, fb.Host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, ip := range ips {\n\t\t\tsplitIP, splitPort, err := net.SplitHostPort(ip)\n\t\t\tif err == nil {\n\t\t\t\tport, err := strconv.ParseUint(splitPort, 10, 16)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error parsing port (%s) from lookup: %w\", splitPort, err)\n\t\t\t\t}\n\t\t\t\tconfigs = append(configs, &FallbackConfig{\n\t\t\t\t\tHost:      splitIP,\n\t\t\t\t\tPort:      uint16(port),\n\t\t\t\t\tTLSConfig: fb.TLSConfig,\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tconfigs = append(configs, &FallbackConfig{\n\t\t\t\t\tHost:      ip,\n\t\t\t\t\tPort:      fb.Port,\n\t\t\t\t\tTLSConfig: fb.TLSConfig,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn configs, nil\n}\n\nfunc connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig) (Conn, error) {\n\tc := new(conn)\n\tc.config = config\n\n\tc.compress = config.Compress != CompressNone\n\n\tvar err error\n\tnetwork, address := NetworkAddress(fallbackConfig.Host, fallbackConfig.Port)\n\tc.conn, err = config.DialFunc(ctx, network, address)\n\tif err != nil {\n\t\tvar netErr net.Error\n\t\tif errors.As(err, &netErr) && netErr.Timeout() {\n\t\t\terr = &errTimeout{err: err}\n\t\t}\n\t\treturn nil, &connectError{config: config, msg: \"dial error\", err: err}\n\t}\n\n\tc.parameterStatuses = make(map[string]string)\n\n\tif fallbackConfig.TLSConfig != nil {\n\t\tc.conn = tls.Client(c.conn, fallbackConfig.TLSConfig)\n\t}\n\n\tc.status = connStatusConnecting\n\tc.contextWatcher = ctxwatch.NewContextWatcher(\n\t\tfunc() {\n\t\t\tc.conn.SetDeadline(time.Date(1, 1, 1, 1, 
1, 1, 1, time.UTC)) //nolint:errcheck //no need\n\t\t},\n\t\tfunc() {\n\t\t\tc.conn.SetDeadline(time.Time{}) //nolint:errcheck //no need\n\t\t},\n\t)\n\n\tif ctx != context.Background() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, newContextAlreadyDoneError(ctx)\n\t\tdefault:\n\t\t}\n\t\tc.contextWatcher.Watch(ctx)\n\t\tdefer c.contextWatcher.Unwatch()\n\t}\n\n\tc.writer = readerwriter.NewWriter()\n\tif config.ReaderFunc != nil {\n\t\tc.reader = readerwriter.NewReader(config.ReaderFunc(c.conn))\n\t} else {\n\t\tc.reader = readerwriter.NewReader(bufio.NewReaderSize(c.conn, c.config.MinReadBufferSize))\n\t}\n\tif config.WriterFunc != nil {\n\t\tc.writerTo = config.WriterFunc(c.conn)\n\t} else {\n\t\tc.writerTo = c.conn\n\t}\n\tif c.compress {\n\t\tc.writerToCompress = readerwriter.NewCompressWriter(c.writerTo, byte(config.Compress))\n\t} else {\n\t\tc.writerToCompress = c.writerTo\n\t}\n\n\tc.serverInfo = &ServerInfo{}\n\terr = c.hello()\n\tif err != nil {\n\t\treturn nil, preferContextOverNetTimeoutError(ctx, err)\n\t}\n\n\tc.sendAddendum()\n\n\tc.block = newBlock()\n\tc.profileEvent = newProfileEvent()\n\tc.status = connStatusIdle\n\n\treturn c, nil\n}\n\nfunc (ch *conn) sendAddendum() {\n\tif ch.serverInfo.Revision >= helper.DbmsMinProtocolWithQuotaKey {\n\t\tch.writer.String(ch.config.QuotaKey)\n\t}\n}\n\nfunc (ch *conn) flushCompress() error {\n\tif w, ok := ch.writerToCompress.(writeFlusher); ok {\n\t\treturn w.Flush()\n\t}\n\treturn nil\n}\n\nfunc (ch *conn) RawConn() net.Conn {\n\treturn ch.conn\n}\n\n// send hello to ClickHouse\nfunc (ch *conn) hello() error {\n\tch.writer.Uvarint(clientHello)\n\tch.writer.String(ch.config.ClientName)\n\tch.writer.Uvarint(dbmsVersionMajor)\n\tch.writer.Uvarint(dbmsVersionMinor)\n\tch.writer.Uvarint(dbmsVersionRevision)\n\tch.writer.String(ch.config.Database)\n\tch.writer.String(ch.config.User)\n\tch.writer.String(ch.config.Password)\n\n\tif _, err := ch.writer.WriteTo(ch.writerTo); err != nil {\n\t\treturn 
fmt.Errorf(\"write hello: %w\", err)\n\t}\n\n\tres, err := ch.receiveAndProcessData(emptyOnProgress)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ch.serverInfo.Revision == 0 {\n\t\treturn &unexpectedPacket{expected: \"serverHello\", actual: res}\n\t}\n\treturn nil\n}\n\n// IsClosed reports if the connection has been closed.\nfunc (ch *conn) IsClosed() bool {\n\treturn ch.status < connStatusIdle\n}\n\n// IsBusy reports if the connection is busy.\nfunc (ch *conn) IsBusy() bool {\n\treturn ch.status == connStatusBusy\n}\n\n// lock locks the connection.\nfunc (ch *conn) lock() error {\n\tswitch ch.status {\n\tcase connStatusBusy:\n\t\treturn &connLockError{status: \"conn busy\"} // This only should be possible in case of an application bug.\n\tcase connStatusClosed:\n\t\treturn &connLockError{status: \"conn closed\"}\n\tcase connStatusUninitialized:\n\t\treturn &connLockError{status: \"conn uninitialized\"}\n\t}\n\tch.status = connStatusBusy\n\treturn nil\n}\n\nfunc (ch *conn) unlock() {\n\tswitch ch.status {\n\tcase connStatusBusy:\n\t\tch.status = connStatusIdle\n\tcase connStatusClosed:\n\tdefault:\n\t\tpanic(\"BUG: cannot unlock unlocked connection\") // This should only be possible if there is a bug in this package.\n\t}\n}\n\nfunc (ch *conn) sendQueryWithOption(\n\tquery,\n\tqueryID string,\n\tsettings Settings,\n\tparameters *Parameters,\n) error {\n\tch.writer.Uvarint(clientQuery)\n\tch.writer.String(queryID)\n\tif ch.serverInfo.Revision >= helper.DbmsMinRevisionWithClientInfo {\n\t\tif ch.clientInfo == nil {\n\t\t\tch.clientInfo = &ClientInfo{}\n\t\t}\n\n\t\tch.clientInfo.fillOSUserHostNameAndVersionInfo()\n\t\tch.clientInfo.ClientName = ch.config.Database + \" \" + ch.config.ClientName\n\n\t\tch.clientInfo.write(ch)\n\t}\n\n\t// setting\n\tif settings != nil && ch.serverInfo.Revision >= helper.DbmsMinRevisionWithSettingsSerializedAsStrings {\n\t\tsettings.write(ch.writer)\n\t}\n\n\tch.writer.String(\"\")\n\n\tif ch.serverInfo.Revision >= 
helper.DbmsMinRevisionWithInterServerSecret {\n\t\tch.writer.String(\"\")\n\t}\n\n\tch.writer.Uvarint(uint64(queryProcessingStageComplete))\n\n\t// compression\n\tif ch.compress {\n\t\tch.writer.Uint8(1)\n\t} else {\n\t\tch.writer.Uint8(0)\n\t}\n\n\tch.writer.String(query)\n\n\tif ch.serverInfo.Revision >= helper.DbmsMinProtocolWithParameters {\n\t\tparameters.write(ch.writer)\n\t\tch.writer.String(\"\")\n\t} else if parameters.hasParam() {\n\t\treturn errors.New(\"parameters are not supported by the server\")\n\t}\n\n\treturn ch.sendEmptyBlock()\n}\n\nfunc (ch *conn) sendData(block *block, numRows int) error {\n\tch.writer.Uvarint(clientData)\n\t// name\n\tch.writer.String(\"\")\n\n\t// if compress enable we must send this part with uncompressed data\n\tif ch.compress {\n\t\t_, err := ch.writer.WriteTo(ch.writerTo)\n\t\tif err != nil {\n\t\t\treturn &writeError{\"write block info\", err}\n\t\t}\n\t}\n\treturn block.writeHeader(ch, numRows)\n}\n\nfunc (ch *conn) sendEmptyBlock() error {\n\tch.block.reset()\n\treturn ch.sendData(ch.block, 0)\n}\n\nfunc (ch *conn) Close() error {\n\tif ch.status == connStatusClosed {\n\t\treturn nil\n\t}\n\tch.contextWatcher.Unwatch()\n\tch.status = connStatusClosed\n\treturn ch.conn.Close()\n}\n\nfunc (ch *conn) readTableColumn() {\n\t// todo check errors\n\tch.reader.String() //nolint:errcheck //no needed\n\tch.reader.String() //nolint:errcheck //no needed\n}\nfunc (ch *conn) receiveAndProcessData(onProgress func(*Progress)) (interface{}, error) {\n\tpacket, err := ch.reader.Uvarint()\n\tif err != nil {\n\t\treturn nil, &readError{\"packet: read packet type\", err}\n\t}\n\tswitch packet {\n\tcase serverData, serverTotals, serverExtremes:\n\t\tch.block.reset()\n\t\terr = ch.block.read(ch)\n\t\treturn ch.block, err\n\tcase serverProfileInfo:\n\t\tprofile := newProfile()\n\n\t\terr = profile.read(ch)\n\t\treturn profile, err\n\tcase serverProgress:\n\t\tprogress := newProgress()\n\t\terr = progress.read(ch)\n\t\tif err == nil && 
onProgress != nil {\n\t\t\tonProgress(progress)\n\t\t\treturn ch.receiveAndProcessData(onProgress)\n\t\t}\n\t\treturn progress, err\n\tcase serverHello:\n\t\terr = ch.serverInfo.read(ch.reader)\n\t\treturn nil, err\n\tcase serverPong:\n\t\treturn &pong{}, err\n\tcase serverException:\n\t\terr := &ChError{}\n\t\tdefer ch.Close()\n\t\tif errRead := err.read(ch.reader); errRead != nil {\n\t\t\treturn nil, errRead\n\t\t}\n\t\treturn nil, err\n\tcase serverEndOfStream:\n\t\treturn nil, nil\n\n\tcase serverTableColumns:\n\t\tch.readTableColumn()\n\t\treturn ch.receiveAndProcessData(onProgress)\n\tcase serverProfileEvents:\n\t\tch.block.reset()\n\t\toldCompress := ch.compress\n\t\tdefer func() {\n\t\t\tch.compress = oldCompress\n\t\t}()\n\t\tch.compress = false\n\t\terr = ch.block.read(ch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr := ch.profileEvent.read(ch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn ch.profileEvent, nil\n\t}\n\treturn nil, &notImplementedPacket{packet: packet}\n}\n\nvar emptyOnProgress = func(*Progress) {\n\n}\n\nvar emptyQueryOptions = &QueryOptions{\n\tOnProgress: emptyOnProgress,\n}\n\ntype QueryOptions struct {\n\tQueryID        string\n\tSettings       Settings\n\tOnProgress     func(*Progress)\n\tOnProfile      func(*Profile)\n\tOnProfileEvent func(*ProfileEvent)\n\tParameters     *Parameters\n\tUseGoTime      bool\n}\n\nfunc (ch *conn) Exec(ctx context.Context, query string) error {\n\treturn ch.ExecWithOption(ctx, query, nil)\n}\n\nfunc (ch *conn) ExecWithOption(\n\tctx context.Context,\n\tquery string,\n\tqueryOptions *QueryOptions,\n) error {\n\terr := ch.lock()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tch.unlock()\n\t\tif err != nil {\n\t\t\tch.Close()\n\t\t}\n\t}()\n\n\tif ctx != context.Background() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn newContextAlreadyDoneError(ctx)\n\t\tdefault:\n\t\t}\n\t\tch.contextWatcher.Watch(ctx)\n\t\tdefer 
ch.contextWatcher.Unwatch()\n\t}\n\n\tif queryOptions == nil {\n\t\tqueryOptions = emptyQueryOptions\n\t}\n\n\terr = ch.sendQueryWithOption(query, queryOptions.QueryID, queryOptions.Settings, queryOptions.Parameters)\n\tif err != nil {\n\t\treturn preferContextOverNetTimeoutError(ctx, err)\n\t}\n\tif queryOptions.OnProgress == nil {\n\t\tqueryOptions.OnProgress = emptyOnProgress\n\t}\n\n\t_, err = ch.receiveAndProcessData(queryOptions.OnProgress)\n\treturn preferContextOverNetTimeoutError(ctx, err)\n}\n"
  },
  {
    "path": "chconn_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestConnect(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\") + \" connect_timeout=10\"\n\n\tconn, err := Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, conn.Ping(context.Background()))\n\n\trequire.NotEmpty(t, conn.ServerInfo().String())\n\n\trequire.Nil(t, conn.Close())\n\trequire.True(t, conn.IsClosed())\n\t// test protected two close\n\n\trequire.Nil(t, conn.Close())\n}\n\nfunc TestConnectError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\tconfig.Password = \"invalid password\"\n\tconfig.User = \"invalid username\"\n\tconn, err := ConnectConfig(context.Background(), config)\n\tassert.Contains(t,\n\t\terr.Error(),\n\t\t\"server error ( DB::Exception (516)\")\n\tassert.Contains(t,\n\t\terrors.Unwrap(err).Error(),\n\t\t\" DB::Exception (516):\")\n\tassert.Nil(t, conn)\n\n\tconn, err = Connect(context.Background(), \"host>0\")\n\tassert.EqualError(t,\n\t\terr,\n\t\t\"cannot parse `host>0`: failed to parse as DSN (invalid dsn)\")\n\tassert.Nil(t, conn)\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\tconn, err = Connect(ctx, connString)\n\tassert.Error(t,\n\t\terrors.Unwrap(err),\n\t\tcontext.Canceled)\n\tassert.Nil(t, conn)\n\n\tconn, err = Connect(context.Background(), \"host=invalid_host\")\n\tassert.Contains(t,\n\t\terr.Error(),\n\t\t\"hostname resolving error\")\n\tassert.Nil(t, conn)\n\n\tconfig, err = ParseConfig(connString)\n\trequire.NoError(t, err)\n\tconfig.Port = 63666\n\n\tconn, err = ConnectConfig(context.Background(), config)\n\tassert.Contains(t,\n\t\terr.Error(),\n\t\t\"connect: connection 
refused\")\n\tassert.Nil(t, conn)\n\n\tconfig, err = ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tconfig.AfterConnect = func(ctx context.Context, c Conn) error {\n\t\treturn errors.New(\"afterConnect err\")\n\t}\n\n\t_, err = ConnectConfig(context.Background(), config)\n\tassert.EqualError(t,\n\t\terrors.Unwrap(err),\n\t\t\"afterConnect err\")\n\n\tconfig, err = ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerErrorHelper{\n\t\t\terr:         errors.New(\"timeout\"),\n\t\t\tw:           w,\n\t\t\tnumberValid: 0,\n\t\t}\n\t}\n\n\t_, err = ConnectConfig(context.Background(), config)\n\tassert.EqualError(t, err, \"write hello: timeout\")\n}\n\nfunc TestEndOfStream(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(), `CREATE TABLE IF NOT EXISTS example (\n\t\t\t\tcountry_code FixedString(2),\n\t\t\t\tos_id        UInt8,\n\t\t\t\tbrowser_id   UInt8,\n\t\t\t\tcategories   Array(Int16),\n\t\t\t\taction_day   Date,\n\t\t\t\taction_time  DateTime\n\t\t\t) engine=Memory`)\n\n\trequire.NoError(t, err)\n}\n\nfunc TestException(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, conn.Ping(context.Background()))\n\terr = conn.Exec(context.Background(), `invalid query`)\n\n\tvar chError *ChError\n\trequire.True(t, errors.As(err, &chError))\n\trequire.Equal(t, chError.Code, ChErrorSyntaxError)\n\trequire.Equal(t, chError.Name, \"DB::Exception\")\n}\n\nfunc TestTlsPreferConnect(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_TLS_CONN_STRING\")\n\n\tif connString == \"\" {\n\t\tt.Skip(\"please set 
CHX_TEST_TCP_TLS_CONN_STRING env\")\n\t\treturn\n\t}\n\n\tconn, err := Connect(context.Background(), connString)\n\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, conn.Ping(context.Background()))\n\n\tif _, ok := conn.RawConn().(*tls.Conn); !ok {\n\t\tt.Error(\"not a TLS connection\")\n\t}\n\n\tconn.RawConn().Close()\n}\n\nfunc TestConnectConfigRequiresConnConfigFromParseConfig(t *testing.T) {\n\tt.Parallel()\n\n\tconfig := &Config{}\n\n\trequire.PanicsWithValue(t, \"config must be created by ParseConfig\", func() {\n\t\tConnectConfig(context.Background(), config)\n\t})\n}\n\nfunc TestLockError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tc, err := Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\tc.(*conn).status = connStatusBusy\n\trequire.EqualError(t, c.(*conn).lock(), \"conn busy\")\n\n\tc.(*conn).status = connStatusClosed\n\trequire.EqualError(t, c.(*conn).lock(), \"conn closed\")\n\n\tc.(*conn).status = connStatusUninitialized\n\trequire.EqualError(t, c.(*conn).lock(), \"conn uninitialized\")\n\n\tresSelect, err := c.Select(context.Background(), \"SET enable_http_compression=1\")\n\trequire.EqualError(t, err, \"conn uninitialized\")\n\trequire.Nil(t, resSelect)\n\trequire.EqualError(t, c.(*conn).lock(), \"conn uninitialized\")\n}\n\nfunc TestUnlockError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tc, err := Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\tc.(*conn).status = connStatusUninitialized\n\trequire.PanicsWithValue(t, \"BUG: cannot unlock unlocked connection\", func() {\n\t\tc.(*conn).unlock()\n\t})\n}\n\nfunc TestExecError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\tc.(*conn).status = 
connStatusUninitialized\n\terr = c.Exec(context.Background(), \"SET enable_http_compression=1\")\n\trequire.EqualError(t, err, \"conn uninitialized\")\n\trequire.EqualError(t, c.(*conn).lock(), \"conn uninitialized\")\n\tc.Close()\n\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerErrorHelper{\n\t\t\terr:         errors.New(\"timeout\"),\n\t\t\tw:           w,\n\t\t\tnumberValid: 1,\n\t\t}\n\t}\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), \"SET enable_http_compression=1\")\n\trequire.EqualError(t, err, \"write block info (timeout)\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestExecCtxError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\terr = c.Exec(ctx, \"select * from system.numbers limit 1\")\n\trequire.EqualError(t, err, \"timeout: context already done: context canceled\")\n\tassert.False(t, c.IsClosed())\n\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerSlowHelper{\n\t\t\tw:     w,\n\t\t\tsleep: time.Second,\n\t\t}\n\t}\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)\n\tdefer cancel()\n\terr = c.Exec(ctx, \"select * from system.numbers\")\n\trequire.EqualError(t, errors.Unwrap(err), \"context deadline exceeded\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestReceivePackError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\treturn &readErrorHelper{\n\t\t\terr:         
errors.New(\"timeout\"),\n\t\t\tr:           r,\n\t\t\tnumberValid: 13,\n\t\t}\n\t}\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\terr = c.Exec(context.Background(), `SELECT * FROM system.numbers limit 1`)\n\trequire.EqualError(t, err, \"packet: read packet type (timeout)\")\n\tassert.True(t, c.IsClosed())\n}\n"
  },
  {
    "path": "chpool/common_test.go",
    "content": "package chpool\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\n// Conn.Release is an asynchronous process that returns immediately. There is no signal when the actual work is\n// completed. To test something that relies on the actual work for Conn.Release being completed we must simply wait.\n// This function wraps the sleep so there is more meaning for the callers.\nfunc waitForReleaseToComplete() {\n\ttime.Sleep(500 * time.Millisecond)\n}\n\ntype execer interface {\n\tExec(ctx context.Context, sql string) error\n}\n\nfunc testExec(t *testing.T, db execer) {\n\terr := db.Exec(context.Background(), \"SET enable_http_compression=1\")\n\trequire.NoError(t, err)\n}\n\ntype selecter interface {\n\tSelect(ctx context.Context, query string, columns ...column.ColumnBasic) (chconn.SelectStmt, error)\n}\n\nfunc testSelect(t *testing.T, db selecter) {\n\tvar (\n\t\tnum []uint64\n\t)\n\tcol := column.New[uint64]()\n\tstmt, err := db.Select(context.Background(), \"SELECT * FROM system.numbers LIMIT 5;\", col)\n\trequire.NoError(t, err)\n\tfor stmt.Next() {\n\t\tassert.NoError(t, err)\n\t\tnum = col.Read(num)\n\t\tassert.NoError(t, err)\n\t}\n\tassert.NoError(t, stmt.Err())\n\tassert.Equal(t, 5, len(num))\n\tstmt.Close()\n\tassert.ElementsMatch(t, []uint64{0, 1, 2, 3, 4}, num)\n}\n\nfunc assertConfigsEqual(t *testing.T, expected, actual *Config, testName string) {\n\tif !assert.NotNil(t, expected) {\n\t\treturn\n\t}\n\tif !assert.NotNil(t, actual) {\n\t\treturn\n\t}\n\n\tassert.Equalf(t, expected.ConnString(), actual.ConnString(), \"%s - ConnString\", testName)\n\n\t// Can't test function equality, so just test that they are set or not.\n\tassert.Equalf(t, expected.AfterConnect == nil, actual.AfterConnect == nil, \"%s - AfterConnect\", testName)\n\tassert.Equalf(t, 
expected.BeforeAcquire == nil, actual.BeforeAcquire == nil, \"%s - BeforeAcquire\", testName)\n\tassert.Equalf(t, expected.AfterRelease == nil, actual.AfterRelease == nil, \"%s - AfterRelease\", testName)\n\n\tassert.Equalf(t, expected.MaxConnLifetime, actual.MaxConnLifetime, \"%s - MaxConnLifetime\", testName)\n\tassert.Equalf(t, expected.MaxConnIdleTime, actual.MaxConnIdleTime, \"%s - MaxConnIdleTime\", testName)\n\tassert.Equalf(t, expected.MaxConns, actual.MaxConns, \"%s - MaxConns\", testName)\n\tassert.Equalf(t, expected.MinConns, actual.MinConns, \"%s - MinConns\", testName)\n\tassert.Equalf(t, expected.HealthCheckPeriod, actual.HealthCheckPeriod, \"%s - HealthCheckPeriod\", testName)\n\n\tassertConnConfigsEqual(t, expected.ConnConfig, actual.ConnConfig, testName)\n}\n\nfunc assertConnConfigsEqual(t *testing.T, expected, actual *chconn.Config, testName string) {\n\tif !assert.NotNil(t, expected) {\n\t\treturn\n\t}\n\tif !assert.NotNil(t, actual) {\n\t\treturn\n\t}\n\n\tassert.Equalf(t, expected.ConnString(), actual.ConnString(), \"%s - ConnString\", testName)\n\n\tassert.Equalf(t, expected.Host, actual.Host, \"%s - Host\", testName)\n\tassert.Equalf(t, expected.Database, actual.Database, \"%s - Database\", testName)\n\tassert.Equalf(t, expected.Port, actual.Port, \"%s - Port\", testName)\n\tassert.Equalf(t, expected.User, actual.User, \"%s - User\", testName)\n\tassert.Equalf(t, expected.Password, actual.Password, \"%s - Password\", testName)\n\tassert.Equalf(t, expected.ConnectTimeout, actual.ConnectTimeout, \"%s - ConnectTimeout\", testName)\n\tassert.Equalf(t, expected.RuntimeParams, actual.RuntimeParams, \"%s - RuntimeParams\", testName)\n\n\t// Can't test function equality, so just test that they are set or not.\n\tassert.Equalf(t, expected.ValidateConnect == nil, actual.ValidateConnect == nil, \"%s - ValidateConnect\", testName)\n\tassert.Equalf(t, expected.AfterConnect == nil, actual.AfterConnect == nil, \"%s - AfterConnect\", testName)\n\n\tif 
assert.Equalf(t, expected.TLSConfig == nil, actual.TLSConfig == nil, \"%s - TLSConfig\", testName) {\n\t\tif expected.TLSConfig != nil {\n\t\t\tassert.Equalf(t,\n\t\t\t\texpected.TLSConfig.InsecureSkipVerify,\n\t\t\t\tactual.TLSConfig.InsecureSkipVerify,\n\t\t\t\t\"%s - TLSConfig InsecureSkipVerify\", testName)\n\t\t\tassert.Equalf(t,\n\t\t\t\texpected.TLSConfig.ServerName,\n\t\t\t\tactual.TLSConfig.ServerName,\n\t\t\t\t\"%s - TLSConfig ServerName\", testName)\n\t\t}\n\t}\n\n\tif assert.Equalf(t, len(expected.Fallbacks), len(actual.Fallbacks), \"%s - Fallbacks\", testName) {\n\t\tfor i := range expected.Fallbacks {\n\t\t\tassert.Equalf(t,\n\t\t\t\texpected.Fallbacks[i].Host,\n\t\t\t\tactual.Fallbacks[i].Host,\n\t\t\t\t\"%s - Fallback %d - Host\", testName, i)\n\t\t\tassert.Equalf(t,\n\t\t\t\texpected.Fallbacks[i].Port,\n\t\t\t\tactual.Fallbacks[i].Port,\n\t\t\t\t\"%s - Fallback %d - Port\", testName, i)\n\n\t\t\tif assert.Equalf(t,\n\t\t\t\texpected.Fallbacks[i].TLSConfig == nil,\n\t\t\t\tactual.Fallbacks[i].TLSConfig == nil,\n\t\t\t\t\"%s - Fallback %d - TLSConfig\", testName, i) {\n\t\t\t\tif expected.Fallbacks[i].TLSConfig != nil {\n\t\t\t\t\tassert.Equalf(t,\n\t\t\t\t\t\texpected.Fallbacks[i].TLSConfig.InsecureSkipVerify,\n\t\t\t\t\t\tactual.Fallbacks[i].TLSConfig.InsecureSkipVerify,\n\t\t\t\t\t\t\"%s - Fallback %d - TLSConfig InsecureSkipVerify\", testName)\n\t\t\t\t\tassert.Equalf(t,\n\t\t\t\t\t\texpected.Fallbacks[i].TLSConfig.ServerName,\n\t\t\t\t\t\tactual.Fallbacks[i].TLSConfig.ServerName,\n\t\t\t\t\t\t\"%s - Fallback %d - TLSConfig ServerName\", testName)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "chpool/conn.go",
    "content": "package chpool\n\nimport (\n\t\"context\"\n\t\"sync/atomic\"\n\n\tpuddle \"github.com/jackc/puddle/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\n// Conn is an acquired *chconn.Conn from a Pool.\ntype Conn interface {\n\tRelease()\n\t// ExecWithOption executes a query without returning any rows with Query options.\n\t// NOTE: don't use it for insert and select query\n\tExecWithOption(\n\t\tctx context.Context,\n\t\tquery string,\n\t\tqueryOptions *chconn.QueryOptions,\n\t) error\n\t// Select executes a query with the the query options and return select stmt.\n\t// NOTE: only use for select query\n\tSelectWithOption(\n\t\tctx context.Context,\n\t\tquery string,\n\t\tqueryOptions *chconn.QueryOptions,\n\t\tcolumns ...column.ColumnBasic,\n\t) (chconn.SelectStmt, error)\n\t// InsertWithSetting executes a query with the query options and commit all columns data.\n\t// NOTE: only use for insert query\n\tInsertWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic) error\n\t// InsertWithSetting executes a query with the query options and commit all columns data.\n\t// NOTE: only use for insert query\n\tInsertStreamWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions) (chconn.InsertStmt, error)\n\t// Conn get the underlying chconn.Conn\n\tConn() chconn.Conn\n\t// Hijack assumes ownership of the connection from the pool. Caller is responsible for closing the connection. Hijack\n\t// will panic if called on an already released or hijacked connection.\n\tHijack() chconn.Conn\n\tPing(ctx context.Context) error\n}\ntype conn struct {\n\tres *puddle.Resource[*connResource]\n\tp   *pool\n}\n\n// Release returns c to the pool it was acquired from. Once Release has been called, other methods must not be called.\n// However, it is safe to call Release multiple times. 
Subsequent calls after the first will be ignored.\nfunc (c *conn) Release() {\n\tif c.res == nil {\n\t\treturn\n\t}\n\n\tconn := c.Conn()\n\tres := c.res\n\tc.res = nil\n\n\tif conn.IsClosed() || conn.IsBusy() {\n\t\tres.Destroy()\n\t\t// Signal to the health check to run since we just destroyed a connections\n\t\t// and we might be below minConns now\n\t\tc.p.triggerHealthCheck()\n\t\treturn\n\t}\n\n\t// If the pool is consistently being used, we might never get to check the\n\t// lifetime of a connection since we only check idle connections in checkConnsHealth\n\t// so we also check the lifetime here and force a health check\n\tif c.p.isExpired(res) {\n\t\tatomic.AddInt64(&c.p.lifetimeDestroyCount, 1)\n\t\tres.Destroy()\n\t\t// Signal to the health check to run since we just destroyed a connections\n\t\t// and we might be below minConns now\n\t\tc.p.triggerHealthCheck()\n\t\treturn\n\t}\n\n\tif c.p.afterRelease == nil {\n\t\tres.Release()\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tif c.p.afterRelease(conn) {\n\t\t\tres.Release()\n\t\t} else {\n\t\t\tres.Destroy()\n\t\t\t// Signal to the health check to run since we just destroyed a connections\n\t\t\t// and we might be below minConns now\n\t\t\tc.p.triggerHealthCheck()\n\t\t}\n\t}()\n}\n\n// Hijack assumes ownership of the connection from the pool. Caller is responsible for closing the connection. 
Hijack\n// will panic if called on an already released or hijacked connection.\nfunc (c *conn) Hijack() chconn.Conn {\n\tif c.res == nil {\n\t\tpanic(\"cannot hijack already released or hijacked connection\")\n\t}\n\n\tconn := c.Conn()\n\tres := c.res\n\tc.res = nil\n\n\tres.Hijack()\n\n\treturn conn\n}\n\nfunc (c *conn) ExecWithOption(\n\tctx context.Context,\n\tquery string,\n\tqueryOptions *chconn.QueryOptions,\n) error {\n\treturn c.Conn().ExecWithOption(ctx, query, queryOptions)\n}\n\nfunc (c *conn) Ping(ctx context.Context) error {\n\treturn c.Conn().Ping(ctx)\n}\n\nfunc (c *conn) SelectWithOption(\n\tctx context.Context,\n\tquery string,\n\tqueryOptions *chconn.QueryOptions,\n\tcolumns ...column.ColumnBasic,\n) (chconn.SelectStmt, error) {\n\ts, err := c.Conn().SelectWithOption(ctx, query, queryOptions, columns...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &selectStmt{\n\t\tSelectStmt: s,\n\t\tconn:       c,\n\t}, nil\n}\n\nfunc (c *conn) InsertWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic) error {\n\treturn c.Conn().InsertWithOption(ctx, query, queryOptions, columns...)\n}\nfunc (c *conn) InsertStreamWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions) (chconn.InsertStmt, error) {\n\ts, err := c.Conn().InsertStreamWithOption(ctx, query, queryOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &insertStmt{\n\t\tInsertStmt: s,\n\t\tconn:       c,\n\t}, nil\n}\n\nfunc (c *conn) Conn() chconn.Conn {\n\treturn c.connResource().conn\n}\n\nfunc (c *conn) connResource() *connResource {\n\treturn c.res.Value()\n}\n"
  },
  {
    "path": "chpool/insert_stmt.go",
    "content": "package chpool\n\nimport (\n\t\"context\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n)\n\ntype insertStmt struct {\n\tchconn.InsertStmt\n\tconn Conn\n}\n\nfunc (s *insertStmt) Flush(ctx context.Context) error {\n\tif s.conn == nil {\n\t\treturn nil\n\t}\n\tdefer s.conn.Release()\n\treturn s.InsertStmt.Flush(ctx)\n}\n\nfunc (s *insertStmt) Close() {\n\tif s.conn == nil {\n\t\treturn\n\t}\n\ts.InsertStmt.Close()\n\ts.conn.Release()\n}\n"
  },
  {
    "path": "chpool/pool.go",
    "content": "package chpool\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\tpuddle \"github.com/jackc/puddle/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nvar defaultMaxConns = int32(4)\nvar defaultMinConns = int32(0)\nvar defaultCreateIdleTimeout = time.Second * 10\nvar defaultMaxConnLifetime = time.Hour\nvar defaultMaxConnIdleTime = time.Minute * 30\nvar defaultHealthCheckPeriod = time.Minute\n\ntype connResource struct {\n\tconn  chconn.Conn\n\tconns []conn\n}\n\nfunc (cr *connResource) getConn(p *pool, res *puddle.Resource[*connResource]) Conn {\n\tif len(cr.conns) == 0 {\n\t\tcr.conns = make([]conn, 128)\n\t}\n\n\tc := &cr.conns[len(cr.conns)-1]\n\tcr.conns = cr.conns[0 : len(cr.conns)-1]\n\n\tc.res = res\n\tc.p = p\n\n\treturn c\n}\n\n// Pool is a connection pool for chconn\ntype Pool interface {\n\t// Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned\n\t// to pool and closed.\n\tClose()\n\tAcquire(ctx context.Context) (Conn, error)\n\t// AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the\n\t// call of f. The return value is either an error acquiring the Conn or the return value of f. The Conn is\n\t// automatically released after the call of f.\n\tAcquireFunc(ctx context.Context, f func(Conn) error) error\n\t// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and\n\t// keep-alive functionality. 
It does not update pool statistics.\n\tAcquireAllIdle(ctx context.Context) []Conn\n\t// Exec executes a query without returning any rows.\n\t// NOTE: don't use it for insert and select query\n\tExec(ctx context.Context, query string) error\n\t// ExecWithOption executes a query without returning any rows with Query options.\n\t// NOTE: don't use it for insert and select query\n\tExecWithOption(\n\t\tctx context.Context,\n\t\tquery string,\n\t\tqueryOptions *chconn.QueryOptions,\n\t) error\n\t// Insert executes a insert query and commit all columns data.\n\t//\n\t// If the query is successful, the columns buffer will be reset.\n\t//\n\t// NOTE: only use for insert query\n\tInsert(ctx context.Context, query string, columns ...column.ColumnBasic) error\n\t// InsertWithOption executes a insert query with the query options and commit all columns data.\n\t//\n\t// If the query is successful, the columns buffer will be reset.\n\t//\n\t// NOTE: only use for insert query\n\tInsertWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic) error\n\t// Insert executes a insert query and return a InsertStmt.\n\t//\n\t// NOTE: only use for insert query\n\tInsertStream(ctx context.Context, query string) (chconn.InsertStmt, error)\n\t// InsertWithOption executes a insert query with the query options and return a InsertStmt.\n\t//\n\t// If the query is successful, the columns buffer will be reset.\n\t//\n\t// NOTE: only use for insert query\n\tInsertStreamWithOption(\n\t\tctx context.Context,\n\t\tquery string,\n\t\tqueryOptions *chconn.QueryOptions) (chconn.InsertStmt, error)\n\t// Select executes a query and return select stmt.\n\t//\n\t// NOTE: only use for select query\n\tSelect(ctx context.Context, query string, columns ...column.ColumnBasic) (chconn.SelectStmt, error)\n\t// Select executes a query with the the query options and return select stmt.\n\t//\n\t// NOTE: only use for select query\n\tSelectWithOption(\n\t\tctx 
context.Context,\n\t\tquery string,\n\t\tqueryOptions *chconn.QueryOptions,\n\t\tcolumns ...column.ColumnBasic,\n\t) (chconn.SelectStmt, error)\n\t// Ping sends a ping to check that the connection to the server is alive.\n\tPing(ctx context.Context) error\n\t// Stat returns a chpool.Stat struct with a snapshot of Pool statistics.\n\tStat() *Stat\n\t// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would\n\t// disrupt all connections (such as a network interruption or a server state change).\n\t//\n\t// It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned\n\t// to the pool.\n\tReset()\n\t// Config returns a copy of config that was used to initialize this pool.\n\tConfig() *Config\n}\ntype pool struct {\n\tp                     *puddle.Pool[*connResource]\n\tconfig                *Config\n\tbeforeConnect         func(context.Context, *chconn.Config) error\n\tafterConnect          func(context.Context, chconn.Conn) error\n\tbeforeAcquire         func(context.Context, chconn.Conn) bool\n\tafterRelease          func(chconn.Conn) bool\n\tminConns              int32\n\tmaxConns              int32\n\tmaxConnLifetime       time.Duration\n\tmaxConnLifetimeJitter time.Duration\n\tmaxConnIdleTime       time.Duration\n\thealthCheckPeriod     time.Duration\n\n\thealthCheckChan chan struct{}\n\n\tnewConnsCount        int64\n\tlifetimeDestroyCount int64\n\tidleDestroyCount     int64\n\n\tcloseOnce sync.Once\n\tcloseChan chan struct{}\n}\n\n// Config is the configuration struct for creating a pool. It must be created by ParseConfig and then it can be\n// modified. A manually initialized Config will cause ConnectConfig to panic.\ntype Config struct {\n\tConnConfig *chconn.Config\n\n\t// BeforeConnect is called before a new connection is made. 
It is passed a copy of the underlying chconn.Config and\n\t// will not impact any existing open connections.\n\tBeforeConnect func(context.Context, *chconn.Config) error\n\n\t// AfterConnect is called after a connection is established, but before it is added to the pool.\n\tAfterConnect func(context.Context, chconn.Conn) error\n\n\t// BeforeAcquire is called before a connection is acquired from the pool. It must return true to allow the\n\t// acquisition or false to indicate that the connection should be destroyed and a different connection should be\n\t// acquired.\n\tBeforeAcquire func(context.Context, chconn.Conn) bool\n\n\t// AfterRelease is called after a connection is released, but before it is returned to the pool. It must return true to\n\t// return the connection to the pool or false to destroy the connection.\n\tAfterRelease func(chconn.Conn) bool\n\n\t// MaxConnLifetime is the duration since creation after which a connection will be automatically closed.\n\tMaxConnLifetime time.Duration\n\n\t// MaxConnLifetimeJitter is the duration after MaxConnLifetime to randomly decide to close a connection.\n\t// This helps prevent all connections from being closed at the exact same time, starving the pool.\n\tMaxConnLifetimeJitter time.Duration\n\n\t// MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check.\n\tMaxConnIdleTime time.Duration\n\n\t// MaxConns is the maximum size of the pool. The default is the greater of 4 or runtime.NumCPU().\n\tMaxConns int32\n\n\t// MinConns is the minimum size of the pool. After connection closes, the pool might dip below MinConns. 
A low\n\t// number of MinConns might mean the pool is empty after MaxConnLifetime until the health check has a chance\n\t// to create new connections.\n\tMinConns int32\n\n\t// HealthCheckPeriod is the duration between checks of the health of idle connections.\n\tHealthCheckPeriod time.Duration\n\n\t// CreateIdleTimeout is  the timeout for create idle connection\n\tCreateIdleTimeout time.Duration\n\n\tcreatedByParseConfig bool // Used to enforce created by ParseConfig rule.\n}\n\n// Copy returns a deep copy of the config that is safe to use and modify.\n// The only exception is the tls.Config:\n// according to the tls.Config docs it must not be modified after creation.\nfunc (c *Config) Copy() *Config {\n\tnewConfig := new(Config)\n\t*newConfig = *c\n\tnewConfig.ConnConfig = c.ConnConfig.Copy()\n\treturn newConfig\n}\n\n// ConnString returns the connection string as parsed by pgxpool.ParseConfig into pgxpool.Config.\nfunc (c *Config) ConnString() string { return c.ConnConfig.ConnString() }\n\n// New creates a new Pool. See ParseConfig for information on connString format.\nfunc New(connString string) (Pool, error) {\n\tconfig, err := ParseConfig(connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewWithConfig(config)\n}\n\n// NewWithConfig creates a new Pool. config must have been created by ParseConfig.\nfunc NewWithConfig(config *Config) (Pool, error) {\n\t// Default values are set in ParseConfig. 
Enforce initial creation by ParseConfig rather than setting defaults from\n\t// zero values.\n\tif !config.createdByParseConfig {\n\t\tpanic(\"config must be created by ParseConfig\")\n\t}\n\n\tp := &pool{\n\t\tconfig:                config,\n\t\tbeforeConnect:         config.BeforeConnect,\n\t\tafterConnect:          config.AfterConnect,\n\t\tbeforeAcquire:         config.BeforeAcquire,\n\t\tafterRelease:          config.AfterRelease,\n\t\tminConns:              config.MinConns,\n\t\tmaxConns:              config.MaxConns,\n\t\tmaxConnLifetime:       config.MaxConnLifetime,\n\t\tmaxConnLifetimeJitter: config.MaxConnLifetimeJitter,\n\t\tmaxConnIdleTime:       config.MaxConnIdleTime,\n\t\thealthCheckPeriod:     config.HealthCheckPeriod,\n\t\thealthCheckChan:       make(chan struct{}, 1),\n\t\tcloseChan:             make(chan struct{}),\n\t}\n\n\tvar err error\n\tp.p, err = puddle.NewPool(\n\t\t&puddle.Config[*connResource]{\n\t\t\tConstructor: func(ctx context.Context) (*connResource, error) {\n\t\t\t\tconnConfig := p.config.ConnConfig.Copy()\n\n\t\t\t\t// Connection will continue in background even if Acquire is canceled. 
Ensure that a connect won't hang forever.\n\t\t\t\tif connConfig.ConnectTimeout <= 0 {\n\t\t\t\t\tconnConfig.ConnectTimeout = 2 * time.Minute\n\t\t\t\t}\n\n\t\t\t\tif p.beforeConnect != nil {\n\t\t\t\t\tif err := p.beforeConnect(ctx, connConfig); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tc, err := chconn.ConnectConfig(ctx, connConfig)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tif p.afterConnect != nil {\n\t\t\t\t\terr := p.afterConnect(ctx, c)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tc.Close()\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcr := &connResource{\n\t\t\t\t\tconn:  c,\n\t\t\t\t\tconns: make([]conn, 64),\n\t\t\t\t}\n\n\t\t\t\treturn cr, nil\n\t\t\t},\n\t\t\tDestructor: func(value *connResource) {\n\t\t\t\tvalue.conn.Close()\n\t\t\t},\n\t\t\tMaxSize: config.MaxConns,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo func() {\n\t\t//nolint:errcheck // todo find a way to handle this error\n\t\tp.createIdleResources(int(p.minConns))\n\t\tp.backgroundHealthCheck()\n\t}()\n\n\treturn p, nil\n}\n\n// ParseConfig builds a Config from connString. 
It parses connString with the same behavior as chconn.ParseConfig with the\n// addition of the following variables:\n//\n// pool_max_conns: integer greater than 0\n// pool_min_conns: integer 0 or greater\n// pool_max_conn_lifetime: duration string\n// pool_max_conn_idle_time: duration string\n// pool_health_check_period: duration string\n// pool_max_conn_lifetime_jitter: duration string\n// pool_create_idle_timeout: duration string\n//\n// See Config for definitions of these arguments.\n//\n//\t# Example DSN\n//\tuser=vahid password=secret host=clickhouse.example.com port=9000 dbname=mydb sslmode=verify-ca pool_max_conns=10\n//\n//\t# Example URL\n//\tclickhouse://vahid:secret@ch.example.com:9000/mydb?sslmode=verify-ca&pool_max_conns=10\nfunc ParseConfig(connString string) (*Config, error) {\n\tchConfig, err := chconn.ParseConfig(connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := &Config{\n\t\tConnConfig:           chConfig,\n\t\tcreatedByParseConfig: true,\n\t}\n\n\tif s, ok := config.ConnConfig.RuntimeParams[\"pool_max_conns\"]; ok {\n\t\tdelete(config.ConnConfig.RuntimeParams, \"pool_max_conns\")\n\t\tn, err := strconv.ParseInt(s, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot parse pool_max_conns: %w\", err)\n\t\t}\n\t\tif n < 1 {\n\t\t\t//nolint:goerr113\n\t\t\treturn nil, fmt.Errorf(\"pool_max_conns too small: %d\", n)\n\t\t}\n\t\tconfig.MaxConns = int32(n)\n\t} else {\n\t\tconfig.MaxConns = defaultMaxConns\n\t\tif numCPU := int32(runtime.NumCPU()); numCPU > config.MaxConns {\n\t\t\tconfig.MaxConns = numCPU\n\t\t}\n\t}\n\n\tif s, ok := config.ConnConfig.RuntimeParams[\"pool_min_conns\"]; ok {\n\t\tdelete(config.ConnConfig.RuntimeParams, \"pool_min_conns\")\n\t\tn, err := strconv.ParseInt(s, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot parse pool_min_conns: %w\", err)\n\t\t}\n\t\tconfig.MinConns = int32(n)\n\t} else {\n\t\tconfig.MinConns = defaultMinConns\n\t}\n\n\tif s, ok := 
config.ConnConfig.RuntimeParams[\"pool_max_conn_lifetime\"]; ok {\n\t\tdelete(config.ConnConfig.RuntimeParams, \"pool_max_conn_lifetime\")\n\t\td, err := time.ParseDuration(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pool_max_conn_lifetime: %w\", err)\n\t\t}\n\t\tconfig.MaxConnLifetime = d\n\t} else {\n\t\tconfig.MaxConnLifetime = defaultMaxConnLifetime\n\t}\n\n\tif s, ok := config.ConnConfig.RuntimeParams[\"pool_max_conn_idle_time\"]; ok {\n\t\tdelete(config.ConnConfig.RuntimeParams, \"pool_max_conn_idle_time\")\n\t\td, err := time.ParseDuration(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pool_max_conn_idle_time: %w\", err)\n\t\t}\n\t\tconfig.MaxConnIdleTime = d\n\t} else {\n\t\tconfig.MaxConnIdleTime = defaultMaxConnIdleTime\n\t}\n\n\tif s, ok := config.ConnConfig.RuntimeParams[\"pool_health_check_period\"]; ok {\n\t\tdelete(config.ConnConfig.RuntimeParams, \"pool_health_check_period\")\n\t\td, err := time.ParseDuration(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pool_health_check_period: %w\", err)\n\t\t}\n\t\tconfig.HealthCheckPeriod = d\n\t} else {\n\t\tconfig.HealthCheckPeriod = defaultHealthCheckPeriod\n\t}\n\n\tif s, ok := config.ConnConfig.RuntimeParams[\"pool_max_conn_lifetime_jitter\"]; ok {\n\t\tdelete(config.ConnConfig.RuntimeParams, \"pool_max_conn_lifetime_jitter\")\n\t\td, err := time.ParseDuration(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pool_max_conn_lifetime_jitter: %w\", err)\n\t\t}\n\t\tconfig.MaxConnLifetimeJitter = d\n\t}\n\n\tif s, ok := config.ConnConfig.RuntimeParams[\"pool_create_idle_timeout\"]; ok {\n\t\tdelete(config.ConnConfig.RuntimeParams, \"pool_create_idle_timeout\")\n\t\td, err := time.ParseDuration(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pool_create_idle_timeout: %w\", err)\n\t\t}\n\t\tconfig.CreateIdleTimeout = d\n\t} else {\n\t\tconfig.CreateIdleTimeout = defaultCreateIdleTimeout\n\t}\n\n\treturn config, nil\n}\n\n// 
Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned\n// to pool and closed.\nfunc (p *pool) Close() {\n\tp.closeOnce.Do(func() {\n\t\tclose(p.closeChan)\n\t\tp.p.Close()\n\t})\n}\n\nfunc (p *pool) isExpired(res *puddle.Resource[*connResource]) bool {\n\tnow := time.Now()\n\t// Small optimization to avoid rand. If it's over lifetime AND jitter, immediately\n\t// return true.\n\tif now.Sub(res.CreationTime()) > p.maxConnLifetime+p.maxConnLifetimeJitter {\n\t\treturn true\n\t}\n\tif p.maxConnLifetimeJitter == 0 {\n\t\treturn false\n\t}\n\t//nolint:gosec // rand is not used for security purposes\n\tjitterSecs := rand.Float64() * p.maxConnLifetimeJitter.Seconds()\n\treturn now.Sub(res.CreationTime()) > p.maxConnLifetime+(time.Duration(jitterSecs)*time.Second)\n}\n\nfunc (p *pool) triggerHealthCheck() {\n\tgo func() {\n\t\t// Destroy is asynchronous so we give it time to actually remove itself from\n\t\t// the pool otherwise we might try to check the pool size too soon\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tselect {\n\t\tcase p.healthCheckChan <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}()\n}\n\nfunc (p *pool) backgroundHealthCheck() {\n\tticker := time.NewTicker(p.healthCheckPeriod)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-p.closeChan:\n\t\t\treturn\n\t\tcase <-p.healthCheckChan:\n\t\t\tp.checkHealth()\n\t\tcase <-ticker.C:\n\t\t\tp.checkHealth()\n\t\t}\n\t}\n}\n\nfunc (p *pool) checkHealth() {\n\tfor {\n\t\t// If checkMinConns failed we don't destroy any connections since we couldn't\n\t\t// even get to minConns\n\t\tif err := p.checkMinConns(); err != nil {\n\t\t\t// Should we log this error somewhere?\n\t\t\tbreak\n\t\t}\n\t\tif !p.checkConnsHealth() {\n\t\t\t// Since we didn't destroy any connections we can stop looping\n\t\t\tbreak\n\t\t}\n\t\t// Technically Destroy is asynchronous but 500ms should be enough for it to\n\t\t// remove it from the underlying pool\n\t\tselect 
{\n\t\tcase <-p.closeChan:\n\t\t\treturn\n\t\tcase <-time.After(500 * time.Millisecond):\n\t\t}\n\t}\n}\n\n// checkConnsHealth will check all idle connections, destroy a connection if\n// it's idle or too old, and returns true if any were destroyed\nfunc (p *pool) checkConnsHealth() bool {\n\tvar destroyed bool\n\ttotalConns := p.Stat().TotalConns()\n\tresources := p.p.AcquireAllIdle()\n\tfor _, res := range resources {\n\t\t// We're okay going under minConns if the lifetime is up\n\t\tif p.isExpired(res) && totalConns >= p.minConns {\n\t\t\tatomic.AddInt64(&p.lifetimeDestroyCount, 1)\n\t\t\tres.Destroy()\n\t\t\tdestroyed = true\n\t\t\t// Since Destroy is async we manually decrement totalConns.\n\t\t\ttotalConns--\n\t\t} else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {\n\t\t\tatomic.AddInt64(&p.idleDestroyCount, 1)\n\t\t\tres.Destroy()\n\t\t\tdestroyed = true\n\t\t\t// Since Destroy is async we manually decrement totalConns.\n\t\t\ttotalConns--\n\t\t} else {\n\t\t\tres.ReleaseUnused()\n\t\t}\n\t}\n\treturn destroyed\n}\n\nfunc (p *pool) checkMinConns() error {\n\t// TotalConns can include ones that are being destroyed but we should have\n\t// sleep(500ms) around all of the destroys to help prevent that from throwing\n\t// off this check\n\ttoCreate := p.minConns - p.Stat().TotalConns()\n\tif toCreate > 0 {\n\t\treturn p.createIdleResources(int(toCreate))\n\t}\n\treturn nil\n}\n\nfunc (p *pool) createIdleResources(targetResources int) error {\n\tctx, cancel := context.WithTimeout(context.Background(), p.config.CreateIdleTimeout)\n\tdefer cancel()\n\n\terrs := make(chan error, targetResources)\n\n\tfor i := 0; i < targetResources; i++ {\n\t\tgo func() {\n\t\t\tatomic.AddInt64(&p.newConnsCount, 1)\n\t\t\terr := p.p.CreateResource(ctx)\n\t\t\terrs <- err\n\t\t}()\n\t}\n\n\tvar firstError error\n\tfor i := 0; i < targetResources; i++ {\n\t\terr := <-errs\n\t\tif err != nil && firstError == nil {\n\t\t\tcancel()\n\t\t\tfirstError = 
err\n\t\t}\n\t}\n\n\treturn firstError\n}\n\n// Acquire returns a connection (Conn) from the Pool\nfunc (p *pool) Acquire(ctx context.Context) (Conn, error) {\n\tfor {\n\t\tres, err := p.p.Acquire(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"acquire: %w\", err)\n\t\t}\n\n\t\tcr := res.Value()\n\n\t\tif res.IdleDuration() > time.Second {\n\t\t\terr := cr.conn.Ping(ctx)\n\t\t\tif err != nil {\n\t\t\t\tres.Destroy()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {\n\t\t\treturn cr.getConn(p, res), nil\n\t\t}\n\n\t\tres.Destroy()\n\t}\n}\n\n// AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the\n// call of f. The return value is either an error acquiring the *Conn or the return value of f. The *Conn is\n// automatically released after the call of f.\nfunc (p *pool) AcquireFunc(ctx context.Context, f func(Conn) error) error {\n\tconn, err := p.Acquire(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Release()\n\n\treturn f(conn)\n}\n\n// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and\n// keep-alive functionality. It does not update pool statistics.\nfunc (p *pool) AcquireAllIdle(ctx context.Context) []Conn {\n\tresources := p.p.AcquireAllIdle()\n\tconns := make([]Conn, 0, len(resources))\n\tfor _, res := range resources {\n\t\tcr := res.Value()\n\t\tif p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {\n\t\t\tconns = append(conns, cr.getConn(p, res))\n\t\t} else {\n\t\t\tres.Destroy()\n\t\t}\n\t}\n\n\treturn conns\n}\n\n// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would\n// disrupt all connections (such as a network interruption or a server state change).\n//\n// It is safe to reset a pool while connections are checked out. 
Those connections will be closed when they are returned\n// to the pool.\nfunc (p *pool) Reset() {\n\tp.p.Reset()\n}\n\n// Config returns a copy of config that was used to initialize this pool.\nfunc (p *pool) Config() *Config { return p.config.Copy() }\n\n// Stat returns a chpool.Stat struct with a snapshot of Pool statistics.\nfunc (p *pool) Stat() *Stat {\n\treturn &Stat{\n\t\ts:                    p.p.Stat(),\n\t\tnewConnsCount:        atomic.LoadInt64(&p.newConnsCount),\n\t\tlifetimeDestroyCount: atomic.LoadInt64(&p.lifetimeDestroyCount),\n\t\tidleDestroyCount:     atomic.LoadInt64(&p.idleDestroyCount),\n\t}\n}\n\nfunc (p *pool) Exec(ctx context.Context, query string) error {\n\treturn p.ExecWithOption(ctx, query, nil)\n}\n\nfunc (p *pool) ExecWithOption(\n\tctx context.Context,\n\tquery string,\n\tqueryOptions *chconn.QueryOptions,\n) error {\n\tfor {\n\t\tc, err := p.Acquire(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = c.ExecWithOption(ctx, query, queryOptions)\n\t\tc.Release()\n\t\tif errors.Is(err, syscall.EPIPE) {\n\t\t\tcontinue\n\t\t}\n\t\treturn err\n\t}\n}\n\nfunc (p *pool) Select(ctx context.Context, query string, columns ...column.ColumnBasic) (chconn.SelectStmt, error) {\n\treturn p.SelectWithOption(ctx, query, nil, columns...)\n}\n\nfunc (p *pool) SelectWithOption(\n\tctx context.Context,\n\tquery string,\n\tqueryOptions *chconn.QueryOptions,\n\tcolumns ...column.ColumnBasic,\n) (chconn.SelectStmt, error) {\n\tfor {\n\t\tc, err := p.Acquire(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ts, err := c.SelectWithOption(ctx, query, queryOptions, columns...)\n\t\tif err != nil {\n\t\t\tc.Release()\n\t\t\tif errors.Is(err, syscall.EPIPE) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s, nil\n\t}\n}\n\nfunc (p *pool) Insert(ctx context.Context, query string, columns ...column.ColumnBasic) error {\n\treturn p.InsertWithOption(ctx, query, nil, columns...)\n}\n\nfunc (p *pool) InsertWithOption(ctx 
context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic) error {\n\tfor {\n\t\tc, err := p.Acquire(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = c.InsertWithOption(ctx, query, queryOptions, columns...)\n\t\tc.Release()\n\t\tif err != nil && errors.Is(err, syscall.EPIPE) {\n\t\t\tcontinue\n\t\t}\n\t\treturn err\n\t}\n}\n\nfunc (p *pool) InsertStream(ctx context.Context, query string) (chconn.InsertStmt, error) {\n\treturn p.InsertStreamWithOption(ctx, query, nil)\n}\n\nfunc (p *pool) InsertStreamWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions) (chconn.InsertStmt, error) {\n\tfor {\n\t\tc, err := p.Acquire(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ts, err := c.InsertStreamWithOption(ctx, query, queryOptions)\n\t\tif err != nil {\n\t\t\tc.Release()\n\t\t\tif errors.Is(err, syscall.EPIPE) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s, nil\n\t}\n}\n\n// Ping acquires a connection from the Pool and send ping\n// If returns without error, the database Ping is considered successful, otherwise, the error is returned.\nfunc (p *pool) Ping(ctx context.Context) error {\n\tfor {\n\t\tc, err := p.Acquire(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = c.Ping(ctx)\n\t\tc.Release()\n\t\tif errors.Is(err, syscall.EPIPE) {\n\t\t\tcontinue\n\t\t}\n\t\treturn err\n\t}\n}\n"
  },
  {
    "path": "chpool/pool_test.go",
    "content": "package chpool\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc TestNew(t *testing.T) {\n\tt.Parallel()\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\tpool, err := New(connString)\n\trequire.NoError(t, err)\n\tassert.Equal(t, connString, pool.Config().ConnString())\n\tpool.Close()\n}\n\nfunc TestNewWithConfig(t *testing.T) {\n\tt.Parallel()\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\tpool, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tassertConfigsEqual(t, config, pool.Config(), \"Pool.Config() returns original config\")\n\tpool.Close()\n}\n\nfunc TestParseConfigExtractsPoolArguments(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(`pool_max_conns=42\n\t\t\t\t\t\t\t\tpool_min_conns=1\n\t\t\t\t\t\t\t\tpool_max_conn_lifetime=30s\n\t\t\t\t\t\t\t\tpool_max_conn_idle_time=31s\n\t\t\t\t\t\t\t\tpool_health_check_period=32s`)\n\tassert.NoError(t, err)\n\tassert.EqualValues(t, 42, config.MaxConns)\n\tassert.EqualValues(t, 1, config.MinConns)\n\tassert.EqualValues(t, time.Second*30, config.MaxConnLifetime)\n\tassert.EqualValues(t, time.Second*31, config.MaxConnIdleTime)\n\tassert.EqualValues(t, time.Second*32, config.HealthCheckPeriod)\n\n\tassert.NotContains(t, config.ConnConfig.RuntimeParams, \"pool_max_conns\")\n\tassert.NotContains(t, config.ConnConfig.RuntimeParams, \"pool_min_conns\")\n\tassert.NotContains(t, config.ConnConfig.RuntimeParams, \"pool_max_conn_lifetime\")\n\tassert.NotContains(t, config.ConnConfig.RuntimeParams, \"pool_max_conn_idle_time\")\n\tassert.NotContains(t, config.ConnConfig.RuntimeParams, \"pool_health_check_period\")\n}\n\nfunc 
TestConnectConfigRequiresConnConfigFromParseConfig(t *testing.T) {\n\tt.Parallel()\n\n\tconfig := &Config{}\n\n\trequire.PanicsWithValue(t, \"config must be created by ParseConfig\", func() {\n\t\tNewWithConfig(config)\n\t})\n}\n\nfunc TestConfigCopyReturnsEqualConfig(t *testing.T) {\n\tconnString := \"clickhouse://vahid:secret@localhost:9000/mydb?client_name=chxtest&connect_timeout=5\"\n\toriginal, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tcopied := original.Copy()\n\n\tassertConfigsEqual(t, original, copied, t.Name())\n}\n\nfunc TestConfigCopyCanBeUsedToNew(t *testing.T) {\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\toriginal, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tcopied := original.Copy()\n\tassert.NotPanics(t, func() {\n\t\t_, err = NewWithConfig(copied)\n\t})\n\tassert.NoError(t, err)\n}\n\nfunc TestPoolAcquireAndConnRelease(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\tc, err := pool.Acquire(context.Background())\n\trequire.NoError(t, err)\n\tc.Release()\n}\n\nfunc TestPoolAcquireAndConnHijack(t *testing.T) {\n\tt.Parallel()\n\n\tctx := context.Background()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\tc, err := pool.Acquire(ctx)\n\trequire.NoError(t, err)\n\n\tconnsBeforeHijack := pool.Stat().TotalConns()\n\n\tconn := c.Hijack()\n\tdefer conn.Close()\n\n\tconnsAfterHijack := pool.Stat().TotalConns()\n\trequire.Equal(t, connsBeforeHijack-1, connsAfterHijack)\n\n\tcol := column.New[uint64]()\n\tstmt, err := conn.Select(context.Background(), \"SELECT * FROM system.numbers LIMIT 5;\", col)\n\trequire.NoError(t, err)\n\tfor stmt.Next() {\n\t}\n\n\trequire.NoError(t, stmt.Err())\n}\n\nfunc TestPoolAcquireFunc(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, 
err)\n\tdefer pool.Close()\n\n\terr = pool.AcquireFunc(context.Background(), func(c Conn) error {\n\t\treturn c.Ping(context.Background())\n\t})\n\trequire.NoError(t, err)\n}\n\nfunc TestPoolAcquireFuncReturnsFnError(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\terr = pool.AcquireFunc(context.Background(), func(c Conn) error {\n\t\treturn fmt.Errorf(\"some error\")\n\t})\n\trequire.EqualError(t, err, \"some error\")\n}\n\nfunc TestPoolBeforeConnect(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\tconfig.BeforeConnect = func(_ context.Context, cfg *chconn.Config) error {\n\t\tcfg.ClientName = \"chx2\"\n\t\treturn nil\n\t}\n\n\tdb, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdb.Close()\n\n\t// todo find a way to check it\n}\n\nfunc TestPoolAfterConnect(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tvar trigger bool\n\tconfig.AfterConnect = func(_ context.Context, _ chconn.Conn) error {\n\t\ttrigger = true\n\t\treturn nil\n\t}\n\n\tdb, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\terr = db.Ping(context.Background())\n\trequire.NoError(t, err)\n\n\tassert.True(t, trigger)\n}\n\nfunc TestPoolBeforeAcquire(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\tacquireAttempts := 0\n\n\tconfig.BeforeAcquire = func(ctx context.Context, c chconn.Conn) bool {\n\t\tacquireAttempts++\n\t\treturn acquireAttempts%2 == 0\n\t}\n\n\tdb, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\tconns := make([]Conn, 4)\n\tfor i := range conns {\n\t\tconns[i], err = db.Acquire(context.Background())\n\t\tassert.NoError(t, err)\n\t}\n\n\tfor _, c := range 
conns {\n\t\tc.Release()\n\t}\n\twaitForReleaseToComplete()\n\n\tassert.EqualValues(t, 8, acquireAttempts)\n\n\tconns = db.AcquireAllIdle(context.Background())\n\tassert.Len(t, conns, 2)\n\n\tfor _, c := range conns {\n\t\tc.Release()\n\t}\n\twaitForReleaseToComplete()\n\n\tassert.EqualValues(t, 12, acquireAttempts)\n}\n\nfunc TestPoolAfterRelease(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\tafterReleaseCount := 0\n\n\tconfig.AfterRelease = func(c chconn.Conn) bool {\n\t\tafterReleaseCount++\n\t\treturn afterReleaseCount%2 == 1\n\t}\n\n\tdb, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\tconns := map[string]struct{}{}\n\n\tfor i := 0; i < 10; i++ {\n\t\tconn, err := db.Acquire(context.Background())\n\t\tassert.NoError(t, err)\n\t\tconns[conn.Conn().RawConn().LocalAddr().String()] = struct{}{}\n\t\tconn.Release()\n\t\twaitForReleaseToComplete()\n\t}\n\n\tassert.EqualValues(t, 5, len(conns))\n}\n\nfunc TestPoolAcquireAllIdle(t *testing.T) {\n\tt.Parallel()\n\n\tdb, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\tconns := make([]Conn, 3)\n\tfor i := range conns {\n\t\tconns[i], err = db.Acquire(context.Background())\n\t\tassert.NoError(t, err)\n\t}\n\n\tfor _, c := range conns {\n\t\tif c != nil {\n\t\t\tc.Release()\n\t\t}\n\t}\n\twaitForReleaseToComplete()\n\n\tconns = db.AcquireAllIdle(context.Background())\n\tassert.Len(t, conns, 3)\n\n\tfor _, c := range conns {\n\t\tc.Release()\n\t}\n}\n\nfunc TestPoolReset(t *testing.T) {\n\tt.Parallel()\n\n\tdb, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\tconns := make([]Conn, 3)\n\tfor i := range conns {\n\t\tconns[i], err = db.Acquire(context.Background())\n\t\tassert.NoError(t, err)\n\t}\n\n\tdb.Reset()\n\n\tfor _, c := range conns {\n\t\tif c != nil 
{\n\t\t\tc.Release()\n\t\t}\n\t}\n\twaitForReleaseToComplete()\n\n\trequire.EqualValues(t, 0, db.Stat().TotalConns())\n}\n\nfunc TestConnReleaseChecksMaxConnLifetime(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\tconfig.MaxConnLifetime = 250 * time.Millisecond\n\n\tdb, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\tc, err := db.Acquire(context.Background())\n\trequire.NoError(t, err)\n\n\ttime.Sleep(config.MaxConnLifetime)\n\n\tc.Release()\n\twaitForReleaseToComplete()\n\n\tstats := db.Stat()\n\tassert.EqualValues(t, 0, stats.TotalConns())\n}\n\nfunc TestConnReleaseClosesBusyConn(t *testing.T) {\n\tt.Parallel()\n\n\tdb, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\tc, err := db.Acquire(context.Background())\n\trequire.NoError(t, err)\n\tcol := column.New[uint64]()\n\t_, err = c.Conn().Select(context.Background(), \"SELECT * FROM system.numbers LIMIT 10;\", col)\n\trequire.NoError(t, err)\n\n\tc.Release()\n\twaitForReleaseToComplete()\n\n\t// wait for the connection to actually be destroyed\n\tfor i := 0; i < 1000; i++ {\n\t\tif db.Stat().TotalConns() == 0 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\n\tstats := db.Stat()\n\tassert.EqualValues(t, 0, stats.TotalConns())\n}\n\nfunc TestPoolBackgroundChecksMaxConnLifetime(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\tconfig.MaxConnLifetime = 100 * time.Millisecond\n\tconfig.HealthCheckPeriod = 100 * time.Millisecond\n\n\tdb, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\tc, err := db.Acquire(context.Background())\n\trequire.NoError(t, err)\n\tc.Release()\n\ttime.Sleep(config.MaxConnLifetime + 100*time.Millisecond)\n\n\tstats := db.Stat()\n\tassert.EqualValues(t, 0, 
stats.TotalConns())\n\tassert.EqualValues(t, 0, stats.MaxIdleDestroyCount())\n\tassert.EqualValues(t, 1, stats.MaxLifetimeDestroyCount())\n}\n\nfunc TestPoolBackgroundChecksMaxConnIdleTime(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\tconfig.MaxConnLifetime = 1 * time.Minute\n\tconfig.MaxConnIdleTime = 100 * time.Millisecond\n\tconfig.HealthCheckPeriod = 150 * time.Millisecond\n\n\tdb, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\tc, err := db.Acquire(context.Background())\n\trequire.NoError(t, err)\n\tc.Release()\n\ttime.Sleep(config.HealthCheckPeriod)\n\n\tfor i := 0; i < 1000; i++ {\n\t\tif db.Stat().TotalConns() == 0 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\n\tstats := db.Stat()\n\tassert.EqualValues(t, 0, stats.TotalConns())\n\tassert.EqualValues(t, 1, stats.MaxIdleDestroyCount())\n\tassert.EqualValues(t, 0, stats.MaxLifetimeDestroyCount())\n}\n\nfunc TestPoolBackgroundChecksMinConns(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\tconfig.HealthCheckPeriod = 100 * time.Millisecond\n\tconfig.MinConns = 2\n\n\tdb, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\ttime.Sleep(config.HealthCheckPeriod + 500*time.Millisecond)\n\n\tstats := db.Stat()\n\tassert.EqualValues(t, 2, stats.TotalConns())\n\tassert.EqualValues(t, 0, stats.MaxLifetimeDestroyCount())\n\tassert.EqualValues(t, 2, stats.NewConnsCount())\n\n\tc, err := db.Acquire(context.Background())\n\trequire.NoError(t, err)\n\terr = c.Conn().Close()\n\trequire.NoError(t, err)\n\tc.Release()\n\n\ttime.Sleep(config.HealthCheckPeriod + 500*time.Millisecond)\n\n\tstats = db.Stat()\n\tassert.EqualValues(t, 2, stats.TotalConns())\n\tassert.EqualValues(t, 0, stats.MaxIdleDestroyCount())\n\tassert.EqualValues(t, 3, stats.NewConnsCount())\n}\n\nfunc 
TestPoolExec(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\ttestExec(t, pool)\n}\n\nfunc TestPoolExecError(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\ttestExec(t, pool)\n\n\tpool.Close()\n\n\terr = pool.Exec(context.Background(), \"SET enable_http_compression=1\")\n\tif assert.Error(t, err) {\n\t\tassert.Equal(t, \"acquire: closed pool\", err.Error())\n\t}\n}\n\nfunc TestPoolSelect(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\t// Test common usage\n\ttestSelect(t, pool)\n\twaitForReleaseToComplete()\n\n\t// Test expected pool behavior\n\tcol := column.New[uint64]()\n\tstmt, err := pool.Select(context.Background(), \"SELECT * FROM system.numbers LIMIT 5;\", col)\n\trequire.NoError(t, err)\n\tstats := pool.Stat()\n\tassert.EqualValues(t, 1, stats.AcquiredConns())\n\tassert.EqualValues(t, 1, stats.TotalConns())\n\tfor stmt.Next() {\n\t}\n\n\trequire.NoError(t, stmt.Err())\n\n\twaitForReleaseToComplete()\n\n\tstats = pool.Stat()\n\tassert.EqualValues(t, 0, stats.AcquiredConns())\n\tassert.EqualValues(t, 1, stats.TotalConns())\n\n\t// more coverage\n\n\tassert.EqualValues(t, 2, stats.AcquireCount())\n\tassert.GreaterOrEqual(t, int64(time.Second), int64(stats.AcquireDuration()))\n\tassert.EqualValues(t, 0, stats.AcquiredConns())\n\tassert.EqualValues(t, 0, stats.CanceledAcquireCount())\n\tassert.EqualValues(t, 0, stats.ConstructingConns())\n\tassert.EqualValues(t, 1, stats.EmptyAcquireCount())\n\tassert.EqualValues(t, 1, stats.IdleConns())\n\tmaxConns := defaultMaxConns\n\tif numCPU := int32(runtime.NumCPU()); numCPU > maxConns {\n\t\tmaxConns = numCPU\n\t}\n\tassert.EqualValues(t, maxConns, stats.MaxConns())\n}\n\nfunc TestPoolSelectError(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := 
New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\t// Test common usage\n\ttestSelect(t, pool)\n\twaitForReleaseToComplete()\n\n\t// Test expected pool behavior\n\tstmt, err := pool.Select(context.Background(), \"SELECT * FROM not_fount_table LIMIT 10;\")\n\tassert.Error(t, err)\n\tassert.Nil(t, stmt)\n\n\tpool.Close()\n\n\tstmt, err = pool.Select(context.Background(), \"SELECT * FROM not_fount_table LIMIT 10;\")\n\n\tif assert.Error(t, err) {\n\t\tassert.Equal(t, \"acquire: closed pool\", err.Error())\n\t}\n\n\trequire.Nil(t, stmt)\n}\nfunc TestPoolAcquireSelectError(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\t// Test common usage\n\ttestSelect(t, pool)\n\twaitForReleaseToComplete()\n\n\t// Test expected pool behavior\n\tconn, err := pool.Acquire(context.Background())\n\trequire.NoError(t, err)\n\tconn.Conn().RawConn().Close()\n\t_, err = conn.Conn().Select(context.Background(), \"SELECT * FROM system.numbers LIMIT 5;\")\n\tconn.Release()\n\trequire.Error(t, err)\n}\n\nfunc TestPoolInsert(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\trequire.NoError(t, pool.Ping(context.Background()))\n\n\terr = pool.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_pool`)\n\trequire.NoError(t, err)\n\terr = pool.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_pool (\n\t\t\t\tint8  Int8\n\t\t\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tcol := column.New[int8]()\n\tfor i := 1; i <= 10; i++ {\n\t\tcol.Append(int8(-1 * i))\n\t}\n\tstmt, err := pool.InsertStream(context.Background(), `INSERT INTO clickhouse_test_insert_pool (\n\t\t\t\tint8\n\t\t\t) VALUES`)\n\trequire.NoError(t, err)\n\n\tstats := pool.Stat()\n\tassert.EqualValues(t, 1, stats.AcquiredConns())\n\tassert.EqualValues(t, 1, 
stats.TotalConns())\n\n\trequire.NoError(t, stmt.Write(context.Background(), col))\n\trequire.NoError(t, stmt.Write(context.Background(), col))\n\trequire.NoError(t, stmt.Flush(context.Background()))\n\n\twaitForReleaseToComplete()\n\n\tstats = pool.Stat()\n\tassert.EqualValues(t, 0, stats.AcquiredConns())\n\tassert.EqualValues(t, 1, stats.TotalConns())\n}\n\nfunc TestPoolInsertError(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\terr = pool.Insert(context.Background(), `INSERT INTO not_found_table (\n\t\t\t\tint8\n\t\t\t) VALUES`)\n\tif assert.Error(t, err) {\n\t\tassert.Equal(t, \" DB::Exception (60): Table default.not_found_table doesn't exist\", err.Error())\n\t}\n\n\tpool.Close()\n\n\terr = pool.Insert(context.Background(), `INSERT INTO not_found_table (\n\t\t\t\tint8\n\t\t\t) VALUES`)\n\n\tif assert.Error(t, err) {\n\t\tassert.Equal(t, \"acquire: closed pool\", err.Error())\n\t}\n}\n\nfunc TestPoolInsertStream(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\trequire.NoError(t, pool.Ping(context.Background()))\n\n\terr = pool.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_pool_stream`)\n\trequire.NoError(t, err)\n\terr = pool.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_pool_stream (\n\t\t\t\tint8  Int8\n\t\t\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tcol := column.New[int8]()\n\tfor i := 1; i <= 10; i++ {\n\t\tcol.Append(int8(-1 * i))\n\t}\n\terr = pool.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_pool_stream (\n\t\t\t\tint8\n\t\t\t) VALUES`, col)\n\trequire.NoError(t, err)\n\n\tcolInt8 := column.New[int8]()\n\tselectStmt, err := pool.Select(context.Background(), `SELECT \n\t\t\t\tint8\n\t FROM clickhouse_test_insert_pool_stream`, colInt8)\n\trequire.NoError(t, err)\n\n\tstats := 
pool.Stat()\n\tassert.EqualValues(t, 1, stats.AcquiredConns())\n\tassert.EqualValues(t, 1, stats.TotalConns())\n\tfor selectStmt.Next() {\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\n\tselectStmt.Close()\n\twaitForReleaseToComplete()\n\n\tstats = pool.Stat()\n\tassert.EqualValues(t, 0, stats.AcquiredConns())\n\tassert.EqualValues(t, 1, stats.TotalConns())\n}\n\nfunc TestConnReleaseClosesConnInFailedTransaction(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\tc, err := pool.Acquire(ctx)\n\trequire.NoError(t, err)\n\n\tpid := c.Conn().RawConn().LocalAddr().String()\n\n\tstmt, err := c.Conn().Select(ctx, \"SELECT * FROM system.numbers2 LIMIT 5;\")\n\tassert.Error(t, err)\n\tassert.Nil(t, stmt)\n\n\tc.Release()\n\twaitForReleaseToComplete()\n\n\tc, err = pool.Acquire(ctx)\n\trequire.NoError(t, err)\n\n\tassert.NotEqual(t, pid, c.Conn().RawConn().LocalAddr().String())\n\tc.Release()\n}\n\nfunc TestConnReleaseDestroysClosedConn(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\tc, err := pool.Acquire(ctx)\n\trequire.NoError(t, err)\n\tc.Conn().Close()\n\terr = c.Conn().Close()\n\trequire.NoError(t, err)\n\n\tassert.EqualValues(t, 1, pool.Stat().TotalConns())\n\n\tc.Release()\n\twaitForReleaseToComplete()\n\n\t// wait for the connection to actually be destroyed\n\tfor i := 0; i < 1000; i++ {\n\t\tif pool.Stat().TotalConns() == 0 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\n\tassert.EqualValues(t, 0, pool.Stat().TotalConns())\n}\n\nfunc TestConnPoolQueryConcurrentLoad(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := 
New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\tn := 100\n\tdone := make(chan bool)\n\n\tfor i := 0; i < n; i++ {\n\t\tgo func() {\n\t\t\tdefer func() { done <- true }()\n\t\t\ttestSelect(t, pool)\n\t\t}()\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\t<-done\n\t}\n}\n\nfunc TestParseConfigError(t *testing.T) {\n\tt.Parallel()\n\n\tparseConfigErrorTests := []struct {\n\t\tname       string\n\t\tconnString string\n\t\terr        string\n\t}{\n\t\t{\n\t\t\tname:       \"invalid host\",\n\t\t\tconnString: \"host>0\",\n\t\t\terr:        \"cannot parse `host>0`: failed to parse as DSN (invalid dsn)\",\n\t\t}, {\n\t\t\tname:       \"invalid pool_max_conns\",\n\t\t\tconnString: \"pool_max_conns=invalid\",\n\t\t\terr:        \"cannot parse pool_max_conns: strconv.ParseInt: parsing \\\"invalid\\\": invalid syntax\",\n\t\t}, {\n\t\t\tname:       \"low pool_max_conns\",\n\t\t\tconnString: \"pool_max_conns=0\",\n\t\t\terr:        \"pool_max_conns too small: 0\",\n\t\t}, {\n\t\t\tname:       \"invalid pool_min_conns\",\n\t\t\tconnString: \"pool_min_conns=invalid\",\n\t\t\terr:        \"cannot parse pool_min_conns: strconv.ParseInt: parsing \\\"invalid\\\": invalid syntax\",\n\t\t}, {\n\t\t\tname:       \"invalid pool_max_conn_lifetime\",\n\t\t\tconnString: \"pool_max_conn_lifetime=invalid\",\n\t\t\terr:        \"invalid pool_max_conn_lifetime: time: invalid duration \\\"invalid\\\"\",\n\t\t}, {\n\t\t\tname:       \"invalid pool_max_conn_idle_time\",\n\t\t\tconnString: \"pool_max_conn_idle_time=invalid\",\n\t\t\terr:        \"invalid pool_max_conn_idle_time: time: invalid duration \\\"invalid\\\"\",\n\t\t}, {\n\t\t\tname:       \"invalid pool_health_check_period\",\n\t\t\tconnString: \"pool_health_check_period=invalid\",\n\t\t\terr:        \"invalid pool_health_check_period: time: invalid duration \\\"invalid\\\"\",\n\t\t}, {\n\t\t\tname:       \"invalid pool_max_conn_lifetime_jitter\",\n\t\t\tconnString: 
\"pool_max_conn_lifetime_jitter=invalid\",\n\t\t\terr:        \"invalid pool_max_conn_lifetime_jitter: time: invalid duration \\\"invalid\\\"\",\n\t\t}, {\n\t\t\tname:       \"invalid pool_create_idle_timeout\",\n\t\t\tconnString: \"pool_create_idle_timeout=invalid\",\n\t\t\terr:        \"invalid pool_create_idle_timeout: time: invalid duration \\\"invalid\\\"\",\n\t\t},\n\t}\n\n\tfor i, tt := range parseConfigErrorTests {\n\t\t_, err := ParseConfig(tt.connString)\n\t\tif !assert.Errorf(t, err, \"Test %d (%s)\", i, tt.name) {\n\t\t\tcontinue\n\t\t}\n\t\tif !assert.Equalf(t, err.Error(), tt.err, \"Test %d (%s)\", i, tt.name) {\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestNewParseError(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(\"host>0\")\n\tassert.Nil(t, pool)\n\tassert.Equal(t, \"cannot parse `host>0`: failed to parse as DSN (invalid dsn)\", err.Error())\n}\n\nfunc TestNewError(t *testing.T) {\n\tt.Parallel()\n\n\tpool, err := New(\"host=invalidhost\")\n\tassert.NotNil(t, pool)\n\tassert.NoError(t, err)\n\terr = pool.Ping(context.Background())\n\tassert.Error(t, err)\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tconfig.AfterConnect = func(ctx context.Context, c chconn.Conn) error {\n\t\treturn errors.New(\"afterConnect err\")\n\t}\n\n\tpool, err = NewWithConfig(config)\n\trequire.NoError(t, err)\n\terr = pool.Ping(context.Background())\n\tassert.Error(t, err)\n\tassert.EqualError(t, err, \"acquire: afterConnect err\")\n}\n\nfunc TestIdempotentPoolClose(t *testing.T) {\n\tpool, err := New(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\n\t// Close the open pool.\n\trequire.NotPanics(t, func() { pool.Close() })\n\n\t// Close the already closed pool.\n\trequire.NotPanics(t, func() { pool.Close() })\n}\n\nfunc TestConnectEagerlyReachesMinPoolSize(t *testing.T) {\n\tt.Parallel()\n\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, 
err)\n\n\tconfig.MinConns = int32(12)\n\tconfig.MaxConns = int32(15)\n\n\tacquireAttempts := int64(0)\n\tconnectAttempts := int64(0)\n\n\tconfig.BeforeAcquire = func(ctx context.Context, conn chconn.Conn) bool {\n\t\tatomic.AddInt64(&acquireAttempts, 1)\n\t\treturn true\n\t}\n\tconfig.BeforeConnect = func(ctx context.Context, cfg *chconn.Config) error {\n\t\tatomic.AddInt64(&connectAttempts, 1)\n\t\treturn nil\n\t}\n\n\tpool, err := NewWithConfig(config)\n\trequire.NoError(t, err)\n\tdefer pool.Close()\n\n\tfor i := 0; i < 500; i++ {\n\t\ttime.Sleep(10 * time.Millisecond)\n\n\t\tstat := pool.Stat()\n\t\tif stat.IdleConns() == 12 &&\n\t\t\tstat.AcquireCount() == 0 &&\n\t\t\tstat.TotalConns() == 12 &&\n\t\t\tatomic.LoadInt64(&acquireAttempts) == 0 &&\n\t\t\tatomic.LoadInt64(&connectAttempts) == 12 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tt.Fatal(\"did not reach min pool size\")\n}\n"
  },
  {
    "path": "chpool/select_stmt.go",
    "content": "package chpool\n\nimport (\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n)\n\ntype selectStmt struct {\n\tchconn.SelectStmt\n\tconn Conn\n}\n\nfunc (s *selectStmt) Next() bool {\n\tif s.conn == nil {\n\t\treturn false\n\t}\n\tnext := s.SelectStmt.Next()\n\tif s.SelectStmt.Err() != nil && s.conn != nil {\n\t\ts.conn.Release()\n\t\ts.conn = nil\n\t}\n\tif !next && s.conn != nil {\n\t\ts.conn.Release()\n\t\ts.conn = nil\n\t}\n\treturn next\n}\n\nfunc (s *selectStmt) Close() {\n\tif s.conn == nil {\n\t\treturn\n\t}\n\ts.SelectStmt.Close()\n\ts.conn.Release()\n}\n"
  },
  {
    "path": "chpool/stat.go",
    "content": "package chpool\n\nimport (\n\t\"time\"\n\n\t\"github.com/jackc/puddle/v2\"\n)\n\n// Stat is a snapshot of Pool statistics.\ntype Stat struct {\n\ts                    *puddle.Stat\n\tnewConnsCount        int64\n\tlifetimeDestroyCount int64\n\tidleDestroyCount     int64\n}\n\n// AcquireCount returns the cumulative count of successful acquires from the pool.\nfunc (s *Stat) AcquireCount() int64 {\n\treturn s.s.AcquireCount()\n}\n\n// AcquireDuration returns the total duration of all successful acquires from\n// the pool.\nfunc (s *Stat) AcquireDuration() time.Duration {\n\treturn s.s.AcquireDuration()\n}\n\n// AcquiredConns returns the number of currently acquired connections in the pool.\nfunc (s *Stat) AcquiredConns() int32 {\n\treturn s.s.AcquiredResources()\n}\n\n// CanceledAcquireCount returns the cumulative count of acquires from the pool\n// that were canceled by a context.\nfunc (s *Stat) CanceledAcquireCount() int64 {\n\treturn s.s.CanceledAcquireCount()\n}\n\n// ConstructingConns returns the number of conns with construction in progress in\n// the pool.\nfunc (s *Stat) ConstructingConns() int32 {\n\treturn s.s.ConstructingResources()\n}\n\n// EmptyAcquireCount returns the cumulative count of successful acquires from the pool\n// that waited for a resource to be released or constructed because the pool was\n// empty.\nfunc (s *Stat) EmptyAcquireCount() int64 {\n\treturn s.s.EmptyAcquireCount()\n}\n\n// IdleConns returns the number of currently idle conns in the pool.\nfunc (s *Stat) IdleConns() int32 {\n\treturn s.s.IdleResources()\n}\n\n// MaxConns returns the maximum size of the pool.\nfunc (s *Stat) MaxConns() int32 {\n\treturn s.s.MaxResources()\n}\n\n// TotalConns returns the total number of resources currently in the pool.\n// The value is the sum of ConstructingConns, AcquiredConns, and\n// IdleConns.\nfunc (s *Stat) TotalConns() int32 {\n\treturn s.s.TotalResources()\n}\n\n// NewConnsCount returns the cumulative count of new 
connections opened.\nfunc (s *Stat) NewConnsCount() int64 {\n\treturn s.newConnsCount\n}\n\n// MaxLifetimeDestroyCount returns the cumulative count of connections destroyed\n// because they exceeded MaxConnLifetime.\nfunc (s *Stat) MaxLifetimeDestroyCount() int64 {\n\treturn s.lifetimeDestroyCount\n}\n\n// MaxIdleDestroyCount returns the cumulative count of connections destroyed because\n// they exceeded MaxConnIdleTime.\nfunc (s *Stat) MaxIdleDestroyCount() int64 {\n\treturn s.idleDestroyCount\n}\n"
  },
  {
    "path": "client_info.go",
    "content": "package chconn\n\nimport (\n\t\"os/user\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n)\n\n// ClientInfo Information about client for query.\n// Some fields are passed explicitly from client and some are calculated automatically.\n// Contains info about initial query source, for tracing distributed queries\n// where one query initiates many other queries.\ntype ClientInfo struct {\n\tInitialUser    string\n\tInitialQueryID string\n\n\tOSUser         string\n\tClientHostname string\n\tClientName     string\n\n\tClientVersionMajor uint64\n\tClientVersionMinor uint64\n\tClientVersionPatch uint64\n\tClientRevision     uint64\n\tDistributedDepth   uint64\n\n\tQuotaKey string\n}\n\n// Write Only values that are not calculated automatically or passed separately are serialized.\n// Revisions are passed to use format that server will understand or client was used.\nfunc (c *ClientInfo) write(ch *conn) {\n\t// InitialQuery\n\tch.writer.Uint8(1)\n\n\tch.writer.String(c.InitialUser)\n\tch.writer.String(c.InitialQueryID)\n\n\tch.writer.String(\"[::ffff:127.0.0.1]:0\")\n\n\tif ch.serverInfo.Revision >= helper.DbmsMinProtocolVersionWithInitialQueryStartTime {\n\t\tch.writer.Uint64(0)\n\t}\n\n\t// iface type\n\tch.writer.Uint8(1) // tcp\n\tch.writer.String(c.OSUser)\n\tch.writer.String(c.ClientHostname)\n\tch.writer.String(c.ClientName)\n\tch.writer.Uvarint(c.ClientVersionMajor)\n\tch.writer.Uvarint(c.ClientVersionMinor)\n\tch.writer.Uvarint(c.ClientRevision)\n\n\tif ch.serverInfo.Revision >= helper.DbmsMinRevisionWithQuotaKeyInClientInfo {\n\t\tch.writer.String(c.QuotaKey)\n\t}\n\n\tif ch.serverInfo.Revision >= helper.DbmsMinProtocolVersionWithDistributedDepth {\n\t\tch.writer.Uvarint(c.DistributedDepth)\n\t}\n\n\tif ch.serverInfo.Revision >= helper.DbmsMinRevisionWithVersionPatch {\n\t\tch.writer.Uvarint(c.ClientVersionPatch)\n\t}\n\n\tif ch.serverInfo.Revision >= helper.DbmsMinRevisionWithOpenTelemetry {\n\t\tch.writer.Uint8(0)\n\t}\n\n\tif 
ch.serverInfo.Revision >= helper.DbmsMinProtocolVersionWithParallelReplicas {\n\t\tch.writer.Uvarint(0) // collaborate_with_initiator\n\t\tch.writer.Uvarint(0) // count_participating_replicas\n\t\tch.writer.Uvarint(0) // number_of_current_replica\n\t}\n}\n\nfunc (c *ClientInfo) fillOSUserHostNameAndVersionInfo() {\n\tu, err := user.Current()\n\tif err == nil {\n\t\tc.OSUser = u.Username\n\t}\n\n\tc.ClientVersionMajor = dbmsVersionMajor\n\tc.ClientVersionMinor = dbmsVersionMinor\n\tc.ClientVersionPatch = dbmsVersionPatch\n\tc.ClientRevision = dbmsVersionRevision\n}\n"
  },
  {
    "path": "column/array.go",
    "content": "package column\n\n// Array is a column of Array(T) ClickHouse data type\ntype Array[T any] struct {\n\tArrayBase\n\tcolumnData []T\n}\n\n// NewArray create a new array column of Array(T) ClickHouse data type\nfunc NewArray[T any](dataColumn Column[T]) *Array[T] {\n\ta := &Array[T]{\n\t\tArrayBase: ArrayBase{\n\t\t\tdataColumn:   dataColumn,\n\t\t\toffsetColumn: New[uint64](),\n\t\t},\n\t}\n\ta.resetHook = func() {\n\t\ta.columnData = a.columnData[:0]\n\t}\n\treturn a\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Array[T]) Data() [][]T {\n\tvalues := make([][]T, c.offsetColumn.numRow)\n\toffsets := c.Offsets()\n\tvar lastOffset uint64\n\tcolumnData := c.getColumnData()\n\tfor i, offset := range offsets {\n\t\tval := make([]T, offset-lastOffset)\n\t\tcopy(val, columnData[lastOffset:offset])\n\t\tvalues[i] = val\n\t\tlastOffset = offset\n\t}\n\treturn values\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Array[T]) Read(value [][]T) [][]T {\n\toffsets := c.Offsets()\n\tvar lastOffset uint64\n\tcolumnData := c.getColumnData()\n\tfor _, offset := range offsets {\n\t\tval := make([]T, offset-lastOffset)\n\t\tcopy(val, columnData[lastOffset:offset])\n\t\tvalue = append(value, val)\n\t\tlastOffset = offset\n\t}\n\treturn value\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Array[T]) Row(row int) []T {\n\tvar lastOffset uint64\n\tif row != 0 {\n\t\tlastOffset = c.offsetColumn.Row(row - 1)\n\t}\n\tvar val []T\n\tval = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...)\n\treturn val\n}\n\n// Append value for insert\nfunc (c *Array[T]) Append(v ...[]T) {\n\tfor _, v := range v {\n\t\tc.AppendLen(len(v))\n\t\tc.dataColumn.(Column[T]).Append(v...)\n\t}\n}\n\n// Append single item value for insert\n//\n// it should use with AppendLen\n//\n// Example:\n//\n//\tc.AppendLen(2) // insert 2 items\n//\tc.AppendItem(1, 2)\nfunc (c *Array[T]) 
AppendItem(v ...T) {\n\tc.dataColumn.(Column[T]).Append(v...)\n}\n\n// Array return a Array type for this column\nfunc (c *Array[T]) Array() *Array2[T] {\n\treturn NewArray2(c)\n}\n\nfunc (c *Array[T]) getColumnData() []T {\n\tif len(c.columnData) == 0 {\n\t\tc.columnData = c.dataColumn.(Column[T]).Data()\n\t}\n\treturn c.columnData\n}\n\nfunc (c *Array[T]) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/array2.go",
    "content": "package column\n\n// Array2 is a column of Array(Array(T)) ClickHouse data type\ntype Array2[T any] struct {\n\tArrayBase\n}\n\n// NewArray create a new array column of Array(Array(T)) ClickHouse data type\nfunc NewArray2[T any](array *Array[T]) *Array2[T] {\n\ta := &Array2[T]{\n\t\tArrayBase: ArrayBase{\n\t\t\tdataColumn:   array,\n\t\t\toffsetColumn: New[uint64](),\n\t\t},\n\t}\n\treturn a\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Array2[T]) Data() [][][]T {\n\tvalues := make([][][]T, c.offsetColumn.numRow)\n\tfor i := range values {\n\t\tvalues[i] = c.Row(i)\n\t}\n\treturn values\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Array2[T]) Read(value [][][]T) [][][]T {\n\tif cap(value)-len(value) >= c.NumRow() {\n\t\tvalue = (value)[:len(value)+c.NumRow()]\n\t} else {\n\t\tvalue = append(value, make([][][]T, c.NumRow())...)\n\t}\n\tval := (value)[len(value)-c.NumRow():]\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i] = c.Row(i)\n\t}\n\treturn value\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Array2[T]) Row(row int) [][]T {\n\tvar lastOffset uint64\n\tif row != 0 {\n\t\tlastOffset = c.offsetColumn.Row(row - 1)\n\t}\n\tvar val [][]T\n\tlastRow := c.offsetColumn.Row(row)\n\tfor ; lastOffset < lastRow; lastOffset++ {\n\t\tval = append(val, c.dataColumn.(*Array[T]).Row(int(lastOffset)))\n\t}\n\treturn val\n}\n\n// Append value for insert\nfunc (c *Array2[T]) Append(v ...[][]T) {\n\tfor _, v := range v {\n\t\tc.AppendLen(len(v))\n\t\tc.dataColumn.(*Array[T]).Append(v...)\n\t}\n}\n\nfunc (c *Array2[T]) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/array2_nullable.go",
    "content": "package column\n\nimport \"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n\n// Array is a column of Array(Array(Nullable(T))) ClickHouse data type\ntype Array2Nullable[T comparable] struct {\n\tArray2[T]\n\tdataColumn *ArrayNullable[T]\n\tcolumnData [][]*T\n}\n\n// NewArrayNullable create a new array column of Array(Nullable(T)) ClickHouse data type\nfunc NewArray2Nullable[T comparable](dataColumn *ArrayNullable[T]) *Array2Nullable[T] {\n\ta := &Array2Nullable[T]{\n\t\tdataColumn: dataColumn,\n\t\tArray2: Array2[T]{\n\t\t\tArrayBase: ArrayBase{\n\t\t\t\tdataColumn:   dataColumn,\n\t\t\t\toffsetColumn: New[uint64](),\n\t\t\t},\n\t\t},\n\t}\n\ta.resetHook = func() {\n\t\ta.columnData = a.columnData[:0]\n\t}\n\treturn a\n}\n\n// Data get all the nullable data in current block as a slice of pointer.\nfunc (c *Array2Nullable[T]) DataP() [][][]*T {\n\tvalues := make([][][]*T, c.offsetColumn.numRow)\n\tvar lastOffset uint64\n\tcolumnData := c.getColumnData()\n\tfor i := 0; i < c.offsetColumn.numRow; i++ {\n\t\tvalues[i] = columnData[lastOffset:c.offsetColumn.Row(i)]\n\t\tlastOffset = c.offsetColumn.Row(i)\n\t}\n\treturn values\n}\n\n// Read reads all the nullable data in current block as a slice pointer and append to the input.\nfunc (c *Array2Nullable[T]) ReadP(value [][][]*T) [][][]*T {\n\tvar lastOffset uint64\n\tcolumnData := c.getColumnData()\n\tfor i := 0; i < c.offsetColumn.numRow; i++ {\n\t\tvalue = append(value, columnData[lastOffset:c.offsetColumn.Row(i)])\n\t\tlastOffset = c.offsetColumn.Row(i)\n\t}\n\treturn value\n}\n\n// RowP return the nullable value of given row as a pointer\n// NOTE: Row number start from zero\nfunc (c *Array2Nullable[T]) RowP(row int) [][]*T {\n\tvar lastOffset uint64\n\tif row != 0 {\n\t\tlastOffset = c.offsetColumn.Row(row - 1)\n\t}\n\tvar val [][]*T\n\tval = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...)\n\treturn val\n}\n\n// AppendP a nullable value for insert\nfunc (c 
*Array2Nullable[T]) AppendP(v ...[][]*T) {\n\tfor _, v := range v {\n\t\tc.AppendLen(len(v))\n\t\tc.dataColumn.AppendP(v...)\n\t}\n}\n\n// ReadRaw read raw data from the reader. it runs automatically\nfunc (c *Array2Nullable[T]) ReadRaw(num int, r *readerwriter.Reader) error {\n\terr := c.Array2.ReadRaw(num, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.columnData = c.dataColumn.DataP()\n\treturn nil\n}\n\n// Array return a Array type for this column\nfunc (c *Array2Nullable[T]) Array() *Array3Nullable[T] {\n\treturn NewArray3Nullable(c)\n}\n\nfunc (c *Array2Nullable[T]) getColumnData() [][]*T {\n\tif len(c.columnData) == 0 {\n\t\tc.columnData = c.dataColumn.DataP()\n\t}\n\treturn c.columnData\n}\n\nfunc (c *Array2Nullable[T]) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/array3.go",
    "content": "package column\n\n// Array3 is a column of Array(Array(Array(T))) ClickHouse data type\ntype Array3[T any] struct {\n\tArrayBase\n}\n\n// NewArray create a new array column of Array(Array(Array(T))) ClickHouse data type\nfunc NewArray3[T any](array *Array2[T]) *Array3[T] {\n\ta := &Array3[T]{\n\t\tArrayBase: ArrayBase{\n\t\t\tdataColumn:   array,\n\t\t\toffsetColumn: New[uint64](),\n\t\t},\n\t}\n\treturn a\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Array3[T]) Data() [][][][]T {\n\tvalues := make([][][][]T, c.offsetColumn.numRow)\n\tfor i := range values {\n\t\tvalues[i] = c.Row(i)\n\t}\n\treturn values\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Array3[T]) Read(value [][][][]T) [][][][]T {\n\tif cap(value)-len(value) >= c.NumRow() {\n\t\tvalue = (value)[:len(value)+c.NumRow()]\n\t} else {\n\t\tvalue = append(value, make([][][][]T, c.NumRow())...)\n\t}\n\tval := (value)[len(value)-c.NumRow():]\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i] = c.Row(i)\n\t}\n\treturn value\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Array3[T]) Row(row int) [][][]T {\n\tvar lastOffset uint64\n\tif row != 0 {\n\t\tlastOffset = c.offsetColumn.Row(row - 1)\n\t}\n\tvar val [][][]T\n\tlastRow := c.offsetColumn.Row(row)\n\tfor ; lastOffset < lastRow; lastOffset++ {\n\t\tval = append(val, c.dataColumn.(*Array2[T]).Row(int(lastOffset)))\n\t}\n\treturn val\n}\n\n// Append value for insert\nfunc (c *Array3[T]) Append(v ...[][][]T) {\n\tfor _, v := range v {\n\t\tc.AppendLen(len(v))\n\t\tc.dataColumn.(*Array2[T]).Append(v...)\n\t}\n}\n\n// Array return a Array type for this column\nfunc (c *Array2[T]) Array() *Array3[T] {\n\treturn NewArray3(c)\n}\n\nfunc (c *Array3[T]) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\tpanic(\"array level is too deep\")\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/array3_nullable.go",
    "content": "package column\n\nimport \"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n\n// Array is a column of Array(Array(Nullable(T))) ClickHouse data type\ntype Array3Nullable[T comparable] struct {\n\tArray3[T]\n\tdataColumn *Array2Nullable[T]\n\tcolumnData [][][]*T\n}\n\n// NewArrayNullable create a new array column of Array(Nullable(T)) ClickHouse data type\nfunc NewArray3Nullable[T comparable](dataColumn *Array2Nullable[T]) *Array3Nullable[T] {\n\ta := &Array3Nullable[T]{\n\t\tdataColumn: dataColumn,\n\t\tArray3: Array3[T]{\n\t\t\tArrayBase: ArrayBase{\n\t\t\t\tdataColumn:   dataColumn,\n\t\t\t\toffsetColumn: New[uint64](),\n\t\t\t},\n\t\t},\n\t}\n\ta.resetHook = func() {\n\t\ta.columnData = a.columnData[:0]\n\t}\n\treturn a\n}\n\n// Data get all the nullable data in current block as a slice of pointer.\nfunc (c *Array3Nullable[T]) DataP() [][][][]*T {\n\tvalues := make([][][][]*T, c.offsetColumn.numRow)\n\tvar lastOffset uint64\n\tcolumnData := c.getColumnData()\n\tfor i := 0; i < c.offsetColumn.numRow; i++ {\n\t\tvalues[i] = columnData[lastOffset:c.offsetColumn.Row(i)]\n\t\tlastOffset = c.offsetColumn.Row(i)\n\t}\n\treturn values\n}\n\n// Read reads all the nullable data in current block as a slice pointer and append to the input.\nfunc (c *Array3Nullable[T]) ReadP(value [][][][]*T) [][][][]*T {\n\tvar lastOffset uint64\n\tcolumnData := c.getColumnData()\n\tfor i := 0; i < c.offsetColumn.numRow; i++ {\n\t\tvalue = append(value, columnData[lastOffset:c.offsetColumn.Row(i)])\n\t\tlastOffset = c.offsetColumn.Row(i)\n\t}\n\treturn value\n}\n\n// RowP return the nullable value of given row as a pointer\n// NOTE: Row number start from zero\nfunc (c *Array3Nullable[T]) RowP(row int) [][][]*T {\n\tvar lastOffset uint64\n\tif row != 0 {\n\t\tlastOffset = c.offsetColumn.Row(row - 1)\n\t}\n\tvar val [][][]*T\n\tval = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...)\n\treturn val\n}\n\n// AppendP a nullable value for 
insert\nfunc (c *Array3Nullable[T]) AppendP(v ...[][][]*T) {\n\tfor _, v := range v {\n\t\tc.AppendLen(len(v))\n\t\tc.dataColumn.AppendP(v...)\n\t}\n}\n\n// ReadRaw read raw data from the reader. it runs automatically\nfunc (c *Array3Nullable[T]) ReadRaw(num int, r *readerwriter.Reader) error {\n\terr := c.Array3.ReadRaw(num, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.columnData = c.dataColumn.DataP()\n\treturn nil\n}\n\nfunc (c *Array3Nullable[T]) getColumnData() [][][]*T {\n\tif len(c.columnData) == 0 {\n\t\tc.columnData = c.dataColumn.DataP()\n\t}\n\treturn c.columnData\n}\n\nfunc (c *Array3Nullable[T]) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\tpanic(\"array level is too deep\")\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/array_base.go",
    "content": "package column\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\n// ArrayBase is a column of Array(T) ClickHouse data type\n//\n// ArrayBase is a base class for other arrays or use for none generic use\ntype ArrayBase struct {\n\tcolumn\n\toffsetColumn *Base[uint64]\n\tdataColumn   ColumnBasic\n\toffset       uint64\n\tresetHook    func()\n}\n\n// NewArray create a new array column of Array(T) ClickHouse data type\nfunc NewArrayBase(dataColumn ColumnBasic) *ArrayBase {\n\ta := &ArrayBase{\n\t\tdataColumn:   dataColumn,\n\t\toffsetColumn: New[uint64](),\n\t}\n\treturn a\n}\n\n// AppendLen Append len of array for insert\nfunc (c *ArrayBase) AppendLen(v int) {\n\tc.offset += uint64(v)\n\tc.offsetColumn.Append(c.offset)\n}\n\n// NumRow return number of row for this block\nfunc (c *ArrayBase) NumRow() int {\n\treturn c.offsetColumn.NumRow()\n}\n\n// Array return a Array type for this column\nfunc (c *ArrayBase) Array() *ArrayBase {\n\treturn NewArrayBase(c)\n}\n\n// Reset all statuses and buffered data\n//\n// After each reading, the reading data does not need to be reset. 
It will be automatically reset.\n//\n// When inserting, buffers are reset only after the operation is successful.\n// If an error occurs, you can safely call insert again.\nfunc (c *ArrayBase) Reset() {\n\tc.offsetColumn.Reset()\n\tc.dataColumn.Reset()\n\tc.offset = 0\n}\n\n// Offsets return all the offsets in current block\n// Note: Only available in the current block\nfunc (c *ArrayBase) Offsets() []uint64 {\n\treturn c.offsetColumn.Data()\n}\n\n// TotalRows return total rows on this block of array data\nfunc (c *ArrayBase) TotalRows() int {\n\tif c.offsetColumn.totalByte == 0 {\n\t\treturn 0\n\t}\n\treturn int(binary.LittleEndian.Uint64(c.offsetColumn.b[c.offsetColumn.totalByte-8 : c.offsetColumn.totalByte]))\n}\n\n// SetWriteBufferSize set write buffer (number of rows)\n// this buffer only used for writing.\n// By setting this buffer, you will avoid allocating the memory several times.\nfunc (c *ArrayBase) SetWriteBufferSize(row int) {\n\tc.offsetColumn.SetWriteBufferSize(row)\n\tc.dataColumn.SetWriteBufferSize(row)\n}\n\n// ReadRaw read raw data from the reader. 
it runs automatically\nfunc (c *ArrayBase) ReadRaw(num int, r *readerwriter.Reader) error {\n\tc.offsetColumn.Reset()\n\terr := c.offsetColumn.ReadRaw(num, r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"array: read offset column: %w\", err)\n\t}\n\terr = c.dataColumn.ReadRaw(c.TotalRows(), r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"array: read data column: %w\", err)\n\t}\n\n\tif c.resetHook != nil {\n\t\tc.resetHook()\n\t}\n\treturn nil\n}\n\n// HeaderReader reads header data from reader\n// it uses internally\nfunc (c *ArrayBase) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {\n\tc.r = r\n\terr := c.readColumn(readColumn, revision)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// never return error\n\t//nolint:errcheck\n\tc.offsetColumn.HeaderReader(r, false, revision)\n\n\treturn c.dataColumn.HeaderReader(r, false, revision)\n}\n\n// Column returns the sub column\nfunc (c *ArrayBase) Column() ColumnBasic {\n\treturn c.dataColumn\n}\n\nfunc (c *ArrayBase) Validate() error {\n\tchType := helper.FilterSimpleAggregate(c.chType)\n\tswitch {\n\tcase helper.IsRing(chType):\n\t\tchType = helper.RingMainTypeStr\n\tcase helper.IsPolygon(chType):\n\t\tchType = helper.PolygonMainTypeStr\n\tcase helper.IsMultiPolygon(chType):\n\t\tchType = helper.MultiPolygonMainTypeStr\n\t}\n\n\tchType = helper.NestedToArrayType(chType)\n\n\tif !helper.IsArray(chType) {\n\t\treturn ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\tc.dataColumn.SetType(chType[helper.LenArrayStr : len(chType)-1])\n\tif c.dataColumn.Validate() != nil {\n\t\treturn ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *ArrayBase) ColumnType() string {\n\treturn strings.ReplaceAll(helper.ArrayTypeStr, \"<type>\", c.dataColumn.ColumnType())\n}\n\n// WriteTo write data to ClickHouse.\n// it uses internally\nfunc (c *ArrayBase) WriteTo(w io.Writer) (int64, error) {\n\tnw, err := c.offsetColumn.WriteTo(w)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"write len 
data: %w\", err)\n\t}\n\tn, errDataColumn := c.dataColumn.WriteTo(w)\n\n\treturn nw + n, errDataColumn\n}\n\n// HeaderWriter writes header data to writer\n// it uses internally\nfunc (c *ArrayBase) HeaderWriter(w *readerwriter.Writer) {\n\tc.dataColumn.HeaderWriter(w)\n}\n\nfunc (c *ArrayBase) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/array_nullable.go",
    "content": "package column\n\nimport \"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n\n// Array is a column of Array(Nullable(T)) ClickHouse data type\ntype ArrayNullable[T comparable] struct {\n\tArray[T]\n\tdataColumn NullableColumn[T]\n\tcolumnData []*T\n}\n\n// NewArrayNullable create a new array column of Array(Nullable(T)) ClickHouse data type\nfunc NewArrayNullable[T comparable](dataColumn NullableColumn[T]) *ArrayNullable[T] {\n\ta := &ArrayNullable[T]{\n\t\tdataColumn: dataColumn,\n\t\tArray: Array[T]{\n\t\t\tArrayBase: ArrayBase{\n\t\t\t\tdataColumn:   dataColumn,\n\t\t\t\toffsetColumn: New[uint64](),\n\t\t\t},\n\t\t},\n\t}\n\ta.resetHook = func() {\n\t\ta.columnData = a.columnData[:0]\n\t}\n\treturn a\n}\n\n// Data get all the nullable data in current block as a slice of pointer.\nfunc (c *ArrayNullable[T]) DataP() [][]*T {\n\tvalues := make([][]*T, c.offsetColumn.numRow)\n\tvar lastOffset uint64\n\tcolumnData := c.getColumnData()\n\tfor i := 0; i < c.offsetColumn.numRow; i++ {\n\t\tvalues[i] = columnData[lastOffset:c.offsetColumn.Row(i)]\n\t\tlastOffset = c.offsetColumn.Row(i)\n\t}\n\treturn values\n}\n\n// Read reads all the nullable data in current block as a slice pointer and append to the input.\nfunc (c *ArrayNullable[T]) ReadP(value [][]*T) [][]*T {\n\tvar lastOffset uint64\n\tcolumnData := c.getColumnData()\n\tfor i := 0; i < c.offsetColumn.numRow; i++ {\n\t\tvalue = append(value, columnData[lastOffset:c.offsetColumn.Row(i)])\n\t\tlastOffset = c.offsetColumn.Row(i)\n\t}\n\treturn value\n}\n\n// RowP return the nullable value of given row as a pointer\n// NOTE: Row number start from zero\nfunc (c *ArrayNullable[T]) RowP(row int) []*T {\n\tvar lastOffset uint64\n\tif row != 0 {\n\t\tlastOffset = c.offsetColumn.Row(row - 1)\n\t}\n\tvar val []*T\n\tval = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...)\n\treturn val\n}\n\n// AppendP a nullable value for insert\nfunc (c *ArrayNullable[T]) AppendP(v ...[]*T) 
{\n\tfor _, v := range v {\n\t\tc.AppendLen(len(v))\n\t\tc.dataColumn.AppendP(v...)\n\t}\n}\n\n//\tAppendItemP Append nullable item value for insert\n//\n// it should use with AppendLen\n//\n// Example:\n//\n//\tc.AppendLen(2) // insert 2 items\n//\tc.AppendItemP(val1, val2) // insert item 1\nfunc (c *ArrayNullable[T]) AppendItemP(v ...*T) {\n\tc.dataColumn.AppendP(v...)\n}\n\n// ArrayOf return a Array type for this column\nfunc (c *ArrayNullable[T]) ArrayOf() *Array2Nullable[T] {\n\treturn NewArray2Nullable(c)\n}\n\n// ReadRaw read raw data from the reader. it runs automatically\nfunc (c *ArrayNullable[T]) ReadRaw(num int, r *readerwriter.Reader) error {\n\terr := c.Array.ReadRaw(num, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.columnData = c.dataColumn.DataP()\n\treturn nil\n}\n\nfunc (c *ArrayNullable[T]) getColumnData() []*T {\n\tif len(c.columnData) == 0 {\n\t\tc.columnData = c.dataColumn.DataP()\n\t}\n\treturn c.columnData\n}\n\nfunc (c *ArrayNullable[T]) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\treturn c.ArrayOf().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/base.go",
    "content": "package column\n\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\n// Column use for most (fixed size) ClickHouse Columns type\ntype Base[T comparable] struct {\n\tcolumn\n\tsize   int\n\tnumRow int\n\tvalues []T\n\tparams []interface{}\n}\n\n// New create a new column\nfunc New[T comparable]() *Base[T] {\n\tvar tmpValue T\n\tsize := int(unsafe.Sizeof(tmpValue))\n\treturn &Base[T]{\n\t\tsize: size,\n\t}\n}\n\n// Data get all the data in current block as a slice.\n//\n// NOTE: the return slice only valid in current block, if you want to use it after, you should copy it. or use Read\nfunc (c *Base[T]) Data() []T {\n\tvalue := *(*[]T)(unsafe.Pointer(&c.b))\n\treturn value[:c.numRow]\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Base[T]) Read(value []T) []T {\n\treturn append(value, c.Data()...)\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Base[T]) Row(row int) T {\n\ti := row * c.size\n\treturn *(*T)(unsafe.Pointer(&c.b[i]))\n}\n\n// Append value for insert\nfunc (c *Base[T]) Append(v ...T) {\n\tc.values = append(c.values, v...)\n\tc.numRow += len(v)\n}\n\n// NumRow return number of row for this block\nfunc (c *Base[T]) NumRow() int {\n\treturn c.numRow\n}\n\n// Array return a Array type for this column\nfunc (c *Base[T]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n\n// Nullable return a nullable type for this column\nfunc (c *Base[T]) Nullable() *Nullable[T] {\n\treturn NewNullable[T](c)\n}\n\n// LC return a low cardinality type for this column\nfunc (c *Base[T]) LC() *LowCardinality[T] {\n\treturn NewLC[T](c)\n}\n\n// LowCardinality return a low cardinality type for this column\nfunc (c *Base[T]) LowCardinality() *LowCardinality[T] {\n\treturn NewLowCardinality[T](c)\n}\n\n// appendEmpty append empty value for insert\n// this use internally for nullable and low cardinality nullable column\nfunc (c 
*Base[T]) appendEmpty() {\n\tvar emptyValue T\n\tc.Append(emptyValue)\n}\n\n// Reset all statuses and buffered data\n//\n// After each reading, the reading data does not need to be reset. It will be automatically reset.\n//\n// When inserting, buffers are reset only after the operation is successful.\n// If an error occurs, you can safely call insert again.\nfunc (c *Base[T]) Reset() {\n\tc.numRow = 0\n\tc.values = c.values[:0]\n}\n\n// SetWriteBufferSize set write buffer (number of rows)\n// this buffer only used for writing.\n// By setting this buffer, you will avoid allocating the memory several times.\nfunc (c *Base[T]) SetWriteBufferSize(row int) {\n\tif cap(c.values) < row {\n\t\tc.values = make([]T, 0, row)\n\t}\n}\n\n// ReadRaw read raw data from the reader. it runs automatically\nfunc (c *Base[T]) ReadRaw(num int, r *readerwriter.Reader) error {\n\tc.Reset()\n\tc.r = r\n\tc.numRow = num\n\tc.totalByte = num * c.size\n\terr := c.readBuffer()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"read data: %w\", err)\n\t}\n\tc.readyBufferHook()\n\treturn err\n}\n\nfunc (c *Base[T]) readBuffer() error {\n\tif cap(c.b) < c.totalByte {\n\t\tc.b = make([]byte, c.totalByte)\n\t} else {\n\t\tc.b = c.b[:c.totalByte]\n\t}\n\t_, err := c.r.Read(c.b)\n\treturn err\n}\n\n// HeaderReader reads header data from reader\n// it uses internally\nfunc (c *Base[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {\n\tc.r = r\n\treturn c.readColumn(readColumn, revision)\n}\n\n// HeaderWriter writes header data to writer\n// it uses internally\nfunc (c *Base[T]) HeaderWriter(w *readerwriter.Writer) {\n}\n\nfunc (c *Base[T]) Elem(arrayLevel int, nullable, lc bool) ColumnBasic {\n\tif nullable {\n\t\treturn c.Nullable().elem(arrayLevel, lc)\n\t}\n\tif lc {\n\t\treturn c.LowCardinality().elem(arrayLevel)\n\t}\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/base_big_cpu.go",
    "content": "//go:build !(386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv || riscv64)\n// +build !386,!amd64,!amd64p32,!arm,!arm64,!mipsle,!mips64le,!mips64p32le,!ppc64le,!riscv,!riscv64\n\npackage column\n\n// ReadAll read all value in this block and append to the input slice\nfunc (c *Base[T]) readyBufferHook() {\n\tfor i := 0; i < c.totalByte; i += c.size {\n\t\treverseBuffer(c.b[i : i+c.size])\n\t}\n}\n\nfunc reverseBuffer(s []byte) {\n\tfor i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {\n\t\ts[i], s[j] = s[j], s[i]\n\t}\n}\n\n// slice is the runtime representation of a slice.\n// It cannot be used safely or portably and its representation may\n// change in a later release.\n// Moreover, the Data field is not sufficient to guarantee the data\n// it references will not be garbage collected, so programs must keep\n// a separate, correctly typed pointer to the underlying data.\ntype slice struct {\n\tData uintptr\n\tLen  int\n\tCap  int\n}\n\nfunc (c *Base[T]) WriteTo(w io.Writer) (int64, error) {\n\ts := *(*slice)(unsafe.Pointer(&c.values))\n\ts.Len *= c.size\n\ts.Cap *= c.size\n\tb := *(*[]byte)(unsafe.Pointer(&s))\n\tfor i := 0; i < len(b); i += c.size {\n\t\treverseBuffer(b[i : i+c.size])\n\t}\n\tvar n int64\n\tnw, err := w.Write(*(*[]byte)(unsafe.Pointer(&s)))\n\treturn int64(nw) + n, err\n}\n"
  },
  {
    "path": "column/base_little_cpu.go",
    "content": "//go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv || riscv64\n// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv riscv64\n\npackage column\n\nimport (\n\t\"io\"\n\t\"unsafe\"\n)\n\nfunc (c *Base[T]) readyBufferHook() {\n}\n\n// slice is the runtime representation of a slice.\n// It cannot be used safely or portably and its representation may\n// change in a later release.\n// Moreover, the Data field is not sufficient to guarantee the data\n// it references will not be garbage collected, so programs must keep\n// a separate, correctly typed pointer to the underlying data.\ntype slice struct {\n\tData uintptr\n\tLen  int\n\tCap  int\n}\n\nfunc (c *Base[T]) WriteTo(w io.Writer) (int64, error) {\n\ts := *(*slice)(unsafe.Pointer(&c.values))\n\ts.Len *= c.size\n\ts.Cap *= c.size\n\tvar n int64\n\tsrc := *(*[]byte)(unsafe.Pointer(&s))\n\tnw, err := w.Write(src)\n\treturn int64(nw) + n, err\n}\n"
  },
  {
    "path": "column/base_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/big\"\n\t\"net/netip\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/types\"\n)\n\nfunc TestBool(t *testing.T) {\n\ttestColumn(t, true, \"Bool\", \"bool\", func(i int) bool {\n\t\treturn true\n\t}, func(i int) bool {\n\t\treturn false\n\t})\n}\n\nfunc TestBoolUint8(t *testing.T) {\n\ttestColumn(t, true, \"UInt8\", \"bool\", func(i int) bool {\n\t\treturn true\n\t}, func(i int) bool {\n\t\treturn false\n\t})\n}\n\nfunc TestUint8(t *testing.T) {\n\ttestColumn(t, true, \"UInt8\", \"uint8\", func(i int) uint8 {\n\t\treturn uint8(i)\n\t}, func(i int) uint8 {\n\t\treturn uint8(i + 1)\n\t})\n}\n\nfunc TestUint16(t *testing.T) {\n\ttestColumn(t, true, \"UInt16\", \"uint16\", func(i int) uint16 {\n\t\treturn uint16(i)\n\t}, func(i int) uint16 {\n\t\treturn uint16(i + 1)\n\t})\n}\n\nfunc TestUint32(t *testing.T) {\n\ttestColumn(t, true, \"UInt32\", \"uint32\", func(i int) uint32 {\n\t\treturn uint32(i)\n\t}, func(i int) uint32 {\n\t\treturn uint32(i + 1)\n\t})\n}\n\nfunc TestUint64(t *testing.T) {\n\ttestColumn(t, true, \"UInt64\", \"uint64\", func(i int) uint64 {\n\t\treturn uint64(i)\n\t}, func(i int) uint64 {\n\t\treturn uint64(i + 1)\n\t})\n}\n\nfunc TestUint128(t *testing.T) {\n\ttestColumn(t, true, \"UInt128\", \"uint128\", func(i int) types.Uint128 {\n\t\treturn types.Uint128FromBig(big.NewInt(int64(i)))\n\t}, func(i int) types.Uint128 {\n\t\tx := big.NewInt(int64(i))\n\t\tx = x.Mul(x, big.NewInt(math.MaxInt64))\n\t\treturn types.Uint128FromBig(x)\n\t})\n}\n\nfunc TestUint256(t *testing.T) {\n\ttestColumn(t, true, \"UInt256\", \"uint256\", func(i int) types.Uint256 {\n\t\treturn types.Uint256FromBig(big.NewInt(int64(i)))\n\t}, func(i int) 
types.Uint256 {\n\t\tx := big.NewInt(int64(i))\n\t\tx = x.Mul(x, big.NewInt(math.MaxInt64))\n\t\tx = x.Mul(x, big.NewInt(math.MaxInt64))\n\t\treturn types.Uint256FromBig(x)\n\t})\n}\n\nfunc TestInt8(t *testing.T) {\n\ttestColumn(t, true, \"Int8\", \"int8\", func(i int) int8 {\n\t\treturn int8(i)\n\t}, func(i int) int8 {\n\t\treturn int8(i + 1)\n\t})\n}\n\nfunc TestInt16(t *testing.T) {\n\ttestColumn(t, true, \"Int16\", \"int16\", func(i int) int16 {\n\t\treturn int16(i)\n\t}, func(i int) int16 {\n\t\treturn int16(i + 1)\n\t})\n}\n\nfunc TestInt32(t *testing.T) {\n\ttestColumn(t, true, \"Int32\", \"int32\", func(i int) int32 {\n\t\treturn int32(i)\n\t}, func(i int) int32 {\n\t\treturn int32(i + 1)\n\t})\n}\n\nfunc TestInt64(t *testing.T) {\n\ttestColumn(t, true, \"Int64\", \"int64\", func(i int) int64 {\n\t\treturn int64(i)\n\t}, func(i int) int64 {\n\t\treturn int64(i + 1)\n\t})\n}\n\nfunc TestInt128(t *testing.T) {\n\ttestColumn(t, true, \"Int128\", \"int128\", func(i int) types.Int128 {\n\t\treturn types.Int128FromBig(big.NewInt(int64(i * -1)))\n\t}, func(i int) types.Int128 {\n\t\tx := big.NewInt(int64(i) * -1)\n\t\tx = x.Mul(x, big.NewInt(math.MaxInt64))\n\t\treturn types.Int128FromBig(x)\n\t})\n}\n\nfunc TestInt256(t *testing.T) {\n\ttestColumn(t, true, \"Int256\", \"int256\", func(i int) types.Int256 {\n\t\treturn types.Int256FromBig(big.NewInt(int64(i)))\n\t}, func(i int) types.Int256 {\n\t\tx := big.NewInt(int64(i) * -1)\n\t\tx = x.Mul(x, big.NewInt(math.MaxInt64))\n\t\tx = x.Mul(x, big.NewInt(math.MaxInt64))\n\t\treturn types.Int256FromBig(x)\n\t})\n}\nfunc TestFixedString(t *testing.T) {\n\ttestColumn(t, true, \"FixedString(2)\", \"fixedString\", func(i int) [2]byte {\n\t\treturn [2]byte{byte(i), byte(i + 1)}\n\t}, func(i int) [2]byte {\n\t\treturn [2]byte{byte(i + 1), byte(i + 2)}\n\t})\n}\n\nfunc TestFloat32(t *testing.T) {\n\ttestColumn(t, true, \"Float32\", \"float32\", func(i int) float32 {\n\t\treturn float32(i)\n\t}, func(i int) float32 
{\n\t\treturn float32(i + 1)\n\t})\n}\n\nfunc TestFloat64(t *testing.T) {\n\ttestColumn(t, true, \"Float64\", \"float64\", func(i int) float64 {\n\t\treturn float64(i)\n\t}, func(i int) float64 {\n\t\treturn float64(i + 1)\n\t})\n}\n\nfunc TestDecimal32(t *testing.T) {\n\ttestColumn(t, false, \"Decimal32(3)\", \"decimal32\", func(i int) types.Decimal32 {\n\t\treturn types.Decimal32(i)\n\t}, func(i int) types.Decimal32 {\n\t\treturn types.Decimal32(i + 1)\n\t})\n}\nfunc TestDecimal64(t *testing.T) {\n\ttestColumn(t, false, \"Decimal64(3)\", \"decimal64\", func(i int) types.Decimal64 {\n\t\treturn types.Decimal64(i)\n\t}, func(i int) types.Decimal64 {\n\t\treturn types.Decimal64(i + 1)\n\t})\n}\n\nfunc TestDecimal128(t *testing.T) {\n\ttestColumn(t, false, \"Decimal128(3)\", \"decimal128\", func(i int) types.Decimal128 {\n\t\treturn types.Decimal128(types.Int128FromBig(big.NewInt(int64(i))))\n\t}, func(i int) types.Decimal128 {\n\t\treturn types.Decimal128(types.Int128FromBig(big.NewInt(int64(i + 1))))\n\t})\n}\n\nfunc TestDecimal256(t *testing.T) {\n\ttestColumn(t, false, \"Decimal256(3)\", \"decimal256\", func(i int) types.Decimal256 {\n\t\treturn types.Decimal256(types.Int256FromBig(big.NewInt(int64(i))))\n\t}, func(i int) types.Decimal256 {\n\t\treturn types.Decimal256(types.Int256FromBig(big.NewInt(int64(i + 1))))\n\t})\n}\n\nfunc TestIPv4(t *testing.T) {\n\ttestColumn(t, true, \"IPv4\", \"ipv4\", func(i int) types.IPv4 {\n\t\t// or directly return types.IPv4\n\t\treturn types.IPv4FromAddr(netip.AddrFrom4([4]byte{0, 0, 0, byte(i)}))\n\t}, func(i int) types.IPv4 {\n\t\t// or directly return types.IPv4\n\t\treturn types.IPv4FromAddr(netip.AddrFrom4([4]byte{0, 0, byte(i), 0}))\n\t})\n}\n\nfunc TestIPv6(t *testing.T) {\n\ttestColumn(t, true, \"IPv6\", \"ipv6\", func(i int) types.IPv6 {\n\t\t// or directly return types.IPv6\n\t\treturn types.IPv6FromAddr(netip.MustParseAddr(\"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"))\n\t}, func(i int) types.IPv6 {\n\t\t// or 
directly return types.IPv6\n\t\treturn types.IPv6FromAddr(netip.AddrFrom16([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, byte(i + 1)}))\n\t})\n}\n\nfunc TestUUID(t *testing.T) {\n\ttestColumn(t, true, \"UUID\", \"uuid\", func(i int) types.UUID {\n\t\treturn types.UUIDFromBigEndian(uuid.New())\n\t}, func(i int) types.UUID {\n\t\treturn types.UUIDFromBigEndian(uuid.New())\n\t})\n}\n\nfunc testColumn[T comparable](\n\tt *testing.T,\n\tisLC bool,\n\tchType, tableName string,\n\tfirstVal func(i int) T,\n\tsecondVal func(i int) T,\n) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:      \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue:     \"true\",\n\t\t\tImportant: true,\n\t\t},\n\t}\n\n\tvar sqlCreate string\n\tif isLC {\n\t\tsqlCreate = fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t\tblock_id UInt8,\n\t\t\t%[1]s %[2]s,\n\t\t\t%[1]s_nullable Nullable(%[2]s),\n\t\t\t%[1]s_array Array(%[2]s),\n\t\t\t%[1]s_array_nullable Array(Nullable(%[2]s)),\n\t\t\t%[1]s_lc LowCardinality(%[2]s),\n\t\t\t%[1]s_nullable_lc LowCardinality(Nullable(%[2]s)),\n\t\t\t%[1]s_array_lc Array(LowCardinality(%[2]s)),\n\t\t\t%[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s)))\n\t\t) Engine=Memory`, tableName, chType)\n\t} else {\n\t\tsqlCreate = fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t\tblock_id UInt8,\n\t\t\t%[1]s %[2]s,\n\t\t\t%[1]s_nullable Nullable(%[2]s),\n\t\t\t%[1]s_array Array(%[2]s),\n\t\t\t%[1]s_array_nullable Array(Nullable(%[2]s))\n\t\t) Engine=Memory`, tableName, chType)\n\t}\n\terr = conn.ExecWithOption(context.Background(), sqlCreate, &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\tblockID := column.New[uint8]()\n\tcol := 
column.New[T]()\n\tcolNullable := column.New[T]().Nullable()\n\tcolArray := column.New[T]().Array()\n\tcolNullableArray := column.New[T]().Nullable().Array()\n\tcolLC := column.New[T]().LC()\n\tcolLCNullable := column.New[T]().Nullable().LC()\n\tcolArrayLC := column.New[T]().LC().Array()\n\tcolArrayLCNullable := column.New[T]().Nullable().LC().Array()\n\tvar colInsert []T\n\tvar colNullableInsert []*T\n\tvar colArrayInsert [][]T\n\tvar colArrayNullableInsert [][]*T\n\tvar colLCInsert []T\n\tvar colLCNullableInsert []*T\n\tvar colLCArrayInsert [][]T\n\tvar colLCNullableArrayInsert [][]*T\n\n\t// SetWriteBufferSize is not necessary. this just to show how to set write buffer\n\tcol.SetWriteBufferSize(10)\n\tcolNullable.SetWriteBufferSize(10)\n\tcolArray.SetWriteBufferSize(10)\n\tcolNullableArray.SetWriteBufferSize(10)\n\tcolLC.SetWriteBufferSize(10)\n\tcolLCNullable.SetWriteBufferSize(10)\n\tcolArrayLC.SetWriteBufferSize(10)\n\tcolArrayLCNullable.SetWriteBufferSize(10)\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tblockID.Append(uint8(insertN))\n\t\t\tval := firstVal(i * (insertN + 1))\n\t\t\tval2 := secondVal(i * (insertN + 1))\n\t\t\tvalArray := []T{val, val2}\n\t\t\tvalArrayNil := []*T{&val, nil}\n\n\t\t\tcol.Append(val)\n\t\t\tcolInsert = append(colInsert, val)\n\n\t\t\t// example add nullable\n\t\t\tif i%2 == 0 {\n\t\t\t\tcolNullableInsert = append(colNullableInsert, &val)\n\t\t\t\tcolNullable.Append(val)\n\t\t\t\tcolLCNullableInsert = append(colLCNullableInsert, &val)\n\t\t\t\tcolLCNullable.Append(val)\n\t\t\t} else {\n\t\t\t\tcolNullableInsert = append(colNullableInsert, nil)\n\t\t\t\tcolNullable.AppendNil()\n\t\t\t\tcolLCNullableInsert = append(colLCNullableInsert, nil)\n\t\t\t\tcolLCNullable.AppendNil()\n\t\t\t}\n\n\t\t\tcolArray.Append(valArray)\n\t\t\tcolArrayInsert = append(colArrayInsert, valArray)\n\n\t\t\tcolNullableArray.AppendP(valArrayNil)\n\t\t\tcolArrayNullableInsert = 
append(colArrayNullableInsert, valArrayNil)\n\n\t\t\tcolLCInsert = append(colLCInsert, val)\n\t\t\tcolLC.Append(val)\n\n\t\t\tcolLCArrayInsert = append(colLCArrayInsert, valArray)\n\t\t\tcolArrayLC.Append(valArray)\n\n\t\t\tcolLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil)\n\t\t\tcolArrayLCNullable.AppendP(valArrayNil)\n\t\t}\n\t\tif isLC {\n\t\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\tblock_id,\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable,\n\t\t\t\t%[1]s_lc,\n\t\t\t\t%[1]s_nullable_lc,\n\t\t\t\t%[1]s_array_lc,\n\t\t\t\t%[1]s_array_lc_nullable\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\t\tblockID,\n\t\t\t\tcol,\n\t\t\t\tcolNullable,\n\t\t\t\tcolArray,\n\t\t\t\tcolNullableArray,\n\t\t\t\tcolLC,\n\t\t\t\tcolLCNullable,\n\t\t\t\tcolArrayLC,\n\t\t\t\tcolArrayLCNullable,\n\t\t\t)\n\t\t} else {\n\t\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\tblock_id,\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\t\tblockID,\n\t\t\t\tcol,\n\t\t\t\tcolNullable,\n\t\t\t\tcolArray,\n\t\t\t\tcolNullableArray,\n\t\t\t)\n\t\t}\n\n\t\trequire.NoError(t, err)\n\t}\n\n\t// test read all\n\tcolRead := column.New[T]()\n\tcolNullableRead := column.New[T]().Nullable()\n\tcolArrayRead := column.New[T]().Array()\n\tcolNullableArrayRead := column.New[T]().Nullable().Array()\n\tcolLCRead := column.New[T]().LC()\n\tcolLCNullableRead := column.New[T]().Nullable().LC()\n\tcolArrayLCRead := column.New[T]().LC().Array()\n\tcolArrayLCNullableRead := column.New[T]().Nullable().LC().Array()\n\tvar selectStmt chconn.SelectStmt\n\tif isLC {\n\t\tselectStmt, err = conn.Select(context.Background(), 
fmt.Sprintf(`SELECT\n\t\t%[1]s,\n\t\t%[1]s_nullable,\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_lc,\n\t\t%[1]s_nullable_lc,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\tFROM test_%[1]s order by block_id`, tableName),\n\t\t\tcolRead,\n\t\t\tcolNullableRead,\n\t\t\tcolArrayRead,\n\t\t\tcolNullableArrayRead,\n\t\t\tcolLCRead,\n\t\t\tcolLCNullableRead,\n\t\t\tcolArrayLCRead,\n\t\t\tcolArrayLCNullableRead,\n\t\t)\n\t} else {\n\t\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s,\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array,\n\t\t\t%[1]s_array_nullable\n\t\tFROM test_%[1]s order by block_id`, tableName),\n\t\t\tcolRead,\n\t\t\tcolNullableRead,\n\t\t\tcolArrayRead,\n\t\t\tcolNullableArrayRead,\n\t\t)\n\t}\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar colData []T\n\tvar colNullableData []*T\n\tvar colArrayData [][]T\n\tvar colArrayNullableData [][]*T\n\tvar colLCData []T\n\tvar colLCDataWithKeys []T\n\tvar dictData []T\n\tvar dictKey []int\n\tvar colLCNullableData []*T\n\tvar colLCArrayData [][]T\n\tvar colLCNullableArrayData [][]*T\n\n\tfor selectStmt.Next() {\n\t\tcolData = colRead.Read(colData)\n\t\tcolNullableData = colNullableRead.ReadP(colNullableData)\n\t\tcolArrayData = colArrayRead.Read(colArrayData)\n\t\tcolArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData)\n\t\tif isLC {\n\t\t\tcolLCData = colLCRead.Read(colLCData)\n\t\t\tcolLCNullableData = colLCNullableRead.ReadP(colLCNullableData)\n\t\t\tcolLCArrayData = colArrayLCRead.Read(colLCArrayData)\n\t\t\tcolLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData)\n\t\t\tdictData = colLCRead.Dicts()\n\t\t\tdictKey = colLCRead.Keys()\n\t\t\t// get data from dict and keys\n\t\t\tfor _, val := range dictKey {\n\t\t\t\tcolLCDataWithKeys = append(colLCDataWithKeys, dictData[val])\n\t\t\t}\n\t\t}\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, 
colNullableInsert, colNullableData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colArrayNullableInsert, colArrayNullableData)\n\tif isLC {\n\t\tassert.Equal(t, colLCInsert, colLCData)\n\t\tassert.Equal(t, colLCInsert, colLCDataWithKeys)\n\t\tassert.Equal(t, colLCNullableInsert, colLCNullableData)\n\t\tassert.Equal(t, colLCArrayInsert, colLCArrayData)\n\t\tassert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)\n\t}\n\n\t// test row\n\tcolRead = column.New[T]()\n\tcolNullableRead = column.New[T]().Nullable()\n\tcolArrayRead = column.New[T]().Array()\n\tcolNullableArrayRead = column.New[T]().Nullable().Array()\n\tcolLCRead = column.New[T]().LowCardinality()\n\tcolLCNullableRead = column.New[T]().Nullable().LowCardinality()\n\tcolArrayLCRead = column.New[T]().LowCardinality().Array()\n\tcolArrayLCNullableRead = column.New[T]().Nullable().LowCardinality().Array()\n\tif isLC {\n\t\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s,\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array,\n\t\t\t%[1]s_array_nullable,\n\t\t\t%[1]s_lc,\n\t\t\t%[1]s_nullable_lc,\n\t\t\t%[1]s_array_lc,\n\t\t\t%[1]s_array_lc_nullable\n\t\tFROM test_%[1]s order by block_id`, tableName),\n\t\t\tcolRead,\n\t\t\tcolNullableRead,\n\t\t\tcolArrayRead,\n\t\t\tcolNullableArrayRead,\n\t\t\tcolLCRead,\n\t\t\tcolLCNullableRead,\n\t\t\tcolArrayLCRead,\n\t\t\tcolArrayLCNullableRead,\n\t\t)\n\t} else {\n\t\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable\n\t\t\tFROM test_%[1]s order by block_id`, tableName),\n\t\t\tcolRead,\n\t\t\tcolNullableRead,\n\t\t\tcolArrayRead,\n\t\t\tcolNullableArrayRead,\n\t\t)\n\t}\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tcolData = colData[:0]\n\tcolNullableData = colNullableData[:0]\n\tcolArrayData = colArrayData[:0]\n\tcolArrayNullableData = colArrayNullableData[:0]\n\tcolLCData = 
colLCData[:0]\n\tcolLCNullableData = colLCNullableData[:0]\n\tcolLCArrayData = colLCArrayData[:0]\n\tcolLCNullableArrayData = colLCNullableArrayData[:0]\n\n\tfor selectStmt.Next() {\n\t\tfor i := 0; i < selectStmt.RowsInBlock(); i++ {\n\t\t\tcolData = append(colData, colRead.Row(i))\n\t\t\tcolNullableData = append(colNullableData, colNullableRead.RowP(i))\n\t\t\tcolArrayData = append(colArrayData, colArrayRead.Row(i))\n\t\t\tcolArrayNullableData = append(colArrayNullableData, colNullableArrayRead.RowP(i))\n\t\t\tif isLC {\n\t\t\t\tcolLCData = append(colLCData, colLCRead.Row(i))\n\t\t\t\tcolLCNullableData = append(colLCNullableData, colLCNullableRead.RowP(i))\n\t\t\t\tcolLCArrayData = append(colLCArrayData, colArrayLCRead.Row(i))\n\t\t\t\tcolLCNullableArrayData = append(colLCNullableArrayData, colArrayLCNullableRead.RowP(i))\n\t\t\t}\n\t\t}\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, colNullableInsert, colNullableData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colArrayNullableInsert, colArrayNullableData)\n\tif isLC {\n\t\tassert.Equal(t, colLCInsert, colLCData)\n\t\tassert.Equal(t, colLCNullableInsert, colLCNullableData)\n\t\tassert.Equal(t, colLCArrayInsert, colLCArrayData)\n\t\tassert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)\n\t}\n\n\t// check dynamic column\n\tif isLC {\n\t\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s,\n\t\t%[1]s_nullable,\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_lc,\n\t\t%[1]s_nullable_lc,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\t\tFROM test_%[1]s order by block_id`, tableName),\n\t\t)\n\t} else {\n\t\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable\n\t\t\tFROM test_%[1]s order by block_id`, tableName),\n\t\t)\n\t}\n\trequire.NoError(t, err)\n\tautoColumns := 
selectStmt.Columns()\n\tif isLC {\n\t\tassert.Len(t, autoColumns, 8)\n\t\tif tableName == \"bool\" {\n\t\t\tassert.Equal(t, column.New[uint8]().ColumnType(), autoColumns[0].ColumnType())\n\t\t\tassert.Equal(t, column.New[uint8]().Nullable().ColumnType(), autoColumns[1].ColumnType())\n\t\t\tassert.Equal(t, column.New[uint8]().Array().ColumnType(), autoColumns[2].ColumnType())\n\t\t\tassert.Equal(t, column.New[uint8]().Nullable().Array().ColumnType(), autoColumns[3].ColumnType())\n\t\t\tassert.Equal(t, column.New[uint8]().LowCardinality().ColumnType(), autoColumns[4].ColumnType())\n\t\t\tassert.Equal(t, column.New[uint8]().Nullable().LowCardinality().ColumnType(), autoColumns[5].ColumnType())\n\t\t\tassert.Equal(t, column.New[uint8]().LowCardinality().Array().ColumnType(), autoColumns[6].ColumnType())\n\t\t\tassert.Equal(t, column.New[uint8]().Nullable().LowCardinality().Array().ColumnType(), autoColumns[7].ColumnType())\n\t\t} else {\n\t\t\tassert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())\n\t\t\tassert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())\n\t\t\tassert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())\n\t\t\tassert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())\n\t\t\tassert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType())\n\t\t\tassert.Equal(t, colLCNullableRead.ColumnType(), autoColumns[5].ColumnType())\n\t\t\tassert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType())\n\t\t\tassert.Equal(t, colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType())\n\t\t}\n\t} else {\n\t\tassert.Len(t, autoColumns, 4)\n\t\tassert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())\n\t\tassert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())\n\t\tassert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())\n\t\tassert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())\n\t}\n\n\tfor selectStmt.Next() 
{\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n}\n\nfunc TestEmptyCollection(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\ttableName := \"empty_collection\"\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\n\tsqlCreate := fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t\t%[1]s_array Array(%[2]s),\n\t\t\t%[1]s_array_nullable Array(Nullable(%[2]s)),\n\t\t\t%[1]s_array_lc Array(LowCardinality(%[2]s)),\n\t\t\t%[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s)))\n\t\t) Engine=Memory`, tableName, \"UInt16\")\n\n\terr = conn.ExecWithOption(context.Background(), sqlCreate, &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\tcolArray := column.New[uint16]().Array()\n\tcolNullableArray := column.New[uint16]().Nullable().Array()\n\tcolArrayLC := column.New[uint16]().LC().Array()\n\tcolArrayLCNullable := column.New[uint16]().Nullable().LC().Array()\n\tcolArray.Append()\n\tcolArray.Append([]uint16{})\n\tcolNullableArray.AppendP()\n\tcolNullableArray.AppendP([]*uint16{})\n\tcolArrayLC.Append()\n\tcolArrayLC.Append([]uint16{})\n\tcolArrayLCNullable.AppendP()\n\tcolArrayLCNullable.AppendP([]*uint16{})\n\n\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable,\n\t\t\t\t%[1]s_array_lc,\n\t\t\t\t%[1]s_array_lc_nullable\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\tcolArray,\n\t\tcolNullableArray,\n\t\tcolArrayLC,\n\t\tcolArrayLCNullable,\n\t)\n\n\trequire.NoError(t, err)\n\n\t// test read all\n\tcolArrayRead := column.New[uint16]().Array()\n\tcolNullableArrayRead := 
column.New[uint16]().Nullable().Array()\n\tcolArrayLCRead := column.New[uint16]().LC().Array()\n\tcolArrayLCNullableRead := column.New[uint16]().Nullable().LC().Array()\n\tvar selectStmt chconn.SelectStmt\n\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\tFROM test_%[1]s `, tableName),\n\t\tcolArrayRead,\n\t\tcolNullableArrayRead,\n\t\tcolArrayLCRead,\n\t\tcolArrayLCNullableRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar colArrayData [][]uint16\n\tvar colArrayNullableData [][]*uint16\n\tvar colLCArrayData [][]uint16\n\tvar colLCNullableArrayData [][]*uint16\n\n\tfor selectStmt.Next() {\n\t\tcolArrayData = colArrayRead.Read(colArrayData)\n\t\tcolArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData)\n\t\tcolLCArrayData = colArrayLCRead.Read(colLCArrayData)\n\t\tcolLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, [][]uint16{{}}, colArrayData)\n\tassert.Equal(t, [][]*uint16{{}}, colArrayNullableData)\n\n\tassert.Equal(t, [][]uint16{{}}, colLCArrayData)\n\tassert.Equal(t, [][]*uint16{{}}, colLCNullableArrayData)\n}\n"
  },
  {
    "path": "column/base_validate.go",
    "content": "package column\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n)\n\nvar chColumnByteSize = map[string]int{\n\t\"Bool\":       1,\n\t\"Int8\":       1,\n\t\"Int16\":      2,\n\t\"Int32\":      4,\n\t\"Int64\":      8,\n\t\"Int128\":     16,\n\t\"Int256\":     32,\n\t\"UInt8\":      1,\n\t\"UInt16\":     2,\n\t\"UInt32\":     4,\n\t\"UInt64\":     8,\n\t\"UInt128\":    16,\n\t\"UInt256\":    32,\n\t\"Float32\":    4,\n\t\"Float64\":    8,\n\t\"Date\":       2,\n\t\"Date32\":     4,\n\t\"DateTime\":   4,\n\t\"DateTime64\": 8,\n\t\"UUID\":       16,\n\t\"IPv4\":       4,\n\t\"IPv6\":       16,\n}\n\nvar byteChColumnType = map[int]string{\n\t1:  \"Int8|UInt8|Enum8\",\n\t2:  \"Int16|UInt16|Enum16|Date\",\n\t4:  \"Int32|UInt32|Float32|Decimal32|Date32|DateTime|IPv4\",\n\t8:  \"Int64|UInt64|Float64|Decimal64|DateTime64\",\n\t16: \"Int128|UInt128|Decimal128|IPv6|UUID\",\n\t32: \"Int256|UInt256|Decimal256\",\n}\n\nfunc (c *Base[T]) Validate() error {\n\tchType := helper.FilterSimpleAggregate(c.chType)\n\tif byteSize, ok := chColumnByteSize[string(chType)]; ok {\n\t\tif byteSize != c.size {\n\t\t\treturn &ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tif ok, err := c.checkEnum8(chType); ok {\n\t\treturn err\n\t}\n\n\tif ok, err := c.checkEnum16(chType); ok {\n\t\treturn err\n\t}\n\n\tif ok, err := c.checkDateTime(chType); ok {\n\t\treturn err\n\t}\n\n\tif ok, err := c.checkDateTime(chType); ok {\n\t\treturn err\n\t}\n\n\tif ok, err := c.checkDateTime64(chType); ok {\n\t\treturn err\n\t}\n\tif ok, err := c.checkFixedString(chType); ok {\n\t\treturn err\n\t}\n\tif ok, err := c.checkDecimal(chType); ok {\n\t\treturn err\n\t}\n\n\treturn &ErrInvalidType{\n\t\tcolumn: c,\n\t}\n}\n\nfunc (c *Base[T]) checkEnum8(chType []byte) (bool, error) {\n\tif helper.IsEnum8(chType) {\n\t\tif c.size != Uint8Size {\n\t\t\treturn true, &ErrInvalidType{\n\t\t\t\tcolumn: 
c,\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (c *Base[T]) checkEnum16(chType []byte) (bool, error) {\n\tif helper.IsEnum16(chType) {\n\t\tif c.size != Uint16Size {\n\t\t\treturn true, &ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (c *Base[T]) checkDateTime(chType []byte) (bool, error) {\n\tif helper.IsDateTimeWithParam(chType) {\n\t\tif c.size != 4 {\n\t\t\treturn true, &ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t\tc.params = []interface{}{\n\t\t\t// precision\n\t\t\t0,\n\t\t\t// timezone\n\t\t\tchType[helper.DateTimeStrLen : len(chType)-1],\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (c *Base[T]) checkDateTime64(chType []byte) (bool, error) {\n\tif helper.IsDateTime64(chType) {\n\t\tif c.size != 8 {\n\t\t\treturn true, &ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t\tparts := bytes.Split(chType[helper.DecimalStrLen:len(chType)-1], []byte(\", \"))\n\t\tc.params = []interface{}{\n\t\t\tparts[0],\n\t\t\t[]byte{},\n\t\t}\n\t\tif len(parts) > 1 {\n\t\t\tc.params[1] = parts[1]\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (c *Base[T]) checkFixedString(chType []byte) (bool, error) {\n\tif helper.IsFixedString(chType) {\n\t\tsize, err := strconv.Atoi(string(chType[helper.FixedStringStrLen : len(chType)-1]))\n\t\tif err != nil {\n\t\t\treturn true, fmt.Errorf(\"invalid size: %s\", err)\n\t\t}\n\t\tif c.size != size {\n\t\t\treturn true, &ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (c *Base[T]) checkDecimal(chType []byte) (bool, error) {\n\tif helper.IsDecimal(chType) {\n\t\tparts := bytes.Split(chType[helper.DecimalStrLen:len(chType)-1], []byte(\", \"))\n\t\tif len(parts) != 2 {\n\t\t\treturn true, fmt.Errorf(\"invalid decimal type (should have precision and scale): %s\", c.chType)\n\t\t}\n\n\t\tprecision, err := 
strconv.Atoi(string(parts[0]))\n\t\tif err != nil {\n\t\t\treturn true, fmt.Errorf(\"invalid precision: %s\", err)\n\t\t}\n\t\tscale, err := strconv.Atoi(string(parts[1]))\n\t\tif err != nil {\n\t\t\treturn true, fmt.Errorf(\"invalid scale: %s\", err)\n\t\t}\n\t\tc.params = []interface{}{precision, scale}\n\t\tvar size int\n\t\tswitch {\n\t\tcase precision >= 1 && precision <= 9:\n\t\t\tsize = 4\n\t\tcase precision >= 10 && precision <= 18:\n\t\t\tsize = 8\n\t\tcase precision >= 19 && precision <= 38:\n\t\t\tsize = 16\n\t\tcase precision >= 39 && precision <= 76:\n\t\t\tsize = 32\n\t\tdefault:\n\t\t\treturn true, fmt.Errorf(\"invalid precision: %d. it should be between 1 and 76\", precision)\n\t\t}\n\t\tif c.size != size {\n\t\t\treturn true, &ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (c *Base[T]) ColumnType() string {\n\tif ok, _ := c.checkFixedString(c.chType); !ok {\n\t\tif str, ok := byteChColumnType[c.size]; ok {\n\t\t\treturn str\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"T(%d bytes size)\", c.size)\n}\n"
  },
  {
    "path": "column/bench_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc BenchmarkTestChconnSelect100MUint64(b *testing.B) {\n\t// return\n\tctx := context.Background()\n\tc, err := chconn.Connect(ctx, \"password=salam\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tcolRead := column.New[uint64]()\n\tfor n := 0; n < b.N; n++ {\n\t\ts, err := c.Select(ctx, \"SELECT number FROM system.numbers_mt LIMIT 100000000\", colRead)\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\n\t\tfor s.Next() {\n\t\t\tcolRead.Data()\n\t\t}\n\t\tif err := s.Err(); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\ts.Close()\n\t}\n}\n\nfunc BenchmarkTestChconnSelect1MString(b *testing.B) {\n\tctx := context.Background()\n\tc, err := chconn.Connect(ctx, \"password=salam\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tcolRead := column.NewString()\n\tvar data [][]byte\n\tfor n := 0; n < b.N; n++ {\n\t\ts, err := c.Select(ctx, \"SELECT randomString(20) FROM system.numbers_mt LIMIT 1000000\", colRead)\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\n\t\tfor s.Next() {\n\t\t\tdata = data[:0]\n\t\t\tcolRead.DataBytes()\n\t\t}\n\t\tif err := s.Err(); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\ts.Close()\n\t}\n}\n\nfunc BenchmarkTestChconnInsert10M(b *testing.B) {\n\t// return\n\tctx := context.Background()\n\tc, err := chconn.Connect(ctx, \"password=salam\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\terr = c.Exec(ctx, \"DROP TABLE IF EXISTS test_insert_chconn\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\terr = c.Exec(ctx, \"CREATE TABLE test_insert_chconn (id UInt64) ENGINE = Null\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tconst (\n\t\trowsInBlock = 10_000_000\n\t)\n\n\tidColumns := column.New[uint64]()\n\tidColumns.SetWriteBufferSize(rowsInBlock)\n\tfor n := 0; n < b.N; n++ {\n\t\tfor y := 0; y < rowsInBlock; y++ {\n\t\t\tidColumns.Append(1)\n\t\t}\n\t\terr := c.Insert(ctx, 
\"INSERT INTO test_insert_chconn VALUES\", idColumns)\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "column/column_helper.go",
    "content": "package column\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\ntype ColumnBasic interface {\n\tReadRaw(num int, r *readerwriter.Reader) error\n\tHeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error\n\tHeaderWriter(*readerwriter.Writer)\n\tWriteTo(io.Writer) (int64, error)\n\tNumRow() int\n\tReset()\n\tSetType(v []byte)\n\tType() []byte\n\tSetName(v []byte)\n\tName() []byte\n\tValidate() error\n\tColumnType() string\n\tSetWriteBufferSize(int)\n}\n\ntype Column[T any] interface {\n\tColumnBasic\n\tData() []T\n\tRead([]T) []T\n\tRow(int) T\n\tAppend(...T)\n}\n\ntype NullableColumn[T any] interface {\n\tColumn[T]\n\tDataP() []*T\n\tReadP([]*T) []*T\n\tRowP(int) *T\n\tAppendP(...*T)\n}\n\ntype column struct {\n\tr         *readerwriter.Reader\n\tb         []byte\n\ttotalByte int\n\tname      []byte\n\tchType    []byte\n\tparent    ColumnBasic\n}\n\nfunc (c *column) readColumn(readColumn bool, revision uint64) error {\n\tif c.parent != nil || !readColumn {\n\t\treturn nil\n\t}\n\tstrLen, err := c.r.Uvarint()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read column name length: %w\", err)\n\t}\n\tif cap(c.name) < int(strLen) {\n\t\tc.name = make([]byte, strLen)\n\t} else {\n\t\tc.name = c.name[:strLen]\n\t}\n\t_, err = c.r.Read(c.name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read column name: %w\", err)\n\t}\n\n\tstrLen, err = c.r.Uvarint()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read column type length: %w\", err)\n\t}\n\tif cap(c.chType) < int(strLen) {\n\t\tc.chType = make([]byte, strLen)\n\t} else {\n\t\tc.chType = c.chType[:strLen]\n\t}\n\t_, err = c.r.Read(c.chType)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read column type: %w\", err)\n\t}\n\n\tif revision >= helper.DbmsMinProtocolWithCustomSerialization {\n\t\thasCustomSerialization, err := c.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"read custom serialization: %w\", err)\n\t\t}\n\t\t// todo check with json object\n\t\tif hasCustomSerialization == 1 {\n\t\t\treturn fmt.Errorf(\"custom serialization not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Name get name of the column\nfunc (c *column) Name() []byte {\n\treturn c.name\n}\n\n// Type get clickhouse type\nfunc (c *column) Type() []byte {\n\treturn c.chType\n}\n\n// SetName set name of the column\nfunc (c *column) SetName(v []byte) {\n\tc.name = v\n}\n\n// SetType set clickhouse type\nfunc (c *column) SetType(v []byte) {\n\tc.chType = v\n}\n"
  },
  {
    "path": "column/date.go",
    "content": "package column\n\nimport (\n\t\"strings\"\n\t\"time\"\n\t\"unsafe\"\n)\n\n// DateType is an interface to handle convert between time.Time and T.\ntype DateType[T any] interface {\n\tcomparable\n\tFromTime(val time.Time, precision int) T\n\tToTime(val *time.Location, precision int) time.Time\n}\n\n// Date is a date column of ClickHouse date type (Date, Date32, DateTime, DateTime64).\n// it is a wrapper of time.Time. but if you want to work with the raw data like unix timestamp\n// you can directly use `Column` (`New[T]()`)\n//\n// `uint16` or `types.Date` or any 16 bits data types For `Date`.\n//\n// `uint32` or `types.Date32` or any 32 bits data types For `Date32`\n//\n// `uint32` or `types.DateTime` or any 32 bits data types For `DateTime`\n//\n// `uint64` or `types.DateTime64` or any 64 bits data types For `DateTime64`\ntype Date[T DateType[T]] struct {\n\tBase[T]\n\tloc       *time.Location\n\tprecision int\n}\n\n// NewDate create a new date column of ClickHouse date type (Date, Date32, DateTime, DateTime64).\n// it is a wrapper of time.Time. but if you want to work with the raw data like unix timestamp\n// you can directly use `Column` (`New[T]()``)\n//\n// `uint16` or `types.Date` or any 16 bits data types For `Date`.\n//\n// `uint32` or `types.Date32` or any 32 bits data types For `Date32`\n//\n// `uint32` or `types.DateTime` or any 32 bits data types For `DateTime`\n//\n// `uint64` or `types.DateTime64` or any 64 bits data types For `DateTime64`\n//\n// ONLY ON SELECT, timezone set automatically for `DateTime` and `DateTime64` if not set and present in clickhouse datatype)\n\nfunc NewDate[T DateType[T]]() *Date[T] {\n\tvar tmpValue T\n\tsize := int(unsafe.Sizeof(tmpValue))\n\treturn &Date[T]{\n\t\tBase: Base[T]{\n\t\t\tsize: size,\n\t\t},\n\t}\n}\n\n// SetLocation set the location of the time.Time. 
Only use for `DateTime` and `DateTime64`\nfunc (c *Date[T]) SetLocation(loc *time.Location) *Date[T] {\n\tc.loc = loc\n\treturn c\n}\n\n// Location get location\n//\n// ONLY ON SELECT, set automatically for `DateTime` and `DateTime64` if not set and present in clickhouse datatype)\nfunc (c *Date[T]) Location() *time.Location {\n\tif c.loc == nil && len(c.params) >= 2 && len(c.params[1].([]byte)) > 0 {\n\t\tloc, err := time.LoadLocation(strings.Trim(string(c.params[1].([]byte)), \"'\"))\n\t\tif err == nil {\n\t\t\tc.SetLocation(loc)\n\t\t} else {\n\t\t\tc.SetLocation(time.Local)\n\t\t}\n\t}\n\tif c.loc == nil {\n\t\tc.SetLocation(time.Local)\n\t}\n\treturn c.loc\n}\n\n// SetPrecision set the precision of the time.Time. Only use for `DateTime64`\nfunc (c *Date[T]) SetPrecision(precision int) *Date[T] {\n\tc.precision = precision\n\treturn c\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Date[T]) Data() []time.Time {\n\tvalues := make([]time.Time, c.numRow)\n\tfor i := 0; i < c.numRow; i++ {\n\t\tvalues[i] = c.Row(i)\n\t}\n\treturn values\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Date[T]) Read(value []time.Time) []time.Time {\n\tif cap(value)-len(value) >= c.NumRow() {\n\t\tvalue = (value)[:len(value)+c.NumRow()]\n\t} else {\n\t\tvalue = append(value, make([]time.Time, c.NumRow())...)\n\t}\n\tval := (value)[len(value)-c.NumRow():]\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i] = c.Row(i)\n\t}\n\treturn value\n}\n\n// Row return the value of given row\n// NOTE: Row number start from zero\nfunc (c *Date[T]) Row(row int) time.Time {\n\ti := row * c.size\n\treturn (*(*T)(unsafe.Pointer(&c.b[i]))).ToTime(c.Location(), c.precision)\n}\n\n// Append value for insert\nfunc (c *Date[T]) Append(v ...time.Time) {\n\tvar val T\n\tfor _, v := range v {\n\t\tc.values = append(c.values, val.FromTime(v, c.precision))\n\t}\n\tc.numRow += len(v)\n}\n\n// Array return a Array type for this column\nfunc (c *Date[T]) 
Array() *Array[time.Time] {\n\treturn NewArray[time.Time](c)\n}\n\n// Nullable return a nullable type for this column\nfunc (c *Date[T]) Nullable() *Nullable[time.Time] {\n\treturn NewNullable[time.Time](c)\n}\n\n// LC return a low cardinality type for this column\nfunc (c *Date[T]) LC() *LowCardinality[time.Time] {\n\treturn NewLC[time.Time](c)\n}\n\n// LowCardinality return a low cardinality type for this column\nfunc (c *Date[T]) LowCardinality() *LowCardinality[time.Time] {\n\treturn NewLC[time.Time](c)\n}\n\nfunc (c *Date[T]) Elem(arrayLevel int, nullable, lc bool) ColumnBasic {\n\tif nullable {\n\t\treturn c.Nullable().elem(arrayLevel, lc)\n\t}\n\tif lc {\n\t\treturn c.LowCardinality().elem(arrayLevel)\n\t}\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/date_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/types\"\n)\n\nfunc TestDate(t *testing.T) {\n\ttestDateColumn(t, true, \"Date\", \"date\", func(i int) time.Time {\n\t\treturn time.Date(2020, 1, i, 0, 0, 0, 0, time.UTC)\n\t}, func(i int) time.Time {\n\t\treturn time.Date(2020, 1, i+1, 0, 0, 0, 0, time.UTC)\n\t}, func() *column.Date[types.Date] {\n\t\treturn column.NewDate[types.Date]()\n\t})\n}\nfunc TestDate32(t *testing.T) {\n\ttestDateColumn(t, true, \"Date32\", \"date32\", func(i int) time.Time {\n\t\treturn time.Date(2020, 1, i, 0, 0, 0, 0, time.UTC)\n\t}, func(i int) time.Time {\n\t\treturn time.Date(2020, 1, i+1, 0, 0, 0, 0, time.UTC)\n\t}, func() *column.Date[types.Date32] {\n\t\treturn column.NewDate[types.Date32]()\n\t})\n}\n\nfunc TestDateTime(t *testing.T) {\n\ttestDateColumn(t, true, \"DateTime\", \"dateTime\", func(i int) time.Time {\n\t\treturn time.Date(2020, 1, i, 0, 0, i+1, 0, time.Local)\n\t}, func(i int) time.Time {\n\t\treturn time.Date(2020, 1, i, 0, 0, i+2, 0, time.Local)\n\t}, func() *column.Date[types.DateTime] {\n\t\treturn column.NewDate[types.DateTime]()\n\t})\n}\nfunc TestDateTimeTimezone(t *testing.T) {\n\ttestDateColumn(t, true, \"DateTime('America/New_York')\", \"dateTime_timezone\", func(i int) time.Time {\n\t\tloc, err := time.LoadLocation(\"America/New_York\")\n\t\trequire.NoError(t, err)\n\t\treturn time.Date(2020, 1, i, 0, 0, i+1, 0, loc)\n\t}, func(i int) time.Time {\n\t\tloc, err := time.LoadLocation(\"America/New_York\")\n\t\trequire.NoError(t, err)\n\t\treturn time.Date(2020, 1, i, 0, 0, i+2, 0, loc)\n\t}, func() *column.Date[types.DateTime] {\n\t\treturn column.NewDate[types.DateTime]()\n\t})\n}\n\nfunc TestDateTime64(t *testing.T) 
{\n\ttestDateColumn(t, false, \"DateTime64(9, 'America/New_York')\", \"dateTime64\", func(i int) time.Time {\n\t\tloc, err := time.LoadLocation(\"America/New_York\")\n\t\trequire.NoError(t, err)\n\t\treturn time.Date(2020, 1, i, 0, 0, i+1, i+110, loc)\n\t}, func(i int) time.Time {\n\t\tloc, err := time.LoadLocation(\"America/New_York\")\n\t\trequire.NoError(t, err)\n\t\treturn time.Date(2020, 1, i, 0, 0, i+1, i+1101, loc)\n\t}, func() *column.Date[types.DateTime64] {\n\t\treturn column.NewDate[types.DateTime64]().SetPrecision(9)\n\t})\n}\n\nfunc testDateColumn[T column.DateType[T]](\n\tt *testing.T,\n\tisLC bool,\n\tchType, tableName string,\n\tfirstVal func(i int) time.Time,\n\tsecondVal func(i int) time.Time,\n\tgetBaseColumn func() *column.Date[T],\n) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\n\tvar sqlCreate string\n\tif isLC {\n\t\tsqlCreate = fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t\t%[1]s %[2]s,\n\t\t\t%[1]s_nullable Nullable(%[2]s),\n\t\t\t%[1]s_array Array(%[2]s),\n\t\t\t%[1]s_array_nullable Array(Nullable(%[2]s)),\n\t\t\t%[1]s_lc LowCardinality(%[2]s),\n\t\t\t%[1]s_nullable_lc LowCardinality(Nullable(%[2]s)),\n\t\t\t%[1]s_array_lc Array(LowCardinality(%[2]s)),\n\t\t\t%[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s)))\n\t\t) Engine=Memory`, tableName, chType)\n\t} else {\n\t\tsqlCreate = fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t\t%[1]s %[2]s,\n\t\t\t%[1]s_nullable Nullable(%[2]s),\n\t\t\t%[1]s_array Array(%[2]s),\n\t\t\t%[1]s_array_nullable Array(Nullable(%[2]s))\n\t\t) Engine=Memory`, tableName, chType)\n\t}\n\terr = 
conn.ExecWithOption(context.Background(), sqlCreate, &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcol := getBaseColumn()\n\tcolNullable := getBaseColumn().Nullable()\n\tcolArray := getBaseColumn().Array()\n\tcolNullableArray := getBaseColumn().Nullable().Array()\n\tcolLC := getBaseColumn().LC()\n\tcolLCNullable := getBaseColumn().Nullable().LC()\n\tcolArrayLC := getBaseColumn().LC().Array()\n\tcolArrayLCNullable := getBaseColumn().Nullable().LC().Array()\n\tvar colInsert []time.Time\n\tvar colNullableInsert []*time.Time\n\tvar colArrayInsert [][]time.Time\n\tvar colArrayNullableInsert [][]*time.Time\n\tvar colLCInsert []time.Time\n\tvar colLCNullableInsert []*time.Time\n\tvar colLCArrayInsert [][]time.Time\n\tvar colLCNullableArrayInsert [][]*time.Time\n\n\t// SetWriteBufferSize is not necessary. this just to show how to set write buffer\n\tcol.SetWriteBufferSize(10)\n\tcolNullable.SetWriteBufferSize(10)\n\tcolArray.SetWriteBufferSize(10)\n\tcolNullableArray.SetWriteBufferSize(10)\n\tcolLC.SetWriteBufferSize(10)\n\tcolLCNullable.SetWriteBufferSize(10)\n\tcolArrayLC.SetWriteBufferSize(10)\n\tcolArrayLCNullable.SetWriteBufferSize(10)\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tval := firstVal(i)\n\t\t\tval2 := secondVal(i)\n\t\t\tvalArray := []time.Time{val, val2}\n\t\t\tvalArrayNil := []*time.Time{&val, nil}\n\n\t\t\tcol.Append(val)\n\t\t\tcolInsert = append(colInsert, val)\n\n\t\t\t// example add nullable\n\t\t\tif i%2 == 0 {\n\t\t\t\tcolNullableInsert = append(colNullableInsert, &val)\n\t\t\t\tcolNullable.Append(val)\n\t\t\t\tcolLCNullableInsert = append(colLCNullableInsert, &val)\n\t\t\t\tcolLCNullable.Append(val)\n\t\t\t} else {\n\t\t\t\tcolNullableInsert = append(colNullableInsert, nil)\n\t\t\t\tcolNullable.AppendNil()\n\t\t\t\tcolLCNullableInsert = append(colLCNullableInsert, 
nil)\n\t\t\t\tcolLCNullable.AppendNil()\n\t\t\t}\n\n\t\t\tcolArray.Append(valArray)\n\t\t\tcolArrayInsert = append(colArrayInsert, valArray)\n\n\t\t\tcolNullableArray.AppendP(valArrayNil)\n\t\t\tcolArrayNullableInsert = append(colArrayNullableInsert, valArrayNil)\n\n\t\t\tcolLCInsert = append(colLCInsert, val)\n\t\t\tcolLC.Append(val)\n\n\t\t\tcolLCArrayInsert = append(colLCArrayInsert, valArray)\n\t\t\tcolArrayLC.Append(valArray)\n\n\t\t\tcolLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil)\n\t\t\tcolArrayLCNullable.AppendP(valArrayNil)\n\t\t}\n\t\tif isLC {\n\t\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable,\n\t\t\t\t%[1]s_lc,\n\t\t\t\t%[1]s_nullable_lc,\n\t\t\t\t%[1]s_array_lc,\n\t\t\t\t%[1]s_array_lc_nullable\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\t\tcol,\n\t\t\t\tcolNullable,\n\t\t\t\tcolArray,\n\t\t\t\tcolNullableArray,\n\t\t\t\tcolLC,\n\t\t\t\tcolLCNullable,\n\t\t\t\tcolArrayLC,\n\t\t\t\tcolArrayLCNullable,\n\t\t\t)\n\t\t} else {\n\t\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\t\tcol,\n\t\t\t\tcolNullable,\n\t\t\t\tcolArray,\n\t\t\t\tcolNullableArray,\n\t\t\t)\n\t\t}\n\n\t\trequire.NoError(t, err)\n\t}\n\n\t// test read all\n\tcolRead := getBaseColumn()\n\tcolNullableRead := getBaseColumn().Nullable()\n\tcolArrayRead := getBaseColumn().Array()\n\tcolNullableArrayRead := getBaseColumn().Nullable().Array()\n\tcolLCRead := getBaseColumn().LC()\n\tcolLCNullableRead := getBaseColumn().Nullable().LC()\n\tcolArrayLCRead := getBaseColumn().LC().Array()\n\tcolArrayLCNullableRead := getBaseColumn().Nullable().LC().Array()\n\tvar selectStmt chconn.SelectStmt\n\tif isLC {\n\t\tselectStmt, err = conn.Select(context.Background(), 
fmt.Sprintf(`SELECT\n\t\t%[1]s,\n\t\t%[1]s_nullable,\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_lc,\n\t\t%[1]s_nullable_lc,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\tFROM test_%[1]s`, tableName),\n\t\t\tcolRead,\n\t\t\tcolNullableRead,\n\t\t\tcolArrayRead,\n\t\t\tcolNullableArrayRead,\n\t\t\tcolLCRead,\n\t\t\tcolLCNullableRead,\n\t\t\tcolArrayLCRead,\n\t\t\tcolArrayLCNullableRead,\n\t\t)\n\t} else {\n\t\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s,\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array,\n\t\t\t%[1]s_array_nullable\n\t\tFROM test_%[1]s`, tableName),\n\t\t\tcolRead,\n\t\t\tcolNullableRead,\n\t\t\tcolArrayRead,\n\t\t\tcolNullableArrayRead,\n\t\t)\n\t}\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar colData []time.Time\n\tvar colNullableData []*time.Time\n\tvar colArrayData [][]time.Time\n\tvar colArrayNullableData [][]*time.Time\n\tvar colLCData []time.Time\n\tvar colLCNullableData []*time.Time\n\tvar colLCArrayData [][]time.Time\n\tvar colLCNullableArrayData [][]*time.Time\n\n\tfor selectStmt.Next() {\n\t\tcolData = colRead.Read(colData)\n\t\tcolNullableData = colNullableRead.ReadP(colNullableData)\n\t\tcolArrayData = colArrayRead.Read(colArrayData)\n\t\tcolArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData)\n\t\tif isLC {\n\t\t\tcolLCData = colLCRead.Read(colLCData)\n\t\t\tcolLCNullableData = colLCNullableRead.ReadP(colLCNullableData)\n\t\t\tcolLCArrayData = colArrayLCRead.Read(colLCArrayData)\n\t\t\tcolLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData)\n\t\t}\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, colNullableInsert, colNullableData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colArrayNullableInsert, colArrayNullableData)\n\tif isLC {\n\t\tassert.Equal(t, colLCInsert, colLCData)\n\t\tassert.Equal(t, colLCNullableInsert, 
colLCNullableData)\n\t\tassert.Equal(t, colLCArrayInsert, colLCArrayData)\n\t\tassert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)\n\t}\n\n\t// test row\n\tcolRead = getBaseColumn()\n\tcolNullableRead = getBaseColumn().Nullable()\n\tcolArrayRead = getBaseColumn().Array()\n\tcolNullableArrayRead = getBaseColumn().Nullable().Array()\n\tcolLCRead = getBaseColumn().LowCardinality()\n\tcolLCNullableRead = getBaseColumn().Nullable().LowCardinality()\n\tcolArrayLCRead = getBaseColumn().LowCardinality().Array()\n\tcolArrayLCNullableRead = getBaseColumn().Nullable().LowCardinality().Array()\n\tif isLC {\n\t\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s,\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array,\n\t\t\t%[1]s_array_nullable,\n\t\t\t%[1]s_lc,\n\t\t\t%[1]s_nullable_lc,\n\t\t\t%[1]s_array_lc,\n\t\t\t%[1]s_array_lc_nullable\n\t\tFROM test_%[1]s`, tableName),\n\t\t\tcolRead,\n\t\t\tcolNullableRead,\n\t\t\tcolArrayRead,\n\t\t\tcolNullableArrayRead,\n\t\t\tcolLCRead,\n\t\t\tcolLCNullableRead,\n\t\t\tcolArrayLCRead,\n\t\t\tcolArrayLCNullableRead,\n\t\t)\n\t} else {\n\t\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable\n\t\t\tFROM test_%[1]s`, tableName),\n\t\t\tcolRead,\n\t\t\tcolNullableRead,\n\t\t\tcolArrayRead,\n\t\t\tcolNullableArrayRead,\n\t\t)\n\t}\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tcolData = colData[:0]\n\tcolNullableData = colNullableData[:0]\n\tcolArrayData = colArrayData[:0]\n\tcolArrayNullableData = colArrayNullableData[:0]\n\tcolLCData = colLCData[:0]\n\tcolLCNullableData = colLCNullableData[:0]\n\tcolLCArrayData = colLCArrayData[:0]\n\tcolLCNullableArrayData = colLCNullableArrayData[:0]\n\n\tfor selectStmt.Next() {\n\t\tfor i := 0; i < selectStmt.RowsInBlock(); i++ {\n\t\t\tcolData = append(colData, colRead.Row(i))\n\t\t\tcolNullableData = append(colNullableData, 
colNullableRead.RowP(i))\n\t\t\tcolArrayData = append(colArrayData, colArrayRead.Row(i))\n\t\t\tcolArrayNullableData = append(colArrayNullableData, colNullableArrayRead.RowP(i))\n\t\t\tif isLC {\n\t\t\t\tcolLCData = append(colLCData, colLCRead.Row(i))\n\t\t\t\tcolLCNullableData = append(colLCNullableData, colLCNullableRead.RowP(i))\n\t\t\t\tcolLCArrayData = append(colLCArrayData, colArrayLCRead.Row(i))\n\t\t\t\tcolLCNullableArrayData = append(colLCNullableArrayData, colArrayLCNullableRead.RowP(i))\n\t\t\t}\n\t\t}\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, colNullableInsert, colNullableData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colArrayNullableInsert, colArrayNullableData)\n\tif isLC {\n\t\tassert.Equal(t, colLCInsert, colLCData)\n\t\tassert.Equal(t, colLCNullableInsert, colLCNullableData)\n\t\tassert.Equal(t, colLCArrayInsert, colLCArrayData)\n\t\tassert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)\n\t}\n\n\t// check dynamic column\n\tif isLC {\n\t\tselectStmt, err = conn.SelectWithOption(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s,\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array,\n\t\t\t%[1]s_array_nullable,\n\t\t\t%[1]s_lc,\n\t\t\t%[1]s_nullable_lc,\n\t\t\t%[1]s_array_lc,\n\t\t\t%[1]s_array_lc_nullable\n\t\t\tFROM test_%[1]s`,\n\t\t\ttableName),\n\t\t\t&chconn.QueryOptions{\n\t\t\t\tUseGoTime: false,\n\t\t\t},\n\t\t)\n\t} else {\n\t\tselectStmt, err = conn.SelectWithOption(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t\t\t%[1]s,\n\t\t\t\t\t%[1]s_nullable,\n\t\t\t\t\t%[1]s_array,\n\t\t\t\t\t%[1]s_array_nullable\n\t\t\t\tFROM test_%[1]s`, tableName,\n\t\t),\n\t\t\t&chconn.QueryOptions{\n\t\t\t\tUseGoTime: false,\n\t\t\t},\n\t\t)\n\t}\n\trequire.NoError(t, err)\n\tautoColumns := selectStmt.Columns()\n\tif isLC {\n\t\tassert.Len(t, autoColumns, 8)\n\t\tassert.Equal(t, column.New[T]().ColumnType(), autoColumns[0].ColumnType())\n\t\tassert.Equal(t, 
column.New[T]().Nullable().ColumnType(), autoColumns[1].ColumnType())\n\t\tassert.Equal(t, column.New[T]().Array().ColumnType(), autoColumns[2].ColumnType())\n\t\tassert.Equal(t, column.New[T]().Nullable().Array().ColumnType(), autoColumns[3].ColumnType())\n\t\tassert.Equal(t, column.New[T]().LowCardinality().ColumnType(), autoColumns[4].ColumnType())\n\t\tassert.Equal(t, column.New[T]().Nullable().LowCardinality().ColumnType(), autoColumns[5].ColumnType())\n\t\tassert.Equal(t, column.New[T]().LowCardinality().Array().ColumnType(), autoColumns[6].ColumnType())\n\t\tassert.Equal(t, column.New[T]().Nullable().LowCardinality().Array().ColumnType(), autoColumns[7].ColumnType())\n\t} else {\n\t\tassert.Len(t, autoColumns, 4)\n\t\tassert.Equal(t, column.New[T]().ColumnType(), autoColumns[0].ColumnType())\n\t\tassert.Equal(t, column.New[T]().Nullable().ColumnType(), autoColumns[1].ColumnType())\n\t\tassert.Equal(t, column.New[T]().Array().ColumnType(), autoColumns[2].ColumnType())\n\t\tassert.Equal(t, column.New[T]().Nullable().Array().ColumnType(), autoColumns[3].ColumnType())\n\t}\n\n\tfor selectStmt.Next() {\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n\n\t// check dynamic column\n\tif isLC {\n\t\tselectStmt, err = conn.SelectWithOption(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable,\n\t\t\t\t%[1]s_lc,\n\t\t\t\t%[1]s_nullable_lc,\n\t\t\t\t%[1]s_array_lc,\n\t\t\t\t%[1]s_array_lc_nullable\n\t\t\t\tFROM test_%[1]s`,\n\t\t\ttableName),\n\t\t\t&chconn.QueryOptions{\n\t\t\t\tUseGoTime: true,\n\t\t\t},\n\t\t)\n\t} else {\n\t\tselectStmt, err = conn.SelectWithOption(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t\t\t\t%[1]s,\n\t\t\t\t\t\t%[1]s_nullable,\n\t\t\t\t\t\t%[1]s_array,\n\t\t\t\t\t\t%[1]s_array_nullable\n\t\t\t\t\tFROM test_%[1]s`, tableName,\n\t\t),\n\t\t\t&chconn.QueryOptions{\n\t\t\t\tUseGoTime: true,\n\t\t\t},\n\t\t)\n\t}\n\trequire.NoError(t, 
err)\n\tautoColumns = selectStmt.Columns()\n\tif isLC {\n\t\tassert.Len(t, autoColumns, 8)\n\t\tassert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())\n\t\tassert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())\n\t\tassert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())\n\t\tassert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())\n\t\tassert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType())\n\t\tassert.Equal(t, colLCNullableRead.ColumnType(), autoColumns[5].ColumnType())\n\t\tassert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType())\n\t\tassert.Equal(t, colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType())\n\t} else {\n\t\tassert.Len(t, autoColumns, 4)\n\t\tassert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())\n\t\tassert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())\n\t\tassert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())\n\t\tassert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())\n\t}\n\n\tfor selectStmt.Next() {\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n}\n\nfunc TestInvalidNegativeTimes(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\t`DROP TABLE IF EXISTS test_invalid_dates`,\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\n\tsqlCreate := `CREATE TABLE test_invalid_dates (\n\t\t\tdate Date,\n\t\t\tdate32 Date32,\n\t\t\tdateTime DateTime,\n\t\t\tdateTime64 DateTime64(3)\n\t\t) Engine=Memory`\n\n\terr = conn.ExecWithOption(context.Background(), sqlCreate, &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcolDate := 
column.NewDate[types.Date]()\n\tcolDate32 := column.NewDate[types.Date32]()\n\tcolDateTime := column.NewDate[types.DateTime]()\n\tcolDateTime64 := column.NewDate[types.DateTime64]()\n\tinvalidTime := time.Unix(-3208988700, 0) // 1868\n\tcolDate.Append(invalidTime)\n\tcolDate32.Append(invalidTime)\n\tcolDateTime.Append(invalidTime)\n\tcolDateTime64.Append(invalidTime)\n\n\terr = conn.Insert(context.Background(), `INSERT INTO\n\ttest_invalid_dates (\n\t\t\t\tdate,\n\t\t\t\tdate32,\n\t\t\t\tdateTime,\n\t\t\t\tdateTime64\n\t\t\t)\n\t\tVALUES`,\n\t\tcolDate,\n\t\tcolDate32,\n\t\tcolDateTime,\n\t\tcolDateTime64,\n\t)\n\trequire.NoError(t, err)\n\n\t// test read all\n\tcolDateRead := column.NewDate[types.Date]()\n\tcolDate32Read := column.NewDate[types.Date32]()\n\tcolDateTimeRead := column.NewDate[types.DateTime]()\n\tcolDateTime64Read := column.NewDate[types.DateTime64]()\n\tvar selectStmt chconn.SelectStmt\n\tselectStmt, err = conn.Select(context.Background(), `SELECT\n\t\tdate,\n\t\tdate32,\n\t\tdateTime,\n\t\tdateTime64\n\t\tFROM test_invalid_dates`,\n\t\tcolDateRead,\n\t\tcolDate32Read,\n\t\tcolDateTimeRead,\n\t\tcolDateTime64Read,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tfor selectStmt.Next() {\n\t}\n\tassert.Equal(t, colDateRead.Row(0).In(time.UTC).Format(time.RFC3339), \"1970-01-01T00:00:00Z\")\n\tassert.Equal(t, colDate32Read.Row(0).In(time.UTC).Format(time.RFC3339), \"1900-01-01T00:00:00Z\")\n\tassert.Equal(t, colDateTimeRead.Row(0).In(time.UTC).Format(time.RFC3339), \"1970-01-01T00:00:00Z\")\n\tassert.Equal(t, colDateTime64Read.Row(0).In(time.UTC).Format(time.RFC3339), \"1900-01-01T00:00:00Z\")\n\n\trequire.NoError(t, selectStmt.Err())\n}\n"
  },
  {
    "path": "column/error_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/types\"\n)\n\nfunc TestInsertColumnLowCardinalityError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := chconn.ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := chconn.ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_lc`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_lc (\n\t\tcol  LowCardinality(String)\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write header\",\n\t\t\twantErr:     \"block: write header block data for column col (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write stype\",\n\t\t\twantErr:     \"block: write block data for column col (error writing stype: timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"write dictionarySize\",\n\t\t\twantErr:     \"block: write block data for column col (error writing dictionarySize: timeout)\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"write dictionary\",\n\t\t\twantErr:     \"block: write block data for column col (error writing dictionary: timeout)\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"write keys len\",\n\t\t\twantErr:     \"block: write block data for column col (error 
writing keys len: timeout)\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t},\n\t\t{\n\t\t\tname:        \"write indices\",\n\t\t\twantErr:     \"block: write block data for column col (error writing indices: timeout)\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = chconn.ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.NewString().LowCardinality()\n\t\t\tcol.Append(\"test\")\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_lc (col) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestSelectReadLCError(t *testing.T) {\n\tstartValidReader := 36\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"read column name length\",\n\t\t\twantErr:     \"read column header: read column name length: timeout\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column name\",\n\t\t\twantErr:     \"read column header: read column name: timeout\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type length\",\n\t\t\twantErr:     \"read column header: read column type length: timeout\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type error\",\n\t\t\twantErr:     \"read column header: read column type: timeout\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"read custom serialization\",\n\t\t\twantErr:     \"read column header: read 
custom serialization: timeout\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t},\n\t\t{\n\t\t\tname:        \"error reading keys serialization version\",\n\t\t\twantErr:     \"read column header: error reading keys serialization version: timeout\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t\t{\n\t\t\tname:        \"error reading serialization type\",\n\t\t\twantErr:     \"read data \\\"toLowCardinality(toString(number))\\\": error reading serialization type: timeout\",\n\t\t\tnumberValid: startValidReader + 6,\n\t\t},\n\t\t{\n\t\t\tname:        \"error reading dictionary size\",\n\t\t\twantErr:     \"read data \\\"toLowCardinality(toString(number))\\\": error reading dictionary size: timeout\",\n\t\t\tnumberValid: startValidReader + 7,\n\t\t},\n\t\t{\n\t\t\tname:        \"error reading dictionary\",\n\t\t\twantErr:     \"read data \\\"toLowCardinality(toString(number))\\\": error reading dictionary: error read string len: timeout\",\n\t\t\tnumberValid: startValidReader + 8,\n\t\t},\n\t\t{\n\t\t\tname:        \"error reading string len\",\n\t\t\twantErr:     \"read data \\\"toLowCardinality(toString(number))\\\": error reading dictionary: error read string len: timeout\",\n\t\t\tnumberValid: startValidReader + 9,\n\t\t},\n\t\t{\n\t\t\tname:        \"error reading string\",\n\t\t\twantErr:     \"read data \\\"toLowCardinality(toString(number))\\\": error reading dictionary: error read string: timeout\",\n\t\t\tnumberValid: startValidReader + 10,\n\t\t},\n\t\t{\n\t\t\tname:        \"error reading indices size\",\n\t\t\twantErr:     \"read data \\\"toLowCardinality(toString(number))\\\": error reading indices size: timeout\",\n\t\t\tnumberValid: startValidReader + 11,\n\t\t},\n\t\t{\n\t\t\tname:        \"error reading indices\",\n\t\t\twantErr:     \"read data \\\"toLowCardinality(toString(number))\\\": error reading indices: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 12,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, 
func(t *testing.T) {\n\t\t\tconfig, err := chconn.ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := chconn.ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\tcol := column.NewString().LC()\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT toLowCardinality(toString(number)) FROM system.numbers LIMIT 1;\", col)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt.Next()\n\n\t\t\tassert.EqualError(t, stmt.Err(), tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnArrayError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := chconn.ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := chconn.ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_array`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_array (\n\t\tcol  Array(UInt8)\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write block data\",\n\t\t\twantErr:     \"block: write header block data for column col (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write len data\",\n\t\t\twantErr:     \"block: write block data for column col (write len data: timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn 
&writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = chconn.ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.New[uint8]().Array()\n\t\t\tcol.Append([]uint8{1})\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_array (col) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestSelectReadArrayError(t *testing.T) {\n\tstartValidReader := 36\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"read column name length\",\n\t\t\twantErr:     \"read column header: read column name length: timeout\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column name\",\n\t\t\twantErr:     \"read column header: read column name: timeout\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type length\",\n\t\t\twantErr:     \"read column header: read column type length: timeout\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type error\",\n\t\t\twantErr:     \"read column header: read column type: timeout\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"read custom serialization\",\n\t\t\twantErr:     \"read column header: read custom serialization: timeout\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t},\n\t\t{\n\t\t\tname:        \"read offset error\",\n\t\t\twantErr:     \"read data \\\"array(number, number)\\\": array: read offset column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t\t{\n\t\t\tname:        \"read data column\",\n\t\t\twantErr:     \"read data \\\"array(number, number)\\\": array: read 
data column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 6,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := chconn.ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := chconn.ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\tcol := column.New[uint64]().Array()\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT array(number,number) FROM system.numbers LIMIT 1;\", col)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt.Next()\n\n\t\t\tassert.EqualError(t, stmt.Err(), tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnArrayNullable(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := chconn.ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := chconn.ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_array_nullable`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_array_nullable (\n\t\tcol  Array(Nullable(UInt8))\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write block data\",\n\t\t\twantErr:     \"block: write header block data for column col (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write len data\",\n\t\t\twantErr:     \"block: write block data for column col (write len data: timeout)\",\n\t\t\tnumberValid: startValidReader + 
1,\n\t\t},\n\t\t{\n\t\t\tname:        \"write nullable data\",\n\t\t\twantErr:     \"block: write block data for column col (write nullable data: timeout)\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = chconn.ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.New[uint8]().Nullable().Array()\n\t\t\tcol.Append([]uint8{1})\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_array_nullable (col) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestSelectReadArrayNullableError(t *testing.T) {\n\tstartValidReader := 39\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"read column type error\",\n\t\t\twantErr:     \"read column header: read column type: timeout\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read custom serialization\",\n\t\t\twantErr:     \"read column header: read custom serialization: timeout\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read offset error\",\n\t\t\twantErr:     \"read data \\\"array(toNullable(number))\\\": array: read offset column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"read data column\",\n\t\t\twantErr:     \"read data \\\"array(toNullable(number))\\\": array: read data column: read nullable data: read nullable data: timeout\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t}\n\tfor _, tt := range tests 
{\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := chconn.ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := chconn.ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\tcol := column.New[uint64]().Nullable().Array()\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT array(toNullable(number)) FROM system.numbers LIMIT 1;\", col)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt.Next()\n\n\t\t\tassert.EqualError(t, stmt.Err(), tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestSelectReadNullableError(t *testing.T) {\n\tstartValidReader := 39\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"read column type error\",\n\t\t\twantErr:     \"read column header: read column type: timeout\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read custom serialization\",\n\t\t\twantErr:     \"read column header: read custom serialization: timeout\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read nullable data\",\n\t\t\twantErr:     \"read data \\\"toNullable(number)\\\": read nullable data: read nullable data: timeout\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := chconn.ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := 
chconn.ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\tcol := column.New[uint64]().Nullable()\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT toNullable(number) FROM system.numbers LIMIT 1;\", col)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt.Next()\n\n\t\t\tassert.EqualError(t, stmt.Err(), tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnArray2Error(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := chconn.ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := chconn.ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_array2`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_array2 (\n\t\tcol  Array(Array(UInt8))\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write block data\",\n\t\t\twantErr:     \"block: write header block data for column col (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write len data\",\n\t\t\twantErr:     \"block: write block data for column col (write len data: timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = chconn.ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.New[uint8]().Array().Array()\n\t\t\tcol.Append([][]uint8{{1}})\n\t\t\terr = 
c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_array2 (col) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestSelectReadArray2Error(t *testing.T) {\n\tstartValidReader := 36\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"read column name length\",\n\t\t\twantErr:     \"read column header: read column name length: timeout\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column name\",\n\t\t\twantErr:     \"read column header: read column name: timeout\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type length\",\n\t\t\twantErr:     \"read column header: read column type length: timeout\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type error\",\n\t\t\twantErr:     \"read column header: read column type: timeout\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"read custom serialization\",\n\t\t\twantErr:     \"read column header: read custom serialization: timeout\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t},\n\t\t{\n\t\t\tname:        \"read offset error\",\n\t\t\twantErr:     \"read data \\\"array(array(number, number))\\\": array: read offset column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t\t{\n\t\t\tname:        \"read data column\",\n\t\t\twantErr:     \"read data \\\"array(array(number, number))\\\": array: read data column: array: read offset column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 6,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := chconn.ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc 
= func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := chconn.ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\tcol := column.New[uint64]().Array().Array()\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT array(array(number,number)) FROM system.numbers LIMIT 1;\", col)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt.Next()\n\n\t\t\tassert.EqualError(t, stmt.Err(), tt.wantErr)\n\t\t})\n\t}\n}\nfunc TestInsertColumnArray3Error(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := chconn.ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := chconn.ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_array3`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_array3 (\n\t\tcol  Array(Array(Array(UInt8)))\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write block data\",\n\t\t\twantErr:     \"block: write header block data for column col (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write len data\",\n\t\t\twantErr:     \"block: write block data for column col (write len data: timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: 
tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = chconn.ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.New[uint8]().Array().Array().Array()\n\t\t\tcol.Append([][][]uint8{{{1}}})\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_array3 (col) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestSelectReadArray3Error(t *testing.T) {\n\tstartValidReader := 36\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"read column header: read column name length\",\n\t\t\twantErr:     \"read column header: read column name length: timeout\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column header: read column name\",\n\t\t\twantErr:     \"read column header: read column name: timeout\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column header: read column type length\",\n\t\t\twantErr:     \"read column header: read column type length: timeout\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column header: read column type error\",\n\t\t\twantErr:     \"read column header: read column type: timeout\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"read custom serialization\",\n\t\t\twantErr:     \"read column header: read custom serialization: timeout\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t},\n\t\t{\n\t\t\tname:        \"read offset error\",\n\t\t\twantErr:     \"read data \\\"array(array(array(number, number)))\\\": array: read offset column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t\t{\n\t\t\tname:        \"read data column\",\n\t\t\twantErr:     \"read data \\\"array(array(array(number, number)))\\\": 
array: read data column: array: read offset column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 6,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := chconn.ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := chconn.ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\tcol := column.New[uint64]().Array().Array().Array()\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT array(array(array(number,number))) FROM system.numbers LIMIT 1;\", col)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt.Next()\n\n\t\t\tassert.EqualError(t, stmt.Err(), tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnTupleError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := chconn.ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := chconn.ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_tuple`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_tuple (\n\t\tcol  Tuple(String)\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write header\",\n\t\t\twantErr:     \"block: write header block data for column col (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write columns\",\n\t\t\twantErr:     \"block: write block data for column col (tuple: write column index 0: 
timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = chconn.ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.NewString()\n\t\t\tcolTuple := column.NewTuple(col)\n\t\t\tcol.Append(\"test\")\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_tuple (col) VALUES\",\n\t\t\t\tcolTuple,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestSelectReadTupleError(t *testing.T) {\n\tstartValidReader := 36\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t\tlc          bool\n\t}{\n\t\t{\n\t\t\tname:        \"read column name length\",\n\t\t\twantErr:     \"read column header: read column name length: timeout\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column name\",\n\t\t\twantErr:     \"read column header: read column name: timeout\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type length\",\n\t\t\twantErr:     \"read column header: read column type length: timeout\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type error\",\n\t\t\twantErr:     \"read column header: read column type: timeout\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"read custom serialization\",\n\t\t\twantErr:     \"read column header: read custom serialization: timeout\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t\tlc:          true,\n\t\t},\n\t\t{\n\t\t\tname:        \"read sub column 
header\",\n\t\t\twantErr:     \"read column header: tuple: read column header index 0: error reading keys serialization version: timeout\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t\tlc:          true,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column index 2\",\n\t\t\twantErr:     \"read data \\\"tuple(1)\\\": tuple: read column index 0: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := chconn.ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := chconn.ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\t// we can't use tupp[le(toLowCardinality('1')) so we use this tricky way\n\t\t\t// https://github.com/ClickHouse/ClickHouse/issues/39109\n\t\t\tvar col column.ColumnBasic\n\t\t\tif tt.lc {\n\t\t\t\tcol = column.New[uint64]().LC()\n\t\t\t} else {\n\t\t\t\tcol = column.New[uint8]()\n\t\t\t}\n\t\t\tcolTuple := column.NewTuple(col)\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT tuple(1);\", colTuple)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt.Next()\n\t\t\tassert.EqualError(t, stmt.Err(), tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnMapError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := chconn.ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := chconn.ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_map`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_map 
(\n\t\tcol  Map(UInt8,UInt8)\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write block data\",\n\t\t\twantErr:     \"block: write header block data for column col (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write len data\",\n\t\t\twantErr:     \"block: write block data for column col (write len data: timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"write key data\",\n\t\t\twantErr:     \"block: write block data for column col (write key data: timeout)\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"write value data\",\n\t\t\twantErr:     \"block: write block data for column col (write value data: timeout)\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = chconn.ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcolValue := column.New[uint8]()\n\t\t\tcol := column.NewMap[uint8, uint8](column.New[uint8](), colValue)\n\t\t\tcol.Append(map[uint8]uint8{1: 1})\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_map (col) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestSelectReadMapError(t *testing.T) {\n\tstartValidReader := 36\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t\tlc          bool\n\t}{\n\t\t{\n\t\t\tname:        \"read column 
name length\",\n\t\t\twantErr:     \"read column header: read column name length: timeout\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column name\",\n\t\t\twantErr:     \"read column header: read column name: timeout\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type length\",\n\t\t\twantErr:     \"read column header: read column type length: timeout\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"read column type error\",\n\t\t\twantErr:     \"read column header: read column type: timeout\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"read custom serialization\",\n\t\t\twantErr:     \"read column header: read custom serialization: timeout\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t\tlc:          true,\n\t\t},\n\t\t{\n\t\t\tname:        \"read value header\",\n\t\t\twantErr:     \"read column header: map: read key header: error reading keys serialization version: timeout\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t\tlc:          true,\n\t\t},\n\t\t{\n\t\t\tname:        \"read value header\",\n\t\t\twantErr:     \"read column header: map: read value header: error reading keys serialization version: timeout\",\n\t\t\tnumberValid: startValidReader + 6,\n\t\t\tlc:          true,\n\t\t},\n\t\t{\n\t\t\tname:        \"read offset error\",\n\t\t\twantErr:     \"read data \\\"map(number, number)\\\": map: read offset column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t\t{\n\t\t\tname:        \"read key column\",\n\t\t\twantErr:     \"read data \\\"map(number, number)\\\": map: read key column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 6,\n\t\t},\n\t\t{\n\t\t\tname:        \"read value column\",\n\t\t\twantErr:     \"read data \\\"map(number, number)\\\": map: read value column: read data: timeout\",\n\t\t\tnumberValid: startValidReader + 
7,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := chconn.ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := chconn.ConnectConfig(context.Background(), config)\n\t\t\tassert.NoError(t, err)\n\t\t\tvar colKey column.Column[uint64]\n\t\t\tvar colValue column.Column[uint64]\n\t\t\tif tt.lc {\n\t\t\t\tcolKey = column.New[uint64]().LC()\n\t\t\t\tcolValue = column.New[uint64]().LC()\n\t\t\t} else {\n\t\t\t\tcolKey = column.New[uint64]()\n\t\t\t\tcolValue = column.New[uint64]()\n\t\t\t}\n\t\t\tcol := column.NewMap(colKey, colValue)\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT map(number,number) FROM system.numbers LIMIT 1;\", col)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt.Next()\n\n\t\t\tassert.EqualError(t, stmt.Err(), tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestInvalidType(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := chconn.ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := chconn.ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname           string\n\t\tcolumnSelector string\n\t\twantErr        string\n\t\tcolumn         column.ColumnBasic\n\t}{\n\t\t{\n\t\t\tname:           \"1 byte invalid\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: Int8|UInt8|Enum8\",\n\t\t\tcolumn:         column.New[int8](),\n\t\t},\n\t\t{\n\t\t\tname:           \"2 bytes invalid\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: 
Int16|UInt16|Enum16|Date\",\n\t\t\tcolumn:         column.New[int16](),\n\t\t},\n\t\t{\n\t\t\tname:           \"4 bytes invalid\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: Int32|UInt32|Float32|Decimal32|Date32|DateTime|IPv4\",\n\t\t\tcolumn:         column.New[int32](),\n\t\t},\n\t\t{\n\t\t\tname:           \"8 bytes invalid\",\n\t\t\tcolumnSelector: \"toInt32(number)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: Int32, column types: Int64|UInt64|Float64|Decimal64|DateTime64\",\n\t\t\tcolumn:         column.New[int64](),\n\t\t},\n\t\t{\n\t\t\tname:           \"16 bytes invalid\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: Int128|UInt128|Decimal128|IPv6|UUID\",\n\t\t\tcolumn:         column.New[types.Int128](),\n\t\t},\n\t\t{\n\t\t\tname:           \"32 bytes invalid\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: Int256|UInt256|Decimal256\",\n\t\t\tcolumn:         column.New[types.Int256](),\n\t\t},\n\t\t{\n\t\t\tname:           \"string invalid\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: String\",\n\t\t\tcolumn:         column.NewString(),\n\t\t},\n\t\t{\n\t\t\tname:           \"fixed string invalid\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: T(20 bytes size)\",\n\t\t\tcolumn:         column.New[[20]byte](),\n\t\t},\n\t\t{\n\t\t\tname:           \"fixed string invalid size\",\n\t\t\tcolumnSelector: \"toFixedString(toString(number),2)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: FixedString(2), column types: T(20 bytes size)\",\n\t\t\tcolumn:         column.New[[20]byte](),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid 
nullable\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: Nullable(Int64|UInt64|Float64|Decimal64|DateTime64)\",\n\t\t\tcolumn:         column.New[int64]().Nullable(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid nullable inside\",\n\t\t\tcolumnSelector: \"toNullable(number)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: Nullable(UInt64), column types: Nullable(Int8|UInt8|Enum8)\",\n\t\t\tcolumn:         column.New[int8]().Nullable(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid LowCardinality\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: LowCardinality(Int64|UInt64|Float64|Decimal64|DateTime64)\",\n\t\t\tcolumn:         column.New[int64]().LC(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid LowCardinality inside\",\n\t\t\tcolumnSelector: \"toLowCardinality(number)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: LowCardinality(UInt64), column types: LowCardinality(Int8|UInt8|Enum8)\",\n\t\t\tcolumn:         column.New[int8]().LC(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid nullable LowCardinality\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: UInt64, column types: \" +\n\t\t\t\t\"LowCardinality(Nullable(Int64|UInt64|Float64|Decimal64|DateTime64))\",\n\t\t\tcolumn: column.New[int64]().Nullable().LC(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid nullable LowCardinality inside\",\n\t\t\tcolumnSelector: \"toLowCardinality(toNullable(number))\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: LowCardinality(Nullable(UInt64)), column types: \" +\n\t\t\t\t\"LowCardinality(Int8|UInt8|Enum8)\",\n\n\t\t\tcolumn: column.New[int8]().LC(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid array\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: 
Array(Int64|UInt64|Float64|Decimal64|DateTime64)\",\n\t\t\tcolumn:         column.New[int64]().Array(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid array inside\",\n\t\t\tcolumnSelector: \"array(number)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: Array(UInt64), column types: Array(Int8|UInt8|Enum8)\",\n\t\t\tcolumn:         column.New[int8]().Array(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid array nullable\",\n\t\t\tcolumnSelector: \"array(number)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: Array(UInt64), column types: Array(Nullable(Int8|UInt8|Enum8))\",\n\t\t\tcolumn:         column.New[int8]().Nullable().Array(),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid map\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: Map(Int8|UInt8|Enum8, Int8|UInt8|Enum8)\",\n\t\t\tcolumn:         column.NewMap[int8, int8](column.New[int8](), column.New[int8]()),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid map key\",\n\t\t\tcolumnSelector: \"map(number,number)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: Map(UInt64, UInt64), column types: Map(Int8|UInt8|Enum8, Int8|UInt8|Enum8)\",\n\t\t\tcolumn:         column.NewMap[int8, int8](column.New[int8](), column.New[int8]()),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid map value\",\n\t\t\tcolumnSelector: \"map(number,number)\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: Map(UInt64, UInt64), column types: \" +\n\t\t\t\t\"Map(Int64|UInt64|Float64|Decimal64|DateTime64, Int8|UInt8|Enum8)\",\n\t\t\tcolumn: column.NewMap[int64, int8](column.New[int64](), column.New[int8]()),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid tuple\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: UInt64, column types: \" +\n\t\t\t\t\"Tuple(Int64|UInt64|Float64|Decimal64|DateTime64,Int8|UInt8|Enum8)\",\n\n\t\t\tcolumn: 
column.NewTuple(column.New[int64](), column.New[int8]()),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid tuple inside\",\n\t\t\tcolumnSelector: \"tuple(number)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: Tuple(UInt64), column types: Tuple(Int8|UInt8|Enum8)\",\n\t\t\tcolumn:         column.NewTuple(column.New[int8]()),\n\t\t},\n\t\t{\n\t\t\tname:           \"invalid tuple columns\",\n\t\t\tcolumnSelector: \"tuple(number)\",\n\t\t\twantErr:        \"columns number for tuple(number) (Tuple(UInt64)) is not equal to tuple columns number: 1 != 2\",\n\t\t\tcolumn:         column.NewTuple(column.New[uint64](), column.New[uint64]()),\n\t\t},\n\t\t{\n\t\t\tname:           \"date time with timezone\",\n\t\t\tcolumnSelector: \"toDateTime('2010-01-01', 'America/New_York') + number\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: DateTime('America/New_York'), column types: \" +\n\t\t\t\t\"Int64|UInt64|Float64|Decimal64|DateTime64\",\n\n\t\t\tcolumn: column.New[uint64](),\n\t\t},\n\t\t{\n\t\t\tname:           \"date time 64 with timezone\",\n\t\t\tcolumnSelector: \"toDateTime64('2010-01-01', 3, 'America/New_York') + number\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: DateTime64(3, 'America/New_York'), column types: \" +\n\t\t\t\t\"Int32|UInt32|Float32|Decimal32|Date32|DateTime|IPv4\",\n\n\t\t\tcolumn: column.New[uint32](),\n\t\t},\n\t\t{\n\t\t\tname:           \"Decimal\",\n\t\t\tcolumnSelector: \"toDecimal32(number,3)\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: Decimal(9, 3), column types: Int64|UInt64|Float64|Decimal64|DateTime64\",\n\t\t\tcolumn:         column.New[uint64](),\n\t\t},\n\t\t{\n\t\t\tname:           \"Array2\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr:        \"mismatch column type: ClickHouse Type: UInt64, column types: Array(Array(Int64|UInt64|Float64|Decimal64|DateTime64))\",\n\t\t\tcolumn:         column.New[uint64]().Array().Array(),\n\t\t},\n\t\t{\n\t\t\tname:           
\"Array2 inside\",\n\t\t\tcolumnSelector: \"array(number,number)\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: Array(UInt64), column types:\" +\n\t\t\t\t\" Array(Array(Int64|UInt64|Float64|Decimal64|DateTime64))\",\n\n\t\t\tcolumn: column.New[uint64]().Array().Array(),\n\t\t},\n\t\t{\n\t\t\tname:           \"Array3\",\n\t\t\tcolumnSelector: \"number\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: UInt64, column types: \" +\n\t\t\t\t\"Array(Array(Array(Int64|UInt64|Float64|Decimal64|DateTime64)))\",\n\n\t\t\tcolumn: column.New[uint64]().Array().Array().Array(),\n\t\t},\n\t\t{\n\t\t\tname:           \"Array3 inside\",\n\t\t\tcolumnSelector: \"array(number,number)\",\n\t\t\twantErr: \"mismatch column type: ClickHouse Type: Array(UInt64), column types: \" +\n\t\t\t\t\"Array(Array(Array(Int64|UInt64|Float64|Decimal64|DateTime64)))\",\n\n\t\t\tcolumn: column.New[uint64]().Array().Array().Array(),\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc, err = chconn.ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tstmt, err := c.Select(context.Background(),\n\t\t\t\tfmt.Sprintf(\"SELECT %s FROM  system.numbers limit 1\", tt.columnSelector),\n\t\t\t\ttt.column,\n\t\t\t)\n\n\t\t\trequire.NoError(t, err)\n\t\t\tfor stmt.Next() {\n\n\t\t\t}\n\t\t\trequire.EqualError(t, errors.Unwrap(stmt.Err()), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestMapInvalidColumnNumber(t *testing.T) {\n\tm := column.NewMap[uint8, uint8](column.New[uint8](), column.New[uint8]())\n\tm.SetType([]byte(\"Map(UInt8,UInt8,UInt8)\"))\n\terr := m.Validate()\n\tassert.Equal(t, err.Error(), \"columns number is not equal to map columns number: 3 != 2\")\n}\n\nfunc TestFixedStringInvalidType(t *testing.T) {\n\tm := column.New[[20]byte]()\n\tm.SetType([]byte(\"FixedString(a)\"))\n\terr := m.Validate()\n\tassert.Equal(t, err.Error(), \"invalid size: strconv.Atoi: parsing \\\"a\\\": invalid 
syntax\")\n}\n\nfunc TestEnum8InvalidType(t *testing.T) {\n\tm := column.New[int16]()\n\tm.SetType([]byte(\"Enum8()\"))\n\terr := m.Validate()\n\tassert.Equal(t, err.Error(), \"mismatch column type: ClickHouse Type: Enum8(), column types: Int16|UInt16|Enum16|Date\")\n}\nfunc TestEnum16InvalidType(t *testing.T) {\n\tm := column.New[int32]()\n\tm.SetType([]byte(\"Enum16()\"))\n\terr := m.Validate()\n\tassert.Equal(t, err.Error(), \"mismatch column type: ClickHouse Type: Enum16(), \"+\n\t\t\"column types: Int32|UInt32|Float32|Decimal32|Date32|DateTime|IPv4\")\n}\n\nfunc TestDecimalInvalidType(t *testing.T) {\n\tm := column.New[[20]byte]()\n\tm.SetType([]byte(\"Decimal()\"))\n\terr := m.Validate()\n\tassert.Equal(t, err.Error(), \"invalid decimal type (should have precision and scale): Decimal()\")\n\n\tm.SetType([]byte(\"Decimal(a, a)\"))\n\terr = m.Validate()\n\tassert.Equal(t, err.Error(), \"invalid precision: strconv.Atoi: parsing \\\"a\\\": invalid syntax\")\n\n\tm.SetType([]byte(\"Decimal(3, a)\"))\n\terr = m.Validate()\n\tassert.Equal(t, err.Error(), \"invalid scale: strconv.Atoi: parsing \\\"a\\\": invalid syntax\")\n\n\tm.SetType([]byte(\"Decimal(200, 3)\"))\n\terr = m.Validate()\n\tassert.Equal(t, err.Error(), \"invalid precision: 200. it should be between 1 and 76\")\n}\n\nfunc TestInvalidDate(t *testing.T) {\n\tm := column.NewDate[types.DateTime]()\n\tm.SetType([]byte(\"DateTime('InvalidTimeZone')\"))\n\terr := m.Validate()\n\tassert.NoError(t, err)\n\tassert.Equal(t, m.Location(), time.Local)\n}\n\nfunc TestInvalidSimpleAggregateFunction(t *testing.T) {\n\tm := column.New[int]()\n\tm.SetType([]byte(\"SimpleAggregateFunction(sum))\"))\n\tassert.Panics(t, func() {\n\t\tm.Validate()\n\t})\n}\n"
  },
  {
    "path": "column/errors.go",
    "content": "package column\n\nimport (\n\t\"fmt\"\n)\n\ntype ErrInvalidType struct {\n\tcolumn     ColumnBasic\n\tColumnType string\n}\n\nfunc (e ErrInvalidType) Error() string {\n\treturn fmt.Sprintf(\"mismatch column type: ClickHouse Type: %s, column types: %s\",\n\t\tstring(e.column.Type()),\n\t\te.column.ColumnType(),\n\t)\n}\n"
  },
  {
    "path": "column/helper_test.go",
    "content": "package column_test\n\nimport (\n\t\"io\"\n)\n\ntype readErrorHelper struct {\n\tnumberValid int\n\terr         error\n\tr           io.Reader\n\tcount       int\n}\n\nfunc (r *readErrorHelper) Read(p []byte) (int, error) {\n\tr.count++\n\tif r.count > r.numberValid {\n\t\treturn 0, r.err\n\t}\n\treturn r.r.Read(p)\n}\n\ntype writerErrorHelper struct {\n\tnumberValid int\n\terr         error\n\tw           io.Writer\n\tcount       int\n}\n\nfunc (w *writerErrorHelper) Write(p []byte) (int, error) {\n\tw.count++\n\tif w.count > w.numberValid {\n\t\treturn 0, w.err\n\t}\n\treturn w.w.Write(p)\n}\n"
  },
  {
    "path": "column/lc.go",
    "content": "package column\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"strings\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\nconst (\n\t// Need to read additional keys.\n\t// Additional keys are stored before indexes as value N and N keys\n\t// after them.\n\thasAdditionalKeysBit = 1 << 9\n\t// Need to update dictionary.\n\t// It means that previous granule has different dictionary.\n\tneedUpdateDictionary = 1 << 10\n\n\tserializationType = hasAdditionalKeysBit | needUpdateDictionary\n)\n\n// LowCardinality use for LowCardinality ClickHouse DataTypes\ntype LowCardinality[T comparable] struct {\n\tcolumn\n\tnumRow         int\n\tdictColumn     Column[T]\n\tindices        indicesColumnI\n\toldIndicesType int\n\tscratch        [8]byte\n\treadedKeys     []int\n\treadedDict     []T\n\tdict           map[T]int\n\tkeys           []int\n\tnullable       bool\n}\n\n// NewLowCardinality return new LC for LowCardinality ClickHouse DataTypes\nfunc NewLowCardinality[T comparable](dictColumn Column[T]) *LowCardinality[T] {\n\treturn NewLC(dictColumn)\n}\n\n// NewLC return new LC for LowCardinality ClickHouse DataTypes\nfunc NewLC[T comparable](dictColumn Column[T]) *LowCardinality[T] {\n\tl := &LowCardinality[T]{\n\t\tdict:       make(map[T]int),\n\t\tdictColumn: dictColumn,\n\t}\n\treturn l\n}\n\n// Data get all the data in current block as a slice.\n//\n// NOTE: the return slice only valid in current block, if you want to use it after, you should copy it. 
or use Read\nfunc (c *LowCardinality[T]) Data() []T {\n\tresult := make([]T, c.NumRow())\n\tfor i, k := range c.readedKeys {\n\t\tresult[i] = c.readedDict[k]\n\t}\n\treturn result\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *LowCardinality[T]) Read(value []T) []T {\n\tfor _, k := range c.readedKeys {\n\t\tvalue = append(value, c.readedDict[k])\n\t}\n\treturn value\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *LowCardinality[T]) Row(row int) T {\n\treturn c.readedDict[c.readedKeys[row]]\n}\n\n// Append value for insert\nfunc (c *LowCardinality[T]) Append(v ...T) {\n\tfor _, v := range v {\n\t\tkey, ok := c.dict[v]\n\t\tif !ok {\n\t\t\tkey = len(c.dict)\n\t\t\tc.dict[v] = key\n\t\t\tc.dictColumn.Append(v)\n\t\t}\n\t\tc.keys = append(c.keys, key)\n\t}\n\tc.numRow += len(v)\n}\n\n// Dicts get dictionary data\n// each key is an index of the dictionary\nfunc (c *LowCardinality[T]) Dicts() []T {\n\treturn c.readedDict\n}\n\n// Keys get keys of data\n// each key is an index of the dictionary\nfunc (c *LowCardinality[T]) Keys() []int {\n\treturn c.readedKeys\n}\n\n// NumRow return number of row for this block\nfunc (c *LowCardinality[T]) NumRow() int {\n\treturn c.numRow\n}\n\n// Array return a Array type for this column\nfunc (c *LowCardinality[T]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n\n// Reset all statuses and buffered data\n//\n// After each reading, the reading data does not need to be reset. 
It will be automatically reset.\n//\n// When inserting, buffers are reset only after the operation is successful.\n// If an error occurs, you can safely call insert again.\nfunc (c *LowCardinality[T]) Reset() {\n\tc.dictColumn.Reset()\n\tc.dict = make(map[T]int)\n\tc.keys = c.keys[:0]\n\tc.readedDict = c.readedDict[:0]\n\tc.readedKeys = c.readedKeys[:0]\n\tc.numRow = 0\n}\n\n// SetWriteBufferSize set write buffer (number of rows)\n// this buffer only used for writing.\n// By setting this buffer, you will avoid allocating the memory several times.\nfunc (c *LowCardinality[T]) SetWriteBufferSize(row int) {\n\tif cap(c.keys) < row {\n\t\tc.keys = make([]int, 0, row)\n\t}\n}\n\n// ReadRaw read raw data from the reader. it runs automatically\nfunc (c *LowCardinality[T]) ReadRaw(num int, r *readerwriter.Reader) error {\n\tc.r = r\n\tc.numRow = num\n\tif c.numRow == 0 {\n\t\tc.indices = newIndicesColumn[uint8]()\n\t\tc.readedDict = c.readedDict[:0]\n\t\tc.readedKeys = c.readedKeys[:0]\n\t\t// to reset nullable dictionary\n\t\treturn c.dictColumn.ReadRaw(0, r)\n\t}\n\n\tserializationType, err := c.r.Uint64()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reading serialization type: %w\", err)\n\t}\n\tintType := int(serializationType & 0xf)\n\n\tdictionarySize, err := c.r.Uint64()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reading dictionary size: %w\", err)\n\t}\n\n\terr = c.dictColumn.ReadRaw(int(dictionarySize), r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reading dictionary: %w\", err)\n\t}\n\n\tindicesSize, err := r.Uint64()\n\tc.numRow = int(indicesSize)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reading indices size: %w\", err)\n\t}\n\tif c.indices == nil || c.oldIndicesType != intType {\n\t\tc.indices = getLCIndicate(intType)\n\t\tc.oldIndicesType = intType\n\t}\n\n\terr = c.indices.ReadRaw(c.numRow, c.r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reading indices: %w\", err)\n\t}\n\tc.readedDict = c.readedDict[:0]\n\tc.readedKeys = 
c.readedKeys[:0]\n\tc.readedDict = c.dictColumn.Read(c.readedDict)\n\tc.indices.readInt(&c.readedKeys)\n\treturn nil\n}\n\n// HeaderReader writes header data to writer\n// it uses internally\nfunc (c *LowCardinality[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {\n\tc.r = r\n\terr := c.readColumn(readColumn, revision)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// ready KeysSerializationVersion.\n\t_, err = r.Uint64()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reading keys serialization version: %w\", err)\n\t}\n\n\tif !c.nullable {\n\t\treturn c.dictColumn.HeaderReader(r, false, revision)\n\t}\n\n\treturn c.dictColumn.HeaderReader(r, false, revision)\n}\n\nfunc (c *LowCardinality[T]) ColumnType() string {\n\tif !c.nullable {\n\t\treturn strings.ReplaceAll(helper.LowCardinalityTypeStr, \"<type>\", c.dictColumn.ColumnType())\n\t}\n\treturn strings.ReplaceAll(helper.LowCardinalityNullableTypeStr, \"<type>\", c.dictColumn.ColumnType())\n}\n\nfunc (c *LowCardinality[T]) Validate() error {\n\tchType := helper.FilterSimpleAggregate(c.chType)\n\tif !c.nullable {\n\t\tif !helper.IsLowCardinality(chType) {\n\t\t\treturn &ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t\tc.dictColumn.SetType(chType[helper.LenLowCardinalityStr : len(chType)-1])\n\t} else {\n\t\tif !helper.IsNullableLowCardinality(chType) {\n\t\t\treturn &ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t\tc.dictColumn.SetType(chType[helper.LenLowCardinalityNullableStr : len(chType)-2])\n\t}\n\tif err := c.dictColumn.Validate(); err != nil {\n\t\treturn &ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\treturn nil\n}\n\n// WriteTo write data to ClickHouse.\n// it uses internally\nfunc (c *LowCardinality[T]) WriteTo(w io.Writer) (int64, error) {\n\tdictionarySize := c.dictColumn.NumRow()\n\t// Do not write anything for empty column.\n\t// May happen while writing empty arrays.\n\tif dictionarySize == 0 || (c.nullable && dictionarySize == 1) {\n\t\treturn 0, 
nil\n\t}\n\tvar n int64\n\tintType := int(math.Log2(float64(dictionarySize)) / 8)\n\tstype := serializationType | intType\n\n\tnw, err := c.writeUint64(w, uint64(stype))\n\tn += int64(nw)\n\tif err != nil {\n\t\treturn n, fmt.Errorf(\"error writing stype: %w\", err)\n\t}\n\n\tnw, err = c.writeUint64(w, uint64(dictionarySize))\n\tn += int64(nw)\n\tif err != nil {\n\t\treturn n, fmt.Errorf(\"error writing dictionarySize: %w\", err)\n\t}\n\n\tnwd, err := c.dictColumn.WriteTo(w)\n\tn += nwd\n\tif err != nil {\n\t\treturn n, fmt.Errorf(\"error writing dictionary: %w\", err)\n\t}\n\n\tnw, err = c.writeUint64(w, uint64(len(c.keys)))\n\tn += int64(nw)\n\tif err != nil {\n\t\treturn n, fmt.Errorf(\"error writing keys len: %w\", err)\n\t}\n\tif c.indices == nil || c.oldIndicesType != intType {\n\t\tc.indices = getLCIndicate(intType)\n\t\tc.oldIndicesType = intType\n\t} else {\n\t\tc.indices.Reset()\n\t}\n\tc.indices = getLCIndicate(intType)\n\tc.indices.appendInts(c.keys)\n\tnwt, err := c.indices.WriteTo(w)\n\tif err != nil {\n\t\treturn n, fmt.Errorf(\"error writing indices: %w\", err)\n\t}\n\treturn n + nwt, err\n}\n\n// HeaderWriter reader header data\n// it uses internally\nfunc (c *LowCardinality[T]) HeaderWriter(w *readerwriter.Writer) {\n\t// write KeysSerializationVersion. 
for more information see clickhouse docs\n\tw.Int64(1)\n}\n\nfunc getLCIndicate(intType int) indicesColumnI {\n\tswitch intType {\n\tcase 0:\n\t\treturn newIndicesColumn[uint8]()\n\tcase 1:\n\t\treturn newIndicesColumn[uint16]()\n\tcase 2:\n\t\treturn newIndicesColumn[uint32]()\n\tcase 3:\n\t\tpanic(\"cannot handle this amount of data for lc\")\n\t}\n\t// this should never happen unless something wrong with the code\n\tpanic(\"cannot not find indicate type\")\n}\n\nfunc (c *LowCardinality[T]) writeUint64(w io.Writer, v uint64) (int, error) {\n\tc.scratch[0] = byte(v)\n\tc.scratch[1] = byte(v >> 8)\n\tc.scratch[2] = byte(v >> 16)\n\tc.scratch[3] = byte(v >> 24)\n\tc.scratch[4] = byte(v >> 32)\n\tc.scratch[5] = byte(v >> 40)\n\tc.scratch[6] = byte(v >> 48)\n\tc.scratch[7] = byte(v >> 56)\n\treturn w.Write(c.scratch[:8])\n}\n\nfunc (c *LowCardinality[T]) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/lc_indices.go",
    "content": "package column\n\nimport (\n\t\"io\"\n\t\"unsafe\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\ntype indicesColumnI interface {\n\tReadRaw(num int, r *readerwriter.Reader) error\n\tWriteTo(io.Writer) (int64, error)\n\tappendInts([]int)\n\treadInt(value *[]int)\n\tReset()\n}\n\ntype indicatedTypes interface {\n\tuint8 | uint16 | uint32 | uint64\n}\n\ntype indicesColumn[T indicatedTypes] struct {\n\tBase[T]\n}\n\nfunc newIndicesColumn[T indicatedTypes]() *indicesColumn[T] {\n\tvar tmpValue T\n\tsize := int(unsafe.Sizeof(tmpValue))\n\treturn &indicesColumn[T]{\n\t\tBase: Base[T]{\n\t\t\tsize: size,\n\t\t},\n\t}\n}\n\nfunc (c *indicesColumn[T]) readInt(value *[]int) {\n\tfor _, v := range c.Data() {\n\t\t*value = append(*value,\n\t\t\tint(v),\n\t\t)\n\t}\n}\n\nfunc (c *indicesColumn[T]) appendInts(values []int) {\n\tfor _, v := range values {\n\t\tc.values = append(c.values, T(v))\n\t}\n}\n"
  },
  {
    "path": "column/lc_nullable.go",
    "content": "package column\n\n// LowCardinalityNullable for LowCardinality(Nullable(T)) ClickHouse DataTypes\ntype LowCardinalityNullable[T comparable] struct {\n\tLowCardinality[T]\n}\n\n// NewLowCardinalityNullable return new LowCardinalityNullable for nullable LowCardinality ClickHouse DataTypes\nfunc NewLowCardinalityNullable[T comparable](dictColumn Column[T]) *LowCardinalityNullable[T] {\n\treturn NewLCNullable(dictColumn)\n}\n\n// NewLCNullable return new LowCardinalityNullable for nullable LowCardinality ClickHouse DataTypes\nfunc NewLCNullable[T comparable](dictColumn Column[T]) *LowCardinalityNullable[T] {\n\tvar empty T\n\tdictColumn.Append(empty)\n\tl := &LowCardinalityNullable[T]{\n\t\tLowCardinality: LowCardinality[T]{\n\t\t\tnullable:   true,\n\t\t\tdict:       make(map[T]int),\n\t\t\tdictColumn: dictColumn,\n\t\t},\n\t}\n\treturn l\n}\n\n// Data get all nullable data in current block as a slice.\n//\n// NOTE: the return slice only valid in current block, if you want to use it after, you should copy it. 
or use Read\nfunc (c *LowCardinalityNullable[T]) DataP() []*T {\n\tresult := make([]*T, c.NumRow())\n\tfor i, k := range c.readedKeys {\n\t\tif k == 0 {\n\t\t\tresult[i] = nil\n\t\t} else {\n\t\t\tval := c.readedDict[k]\n\t\t\tresult[i] = &val\n\t\t}\n\t}\n\treturn result\n}\n\n// Read reads all nullable data in current block and append to the input.\nfunc (c *LowCardinalityNullable[T]) ReadP(value []*T) []*T {\n\tfor _, k := range c.readedKeys {\n\t\tif k == 0 {\n\t\t\tvalue = append(value, nil)\n\t\t} else {\n\t\t\tval := c.readedDict[k]\n\t\t\tvalue = append(value, &val)\n\t\t}\n\t}\n\treturn value\n}\n\n// Row return nullable value of given row\n// NOTE: Row number start from zero\nfunc (c *LowCardinalityNullable[T]) RowP(row int) *T {\n\tif c.readedKeys[row] == 0 {\n\t\treturn nil\n\t}\n\tval := c.readedDict[c.readedKeys[row]]\n\treturn &val\n}\n\n// Append value for insert\nfunc (c *LowCardinalityNullable[T]) Append(v ...T) {\n\tfor _, v := range v {\n\t\tkey, ok := c.dict[v]\n\t\tif !ok {\n\t\t\tkey = len(c.dict)\n\t\t\tc.dict[v] = key\n\t\t\tc.dictColumn.Append(v)\n\t\t}\n\t\tc.keys = append(c.keys, key+1)\n\t}\n\n\tc.numRow += len(v)\n}\n\n// Append nil value for insert\nfunc (c *LowCardinalityNullable[T]) AppendNil() {\n\tc.keys = append(c.keys, 0)\n\tc.numRow++\n}\n\n// Append nullable value for insert\n//\n// as an alternative (for better performance), you can use `Append` and `AppendNil` to insert a value\nfunc (c *LowCardinalityNullable[T]) AppendP(v ...*T) {\n\tfor _, v := range v {\n\t\tif v == nil {\n\t\t\tc.keys = append(c.keys, 0)\n\t\t\tcontinue\n\t\t}\n\t\tkey, ok := c.dict[*v]\n\t\tif !ok {\n\t\t\tkey = len(c.dict)\n\t\t\tc.dict[*v] = key\n\t\t\tc.dictColumn.Append(*v)\n\t\t}\n\t\tc.keys = append(c.keys, key+1)\n\t}\n\n\tc.numRow += len(v)\n}\n\n// Array return a Array type for this column\nfunc (c *LowCardinalityNullable[T]) Array() *ArrayNullable[T] {\n\treturn NewArrayNullable[T](c)\n}\n\n// Reset all statuses and buffered data\n//\n// 
After each reading, the reading data does not need to be reset. It will be automatically reset.\n//\n// When inserting, buffers are reset only after the operation is successful.\n// If an error occurs, you can safely call insert again.\nfunc (c *LowCardinalityNullable[T]) Reset() {\n\tc.LowCardinality.Reset()\n\tvar empty T\n\tc.dictColumn.Append(empty)\n}\n\nfunc (c *LowCardinalityNullable[T]) elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/lc_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc TestLcIndicator16(t *testing.T) {\n\ttableName := \"lc_indicator_16\"\n\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t%[1]s_lc LowCardinality(Int64)\n\t\t\t) Engine=Memory`, tableName), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcol := column.New[int64]().LC()\n\n\tvar colInsert []int64\n\n\trows := int(^uint8(0)) + 10\n\tfor i := 0; i < rows; i++ {\n\t\tval := int64(i + 1)\n\t\tcol.Append(val)\n\t\tcolInsert = append(colInsert, val)\n\t}\n\n\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\t%[1]s_lc\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\tcol,\n\t)\n\trequire.NoError(t, err)\n\n\t// test read row\n\tcolRead := column.New[int64]().LC()\n\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s_lc\n\t\tFROM test_%[1]s`, tableName),\n\t\tcolRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar colData []int64\n\n\tfor selectStmt.Next() {\n\t\tcolData = colRead.Read(colData)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\tassert.Equal(t, colInsert, colData)\n}\n\nfunc TestLcIndicator32(t *testing.T) {\n\ttableName := 
\"lc_indicator_32\"\n\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t%[1]s_lc LowCardinality(Int64)\n\t\t\t) Engine=Memory`, tableName), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcol := column.New[int64]().LC()\n\n\tvar colInsert []int64\n\n\trows := int(^uint16(0)) + 10\n\tfor i := 0; i < rows; i++ {\n\t\tval := int64(i + 1)\n\t\tcol.Append(val)\n\t\tcolInsert = append(colInsert, val)\n\t}\n\n\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\t%[1]s_lc\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\tcol,\n\t)\n\trequire.NoError(t, err)\n\n\t// test read row\n\tcolRead := column.New[int64]().LC()\n\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s_lc\n\t\tFROM test_%[1]s`, tableName),\n\t\tcolRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar colData []int64\n\n\tfor selectStmt.Next() {\n\t\tcolData = colRead.Read(colData)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\tassert.Equal(t, colInsert, colData)\n}\n"
  },
  {
    "path": "column/map.go",
    "content": "package column\n\n// Map is a column of Map(K,V) ClickHouse data type\n// Map in clickhouse actually is a array of pair(K,V)\ntype Map[K comparable, V any] struct {\n\tMapBase\n\tkeyColumnData   []K\n\tvalueColumnData []V\n}\n\n// NewMap create a new map column of Map(K,V) ClickHouse data type\nfunc NewMap[K comparable, V any](\n\tkeyColumn Column[K],\n\tvalueColumn Column[V],\n) *Map[K, V] {\n\ta := &Map[K, V]{\n\t\tMapBase: MapBase{\n\t\t\tkeyColumn:    keyColumn,\n\t\t\tvalueColumn:  valueColumn,\n\t\t\toffsetColumn: New[uint64](),\n\t\t},\n\t}\n\ta.resetHook = func() {\n\t\ta.keyColumnData = a.keyColumnData[:0]\n\t\ta.valueColumnData = a.valueColumnData[:0]\n\t}\n\treturn a\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Map[K, V]) Data() []map[K]V {\n\tvalues := make([]map[K]V, c.offsetColumn.numRow)\n\toffsets := c.Offsets()\n\tif len(offsets) == 0 {\n\t\treturn values\n\t}\n\tkeyColumnData := c.getKeyColumnData()\n\tvalueColumnData := c.getValueColumnData()\n\tvar lastOffset uint64\n\tfor i, offset := range offsets {\n\t\tval := make(map[K]V)\n\t\tfor ki, key := range keyColumnData[lastOffset:offset] {\n\t\t\tval[key] = valueColumnData[lastOffset:offset][ki]\n\t\t}\n\t\tvalues[i] = val\n\t\tlastOffset = offset\n\t}\n\treturn values\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Map[K, V]) Read(value []map[K]V) []map[K]V {\n\treturn append(value, c.Data()...)\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Map[K, V]) Row(row int) map[K]V {\n\tvar lastOffset uint64\n\tif row != 0 {\n\t\tlastOffset = c.offsetColumn.Row(row - 1)\n\t}\n\tkeyColumnData := c.getKeyColumnData()\n\tvalueColumnData := c.getValueColumnData()\n\n\tval := make(map[K]V)\n\toffset := c.offsetColumn.Row(row)\n\tfor ki, key := range keyColumnData[lastOffset:offset] {\n\t\tval[key] = valueColumnData[lastOffset:offset][ki]\n\t}\n\treturn val\n}\n\n// Append value for 
insert\nfunc (c *Map[K, V]) Append(v map[K]V) {\n\tc.AppendLen(len(v))\n\tfor k, d := range v {\n\t\tc.keyColumn.(Column[K]).Append(k)\n\t\tc.valueColumn.(Column[V]).Append(d)\n\t}\n}\n\nfunc (c *Map[K, V]) getKeyColumnData() []K {\n\tif len(c.keyColumnData) == 0 {\n\t\tc.keyColumnData = c.keyColumn.(Column[K]).Data()\n\t}\n\treturn c.keyColumnData\n}\nfunc (c *Map[K, V]) getValueColumnData() []V {\n\tif len(c.valueColumnData) == 0 {\n\t\tc.valueColumnData = c.valueColumn.(Column[V]).Data()\n\t}\n\treturn c.valueColumnData\n}\n\n// KeyColumn return the key column\nfunc (c *Map[K, V]) KeyColumn() Column[K] {\n\treturn c.keyColumn.(Column[K])\n}\n\n// ValueColumn return the value column\nfunc (c *Map[K, V]) ValueColumn() Column[V] {\n\treturn c.valueColumn.(Column[V])\n}\n"
  },
  {
    "path": "column/map_base.go",
    "content": "package column\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\n// Map is a column of Map(K,V) ClickHouse data type\n// Map in clickhouse actually is a array of pair(K,V)\n//\n// MapBase is a base class for map and also for non generic  of map to use dynamic select column\ntype MapBase struct {\n\tcolumn\n\toffsetColumn *Base[uint64]\n\tkeyColumn    ColumnBasic\n\tvalueColumn  ColumnBasic\n\toffset       uint64\n\tresetHook    func()\n}\n\n// NewMapBase create a new map column of Map(K,V) ClickHouse data type\nfunc NewMapBase(\n\tkeyColumn, valueColumn ColumnBasic,\n) *MapBase {\n\ta := &MapBase{\n\t\tkeyColumn:    keyColumn,\n\t\tvalueColumn:  valueColumn,\n\t\toffsetColumn: New[uint64](),\n\t}\n\treturn a\n}\n\n// Each run the given function for each row in the column with start and end offsets.\n//\n// in some cases  like Map(K,Array(Nullable)) you can't read the data with generic for this situations. 
you can use this function.\n//\n// For example\n// colNullableArrayReadKey := colNullableArrayRead.KeyColumn().Data()\n// colNullableArrayReadValue := colNullableArrayRead.ValueColumn().(*column.ArrayNullable[V]).DataP()\n//\n//\tcolNullableArrayRead.Each(func(start, end uint64) bool {\n//\t\t\tval := make(map[string][]*V)\n//\t\t\tfor ki, key := range colNullableArrayReadKey[start:end] {\n//\t\t\t\tval[key] = colNullableArrayReadValue[start:end][ki]\n//\t\t\t}\n//\t\t\tcolArrayNullableData = append(colArrayNullableData, val)\n//\t\t\treturn true\n//\t\t})\nfunc (c *MapBase) Each(f func(start, end uint64) bool) {\n\toffsets := c.Offsets()\n\tif len(offsets) == 0 {\n\t\treturn\n\t}\n\tvar lastOffset uint64\n\tfor _, offset := range offsets {\n\t\tif !f(lastOffset, offset) {\n\t\t\treturn\n\t\t}\n\t\tlastOffset = offset\n\t}\n}\n\n// AppendLen Append len for insert\nfunc (c *MapBase) AppendLen(v int) {\n\tc.offset += uint64(v)\n\tc.offsetColumn.Append(c.offset)\n}\n\n// NumRow return number of row for this block\nfunc (c *MapBase) NumRow() int {\n\treturn c.offsetColumn.NumRow()\n}\n\n// Reset all statuses and buffered data\n//\n// After each reading, the reading data does not need to be reset. 
It will be automatically reset.\n//\n// When inserting, buffers are reset only after the operation is successful.\n// If an error occurs, you can safely call insert again.\nfunc (c *MapBase) Reset() {\n\tc.offsetColumn.Reset()\n\tc.keyColumn.Reset()\n\tc.valueColumn.Reset()\n\tc.offset = 0\n}\n\n// Offsets return all the offsets in current block\nfunc (c *MapBase) Offsets() []uint64 {\n\treturn c.offsetColumn.Data()\n}\n\n// TotalRows return total rows on this block of array data\nfunc (c *MapBase) TotalRows() int {\n\tif c.offsetColumn.totalByte == 0 {\n\t\treturn 0\n\t}\n\treturn int(binary.LittleEndian.Uint64(c.offsetColumn.b[c.offsetColumn.totalByte-8 : c.offsetColumn.totalByte]))\n}\n\n// SetWriteBufferSize set write buffer (number of rows)\n// this buffer only used for writing.\n// By setting this buffer, you will avoid allocating the memory several times.\nfunc (c *MapBase) SetWriteBufferSize(row int) {\n\tc.offsetColumn.SetWriteBufferSize(row)\n\tc.keyColumn.SetWriteBufferSize(row)\n\tc.valueColumn.SetWriteBufferSize(row)\n}\n\n// ReadRaw read raw data from the reader. 
it runs automatically\nfunc (c *MapBase) ReadRaw(num int, r *readerwriter.Reader) error {\n\tc.offsetColumn.Reset()\n\terr := c.offsetColumn.ReadRaw(num, r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"map: read offset column: %w\", err)\n\t}\n\n\terr = c.keyColumn.ReadRaw(c.TotalRows(), r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"map: read key column: %w\", err)\n\t}\n\n\terr = c.valueColumn.ReadRaw(c.TotalRows(), r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"map: read value column: %w\", err)\n\t}\n\tif c.resetHook != nil {\n\t\tc.resetHook()\n\t}\n\treturn nil\n}\n\n// KeyColumn return the key column\nfunc (c *MapBase) KeyColumn() ColumnBasic {\n\treturn c.keyColumn\n}\n\n// ValueColumn return the value column\nfunc (c *MapBase) ValueColumn() ColumnBasic {\n\treturn c.valueColumn\n}\n\n// HeaderReader reads header data from reader\n// it uses internally\nfunc (c *MapBase) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {\n\terr := c.offsetColumn.HeaderReader(r, readColumn, revision)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.name = c.offsetColumn.name\n\tc.chType = c.offsetColumn.chType\n\tc.keyColumn.SetName(c.name)\n\tc.valueColumn.SetName(c.name)\n\n\terr = c.keyColumn.HeaderReader(r, false, revision)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"map: read key header: %w\", err)\n\t}\n\terr = c.valueColumn.HeaderReader(r, false, revision)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"map: read value header: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc (c *MapBase) Validate() error {\n\tchType := helper.FilterSimpleAggregate(c.chType)\n\n\tif !helper.IsMap(chType) {\n\t\treturn ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\tcolumnsMap, err := helper.TypesInParentheses(chType[helper.LenMapStr : len(chType)-1])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"map invalid types %w\", err)\n\t}\n\n\tif len(columnsMap) != 2 {\n\t\t//nolint:goerr113\n\t\treturn fmt.Errorf(\"columns number is not equal to map columns number: %d != %d\", 
len(columnsMap), 2)\n\t}\n\n\tc.keyColumn.SetType(columnsMap[0].ChType)\n\tc.keyColumn.SetName(columnsMap[0].Name)\n\tc.valueColumn.SetType(columnsMap[1].ChType)\n\tc.valueColumn.SetName(columnsMap[1].Name)\n\n\tif c.keyColumn.Validate() != nil {\n\t\treturn ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\tif c.valueColumn.Validate() != nil {\n\t\treturn ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *MapBase) ColumnType() string {\n\treturn strings.ReplaceAll(\n\t\tstrings.ReplaceAll(helper.MapTypeStr, \"<key>\", c.keyColumn.ColumnType()),\n\t\t\"<value>\", c.valueColumn.ColumnType())\n}\n\n// WriteTo write data to ClickHouse.\n// it uses internally\nfunc (c *MapBase) WriteTo(w io.Writer) (int64, error) {\n\tnw, err := c.offsetColumn.WriteTo(w)\n\tif err != nil {\n\t\treturn nw, fmt.Errorf(\"write len data: %w\", err)\n\t}\n\tn, errDataColumn := c.keyColumn.WriteTo(w)\n\tnw += n\n\tif errDataColumn != nil {\n\t\treturn nw, fmt.Errorf(\"write key data: %w\", errDataColumn)\n\t}\n\n\tn, errDataColumn = c.valueColumn.WriteTo(w)\n\tnw += n\n\tif errDataColumn != nil {\n\t\treturn nw, fmt.Errorf(\"write value data: %w\", errDataColumn)\n\t}\n\n\treturn nw + n, errDataColumn\n}\n\n// HeaderWriter writes header data to writer\n// it uses internally\nfunc (c *MapBase) HeaderWriter(w *readerwriter.Writer) {\n\tc.keyColumn.HeaderWriter(w)\n\tc.valueColumn.HeaderWriter(w)\n}\n"
  },
  {
    "path": "column/map_nullable.go",
    "content": "package column\n\nimport \"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n\n// MapNullable is a column of Map(K,V) ClickHouse data type where V is nullable.\n// Map in clickhouse actually is a array of pair(K,V)\ntype MapNullable[K comparable, V any] struct {\n\tMap[K, V]\n\tvalueColumn     NullableColumn[V]\n\tkeyColumnData   []K\n\tvalueColumnData []*V\n}\n\n// NewMapNullable create a new map column of Map(K,V) ClickHouse data type\nfunc NewMapNullable[K comparable, V any](\n\tkeyColumn Column[K],\n\tvalueColumn NullableColumn[V],\n) *MapNullable[K, V] {\n\ta := &MapNullable[K, V]{\n\t\tvalueColumn: valueColumn,\n\t\tMap: Map[K, V]{\n\t\t\tMapBase: MapBase{\n\t\t\t\tkeyColumn:    keyColumn,\n\t\t\t\tvalueColumn:  valueColumn,\n\t\t\t\toffsetColumn: New[uint64](),\n\t\t\t},\n\t\t},\n\t}\n\treturn a\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *MapNullable[T, V]) DataP() []map[T]*V {\n\tvalues := make([]map[T]*V, c.offsetColumn.numRow)\n\tvar lastOffset uint64\n\tfor i := 0; i < c.offsetColumn.numRow; i++ {\n\t\tval := make(map[T]*V)\n\t\toffset := c.offsetColumn.Row(i)\n\t\tfor ki, key := range c.keyColumnData[lastOffset:offset] {\n\t\t\tv := c.valueColumnData[lastOffset:offset][ki]\n\t\t\tval[key] = v\n\t\t}\n\t\tvalues[i] = val\n\t\tlastOffset = c.offsetColumn.Row(i)\n\t}\n\treturn values\n}\n\n// Read reads all the data in current block and append to column.\nfunc (c *MapNullable[T, V]) ReadP(value []map[T]*V) []map[T]*V {\n\treturn append(value, c.DataP()...)\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *MapNullable[T, V]) RowP(row int) map[T]*V {\n\tvar lastOffset uint64\n\tif row != 0 {\n\t\tlastOffset = c.offsetColumn.Row(row - 1)\n\t}\n\tval := make(map[T]*V)\n\toffset := c.offsetColumn.Row(row)\n\tfor ki, key := range c.keyColumnData[lastOffset:offset] {\n\t\tv := c.valueColumnData[lastOffset:offset][ki]\n\t\tval[key] = v\n\t}\n\treturn val\n}\n\nfunc (c 
*MapNullable[K, V]) AppendP(v map[K]*V) {\n\tc.AppendLen(len(v))\n\tfor k, d := range v {\n\t\tc.keyColumn.(Column[K]).Append(k)\n\t\tc.valueColumn.AppendP(d)\n\t}\n}\n\n// ReadRaw read raw data from the reader. it runs automatically\nfunc (c *MapNullable[K, V]) ReadRaw(num int, r *readerwriter.Reader) error {\n\terr := c.Map.ReadRaw(num, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.keyColumnData = c.keyColumn.(Column[K]).Data()\n\tc.valueColumnData = c.valueColumn.DataP()\n\n\treturn nil\n}\n\n// ValueColumn return the value column\nfunc (c *MapNullable[K, V]) ValueColumn() NullableColumn[V] {\n\treturn c.valueColumn\n}\n"
  },
  {
    "path": "column/map_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc TestMapUint8(t *testing.T) {\n\ttestMapColumn(t, \"UInt8\", \"uint8\", func(i int) []uint8 {\n\t\td := make([]uint8, 2)\n\t\td[0] = uint8(i)\n\t\td[1] = uint8(i + 1)\n\t\treturn d\n\t}, func(i int) []uint8 {\n\t\td := make([]uint8, 2)\n\t\td[0] = uint8(i)\n\t\td[1] = uint8(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc TestMapUint16(t *testing.T) {\n\ttestMapColumn(t, \"UInt16\", \"uint16\", func(i int) []uint16 {\n\t\td := make([]uint16, 2)\n\t\td[0] = uint16(i)\n\t\td[1] = uint16(i + 1)\n\t\treturn d\n\t}, func(i int) []uint16 {\n\t\td := make([]uint16, 2)\n\t\td[0] = uint16(i)\n\t\td[1] = uint16(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc TestMapUint32(t *testing.T) {\n\ttestMapColumn(t, \"UInt32\", \"uint32\", func(i int) []uint32 {\n\t\td := make([]uint32, 2)\n\t\td[0] = uint32(i)\n\t\td[1] = uint32(i + 1)\n\t\treturn d\n\t}, func(i int) []uint32 {\n\t\td := make([]uint32, 2)\n\t\td[0] = uint32(i)\n\t\td[1] = uint32(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc TestMapUint64(t *testing.T) {\n\ttestMapColumn(t, \"UInt64\", \"uint64\", func(i int) []uint64 {\n\t\td := make([]uint64, 2)\n\t\td[0] = uint64(i)\n\t\td[1] = uint64(i + 1)\n\t\treturn d\n\t}, func(i int) []uint64 {\n\t\td := make([]uint64, 2)\n\t\td[0] = uint64(i)\n\t\td[1] = uint64(i + 1)\n\t\treturn d\n\t})\n}\nfunc TestMapInt8(t *testing.T) {\n\ttestMapColumn(t, \"Int8\", \"int8\", func(i int) []int8 {\n\t\td := make([]int8, 2)\n\t\td[0] = int8(i)\n\t\td[1] = int8(i + 1)\n\t\treturn d\n\t}, func(i int) []int8 {\n\t\td := make([]int8, 2)\n\t\td[0] = int8(i)\n\t\td[1] = int8(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc TestMapInt16(t *testing.T) {\n\ttestMapColumn(t, \"Int16\", \"int16\", func(i int) []int16 {\n\t\td := make([]int16, 
2)\n\t\td[0] = int16(i)\n\t\td[1] = int16(i + 1)\n\t\treturn d\n\t}, func(i int) []int16 {\n\t\td := make([]int16, 2)\n\t\td[0] = int16(i)\n\t\td[1] = int16(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc TestMapInt32(t *testing.T) {\n\ttestMapColumn(t, \"Int32\", \"int32\", func(i int) []int32 {\n\t\td := make([]int32, 2)\n\t\td[0] = int32(i)\n\t\td[1] = int32(i + 1)\n\t\treturn d\n\t}, func(i int) []int32 {\n\t\td := make([]int32, 2)\n\t\td[0] = int32(i)\n\t\td[1] = int32(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc TestMapInt64(t *testing.T) {\n\ttestMapColumn(t, \"Int64\", \"int64\", func(i int) []int64 {\n\t\td := make([]int64, 2)\n\t\td[0] = int64(i)\n\t\td[1] = int64(i + 1)\n\t\treturn d\n\t}, func(i int) []int64 {\n\t\td := make([]int64, 2)\n\t\td[0] = int64(i)\n\t\td[1] = int64(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc TestMapFloat32(t *testing.T) {\n\ttestMapColumn(t, \"Float32\", \"float32\", func(i int) []float32 {\n\t\td := make([]float32, 2)\n\t\td[0] = float32(i)\n\t\td[1] = float32(i + 1)\n\t\treturn d\n\t}, func(i int) []float32 {\n\t\td := make([]float32, 2)\n\t\td[0] = float32(i)\n\t\td[1] = float32(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc TestMapFloat64(t *testing.T) {\n\ttestMapColumn(t, \"Float64\", \"float64\", func(i int) []float64 {\n\t\td := make([]float64, 2)\n\t\td[0] = float64(i)\n\t\td[1] = float64(i + 1)\n\t\treturn d\n\t}, func(i int) []float64 {\n\t\td := make([]float64, 2)\n\t\td[0] = float64(i)\n\t\td[1] = float64(i + 1)\n\t\treturn d\n\t})\n}\n\nfunc testMapColumn[V comparable](\n\tt *testing.T,\n\tchType, tableName string,\n\tfirstVal func(i int) []V,\n\tsecondVal func(i int) []V,\n) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_map_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  
\"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_map_%[1]s (\n\t\t\t\t%[1]s Map(String,%[2]s),\n\t\t\t\t%[1]s_nullable Map(String,Nullable(%[2]s)),\n\t\t\t\t%[1]s_array Map(String,Array(%[2]s)),\n\t\t\t\t%[1]s_array_nullable Map(String,Array(Nullable(%[2]s))),\n\t\t\t\t%[1]s_lc Map(String,LowCardinality(%[2]s)),\n\t\t\t\t%[1]s_nullable_lc Map(String,LowCardinality(Nullable(%[2]s))),\n\t\t\t\t%[1]s_array_lc Map(String,Array(LowCardinality(%[2]s))),\n\t\t\t\t%[1]s_array_lc_nullable Map(String,Array(LowCardinality(Nullable(%[2]s))))\n\t\t\t) Engine=Memory`, tableName, chType), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcol := column.NewMap[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V](),\n\t)\n\tcolNullable := column.NewMapNullable[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable(),\n\t)\n\tcolArray := column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Array(),\n\t)\n\tcolNullableArray := column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().Array(),\n\t)\n\tcolLC := column.NewMap[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().LC(),\n\t)\n\tcolLCNullable := column.NewMapNullable[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().LC(),\n\t)\n\tcolArrayLC := column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().LC().Array(),\n\t)\n\tcolArrayLCNullable := column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().LC().Array(),\n\t)\n\tvar colInsert []map[string]V\n\tvar colNullableInsert []map[string]*V\n\tvar colArrayInsert []map[string][]V\n\tvar colArrayNullableInsert []map[string][]*V\n\tvar colLCInsert []map[string]V\n\tvar colLCNullableInsert []map[string]*V\n\tvar colLCArrayInsert []map[string][]V\n\tvar colLCNullableArrayInsert []map[string][]*V\n\n\t// 
SetWriteBufferSize is not necessary. this just to show how to set write buffer\n\tcol.SetWriteBufferSize(10)\n\tcolNullable.SetWriteBufferSize(10)\n\tcolArray.SetWriteBufferSize(10)\n\tcolNullableArray.SetWriteBufferSize(10)\n\tcolLC.SetWriteBufferSize(10)\n\tcolLCNullable.SetWriteBufferSize(10)\n\tcolArrayLC.SetWriteBufferSize(10)\n\tcolArrayLCNullable.SetWriteBufferSize(10)\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tvalData := firstVal(i)\n\t\t\tval2Data := secondVal(i)\n\t\t\tval := map[string]V{\n\t\t\t\t\"a\": valData[0],\n\t\t\t\t\"b\": valData[1],\n\t\t\t}\n\t\t\tvalNullable := map[string]*V{\n\t\t\t\t\"a\": &valData[0],\n\t\t\t\t\"b\": &valData[1],\n\t\t\t}\n\t\t\tvalNullable2 := map[string]*V{\n\t\t\t\t\"a\": &valData[1],\n\t\t\t\t\"b\": nil,\n\t\t\t}\n\t\t\tvalArray := map[string][]V{\n\t\t\t\t\"a\": valData,\n\t\t\t\t\"b\": val2Data,\n\t\t\t}\n\n\t\t\tvalArrayNil := map[string][]*V{\n\t\t\t\t\"a\": {&valData[0], &valData[1]},\n\t\t\t\t\"b\": {&valData[1], nil},\n\t\t\t}\n\t\t\tcol.Append(val)\n\t\t\tcolInsert = append(colInsert, val)\n\n\t\t\t// example add nullable\n\n\t\t\tif i%2 == 0 {\n\t\t\t\tcolNullableInsert = append(colNullableInsert, valNullable)\n\t\t\t\tcolNullable.AppendP(valNullable)\n\t\t\t\tcolLCNullableInsert = append(colLCNullableInsert, valNullable)\n\t\t\t\tcolLCNullable.AppendP(valNullable)\n\t\t\t} else {\n\t\t\t\tcolNullableInsert = append(colNullableInsert, valNullable2)\n\t\t\t\tcolNullable.AppendP(valNullable2)\n\t\t\t\tcolLCNullableInsert = append(colLCNullableInsert, valNullable2)\n\t\t\t\tcolLCNullable.AppendP(valNullable2)\n\t\t\t}\n\n\t\t\tcolArray.Append(valArray)\n\t\t\tcolArrayInsert = append(colArrayInsert, valArray)\n\n\t\t\tcolNullableArray.AppendLen(len(valArrayNil))\n\t\t\tfor k, v := range valArrayNil 
{\n\t\t\t\tcolNullableArray.KeyColumn().Append(k)\n\t\t\t\tcolNullableArray.ValueColumn().(*column.ArrayNullable[V]).AppendP(v)\n\t\t\t}\n\t\t\tcolArrayNullableInsert = append(colArrayNullableInsert, valArrayNil)\n\n\t\t\tcolLCInsert = append(colLCInsert, val)\n\t\t\tcolLC.Append(val)\n\n\t\t\tcolLCArrayInsert = append(colLCArrayInsert, valArray)\n\t\t\tcolArrayLC.Append(valArray)\n\n\t\t\tcolLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil)\n\t\t\tcolArrayLCNullable.AppendLen(len(valArrayNil))\n\t\t\tfor k, v := range valArrayNil {\n\t\t\t\tcolArrayLCNullable.KeyColumn().Append(k)\n\t\t\t\tcolArrayLCNullable.ValueColumn().(*column.ArrayNullable[V]).AppendP(v)\n\t\t\t}\n\t\t}\n\n\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\ttest_map_%[1]s (\n\t\t\t%[1]s,\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array,\n\t\t\t%[1]s_array_nullable,\n\t\t\t%[1]s_lc,\n\t\t\t%[1]s_nullable_lc,\n\t\t\t%[1]s_array_lc,\n\t\t\t%[1]s_array_lc_nullable\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\tcol,\n\t\t\tcolNullable,\n\t\t\tcolArray,\n\t\t\tcolNullableArray,\n\t\t\tcolLC,\n\t\t\tcolLCNullable,\n\t\t\tcolArrayLC,\n\t\t\tcolArrayLCNullable,\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// test read all\n\tcolRead := column.NewMap[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V](),\n\t)\n\tcolNullableRead := column.NewMapNullable[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable(),\n\t)\n\tcolArrayRead := column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Array(),\n\t)\n\tcolNullableArrayRead := column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().Array(),\n\t)\n\tcolLCRead := column.NewMap[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().LC(),\n\t)\n\tcolLCNullableRead := column.NewMapNullable[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().LC(),\n\t)\n\tcolArrayLCRead := column.NewMap[string, 
[]V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().LC().Array(),\n\t)\n\tcolArrayLCNullableRead := column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().LC().Array(),\n\t)\n\tvar colData []map[string]V\n\tvar colNullableData []map[string]*V\n\tvar colArrayData []map[string][]V\n\tvar colArrayNullableData []map[string][]*V\n\tvar colLCData []map[string]V\n\tvar colLCNullableData []map[string]*V\n\tvar colLCArrayData []map[string][]V\n\tvar colLCNullableArrayData []map[string][]*V\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t%[1]s,\n\t%[1]s_nullable,\n\t%[1]s_array,\n\t%[1]s_array_nullable,\n\t%[1]s_lc,\n\t%[1]s_nullable_lc,\n\t%[1]s_array_lc,\n\t%[1]s_array_lc_nullable\n\tFROM test_map_%[1]s`, tableName),\n\t\tcolRead,\n\t\tcolNullableRead,\n\t\tcolArrayRead,\n\t\tcolNullableArrayRead,\n\t\tcolLCRead,\n\t\tcolLCNullableRead,\n\t\tcolArrayLCRead,\n\t\tcolArrayLCNullableRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tfor selectStmt.Next() {\n\t\tcolData = colRead.Read(colData)\n\t\tcolNullableData = colNullableRead.ReadP(colNullableData)\n\t\tcolArrayData = colArrayRead.Read(colArrayData)\n\t\tcolNullableArrayReadKey := colNullableArrayRead.KeyColumn().Data()\n\t\tcolNullableArrayReadValue := colNullableArrayRead.ValueColumn().(*column.ArrayNullable[V]).DataP()\n\t\tcolNullableArrayRead.Each(func(start, end uint64) bool {\n\t\t\tval := make(map[string][]*V)\n\t\t\tfor ki, key := range colNullableArrayReadKey[start:end] {\n\t\t\t\tval[key] = colNullableArrayReadValue[start:end][ki]\n\t\t\t}\n\t\t\tcolArrayNullableData = append(colArrayNullableData, val)\n\t\t\treturn true\n\t\t})\n\t\tcolLCData = colLCRead.Read(colLCData)\n\t\tcolLCNullableData = colLCNullableRead.ReadP(colLCNullableData)\n\t\tcolLCArrayData = colArrayLCRead.Read(colLCArrayData)\n\n\t\tcolArrayLCNullableReadKey := colArrayLCNullableRead.KeyColumn().Data()\n\t\tcolArrayLCNullableReadValue := 
colArrayLCNullableRead.ValueColumn().(*column.ArrayNullable[V]).DataP()\n\t\tcolArrayLCNullableRead.Each(func(start, end uint64) bool {\n\t\t\tval := make(map[string][]*V)\n\t\t\tfor ki, key := range colArrayLCNullableReadKey[start:end] {\n\t\t\t\tval[key] = colArrayLCNullableReadValue[start:end][ki]\n\t\t\t}\n\t\t\tcolLCNullableArrayData = append(colLCNullableArrayData, val)\n\t\t\treturn true\n\t\t})\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, colNullableInsert, colNullableData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colArrayNullableInsert, colArrayNullableData)\n\tassert.Equal(t, colLCInsert, colLCData)\n\tassert.Equal(t, colLCNullableInsert, colLCNullableData)\n\tassert.Equal(t, colLCArrayInsert, colLCArrayData)\n\tassert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)\n\n\t// test read Row\n\tcolRead = column.NewMap[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V](),\n\t)\n\tcolNullableRead = column.NewMapNullable[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable(),\n\t)\n\tcolArrayRead = column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Array(),\n\t)\n\tcolNullableArrayRead = column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().Array(),\n\t)\n\tcolLCRead = column.NewMap[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().LC(),\n\t)\n\tcolLCNullableRead = column.NewMapNullable[string, V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().LC(),\n\t)\n\tcolArrayLCRead = column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().LC().Array(),\n\t)\n\tcolArrayLCNullableRead = column.NewMap[string, []V](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[V]().Nullable().LC().Array(),\n\t)\n\tcolData = colData[:0]\n\tcolNullableData = colNullableData[:0]\n\tcolArrayData = colArrayData[:0]\n\tcolArrayNullableData = colArrayNullableData[:0]\n\tcolLCData = 
colLCData[:0]\n\tcolLCNullableData = colLCNullableData[:0]\n\tcolLCArrayData = colLCArrayData[:0]\n\tcolLCNullableArrayData = colLCNullableArrayData[:0]\n\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s,\n\t\t%[1]s_nullable,\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_lc,\n\t\t%[1]s_nullable_lc,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\t\tFROM test_map_%[1]s`, tableName),\n\t\tcolRead,\n\t\tcolNullableRead,\n\t\tcolArrayRead,\n\t\tcolNullableArrayRead,\n\t\tcolLCRead,\n\t\tcolLCNullableRead,\n\t\tcolArrayLCRead,\n\t\tcolArrayLCNullableRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tfor selectStmt.Next() {\n\t\tfor i := 0; i < selectStmt.RowsInBlock(); i++ {\n\t\t\tcolData = append(colData, colRead.Row(i))\n\t\t\tcolNullableData = append(colNullableData, colNullableRead.RowP(i))\n\t\t\tcolArrayData = append(colArrayData, colArrayRead.Row(i))\n\t\t\tcolLCData = append(colLCData, colLCRead.Row(i))\n\t\t\tcolLCNullableData = append(colLCNullableData, colLCNullableRead.RowP(i))\n\t\t\tcolLCArrayData = append(colLCArrayData, colArrayLCRead.Row(i))\n\t\t}\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, colNullableInsert, colNullableData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colLCInsert, colLCData)\n\tassert.Equal(t, colLCNullableInsert, colLCNullableData)\n\tassert.Equal(t, colLCArrayInsert, colLCArrayData)\n\n\t// check dynamic column\n\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s,\n\t\t%[1]s_nullable,\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_lc,\n\t\t%[1]s_nullable_lc,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\t\tFROM test_map_%[1]s`, tableName),\n\t)\n\n\trequire.NoError(t, err)\n\tautoColumns := selectStmt.Columns()\n\n\tassert.Len(t, autoColumns, 8)\n\n\tassert.Equal(t, colRead.ColumnType(), 
autoColumns[0].ColumnType())\n\tassert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())\n\tassert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())\n\tassert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())\n\tassert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())\n\tassert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType())\n\tassert.Equal(t, colLCNullableRead.ColumnType(), autoColumns[5].ColumnType())\n\tassert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType())\n\tassert.Equal(t, colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType())\n\n\tfor selectStmt.Next() {\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n}\n\nfunc TestMapEmptyResult(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\t// test read all\n\tcolRead := column.NewMap[uint64, uint64](\n\t\tcolumn.New[uint64](),\n\t\tcolumn.New[uint64](),\n\t)\n\n\tselectStmt, err := conn.Select(context.Background(), `SELECT map(number,number) from system.numbers limit 0`,\n\t\tcolRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tfor selectStmt.Next() {\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\tassert.Equal(t, colRead.Data(), []map[uint64]uint64{})\n\tassert.Equal(t, colRead.TotalRows(), 0)\n\tcolRead.Each(func(start, end uint64) bool {\n\t\tassert.Fail(t, \"should not be called\")\n\t\treturn true\n\t})\n}\n\nfunc TestMapEmpty(t *testing.T) {\n\tt.Parallel()\n\n\ttableName := \"map_empty\"\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_map_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := 
chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_map_%[1]s (\n\t\t\t\t%[1]s Map(String,%[2]s),\n\t\t\t\t%[1]s_nullable Map(String,Nullable(%[2]s)),\n\t\t\t\t%[1]s_array Map(String,Array(%[2]s)),\n\t\t\t\t%[1]s_array_nullable Map(String,Array(Nullable(%[2]s))),\n\t\t\t\t%[1]s_lc Map(String,LowCardinality(%[2]s)),\n\t\t\t\t%[1]s_nullable_lc Map(String,LowCardinality(Nullable(%[2]s))),\n\t\t\t\t%[1]s_array_lc Map(String,Array(LowCardinality(%[2]s))),\n\t\t\t\t%[1]s_array_lc_nullable Map(String,Array(LowCardinality(Nullable(%[2]s))))\n\t\t\t) Engine=Memory`, tableName, \"UInt16\"), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcol := column.NewMap[string, uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16](),\n\t)\n\tcolNullable := column.NewMapNullable[string, uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Nullable(),\n\t)\n\tcolArray := column.NewMap[string, []uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Array(),\n\t)\n\tcolNullableArray := column.NewMap[string, []uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Nullable().Array(),\n\t)\n\tcolLC := column.NewMap[string, uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().LC(),\n\t)\n\tcolLCNullable := column.NewMapNullable[string, uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Nullable().LC(),\n\t)\n\tcolArrayLC := column.NewMap[string, []uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().LC().Array(),\n\t)\n\tcolArrayLCNullable := column.NewMap[string, 
[]uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Nullable().LC().Array(),\n\t)\n\n\tcol.Append(nil)\n\tcol.Append(map[string]uint16{})\n\tcolNullable.Append(nil)\n\tcolNullable.AppendP(map[string]*uint16{})\n\tcolArray.Append(nil)\n\tcolArray.Append(map[string][]uint16{})\n\tcolNullableArray.Append(nil)\n\tcolNullableArray.Append(map[string][]uint16{})\n\tcolLC.Append(nil)\n\tcolLC.Append(map[string]uint16{})\n\tcolLCNullable.Append(nil)\n\tcolLCNullable.AppendP(map[string]*uint16{})\n\tcolArrayLC.Append(nil)\n\tcolArrayLC.Append(map[string][]uint16{})\n\tcolArrayLCNullable.Append(nil)\n\tcolArrayLCNullable.Append(map[string][]uint16{})\n\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\ttest_map_%[1]s (\n\t\t\t%[1]s,\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array,\n\t\t\t%[1]s_array_nullable,\n\t\t\t%[1]s_lc,\n\t\t\t%[1]s_nullable_lc,\n\t\t\t%[1]s_array_lc,\n\t\t\t%[1]s_array_lc_nullable\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\tcol,\n\t\tcolNullable,\n\t\tcolArray,\n\t\tcolNullableArray,\n\t\tcolLC,\n\t\tcolLCNullable,\n\t\tcolArrayLC,\n\t\tcolArrayLCNullable,\n\t)\n\trequire.NoError(t, err)\n\t// test read all\n\tcolRead := column.NewMap[string, uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16](),\n\t)\n\tcolNullableRead := column.NewMapNullable[string, uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Nullable(),\n\t)\n\tcolArrayRead := column.NewMap[string, []uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Array(),\n\t)\n\tcolNullableArrayRead := column.NewMap[string, []uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Nullable().Array(),\n\t)\n\tcolLCRead := column.NewMap[string, uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().LC(),\n\t)\n\tcolLCNullableRead := column.NewMapNullable[string, uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Nullable().LC(),\n\t)\n\tcolArrayLCRead := column.NewMap[string, 
[]uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().LC().Array(),\n\t)\n\tcolArrayLCNullableRead := column.NewMap[string, []uint16](\n\t\tcolumn.NewString(),\n\t\tcolumn.New[uint16]().Nullable().LC().Array(),\n\t)\n\tvar colData []map[string]uint16\n\tvar colNullableData []map[string]*uint16\n\tvar colArrayData []map[string][]uint16\n\tvar colArrayNullableData []map[string][]*uint16\n\tvar colLCData []map[string]uint16\n\tvar colLCNullableData []map[string]*uint16\n\tvar colLCArrayData []map[string][]uint16\n\tvar colLCNullableArrayData []map[string][]*uint16\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t%[1]s,\n\t%[1]s_nullable,\n\t%[1]s_array,\n\t%[1]s_array_nullable,\n\t%[1]s_lc,\n\t%[1]s_nullable_lc,\n\t%[1]s_array_lc,\n\t%[1]s_array_lc_nullable\n\tFROM test_map_%[1]s`, tableName),\n\t\tcolRead,\n\t\tcolNullableRead,\n\t\tcolArrayRead,\n\t\tcolNullableArrayRead,\n\t\tcolLCRead,\n\t\tcolLCNullableRead,\n\t\tcolArrayLCRead,\n\t\tcolArrayLCNullableRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tfor selectStmt.Next() {\n\t\tcolData = colRead.Read(colData)\n\t\tcolNullableData = colNullableRead.ReadP(colNullableData)\n\t\tcolArrayData = colArrayRead.Read(colArrayData)\n\t\tcolNullableArrayReadKey := colNullableArrayRead.KeyColumn().Data()\n\t\tcolNullableArrayReadValue := colNullableArrayRead.ValueColumn().(*column.ArrayNullable[uint16]).DataP()\n\t\tcolNullableArrayRead.Each(func(start, end uint64) bool {\n\t\t\tval := make(map[string][]*uint16)\n\t\t\tfor ki, key := range colNullableArrayReadKey[start:end] {\n\t\t\t\tval[key] = colNullableArrayReadValue[start:end][ki]\n\t\t\t}\n\t\t\tcolArrayNullableData = append(colArrayNullableData, val)\n\t\t\treturn true\n\t\t})\n\t\tcolLCData = colLCRead.Read(colLCData)\n\t\tcolLCNullableData = colLCNullableRead.ReadP(colLCNullableData)\n\t\tcolLCArrayData = colArrayLCRead.Read(colLCArrayData)\n\n\t\tcolArrayLCNullableReadKey := 
colArrayLCNullableRead.KeyColumn().Data()\n\t\tcolArrayLCNullableReadValue := colArrayLCNullableRead.ValueColumn().(*column.ArrayNullable[uint16]).DataP()\n\t\tcolArrayLCNullableRead.Each(func(start, end uint64) bool {\n\t\t\tval := make(map[string][]*uint16)\n\t\t\tfor ki, key := range colArrayLCNullableReadKey[start:end] {\n\t\t\t\tval[key] = colArrayLCNullableReadValue[start:end][ki]\n\t\t\t}\n\t\t\tcolLCNullableArrayData = append(colLCNullableArrayData, val)\n\t\t\treturn true\n\t\t})\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, []map[string]uint16{{}, {}}, colData)\n\tassert.Equal(t, []map[string]uint16{{}, {}}, colRead.Data())\n\tassert.Equal(t, []map[string]*uint16{{}, {}}, colNullableData)\n\tassert.Equal(t, []map[string]*uint16{{}, {}}, colNullableRead.DataP())\n\tassert.Equal(t, []map[string][]uint16{{}, {}}, colArrayData)\n\tassert.Equal(t, []map[string][]uint16{{}, {}}, colArrayRead.Data())\n\tassert.Equal(t, []map[string][]*uint16{{}, {}}, colArrayNullableData)\n\tassert.Equal(t, []map[string]uint16{{}, {}}, colLCData)\n\tassert.Equal(t, []map[string]uint16{{}, {}}, colLCRead.Data())\n\tassert.Equal(t, []map[string]*uint16{{}, {}}, colLCNullableData)\n\tassert.Equal(t, []map[string]*uint16{{}, {}}, colLCNullableRead.DataP())\n\tassert.Equal(t, []map[string][]uint16{{}, {}}, colLCArrayData)\n\tassert.Equal(t, []map[string][]uint16{{}, {}}, colArrayLCRead.Data())\n\tassert.Equal(t, []map[string][]*uint16{{}, {}}, colLCNullableArrayData)\n}\n"
  },
  {
    "path": "column/nested.go",
    "content": "package column\n\n// NewNested create a new nested of Nested(T1,T2,.....,Tn) ClickHouse data type\n//\n// this is actually an alias for NewTuple(T1,T2,.....,Tn).Array()\nfunc NewNested(columns ...ColumnBasic) *ArrayBase {\n\treturn NewTuple(columns...).Array()\n}\n"
  },
  {
    "path": "column/nested_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/types\"\n)\n\nfunc TestNestedNoFlattened(t *testing.T) {\n\ttableName := \"nested_no_flattened\"\n\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t\t{\n\t\t\tName:  \"flatten_nested\",\n\t\t\tValue: \"false\",\n\t\t},\n\t}\n\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t\tcol1 Nested(col1_n1 Int64, col2_n1 String),\n\t\t\tcol2 Nested(col1_n2 Int64, col2_n2 Nested(col1_n2_n1 Int64, col2_n2_n2 String))\n\t\t) Engine=Memory`, tableName), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\ttype Col1Type types.Tuple2[int64, string]\n\tcol1 := column.NewNested2[Col1Type, int64, string](column.New[int64](), column.NewString())\n\n\ttype Col2Type types.Tuple2[int64, []Col1Type]\n\n\tcol2N2 := column.NewNested2[Col1Type, int64, string](column.New[int64](), column.NewString())\n\tcol2 := column.NewNested2[Col2Type, int64, []Col1Type](column.New[int64](), col2N2)\n\n\tvar col1Insert [][]Col1Type\n\tvar col2Insert [][]Col2Type\n\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tvalString := fmt.Sprintf(\"string %d\", i)\n\t\t\tvalInt := int64(i)\n\t\t\tval2String := fmt.Sprintf(\"string %d\", i+1)\n\t\t\tval2Int := int64(i + 
1)\n\t\t\tcol1.Append([]Col1Type{\n\t\t\t\t{\n\t\t\t\t\tCol1: valInt,\n\t\t\t\t\tCol2: valString,\n\t\t\t\t},\n\t\t\t},\n\t\t\t)\n\t\t\tcol1Insert = append(col1Insert, []Col1Type{\n\t\t\t\t{\n\t\t\t\t\tCol1: valInt,\n\t\t\t\t\tCol2: valString,\n\t\t\t\t},\n\t\t\t})\n\n\t\t\tcol2.Append([]Col2Type{\n\t\t\t\t{\n\t\t\t\t\tCol1: valInt,\n\t\t\t\t\tCol2: []Col1Type{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tCol1: val2Int,\n\t\t\t\t\t\t\tCol2: val2String,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t\tcol2Insert = append(col2Insert, []Col2Type{\n\t\t\t\t{\n\t\t\t\t\tCol1: valInt,\n\t\t\t\t\tCol2: []Col1Type{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tCol1: val2Int,\n\t\t\t\t\t\t\tCol2: val2String,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\tcol1,\n\t\t\t\tcol2\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\tcol1,\n\t\t\tcol2,\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// example read all\n\n\tcol1Read := column.NewTuple2[Col1Type, int64, string](column.New[int64](), column.NewString()).Array()\n\n\tcol2N2Read := column.NewNested2[Col1Type, int64, string](column.New[int64](), column.NewString())\n\tcol2Read := column.NewNested2[Col2Type, int64, []Col1Type](column.New[int64](), col2N2Read)\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\tcol1,col2\n\tFROM test_%[1]s`, tableName),\n\t\tcol1Read,\n\t\tcol2Read)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\tvar col1Data [][]Col1Type\n\tvar col2Data [][]Col2Type\n\n\tfor selectStmt.Next() {\n\t\tcol1Data = col1Read.Read(col1Data)\n\t\tcol2Data = col2Read.Read(col2Data)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, col1Insert, col1Data)\n\tassert.Equal(t, col2Insert, col2Data)\n\n\t// // check dynamic column\n\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\tcol1, col2\n\tFROM test_%[1]s`, tableName))\n\n\trequire.NoError(t, 
err)\n\tautoColumns := selectStmt.Columns()\n\n\tassert.Len(t, autoColumns, 2)\n\n\tassert.Equal(t, column.NewTuple(column.New[int64](), column.NewString()).Array().ColumnType(), autoColumns[0].ColumnType())\n\tassert.Equal(t,\n\t\tcolumn.NewTuple(column.New[int64](),\n\t\t\tcolumn.NewTuple(column.New[int64](), column.NewString()).Array()).Array().\n\t\t\tColumnType(), autoColumns[1].ColumnType())\n\n\tfor selectStmt.Next() {\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n}\n"
  },
  {
    "path": "column/nullable.go",
    "content": "package column\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"unsafe\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\ntype appendEmptyInterface interface {\n\tappendEmpty()\n}\n\n// Nullable is a column of Nullable(T) ClickHouse data type\ntype Nullable[T comparable] struct {\n\tcolumn\n\tnumRow     int\n\tdataColumn Column[T]\n\twriterData []byte\n\tb          []byte\n}\n\n// NewNullable return new Nullable for Nullable(T) ClickHouse DataType\nfunc NewNullable[T comparable](dataColumn Column[T]) *Nullable[T] {\n\treturn &Nullable[T]{\n\t\tdataColumn: dataColumn,\n\t}\n}\n\n// Data get all the data in current block as a slice.\n//\n// NOTE: the return slice only valid in current block, if you want to use it after, you should copy it. or use Read\nfunc (c *Nullable[T]) Data() []T {\n\treturn c.dataColumn.Data()\n}\n\n// Data get all the nullable  data in current block as a slice of pointer.\n//\n// As an alternative (for better performance).\n// You can use `Data` and one of `RowIsNil` and `ReadNil` and `DataNil`  to detect if value is null or not.\nfunc (c *Nullable[T]) DataP() []*T {\n\tval := make([]*T, c.numRow)\n\tfor i, d := range c.dataColumn.Data() {\n\t\tif c.RowIsNil(i) {\n\t\t\tval[i] = nil\n\t\t} else {\n\t\t\t// make a copy of the value\n\t\t\tv := d\n\t\t\tval[i] = &v\n\t\t}\n\t}\n\treturn val\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Nullable[T]) Read(value []T) []T {\n\treturn c.dataColumn.Read(value)\n}\n\n// ReadP read all value in this block and append to the input slice (for nullable data)\n//\n// As an alternative (for better performance), You can use `Read` and one of `RowIsNil` and `ReadNil` and `DataNil`\n// to detect if value is null or not.\nfunc (c *Nullable[T]) ReadP(value []*T) []*T {\n\tfor i := 0; i < c.numRow; i++ {\n\t\tvalue = append(value, c.RowP(i))\n\t}\n\treturn value\n}\n\n// 
Append value for insert\nfunc (c *Nullable[T]) Row(i int) T {\n\treturn c.dataColumn.Row(i)\n}\n\n// RowP return the value of given row for nullable data\n// NOTE: Row number start from zero\n//\n// As an alternative (for better performance), you can use `Row()` to get a value and `RowIsNil()` to check if it is null.\nfunc (c *Nullable[T]) RowP(row int) *T {\n\tif c.b[row] == 1 {\n\t\treturn nil\n\t}\n\tval := c.dataColumn.Row(row)\n\treturn &val\n}\n\n// ReadAll read all nils state in this block and append to the input\nfunc (c *Nullable[T]) ReadNil(value []bool) []bool {\n\treturn append(value, *(*[]bool)(unsafe.Pointer(&c.b))...)\n}\n\n// DataNil get all nil state in this block\nfunc (c *Nullable[T]) DataNil() []bool {\n\treturn *(*[]bool)(unsafe.Pointer(&c.b))\n}\n\n// RowIsNil return true if the row is null\nfunc (c *Nullable[T]) RowIsNil(row int) bool {\n\treturn c.b[row] == 1\n}\n\n// Append value for insert\nfunc (c *Nullable[T]) Append(v ...T) {\n\tc.writerData = append(c.writerData, make([]uint8, len(v))...)\n\tc.dataColumn.Append(v...)\n}\n\n// Append nullable value for insert\n//\n// as an alternative (for better performance), you can use `Append` and `AppendNil` to insert a value\nfunc (c *Nullable[T]) AppendP(v ...*T) {\n\tfor _, v := range v {\n\t\tif v == nil {\n\t\t\tc.AppendNil()\n\t\t\tcontinue\n\t\t}\n\t\tc.Append(*v)\n\t}\n}\n\n// Append nil value for insert\nfunc (c *Nullable[T]) AppendNil() {\n\tc.writerData = append(c.writerData, 1)\n\tc.dataColumn.(appendEmptyInterface).appendEmpty()\n}\n\n// NumRow return number of row for this block\nfunc (c *Nullable[T]) NumRow() int {\n\treturn c.dataColumn.NumRow()\n}\n\n// Array return a Array type for this column\nfunc (c *Nullable[T]) Array() *ArrayNullable[T] {\n\treturn NewArrayNullable[T](c)\n}\n\n// LC return a low cardinality type for this column\nfunc (c *Nullable[T]) LC() *LowCardinalityNullable[T] {\n\treturn NewLowCardinalityNullable(c.dataColumn)\n}\n\n// LowCardinality return a low 
cardinality type for this column\nfunc (c *Nullable[T]) LowCardinality() *LowCardinalityNullable[T] {\n\treturn NewLowCardinalityNullable(c.dataColumn)\n}\n\n// Reset all statuses and buffered data\n//\n// After each reading, the reading data does not need to be reset. It will be automatically reset.\n//\n// When inserting, buffers are reset only after the operation is successful.\n// If an error occurs, you can safely call insert again.\nfunc (c *Nullable[T]) Reset() {\n\tc.b = c.b[:0]\n\tc.numRow = 0\n\tc.writerData = c.writerData[:0]\n\tc.dataColumn.Reset()\n}\n\n// SetWriteBufferSize set write buffer (number of rows)\n// this buffer only used for writing.\n// By setting this buffer, you will avoid allocating the memory several times.\nfunc (c *Nullable[T]) SetWriteBufferSize(row int) {\n\tif cap(c.writerData) < row {\n\t\tc.writerData = make([]byte, 0, row)\n\t}\n\tc.dataColumn.SetWriteBufferSize(row)\n}\n\n// ReadRaw read raw data from the reader. it runs automatically\nfunc (c *Nullable[T]) ReadRaw(num int, r *readerwriter.Reader) error {\n\tc.Reset()\n\tc.r = r\n\tc.numRow = num\n\n\terr := c.readBuffer()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read nullable data: %w\", err)\n\t}\n\treturn c.dataColumn.ReadRaw(num, r)\n}\n\nfunc (c *Nullable[T]) readBuffer() error {\n\tif cap(c.b) < c.numRow {\n\t\tc.b = make([]byte, c.numRow)\n\t} else {\n\t\tc.b = c.b[:c.numRow]\n\t}\n\t_, err := c.r.Read(c.b)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read nullable data: %w\", err)\n\t}\n\treturn nil\n}\n\n// HeaderReader reads header data from reader\n// it uses internally\nfunc (c *Nullable[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {\n\tc.r = r\n\terr := c.readColumn(readColumn, revision)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.dataColumn.HeaderReader(r, false, revision)\n}\n\nfunc (c *Nullable[T]) Validate() error {\n\tchType := helper.FilterSimpleAggregate(c.chType)\n\tif !helper.IsNullable(chType) {\n\t\treturn 
ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\tc.dataColumn.SetType(chType[helper.LenNullableStr : len(chType)-1])\n\tif c.dataColumn.Validate() != nil {\n\t\treturn ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Nullable[T]) ColumnType() string {\n\treturn strings.ReplaceAll(helper.NullableTypeStr, \"<type>\", c.dataColumn.ColumnType())\n}\n\n// WriteTo write data to ClickHouse.\n// it uses internally\nfunc (c *Nullable[T]) WriteTo(w io.Writer) (int64, error) {\n\tn, err := w.Write(c.writerData)\n\tif err != nil {\n\t\treturn int64(n), fmt.Errorf(\"write nullable data: %w\", err)\n\t}\n\n\tnw, err := c.dataColumn.WriteTo(w)\n\treturn nw + int64(n), err\n}\n\n// HeaderWriter writes header data to writer\n// it uses internally\nfunc (c *Nullable[T]) HeaderWriter(w *readerwriter.Writer) {\n}\n\nfunc (c *Nullable[T]) elem(arrayLevel int, lc bool) ColumnBasic {\n\tif lc {\n\t\treturn c.LowCardinality().elem(arrayLevel)\n\t}\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/nullable_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc TestNullableAsNormal(t *testing.T) {\n\ttableName := \"nullable\"\n\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\tblock_id UInt8,\n\t\t%[1]s_nullable Nullable(Int64),\n\t\t%[1]s_array_nullable Array(Nullable(Int64)),\n\t\t%[1]s_nullable_lc LowCardinality(Nullable(Int64)),\n\t\t%[1]s_array_lc_nullable Array(LowCardinality(Nullable(Int64)))\n\t\t\t) Engine=Memory`, tableName), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tblockID := column.New[uint8]()\n\tcolNullable := column.New[int64]().Nullable()\n\tcolNullableArray := column.New[int64]().Nullable().Array()\n\tcolLCNullable := column.New[int64]().Nullable().LC()\n\tcolArrayLCNullable := column.New[int64]().Nullable().LC().Array()\n\n\tvar colInsert []int64\n\tvar colArrayInsert [][]int64\n\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tval := int64(i + 1)\n\t\t\tblockID.Append(uint8(insertN))\n\t\t\tcolNullable.Append(val)\n\t\t\tcolNullableArray.Append([]int64{val, val + 1})\n\t\t\tcolLCNullable.Append(val)\n\t\t\tcolArrayLCNullable.Append([]int64{val, val + 1})\n\t\t\tcolInsert = append(colInsert, val)\n\t\t\tcolArrayInsert = append(colArrayInsert, []int64{val, 
val + 1})\n\t\t}\n\n\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\tblock_id,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array_nullable,\n\t\t\t\t%[1]s_nullable_lc,\n\t\t\t\t%[1]s_array_lc_nullable\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\tblockID,\n\t\t\tcolNullable,\n\t\t\tcolNullableArray,\n\t\t\tcolLCNullable,\n\t\t\tcolArrayLCNullable,\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// test read row\n\tcolNullableRead := column.New[int64]().Nullable()\n\tcolNullableArrayRead := column.New[int64]().Nullable().Array()\n\tcolLCNullableRead := column.New[int64]().Nullable().LC()\n\tcolArrayLCNullableRead := column.New[int64]().Nullable().LC().Array()\n\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array_nullable,\n\t\t\t%[1]s_nullable_lc,\n\t\t\t%[1]s_array_lc_nullable\n\t\tFROM test_%[1]s order by block_id`, tableName),\n\t\tcolNullableRead,\n\t\tcolNullableArrayRead,\n\t\tcolLCNullableRead,\n\t\tcolArrayLCNullableRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar colData []int64\n\tvar colArrayData [][]int64\n\tvar colLCData []int64\n\tvar colLCArrayData [][]int64\n\tvar colDataNilRead []bool\n\tvar colDataNilData []bool\n\n\tfor selectStmt.Next() {\n\t\tcolData = colNullableRead.Read(colData)\n\t\tcolDataNilRead = colNullableRead.ReadNil(colDataNilRead)\n\t\tcolDataNilData = append(colDataNilData, colNullableRead.DataNil()...)\n\t\tcolArrayData = colNullableArrayRead.Read(colArrayData)\n\t\tcolLCData = colLCNullableRead.Read(colLCData)\n\t\tcolLCArrayData = colArrayLCNullableRead.Read(colLCArrayData)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colInsert, colLCData)\n\tassert.Equal(t, colArrayInsert, colLCArrayData)\n\tassert.Equal(t, colDataNilRead, colDataNilData)\n\tassert.Equal(t, make([]bool, len(colInsert)), 
colDataNilRead)\n\n\t// test read all\n\tcolNullableRead = column.New[int64]().Nullable()\n\tcolNullableArrayRead = column.New[int64]().Nullable().Array()\n\tcolLCNullableRead = column.New[int64]().Nullable().LC()\n\tcolArrayLCNullableRead = column.New[int64]().Nullable().LC().Array()\n\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t\t%[1]s_nullable,\n\t\t\t%[1]s_array_nullable,\n\t\t\t%[1]s_nullable_lc,\n\t\t\t%[1]s_array_lc_nullable\n\t\tFROM test_%[1]s order by block_id`, tableName),\n\t\tcolNullableRead,\n\t\tcolNullableArrayRead,\n\t\tcolLCNullableRead,\n\t\tcolArrayLCNullableRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tcolData = colData[:0]\n\tcolArrayData = colArrayData[:0]\n\tcolLCData = colLCData[:0]\n\tcolLCArrayData = colLCArrayData[:0]\n\n\tfor selectStmt.Next() {\n\t\tfor i := 0; i < selectStmt.RowsInBlock(); i++ {\n\t\t\tcolData = append(colData, colNullableRead.Row(i))\n\t\t\tcolArrayData = append(colArrayData, colNullableArrayRead.Row(i))\n\t\t\tcolLCData = append(colLCData, colLCNullableRead.Row(i))\n\t\t\tcolLCArrayData = append(colLCArrayData, colArrayLCNullableRead.Row(i))\n\t\t}\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colInsert, colLCData)\n\tassert.Equal(t, colArrayInsert, colLCArrayData)\n}\n"
  },
  {
    "path": "column/point.go",
    "content": "package column\n\nimport \"github.com/vahid-sohrabloo/chconn/v2/types\"\n\nfunc NewPoint() *Tuple2[types.Point, float64, float64] {\n\treturn NewTuple2[types.Point, float64, float64](New[float64](), New[float64]())\n}\n"
  },
  {
    "path": "column/size.go",
    "content": "package column\n\nconst (\n\t// Uint8Size data Size of Uint8 Column\n\tUint8Size = 1\n\t// Uint16Size data Size of Uint16 Column\n\tUint16Size = 2\n\t// Uint32Size data Size of Uint32 Column\n\tUint32Size = 4\n\t// Uint64Size data Size of Uint64 Column\n\tUint64Size = 8\n\t// Uint128Size data Size of Uint128 Column\n\tUint128Size = 16\n\t// Uint256Size data Size of Uint256 Column\n\tUint256Size = 32\n\t// Int8Size data Size of Int8 Column\n\tInt8Size = 1\n\t// Int16Size data Size of Int16 Column\n\tInt16Size = 2\n\t// Int32Size data Size of Int32 Column\n\tInt32Size = 4\n\t// Int64Size data Size of Int64 Column\n\tInt64Size = 8\n\t// Int128Size data Size of Int128 Column\n\tInt128Size = 16\n\t// Int256Size data Size of Int256 Column\n\tInt256Size = 32\n\t// Float32Size data Size of Float32 Column\n\tFloat32Size = 4\n\t// Float64Size data Size of Float64 Column\n\tFloat64Size = 8\n\t// DateSize data Size of Date Column\n\tDateSize = 2\n\t// Date32Size data Size of Date32 Column\n\tDate32Size = 4\n\t// DatetimeSize data Size of Datetime Column\n\tDatetimeSize = 4\n\t// Datetime64Size data Size of Datetime64 Column\n\tDatetime64Size = 8\n\t// IPv4Size data Size of IPv4 Column\n\tIPv4Size = 4\n\t// IPv6Size data Size of IPv6 Column\n\tIPv6Size = 16\n\t// Decimal32Size data Size of Decimal32 Column\n\tDecimal32Size = 4\n\t// Decimal64Size data Size of Decimal64 Column\n\tDecimal64Size = 8\n\t// Decimal128Size data Size of Decimal128 Column\n\tDecimal128Size = 16\n\t// Decimal256Size data Size of Decimal256 Column\n\tDecimal256Size = 32\n\t// ArraylenSize data Size of Arraylen Column\n\tArraylenSize = 8\n\t// MaplenSize data Size of Maplen Column\n\tMaplenSize = 8\n\t// UUIDSize data Size of UUID Column\n\tUUIDSize = 16\n)\n"
  },
  {
    "path": "column/string.go",
    "content": "package column\n\n// String is a column of String ClickHouse data type\ntype String struct {\n\tStringBase[string]\n}\n\n// NewString is a column of String ClickHouse data type\nfunc NewString() *String {\n\treturn &String{}\n}\n\nfunc (c *String) Elem(arrayLevel int, nullable, lc bool) ColumnBasic {\n\tif nullable {\n\t\treturn c.Nullable().elem(arrayLevel, lc)\n\t}\n\tif lc {\n\t\treturn c.LowCardinality().elem(arrayLevel)\n\t}\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/string_base.go",
    "content": "package column\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\ntype stringPos struct {\n\tstart int\n\tend   int\n}\n\n// StringBase is a column of String ClickHouse data type with generic type\ntype StringBase[T ~string] struct {\n\tcolumn\n\tnumRow     int\n\twriterData []byte\n\tvals       []byte\n\tpos        []stringPos\n}\n\n// NewString is a column of String ClickHouse data type with generic type\nfunc NewStringBase[T ~string]() *StringBase[T] {\n\treturn &StringBase[T]{}\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *StringBase[T]) Data() []T {\n\tval := make([]T, len(c.pos))\n\tfor i, v := range c.pos {\n\t\tval[i] = T(c.vals[v.start:v.end])\n\t}\n\treturn val\n}\n\n// Data get all the data in current block as a slice of []byte.\nfunc (c *StringBase[T]) DataBytes() [][]byte {\n\treturn c.ReadBytes(nil)\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *StringBase[T]) Read(value []T) []T {\n\tif cap(value)-len(value) >= len(c.pos) {\n\t\tvalue = (value)[:len(value)+len(c.pos)]\n\t} else {\n\t\tvalue = append(value, make([]T, len(c.pos))...)\n\t}\n\tval := (value)[len(value)-len(c.pos):]\n\tfor i, v := range c.pos {\n\t\tval[i] = T(c.vals[v.start:v.end])\n\t}\n\treturn value\n}\n\n// Read reads all the data as `[]byte` in current block and append to the input.\n//\n// data is valid only in the current block.\nfunc (c *StringBase[T]) ReadBytes(value [][]byte) [][]byte {\n\tif cap(value)-len(value) >= len(c.pos) {\n\t\tvalue = (value)[:len(value)+len(c.pos)]\n\t} else {\n\t\tvalue = append(value, make([][]byte, len(c.pos))...)\n\t}\n\n\tval := (value)[len(value)-len(c.pos):]\n\tfor i, v := range c.pos {\n\t\tval[i] = c.vals[v.start:v.end]\n\t}\n\n\treturn value\n}\n\n// Row return the value of given row.\n//\n// NOTE: Row number start from zero\nfunc (c *StringBase[T]) 
Row(row int) T {\n\treturn T(c.RowBytes(row))\n}\n\n// Row return the value of given row.\n//\n// Data is valid only in the current block.\nfunc (c *StringBase[T]) RowBytes(row int) []byte {\n\tpos := c.pos[row]\n\treturn c.vals[pos.start:pos.end]\n}\n\nfunc (c *StringBase[T]) Each(f func(i int, b []byte) bool) {\n\tfor i, p := range c.pos {\n\t\tif !f(i, c.vals[p.start:p.end]) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *StringBase[T]) appendLen(x int) {\n\ti := 0\n\tfor x >= 0x80 {\n\t\tc.writerData = append(c.writerData, byte(x)|0x80)\n\t\tx >>= 7\n\t\ti++\n\t}\n\tc.writerData = append(c.writerData, byte(x))\n}\n\n// Append value for insert\nfunc (c *StringBase[T]) Append(v ...T) {\n\tfor _, v := range v {\n\t\tc.appendLen(len(v))\n\t\tc.writerData = append(c.writerData, v...)\n\t}\n\tc.numRow += len(v)\n}\n\n// AppendBytes value of bytes for insert\nfunc (c *StringBase[T]) AppendBytes(v ...[]byte) {\n\tfor _, v := range v {\n\t\tc.appendLen(len(v))\n\t\tc.writerData = append(c.writerData, v...)\n\t}\n\tc.numRow += len(v)\n}\n\n// NumRow return number of row for this block\nfunc (c *StringBase[T]) NumRow() int {\n\treturn c.numRow\n}\n\n// Array return a Array type for this column\nfunc (c *StringBase[T]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n\n// Nullable return a nullable type for this column\nfunc (c *StringBase[T]) Nullable() *Nullable[T] {\n\treturn NewNullable[T](c)\n}\n\n// LC return a low cardinality type for this column\nfunc (c *StringBase[T]) LC() *LowCardinality[T] {\n\treturn NewLC[T](c)\n}\n\n// LowCardinality return a low cardinality type for this column\nfunc (c *StringBase[T]) LowCardinality() *LowCardinality[T] {\n\treturn NewLC[T](c)\n}\n\n// Reset all status and buffer data\n//\n// Reading data does not require a reset after each read. 
The reset will be triggered automatically.\n//\n// However, writing data requires a reset after each write.\nfunc (c *StringBase[T]) Reset() {\n\tc.numRow = 0\n\tc.vals = c.vals[:0]\n\tc.pos = c.pos[:0]\n\tc.writerData = c.writerData[:0]\n}\n\n// SetWriteBufferSize set write buffer (number of bytes)\n// this buffer only used for writing.\n// By setting this buffer, you will avoid allocating the memory several times.\nfunc (c *StringBase[T]) SetWriteBufferSize(b int) {\n\tif cap(c.writerData) < b {\n\t\tc.writerData = make([]byte, 0, b)\n\t}\n}\n\n// ReadRaw read raw data from the reader. it runs automatically when you call `ReadColumns()`\nfunc (c *StringBase[T]) ReadRaw(num int, r *readerwriter.Reader) error {\n\tc.Reset()\n\tc.r = r\n\tc.numRow = num\n\n\tvar p stringPos\n\tfor i := 0; i < num; i++ {\n\t\tl, err := c.r.Uvarint()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error read string len: %w\", err)\n\t\t}\n\n\t\tp.start = p.end\n\t\tp.end += int(l)\n\n\t\tc.vals = append(c.vals, make([]byte, l)...)\n\t\tif _, err := c.r.Read(c.vals[p.start:p.end]); err != nil {\n\t\t\treturn fmt.Errorf(\"error read string: %w\", err)\n\t\t}\n\t\tc.pos = append(c.pos, p)\n\t}\n\treturn nil\n}\n\n// HeaderReader reads header data from read\n// it uses internally\nfunc (c *StringBase[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {\n\tc.r = r\n\treturn c.readColumn(readColumn, revision)\n}\n\nfunc (c *StringBase[T]) Validate() error {\n\tchType := helper.FilterSimpleAggregate(c.chType)\n\tif !helper.IsString(chType) {\n\t\treturn ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *StringBase[T]) ColumnType() string {\n\treturn helper.StringStr\n}\n\n// WriteTo write data to ClickHouse.\n// it uses internally\nfunc (c *StringBase[T]) WriteTo(w io.Writer) (int64, error) {\n\tnw, err := w.Write(c.writerData)\n\treturn int64(nw), err\n}\n\n// HeaderWriter writes header data to writer\n// it uses internally\nfunc (c 
*StringBase[T]) HeaderWriter(w *readerwriter.Writer) {\n}\n\nfunc (c *StringBase[T]) appendEmpty() {\n\tvar emptyValue T\n\tc.Append(emptyValue)\n}\n\nfunc (c *StringBase[T]) Elem(arrayLevel int, nullable, lc bool) ColumnBasic {\n\tif nullable {\n\t\treturn c.Nullable().elem(arrayLevel, lc)\n\t}\n\tif lc {\n\t\treturn c.LowCardinality().elem(arrayLevel)\n\t}\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/string_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc TestString(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\ttableName := \"string\"\n\tchType := \"String\"\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t\t    block_id UInt8,\n\t\t\t\t%[1]s %[2]s,\n\t\t\t\t%[1]s_nullable Nullable(%[2]s),\n\t\t\t\t%[1]s_array Array(%[2]s),\n\t\t\t\t%[1]s_array_nullable Array(Nullable(%[2]s)),\n\t\t\t\t%[1]s_lc LowCardinality(%[2]s),\n\t\t\t\t%[1]s_nullable_lc LowCardinality(Nullable(%[2]s)),\n\t\t\t\t%[1]s_array_lc Array(LowCardinality(%[2]s)),\n\t\t\t\t%[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s)))\n\t\t\t) Engine=Memory`, tableName, chType), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tblockID := column.New[uint8]()\n\tcol := column.NewString()\n\tcolNullable := column.NewString().Nullable()\n\tcolArray := column.NewString().Array()\n\tcolNullableArray := column.NewString().Nullable().Array()\n\tcolLC := column.NewString().LC()\n\tcolLCNullable := column.NewString().Nullable().LC()\n\tcolArrayLC := column.NewString().LC().Array()\n\tcolArrayLCNullable := column.NewString().Nullable().LC().Array()\n\tvar colInsert []string\n\tvar colInsertByte [][]byte\n\tvar colNullableInsert []*string\n\tvar colArrayInsert [][]string\n\tvar 
colArrayNullableInsert [][]*string\n\tvar colLCInsert []string\n\tvar colLCNullableInsert []*string\n\tvar colLCArrayInsert [][]string\n\tvar colLCNullableArrayInsert [][]*string\n\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tblockID.Append(uint8(insertN))\n\t\t\tval := fmt.Sprintf(\"string %d\", i)\n\t\t\tval2 := strings.Repeat(val, 50)\n\t\t\tvalArray := []string{val, val2}\n\t\t\tvalArrayNil := []*string{&val, nil}\n\n\t\t\tcol.Append(val)\n\t\t\tcolInsert = append(colInsert, val)\n\t\t\tcolInsertByte = append(colInsertByte, []byte(val))\n\n\t\t\t// example add nullable\n\t\t\tif i%2 == 0 {\n\t\t\t\tcolNullableInsert = append(colNullableInsert, &val)\n\t\t\t\tcolNullable.Append(val)\n\t\t\t\tcolLCNullableInsert = append(colLCNullableInsert, &val)\n\t\t\t\tcolLCNullable.Append(val)\n\t\t\t} else {\n\t\t\t\tcolNullableInsert = append(colNullableInsert, nil)\n\t\t\t\tcolNullable.AppendNil()\n\t\t\t\tcolLCNullableInsert = append(colLCNullableInsert, nil)\n\t\t\t\tcolLCNullable.AppendNil()\n\t\t\t}\n\n\t\t\tcolArray.Append(valArray)\n\t\t\tcolArrayInsert = append(colArrayInsert, valArray)\n\n\t\t\tcolNullableArray.AppendP(valArrayNil)\n\t\t\tcolArrayNullableInsert = append(colArrayNullableInsert, valArrayNil)\n\n\t\t\tcolLCInsert = append(colLCInsert, val)\n\t\t\tcolLC.Append(val)\n\n\t\t\tcolLCArrayInsert = append(colLCArrayInsert, valArray)\n\t\t\tcolArrayLC.Append(valArray)\n\n\t\t\tcolLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil)\n\t\t\tcolArrayLCNullable.AppendP(valArrayNil)\n\t\t}\n\n\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\tblock_id,\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable,\n\t\t\t\t%[1]s_lc,\n\t\t\t\t%[1]s_nullable_lc,\n\t\t\t\t%[1]s_array_lc,\n\t\t\t\t%[1]s_array_lc_nullable\n\t\t\t)\n\t\tVALUES`, 
tableName),\n\t\t\tblockID,\n\t\t\tcol,\n\t\t\tcolNullable,\n\t\t\tcolArray,\n\t\t\tcolNullableArray,\n\t\t\tcolLC,\n\t\t\tcolLCNullable,\n\t\t\tcolArrayLC,\n\t\t\tcolArrayLCNullable,\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// example read all\n\n\tcolRead := column.NewString()\n\tcolNullableRead := column.NewString().Nullable()\n\tcolArrayRead := column.NewString().Array()\n\tcolNullableArrayRead := column.NewString().Nullable().Array()\n\tcolLCRead := column.NewString().LC()\n\tcolLCNullableRead := column.NewString().Nullable().LC()\n\tcolArrayLCRead := column.NewString().LC().Array()\n\tcolArrayLCNullableRead := column.NewString().Nullable().LC().Array()\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s,\n\t\t%[1]s_nullable,\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_lc,\n\t\t%[1]s_nullable_lc,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\tFROM test_%[1]s order by block_id`, tableName),\n\t\tcolRead,\n\t\tcolNullableRead,\n\t\tcolArrayRead,\n\t\tcolNullableArrayRead,\n\t\tcolLCRead,\n\t\tcolLCNullableRead,\n\t\tcolArrayLCRead,\n\t\tcolArrayLCNullableRead)\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar colData []string\n\tvar colDataByte [][]byte\n\tvar colDataByteByData [][]byte\n\tvar colDataByteByRow [][]byte\n\tvar colNullableData []*string\n\tvar colArrayData [][]string\n\tvar colArrayNullableData [][]*string\n\tvar colLCData []string\n\tvar colLCNullableData []*string\n\tvar colLCArrayData [][]string\n\tvar colLCNullableArrayData [][]*string\n\n\tfor selectStmt.Next() {\n\t\trequire.NoError(t, err)\n\n\t\tcolData = colRead.Read(colData)\n\t\tcolDataByte = colRead.ReadBytes(colDataByte)\n\t\tcolDataByteByData = append(colDataByteByData, colRead.DataBytes()...)\n\t\tfor i := 0; i < selectStmt.RowsInBlock(); i++ {\n\t\t\tcolDataByteByRow = append(colDataByteByRow, colRead.RowBytes(i))\n\t\t}\n\t\tcolNullableData = colNullableRead.ReadP(colNullableData)\n\t\tcolArrayData = 
colArrayRead.Read(colArrayData)\n\t\tcolArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData)\n\t\tcolLCData = colLCRead.Read(colLCData)\n\t\tcolLCNullableData = colLCNullableRead.ReadP(colLCNullableData)\n\t\tcolLCArrayData = colArrayLCRead.Read(colLCArrayData)\n\t\tcolLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, colInsert, colData)\n\tassert.Equal(t, colInsertByte, colDataByte)\n\tassert.Equal(t, colInsertByte, colDataByteByData)\n\tassert.Equal(t, colInsertByte, colDataByteByRow)\n\tassert.Equal(t, colNullableInsert, colNullableData)\n\tassert.Equal(t, colArrayInsert, colArrayData)\n\tassert.Equal(t, colArrayNullableInsert, colArrayNullableData)\n\tassert.Equal(t, colLCInsert, colLCData)\n\tassert.Equal(t, colLCNullableInsert, colLCNullableData)\n\tassert.Equal(t, colLCArrayInsert, colLCArrayData)\n\tassert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)\n\n\t// check dynamic column\n\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s,\n\t\t%[1]s_nullable,\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_lc,\n\t\t%[1]s_nullable_lc,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\t\tFROM test_%[1]s order by block_id`, tableName),\n\t)\n\n\trequire.NoError(t, err)\n\tautoColumns := selectStmt.Columns()\n\n\tassert.Len(t, autoColumns, 8)\n\n\tassert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())\n\tassert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())\n\tassert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())\n\tassert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())\n\tassert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType())\n\tassert.Equal(t, colLCNullableRead.ColumnType(), autoColumns[5].ColumnType())\n\tassert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType())\n\tassert.Equal(t, 
colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType())\n\n\tfor selectStmt.Next() {\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n}\n"
  },
  {
    "path": "column/tuple.go",
    "content": "package column\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\n// Tuple is a column of Tuple(T1,T2,.....,Tn) ClickHouse data type\n//\n// this is actually a group of columns. it doesn't have any method for read or write data\n//\n// You MUST use this on Select and Insert methods and for append and read data use the sub columns\ntype Tuple struct {\n\tcolumn\n\tcolumns []ColumnBasic\n}\n\n// NewTuple create a new tuple of Tuple(T1,T2,.....,Tn) ClickHouse data type\n//\n// this is actually a group of columns. it doesn't have any method for read or write data\n//\n// You MUST use this on Select and Insert methods and for append and read data use the sub columns\nfunc NewTuple(columns ...ColumnBasic) *Tuple {\n\tif len(columns) < 1 {\n\t\tpanic(\"tuple must have at least one column\")\n\t}\n\treturn &Tuple{\n\t\tcolumns: columns,\n\t}\n}\n\n// NumRow return number of row for this block\nfunc (c *Tuple) NumRow() int {\n\treturn c.columns[0].NumRow()\n}\n\n// Array return a Array type for this column\nfunc (c *Tuple) Array() *ArrayBase {\n\treturn NewArrayBase(c)\n}\n\n// Reset all statuses and buffered data\n//\n// After each reading, the reading data does not need to be reset. It will be automatically reset.\n//\n// When inserting, buffers are reset only after the operation is successful.\n// If an error occurs, you can safely call insert again.\nfunc (c *Tuple) Reset() {\n\tfor _, col := range c.columns {\n\t\tcol.Reset()\n\t}\n}\n\n// SetWriteBufferSize set write buffer (number of rows)\n// this buffer only used for writing.\n// By setting this buffer, you will avoid allocating the memory several times.\nfunc (c *Tuple) SetWriteBufferSize(row int) {\n\tfor _, col := range c.columns {\n\t\tcol.SetWriteBufferSize(row)\n\t}\n}\n\n// ReadRaw read raw data from the reader. 
it runs automatically\nfunc (c *Tuple) ReadRaw(num int, r *readerwriter.Reader) error {\n\tfor i, col := range c.columns {\n\t\terr := col.ReadRaw(num, r)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"tuple: read column index %d: %w\", i, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// HeaderReader reads header data from reader.\n// it uses internally\nfunc (c *Tuple) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {\n\tc.r = r\n\terr := c.readColumn(readColumn, revision)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, col := range c.columns {\n\t\terr = col.HeaderReader(r, false, revision)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"tuple: read column header index %d: %w\", i, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Column returns the all sub columns\nfunc (c *Tuple) Columns() []ColumnBasic {\n\treturn c.columns\n}\n\nfunc (c *Tuple) Validate() error {\n\tchType := helper.FilterSimpleAggregate(c.chType)\n\tif helper.IsPoint(chType) {\n\t\tchType = helper.PointMainTypeStr\n\t}\n\n\tif !helper.IsTuple(chType) {\n\t\treturn ErrInvalidType{\n\t\t\tcolumn: c,\n\t\t}\n\t}\n\n\tcolumnsTuple, err := helper.TypesInParentheses(chType[helper.LenTupleStr : len(chType)-1])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"tuple invalid types %w\", err)\n\t}\n\tif len(columnsTuple) != len(c.columns) {\n\t\t//nolint:goerr113\n\t\treturn fmt.Errorf(\"columns number for %s (%s) is not equal to tuple columns number: %d != %d\",\n\t\t\tstring(c.name),\n\t\t\tstring(c.Type()),\n\t\t\tlen(columnsTuple),\n\t\t\tlen(c.columns),\n\t\t)\n\t}\n\n\tfor i, col := range c.columns {\n\t\tcol.SetType(columnsTuple[i].ChType)\n\t\tcol.SetName(columnsTuple[i].Name)\n\t\tif col.Validate() != nil {\n\t\t\treturn ErrInvalidType{\n\t\t\t\tcolumn: c,\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Tuple) ColumnType() string {\n\tstr := helper.TupleStr\n\tfor _, col := range c.columns {\n\t\tstr += col.ColumnType() + \",\"\n\t}\n\treturn str[:len(str)-1] + \")\"\n}\n\n// 
WriteTo write data to ClickHouse.\n// it uses internally\nfunc (c *Tuple) WriteTo(w io.Writer) (int64, error) {\n\tvar n int64\n\tfor i, col := range c.columns {\n\t\tnw, err := col.WriteTo(w)\n\t\tif err != nil {\n\t\t\treturn n, fmt.Errorf(\"tuple: write column index %d: %w\", i, err)\n\t\t}\n\t\tn += nw\n\t}\n\treturn n, nil\n}\n\n// HeaderWriter writes header data to writer\n// it uses internally\nfunc (c *Tuple) HeaderWriter(w *readerwriter.Writer) {\n\tfor _, col := range c.columns {\n\t\tcol.HeaderWriter(w)\n\t}\n}\n\nfunc (c *Tuple) Elem(arrayLevel int) ColumnBasic {\n\tif arrayLevel > 0 {\n\t\treturn c.Array().elem(arrayLevel - 1)\n\t}\n\treturn c\n}\n"
  },
  {
    "path": "column/tuple1.go",
    "content": "package column\n\n// Tuple1 is a column of Tuple(T1) ClickHouse data type\ntype Tuple1[T1 any] struct {\n\tTuple\n\tcol1 Column[T1]\n}\n\n// NewTuple1 create a new tuple of Tuple(T1) ClickHouse data type\nfunc NewTuple1[T1 any](\n\tcolumn1 Column[T1],\n) *Tuple1[T1] {\n\treturn &Tuple1[T1]{\n\t\tTuple: Tuple{\n\t\t\tcolumns: []ColumnBasic{\n\t\t\t\tcolumn1,\n\t\t\t},\n\t\t},\n\t\tcol1: column1,\n\t}\n}\n\n// NewNested1 create a new nested of Nested(T1) ClickHouse data type\n//\n// this is actually an alias for NewTuple1(T1).Array()\nfunc NewNested1[T any](\n\tcolumn1 Column[T],\n) *Array[T] {\n\treturn NewTuple1(\n\t\tcolumn1,\n\t).Array()\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Tuple1[T]) Data() []T {\n\treturn c.col1.Data()\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Tuple1[T]) Read(value []T) []T {\n\treturn c.col1.Read(value)\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Tuple1[T]) Row(row int) T {\n\treturn c.col1.Row(row)\n}\n\n// Append value for insert\nfunc (c *Tuple1[T]) Append(v ...T) {\n\tc.col1.Append(v...)\n}\n\n// Array return a Array type for this column\nfunc (c *Tuple1[T]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n"
  },
  {
    "path": "column/tuple2_gen.go",
    "content": "package column\n\nimport (\n\t\"unsafe\"\n)\n\ntype tuple2Value[T1, T2 any] struct {\n\tCol1 T1\n\tCol2 T2\n}\n\n// Tuple2 is a column of Tuple(T1, T2) ClickHouse data type\ntype Tuple2[T ~struct {\n\tCol1 T1\n\tCol2 T2\n}, T1, T2 any] struct {\n\tTuple\n\tcol1 Column[T1]\n\tcol2 Column[T2]\n}\n\n// NewTuple2 create a new tuple of Tuple(T1, T2) ClickHouse data type\nfunc NewTuple2[T ~struct {\n\tCol1 T1\n\tCol2 T2\n}, T1, T2 any](\n\tcolumn1 Column[T1],\n\tcolumn2 Column[T2],\n) *Tuple2[T, T1, T2] {\n\treturn &Tuple2[T, T1, T2]{\n\t\tTuple: Tuple{\n\t\t\tcolumns: []ColumnBasic{\n\t\t\t\tcolumn1,\n\t\t\t\tcolumn2,\n\t\t\t},\n\t\t},\n\t\tcol1: column1,\n\t\tcol2: column2,\n\t}\n}\n\n// NewNested2 create a new nested of Nested(T1, T2) ClickHouse data type\n//\n// this is actually an alias for NewTuple2(T1, T2).Array()\nfunc NewNested2[T ~struct {\n\tCol1 T1\n\tCol2 T2\n}, T1, T2 any](\n\tcolumn1 Column[T1],\n\tcolumn2 Column[T2],\n) *Array[T] {\n\treturn NewTuple2[T](\n\t\tcolumn1,\n\t\tcolumn2,\n\t).Array()\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Tuple2[T, T1, T2]) Data() []T {\n\tval := make([]T, c.NumRow())\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i] = T(tuple2Value[T1, T2]{\n\t\t\tCol1: c.col1.Row(i),\n\t\t\tCol2: c.col2.Row(i),\n\t\t})\n\t}\n\treturn val\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Tuple2[T, T1, T2]) Read(value []T) []T {\n\tvalTuple := *(*[]tuple2Value[T1, T2])(unsafe.Pointer(&value))\n\tif cap(valTuple)-len(valTuple) >= c.NumRow() {\n\t\tvalTuple = valTuple[:len(value)+c.NumRow()]\n\t} else {\n\t\tvalTuple = append(valTuple, make([]tuple2Value[T1, T2], c.NumRow())...)\n\t}\n\n\tval := valTuple[len(valTuple)-c.NumRow():]\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i].Col1 = c.col1.Row(i)\n\t\tval[i].Col2 = c.col2.Row(i)\n\t}\n\treturn *(*[]T)(unsafe.Pointer(&valTuple))\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc 
(c *Tuple2[T, T1, T2]) Row(row int) T {\n\treturn T(tuple2Value[T1, T2]{\n\t\tCol1: c.col1.Row(row),\n\t\tCol2: c.col2.Row(row),\n\t})\n}\n\n// Append value for insert\nfunc (c *Tuple2[T, T1, T2]) Append(v ...T) {\n\tfor _, v := range v {\n\t\tt := tuple2Value[T1, T2](v)\n\t\tc.col1.Append(t.Col1)\n\t\tc.col2.Append(t.Col2)\n\t}\n}\n\n// Array return a Array type for this column\nfunc (c *Tuple2[T, T1, T2]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n"
  },
  {
    "path": "column/tuple3_gen.go",
    "content": "package column\n\nimport (\n\t\"unsafe\"\n)\n\ntype tuple3Value[T1, T2, T3 any] struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n}\n\n// Tuple3 is a column of Tuple(T1, T2, T3) ClickHouse data type\ntype Tuple3[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n}, T1, T2, T3 any] struct {\n\tTuple\n\tcol1 Column[T1]\n\tcol2 Column[T2]\n\tcol3 Column[T3]\n}\n\n// NewTuple3 create a new tuple of Tuple(T1, T2, T3) ClickHouse data type\nfunc NewTuple3[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n}, T1, T2, T3 any](\n\tcolumn1 Column[T1],\n\tcolumn2 Column[T2],\n\tcolumn3 Column[T3],\n) *Tuple3[T, T1, T2, T3] {\n\treturn &Tuple3[T, T1, T2, T3]{\n\t\tTuple: Tuple{\n\t\t\tcolumns: []ColumnBasic{\n\t\t\t\tcolumn1,\n\t\t\t\tcolumn2,\n\t\t\t\tcolumn3,\n\t\t\t},\n\t\t},\n\t\tcol1: column1,\n\t\tcol2: column2,\n\t\tcol3: column3,\n\t}\n}\n\n// NewNested3 create a new nested of Nested(T1, T2, T3) ClickHouse data type\n//\n// this is actually an alias for NewTuple3(T1, T2, T3).Array()\nfunc NewNested3[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n}, T1, T2, T3 any](\n\tcolumn1 Column[T1],\n\tcolumn2 Column[T2],\n\tcolumn3 Column[T3],\n) *Array[T] {\n\treturn NewTuple3[T](\n\t\tcolumn1,\n\t\tcolumn2,\n\t\tcolumn3,\n\t).Array()\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Tuple3[T, T1, T2, T3]) Data() []T {\n\tval := make([]T, c.NumRow())\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i] = T(tuple3Value[T1, T2, T3]{\n\t\t\tCol1: c.col1.Row(i),\n\t\t\tCol2: c.col2.Row(i),\n\t\t\tCol3: c.col3.Row(i),\n\t\t})\n\t}\n\treturn val\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Tuple3[T, T1, T2, T3]) Read(value []T) []T {\n\tvalTuple := *(*[]tuple3Value[T1, T2, T3])(unsafe.Pointer(&value))\n\tif cap(valTuple)-len(valTuple) >= c.NumRow() {\n\t\tvalTuple = valTuple[:len(value)+c.NumRow()]\n\t} else {\n\t\tvalTuple = append(valTuple, make([]tuple3Value[T1, T2, T3], c.NumRow())...)\n\t}\n\n\tval := 
valTuple[len(valTuple)-c.NumRow():]\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i].Col1 = c.col1.Row(i)\n\t\tval[i].Col2 = c.col2.Row(i)\n\t\tval[i].Col3 = c.col3.Row(i)\n\t}\n\treturn *(*[]T)(unsafe.Pointer(&valTuple))\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Tuple3[T, T1, T2, T3]) Row(row int) T {\n\treturn T(tuple3Value[T1, T2, T3]{\n\t\tCol1: c.col1.Row(row),\n\t\tCol2: c.col2.Row(row),\n\t\tCol3: c.col3.Row(row),\n\t})\n}\n\n// Append value for insert\nfunc (c *Tuple3[T, T1, T2, T3]) Append(v ...T) {\n\tfor _, v := range v {\n\t\tt := tuple3Value[T1, T2, T3](v)\n\t\tc.col1.Append(t.Col1)\n\t\tc.col2.Append(t.Col2)\n\t\tc.col3.Append(t.Col3)\n\t}\n}\n\n// Array return a Array type for this column\nfunc (c *Tuple3[T, T1, T2, T3]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n"
  },
  {
    "path": "column/tuple4_gen.go",
    "content": "package column\n\nimport (\n\t\"unsafe\"\n)\n\ntype tuple4Value[T1, T2, T3, T4 any] struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n}\n\n// Tuple4 is a column of Tuple(T1, T2, T3, T4) ClickHouse data type\ntype Tuple4[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n}, T1, T2, T3, T4 any] struct {\n\tTuple\n\tcol1 Column[T1]\n\tcol2 Column[T2]\n\tcol3 Column[T3]\n\tcol4 Column[T4]\n}\n\n// NewTuple4 create a new tuple of Tuple(T1, T2, T3, T4) ClickHouse data type\nfunc NewTuple4[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n}, T1, T2, T3, T4 any](\n\tcolumn1 Column[T1],\n\tcolumn2 Column[T2],\n\tcolumn3 Column[T3],\n\tcolumn4 Column[T4],\n) *Tuple4[T, T1, T2, T3, T4] {\n\treturn &Tuple4[T, T1, T2, T3, T4]{\n\t\tTuple: Tuple{\n\t\t\tcolumns: []ColumnBasic{\n\t\t\t\tcolumn1,\n\t\t\t\tcolumn2,\n\t\t\t\tcolumn3,\n\t\t\t\tcolumn4,\n\t\t\t},\n\t\t},\n\t\tcol1: column1,\n\t\tcol2: column2,\n\t\tcol3: column3,\n\t\tcol4: column4,\n\t}\n}\n\n// NewNested4 create a new nested of Nested(T1, T2, T3, T4) ClickHouse data type\n//\n// this is actually an alias for NewTuple4(T1, T2, T3, T4).Array()\nfunc NewNested4[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n}, T1, T2, T3, T4 any](\n\tcolumn1 Column[T1],\n\tcolumn2 Column[T2],\n\tcolumn3 Column[T3],\n\tcolumn4 Column[T4],\n) *Array[T] {\n\treturn NewTuple4[T](\n\t\tcolumn1,\n\t\tcolumn2,\n\t\tcolumn3,\n\t\tcolumn4,\n\t).Array()\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Tuple4[T, T1, T2, T3, T4]) Data() []T {\n\tval := make([]T, c.NumRow())\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i] = T(tuple4Value[T1, T2, T3, T4]{\n\t\t\tCol1: c.col1.Row(i),\n\t\t\tCol2: c.col2.Row(i),\n\t\t\tCol3: c.col3.Row(i),\n\t\t\tCol4: c.col4.Row(i),\n\t\t})\n\t}\n\treturn val\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Tuple4[T, T1, T2, T3, T4]) Read(value []T) []T {\n\tvalTuple := *(*[]tuple4Value[T1, T2, T3, 
T4])(unsafe.Pointer(&value))\n\tif cap(valTuple)-len(valTuple) >= c.NumRow() {\n\t\tvalTuple = valTuple[:len(value)+c.NumRow()]\n\t} else {\n\t\tvalTuple = append(valTuple, make([]tuple4Value[T1, T2, T3, T4], c.NumRow())...)\n\t}\n\n\tval := valTuple[len(valTuple)-c.NumRow():]\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i].Col1 = c.col1.Row(i)\n\t\tval[i].Col2 = c.col2.Row(i)\n\t\tval[i].Col3 = c.col3.Row(i)\n\t\tval[i].Col4 = c.col4.Row(i)\n\t}\n\treturn *(*[]T)(unsafe.Pointer(&valTuple))\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Tuple4[T, T1, T2, T3, T4]) Row(row int) T {\n\treturn T(tuple4Value[T1, T2, T3, T4]{\n\t\tCol1: c.col1.Row(row),\n\t\tCol2: c.col2.Row(row),\n\t\tCol3: c.col3.Row(row),\n\t\tCol4: c.col4.Row(row),\n\t})\n}\n\n// Append value for insert\nfunc (c *Tuple4[T, T1, T2, T3, T4]) Append(v ...T) {\n\tfor _, v := range v {\n\t\tt := tuple4Value[T1, T2, T3, T4](v)\n\t\tc.col1.Append(t.Col1)\n\t\tc.col2.Append(t.Col2)\n\t\tc.col3.Append(t.Col3)\n\t\tc.col4.Append(t.Col4)\n\t}\n}\n\n// Array return a Array type for this column\nfunc (c *Tuple4[T, T1, T2, T3, T4]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n"
  },
  {
    "path": "column/tuple5_gen.go",
    "content": "package column\n\nimport (\n\t\"unsafe\"\n)\n\ntype tuple5Value[T1, T2, T3, T4, T5 any] struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n\tCol5 T5\n}\n\n// Tuple5 is a column of Tuple(T1, T2, T3, T4, T5) ClickHouse data type\ntype Tuple5[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n\tCol5 T5\n}, T1, T2, T3, T4, T5 any] struct {\n\tTuple\n\tcol1 Column[T1]\n\tcol2 Column[T2]\n\tcol3 Column[T3]\n\tcol4 Column[T4]\n\tcol5 Column[T5]\n}\n\n// NewTuple5 create a new tuple of Tuple(T1, T2, T3, T4, T5) ClickHouse data type\nfunc NewTuple5[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n\tCol5 T5\n}, T1, T2, T3, T4, T5 any](\n\tcolumn1 Column[T1],\n\tcolumn2 Column[T2],\n\tcolumn3 Column[T3],\n\tcolumn4 Column[T4],\n\tcolumn5 Column[T5],\n) *Tuple5[T, T1, T2, T3, T4, T5] {\n\treturn &Tuple5[T, T1, T2, T3, T4, T5]{\n\t\tTuple: Tuple{\n\t\t\tcolumns: []ColumnBasic{\n\t\t\t\tcolumn1,\n\t\t\t\tcolumn2,\n\t\t\t\tcolumn3,\n\t\t\t\tcolumn4,\n\t\t\t\tcolumn5,\n\t\t\t},\n\t\t},\n\t\tcol1: column1,\n\t\tcol2: column2,\n\t\tcol3: column3,\n\t\tcol4: column4,\n\t\tcol5: column5,\n\t}\n}\n\n// NewNested5 create a new nested of Nested(T1, T2, T3, T4, T5) ClickHouse data type\n//\n// this is actually an alias for NewTuple5(T1, T2, T3, T4, T5).Array()\nfunc NewNested5[T ~struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n\tCol5 T5\n}, T1, T2, T3, T4, T5 any](\n\tcolumn1 Column[T1],\n\tcolumn2 Column[T2],\n\tcolumn3 Column[T3],\n\tcolumn4 Column[T4],\n\tcolumn5 Column[T5],\n) *Array[T] {\n\treturn NewTuple5[T](\n\t\tcolumn1,\n\t\tcolumn2,\n\t\tcolumn3,\n\t\tcolumn4,\n\t\tcolumn5,\n\t).Array()\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Tuple5[T, T1, T2, T3, T4, T5]) Data() []T {\n\tval := make([]T, c.NumRow())\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i] = T(tuple5Value[T1, T2, T3, T4, T5]{\n\t\t\tCol1: c.col1.Row(i),\n\t\t\tCol2: c.col2.Row(i),\n\t\t\tCol3: c.col3.Row(i),\n\t\t\tCol4: c.col4.Row(i),\n\t\t\tCol5: 
c.col5.Row(i),\n\t\t})\n\t}\n\treturn val\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Tuple5[T, T1, T2, T3, T4, T5]) Read(value []T) []T {\n\tvalTuple := *(*[]tuple5Value[T1, T2, T3, T4, T5])(unsafe.Pointer(&value))\n\tif cap(valTuple)-len(valTuple) >= c.NumRow() {\n\t\tvalTuple = valTuple[:len(value)+c.NumRow()]\n\t} else {\n\t\tvalTuple = append(valTuple, make([]tuple5Value[T1, T2, T3, T4, T5], c.NumRow())...)\n\t}\n\n\tval := valTuple[len(valTuple)-c.NumRow():]\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i].Col1 = c.col1.Row(i)\n\t\tval[i].Col2 = c.col2.Row(i)\n\t\tval[i].Col3 = c.col3.Row(i)\n\t\tval[i].Col4 = c.col4.Row(i)\n\t\tval[i].Col5 = c.col5.Row(i)\n\t}\n\treturn *(*[]T)(unsafe.Pointer(&valTuple))\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Tuple5[T, T1, T2, T3, T4, T5]) Row(row int) T {\n\treturn T(tuple5Value[T1, T2, T3, T4, T5]{\n\t\tCol1: c.col1.Row(row),\n\t\tCol2: c.col2.Row(row),\n\t\tCol3: c.col3.Row(row),\n\t\tCol4: c.col4.Row(row),\n\t\tCol5: c.col5.Row(row),\n\t})\n}\n\n// Append value for insert\nfunc (c *Tuple5[T, T1, T2, T3, T4, T5]) Append(v ...T) {\n\tfor _, v := range v {\n\t\tt := tuple5Value[T1, T2, T3, T4, T5](v)\n\t\tc.col1.Append(t.Col1)\n\t\tc.col2.Append(t.Col2)\n\t\tc.col3.Append(t.Col3)\n\t\tc.col4.Append(t.Col4)\n\t\tc.col5.Append(t.Col5)\n\t}\n}\n\n// Array return a Array type for this column\nfunc (c *Tuple5[T, T1, T2, T3, T4, T5]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n"
  },
  {
    "path": "column/tuple_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/types\"\n)\n\nfunc TestTuple(t *testing.T) {\n\ttableName := \"tuple\"\n\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t%[1]s Tuple(String, Int64),\n\t\t%[1]s_nullable Tuple(Nullable(String), Nullable(Int64)),\n\t\t%[1]s_array Tuple(Array(String),Array(Int64)),\n\t\t%[1]s_array_nullable Tuple(Array(Nullable(String)),Array(Nullable(Int64))),\n\t\t%[1]s_lc Tuple(LowCardinality(String),LowCardinality(Int64)),\n\t\t%[1]s_nullable_lc Tuple(LowCardinality(Nullable(String)),LowCardinality(Nullable(Int64))),\n\t\t%[1]s_array_lc Tuple(Array(LowCardinality(String)),Array(LowCardinality(Int64))),\n\t\t%[1]s_array_lc_nullable Tuple(Array(LowCardinality(Nullable(String))),Array(LowCardinality(Nullable(Int64)))),\n\t\t%[1]s_array_array_tuple Array(Array(Tuple(String, Int64)))\n\t\t\t) Engine=Memory`, tableName), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcolString := column.NewString()\n\tcolInt := column.New[int64]()\n\tcol := column.NewTuple(colString, colInt)\n\n\tcolNullableString := column.NewString().Nullable()\n\tcolNullableInt := column.New[int64]().Nullable()\n\tcolNullable := column.NewTuple(colNullableString, 
colNullableInt)\n\n\tcolArrayString := column.NewString().Array()\n\tcolArrayInt := column.New[int64]().Array()\n\tcolArray := column.NewTuple(colArrayString, colArrayInt)\n\n\tcolNullableArrayString := column.NewString().Nullable().Array()\n\tcolNullableArrayInt := column.New[int64]().Nullable().Array()\n\tcolNullableArray := column.NewTuple(colNullableArrayString, colNullableArrayInt)\n\n\tcolLCString := column.NewString().LowCardinality()\n\tcolLCInt := column.New[int64]().LowCardinality()\n\tcolLC := column.NewTuple(colLCString, colLCInt)\n\n\tcolLCNullableString := column.NewString().Nullable().LowCardinality()\n\tcolLCNullableInt := column.New[int64]().Nullable().LowCardinality()\n\tcolLCNullable := column.NewTuple(colLCNullableString, colLCNullableInt)\n\n\tcolArrayLCString := column.NewString().LowCardinality().Array()\n\tcolArrayLCInt := column.New[int64]().LowCardinality().Array()\n\tcolArrayLC := column.NewTuple(colArrayLCString, colArrayLCInt)\n\n\tcolArrayLCNullableString := column.NewString().Nullable().LowCardinality().Array()\n\tcolArrayLCNullableInt := column.New[int64]().Nullable().LowCardinality().Array()\n\tcolArrayLCNullable := column.NewTuple(colArrayLCNullableString, colArrayLCNullableInt)\n\n\tcolArrayArrayTupleString := column.NewString()\n\tcolArrayArrayTupleInt := column.New[int64]()\n\tcolArrayArrayTuple := column.NewTuple(colArrayArrayTupleString, colArrayArrayTupleInt).Array().Array()\n\n\tvar colStringInsert []string\n\tvar colIntInsert []int64\n\tvar colNullableStringInsert []*string\n\tvar colNullableIntInsert []*int64\n\tvar colArrayStringInsert [][]string\n\tvar colArrayIntInsert [][]int64\n\tvar colArrayNullableStringInsert [][]*string\n\tvar colArrayNullableIntInsert [][]*int64\n\tvar colLCStringInsert []string\n\tvar colLCIntInsert []int64\n\tvar colLCNullableStringInsert []*string\n\tvar colLCNullableIntInsert []*int64\n\tvar colLCArrayStringInsert [][]string\n\tvar colLCArrayIntInsert [][]int64\n\tvar 
colLCNullableArrayStringInsert [][]*string\n\tvar colLCNullableArrayIntInsert [][]*int64\n\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tvalString := fmt.Sprintf(\"string %d\", i)\n\t\t\tvalInt := int64(i)\n\t\t\tval2String := fmt.Sprintf(\"string %d\", i+1)\n\t\t\tval2Int := int64(i + 1)\n\t\t\tvalArrayString := []string{valString, val2String}\n\t\t\tvalArrayInt := []int64{valInt, val2Int}\n\t\t\tvalArrayNilString := []*string{&valString, nil}\n\t\t\tvalArrayNilInt := []*int64{&valInt, nil}\n\n\t\t\tcolStringInsert = append(colStringInsert, valString)\n\t\t\tcolIntInsert = append(colIntInsert, valInt)\n\n\t\t\tcolString.Append(valString)\n\t\t\tcolInt.Append(valInt)\n\n\t\t\t// example add nullable\n\t\t\tif i%2 == 0 {\n\t\t\t\tcolNullableStringInsert = append(colNullableStringInsert, &valString)\n\t\t\t\tcolNullableIntInsert = append(colNullableIntInsert, &valInt)\n\t\t\t\tcolNullableString.Append(valString)\n\t\t\t\tcolNullableInt.Append(valInt)\n\t\t\t\tcolLCNullableStringInsert = append(colLCNullableStringInsert, &valString)\n\t\t\t\tcolLCNullableIntInsert = append(colLCNullableIntInsert, &valInt)\n\t\t\t\tcolLCNullableString.Append(valString)\n\t\t\t\tcolLCNullableInt.Append(valInt)\n\t\t\t} else {\n\t\t\t\tcolNullableStringInsert = append(colNullableStringInsert, nil)\n\t\t\t\tcolNullableIntInsert = append(colNullableIntInsert, nil)\n\t\t\t\tcolNullableString.AppendNil()\n\t\t\t\tcolNullableInt.AppendNil()\n\t\t\t\tcolLCNullableStringInsert = append(colLCNullableStringInsert, nil)\n\t\t\t\tcolLCNullableIntInsert = append(colLCNullableIntInsert, nil)\n\t\t\t\tcolLCNullableString.AppendNil()\n\t\t\t\tcolLCNullableInt.AppendNil()\n\t\t\t}\n\n\t\t\tcolArrayString.Append(valArrayString)\n\t\t\tcolArrayInt.Append(valArrayInt)\n\t\t\tcolArrayStringInsert = append(colArrayStringInsert, valArrayString)\n\t\t\tcolArrayIntInsert = append(colArrayIntInsert, 
valArrayInt)\n\n\t\t\tcolNullableArrayString.AppendP(valArrayNilString)\n\t\t\tcolNullableArrayInt.AppendP(valArrayNilInt)\n\t\t\tcolArrayNullableStringInsert = append(colArrayNullableStringInsert, valArrayNilString)\n\t\t\tcolArrayNullableIntInsert = append(colArrayNullableIntInsert, valArrayNilInt)\n\n\t\t\tcolLCStringInsert = append(colLCStringInsert, valString)\n\t\t\tcolLCIntInsert = append(colLCIntInsert, valInt)\n\t\t\tcolLCString.Append(valString)\n\t\t\tcolLCInt.Append(valInt)\n\n\t\t\tcolLCArrayStringInsert = append(colLCArrayStringInsert, valArrayString)\n\t\t\tcolLCArrayIntInsert = append(colLCArrayIntInsert, valArrayInt)\n\t\t\tcolArrayLCString.Append(valArrayString)\n\t\t\tcolArrayLCInt.Append(valArrayInt)\n\n\t\t\tcolLCNullableArrayStringInsert = append(colLCNullableArrayStringInsert, valArrayNilString)\n\t\t\tcolLCNullableArrayIntInsert = append(colLCNullableArrayIntInsert, valArrayNilInt)\n\t\t\tcolArrayLCNullableString.AppendP(valArrayNilString)\n\t\t\tcolArrayLCNullableInt.AppendP(valArrayNilInt)\n\n\t\t\tcolArrayArrayTuple.AppendLen(1)\n\t\t\tcolArrayArrayTuple.Column().(*column.ArrayBase).AppendLen(2)\n\t\t\tcolArrayArrayTupleString.Append(valString, val2String)\n\t\t\tcolArrayArrayTupleInt.Append(valInt, val2Int)\n\t\t}\n\n\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\t%[1]s,\n\t\t\t\t%[1]s_nullable,\n\t\t\t\t%[1]s_array,\n\t\t\t\t%[1]s_array_nullable,\n\t\t\t\t%[1]s_lc,\n\t\t\t\t%[1]s_nullable_lc,\n\t\t\t\t%[1]s_array_lc,\n\t\t\t\t%[1]s_array_lc_nullable,\n\t\t\t\t%[1]s_array_array_tuple\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\tcol,\n\t\t\tcolNullable,\n\t\t\tcolArray,\n\t\t\tcolNullableArray,\n\t\t\tcolLC,\n\t\t\tcolLCNullable,\n\t\t\tcolArrayLC,\n\t\t\tcolArrayLCNullable,\n\t\t\tcolArrayArrayTuple,\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// example read all\n\n\tcolStringRead := column.NewString()\n\tcolIntRead := column.New[int64]()\n\tcolRead := column.NewTuple(colStringRead, 
colIntRead)\n\n\tcolNullableStringRead := column.NewString().Nullable()\n\tcolNullableIntRead := column.New[int64]().Nullable()\n\tcolNullableRead := column.NewTuple(colNullableStringRead, colNullableIntRead)\n\n\tcolArrayStringRead := column.NewString().Array()\n\tcolArrayIntRead := column.New[int64]().Array()\n\tcolArrayRead := column.NewTuple(colArrayStringRead, colArrayIntRead)\n\n\tcolNullableArrayStringRead := column.NewString().Nullable().Array()\n\tcolNullableArrayIntRead := column.New[int64]().Nullable().Array()\n\tcolNullableArrayRead := column.NewTuple(colNullableArrayStringRead, colNullableArrayIntRead)\n\n\tcolLCStringRead := column.NewString().LowCardinality()\n\tcolLCIntRead := column.New[int64]().LowCardinality()\n\tcolLCRead := column.NewTuple(colLCStringRead, colLCIntRead)\n\n\tcolLCNullableStringRead := column.NewString().Nullable().LowCardinality()\n\tcolLCNullableIntRead := column.New[int64]().Nullable().LowCardinality()\n\tcolLCNullableRead := column.NewTuple(colLCNullableStringRead, colLCNullableIntRead)\n\n\tcolArrayLCStringRead := column.NewString().LowCardinality().Array()\n\tcolArrayLCIntRead := column.New[int64]().LowCardinality().Array()\n\tcolArrayLCRead := column.NewTuple(colArrayLCStringRead, colArrayLCIntRead)\n\n\tcolArrayLCNullableStringRead := column.NewString().Nullable().LowCardinality().Array()\n\tcolArrayLCNullableIntRead := column.New[int64]().Nullable().LowCardinality().Array()\n\tcolArrayLCNullableRead := column.NewTuple(colArrayLCNullableStringRead, colArrayLCNullableIntRead)\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t%[1]s,\n\t%[1]s_nullable,\n\t%[1]s_array,\n\t%[1]s_array_nullable,\n\t%[1]s_lc,\n\t%[1]s_nullable_lc,\n\t%[1]s_array_lc,\n\t%[1]s_array_lc_nullable\n\tFROM test_%[1]s`, 
tableName),\n\t\tcolRead,\n\t\tcolNullableRead,\n\t\tcolArrayRead,\n\t\tcolNullableArrayRead,\n\t\tcolLCRead,\n\t\tcolLCNullableRead,\n\t\tcolArrayLCRead,\n\t\tcolArrayLCNullableRead)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar colStringData []string\n\tvar colIntData []int64\n\tvar colNullableStringData []*string\n\tvar colNullableIntData []*int64\n\tvar colArrayStringData [][]string\n\tvar colArrayIntData [][]int64\n\tvar colArrayNullableStringData [][]*string\n\tvar colArrayNullableIntData [][]*int64\n\tvar colLCStringData []string\n\tvar colLCIntData []int64\n\tvar colLCNullableStringData []*string\n\tvar colLCNullableIntData []*int64\n\tvar colLCArrayStringData [][]string\n\tvar colLCArrayIntData [][]int64\n\tvar colLCNullableArrayStringData [][]*string\n\tvar colLCNullableArrayIntData [][]*int64\n\n\tfor selectStmt.Next() {\n\t\tcolStringData = colStringRead.Read(colStringData)\n\t\tcolNullableStringData = colNullableStringRead.ReadP(colNullableStringData)\n\t\tcolArrayStringData = colArrayStringRead.Read(colArrayStringData)\n\t\tcolArrayNullableStringData = colNullableArrayStringRead.ReadP(colArrayNullableStringData)\n\t\tcolLCStringData = colLCStringRead.Read(colLCStringData)\n\t\tcolLCNullableStringData = colLCNullableStringRead.ReadP(colLCNullableStringData)\n\t\tcolLCArrayStringData = colArrayLCStringRead.Read(colLCArrayStringData)\n\t\tcolLCNullableArrayStringData = colArrayLCNullableStringRead.ReadP(colLCNullableArrayStringData)\n\n\t\tcolIntData = colIntRead.Read(colIntData)\n\t\tcolNullableIntData = colNullableIntRead.ReadP(colNullableIntData)\n\t\tcolArrayIntData = colArrayIntRead.Read(colArrayIntData)\n\t\tcolArrayNullableIntData = colNullableArrayIntRead.ReadP(colArrayNullableIntData)\n\t\tcolLCIntData = colLCIntRead.Read(colLCIntData)\n\t\tcolLCNullableIntData = colLCNullableIntRead.ReadP(colLCNullableIntData)\n\t\tcolLCArrayIntData = colArrayLCIntRead.Read(colLCArrayIntData)\n\t\tcolLCNullableArrayIntData = 
colArrayLCNullableIntRead.ReadP(colLCNullableArrayIntData)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, colStringInsert, colStringData)\n\tassert.Equal(t, colIntInsert, colIntData)\n\tassert.Equal(t, colNullableStringInsert, colNullableStringData)\n\tassert.Equal(t, colNullableIntInsert, colNullableIntData)\n\tassert.Equal(t, colArrayStringInsert, colArrayStringData)\n\tassert.Equal(t, colArrayIntInsert, colArrayIntData)\n\tassert.Equal(t, colArrayNullableStringInsert, colArrayNullableStringData)\n\tassert.Equal(t, colArrayNullableIntInsert, colArrayNullableIntData)\n\tassert.Equal(t, colLCStringInsert, colLCStringData)\n\tassert.Equal(t, colLCIntInsert, colLCIntData)\n\tassert.Equal(t, colLCNullableStringInsert, colLCNullableStringData)\n\tassert.Equal(t, colLCNullableIntInsert, colLCNullableIntData)\n\tassert.Equal(t, colLCArrayStringInsert, colLCArrayStringData)\n\tassert.Equal(t, colLCArrayIntInsert, colLCArrayIntData)\n\tassert.Equal(t, colLCNullableArrayStringInsert, colLCNullableArrayStringData)\n\tassert.Equal(t, colLCNullableArrayIntInsert, colLCNullableArrayIntData)\n\n\t// check dynamic column\n\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s,\n\t\t%[1]s_nullable,\n\t\t%[1]s_array,\n\t\t%[1]s_array_nullable,\n\t\t%[1]s_lc,\n\t\t%[1]s_nullable_lc,\n\t\t%[1]s_array_lc,\n\t\t%[1]s_array_lc_nullable\n\t\tFROM test_%[1]s`, tableName),\n\t)\n\n\trequire.NoError(t, err)\n\tautoColumns := selectStmt.Columns()\n\n\tassert.Len(t, autoColumns, 8)\n\n\tassert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())\n\tassert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())\n\tassert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())\n\tassert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())\n\tassert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType())\n\tassert.Equal(t, colLCNullableRead.ColumnType(), 
autoColumns[5].ColumnType())\n\tassert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType())\n\tassert.Equal(t, colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType())\n\n\tfor selectStmt.Next() {\n\t}\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n}\n\nfunc TestTupleNoColumn(t *testing.T) {\n\tassert.Panics(t, func() { column.NewTuple() })\n}\n\nfunc TestGeo(t *testing.T) {\n\ttableName := \"geo\"\n\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"1\",\n\t\t},\n\t\t{\n\t\t\tName:  \"allow_experimental_geo_types\",\n\t\t\tValue: \"1\",\n\t\t},\n\t}\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\tpoint Point ,\n\t\tring Ring ,\n\t\tpolygon Polygon ,\n\t\tmultiPolygon MultiPolygon\n\t\t) Engine=Memory`, tableName), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcolPoint := column.NewPoint()\n\tcolRing := column.NewPoint().Array()\n\tcolPolygon := column.NewPoint().Array().Array()\n\tcolMultiPolygon := column.NewPoint().Array().Array().Array()\n\n\tcolPoint.SetWriteBufferSize(20)\n\tcolRing.SetWriteBufferSize(20)\n\tcolPolygon.SetWriteBufferSize(20)\n\tcolMultiPolygon.SetWriteBufferSize(20)\n\n\tvar pointInsert []types.Point\n\tvar ringInsert [][]types.Point\n\tvar polygonInsert [][][]types.Point\n\tvar multiPolygonInsert [][][][]types.Point\n\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tpointValue := types.Point{\n\t\t\t\tCol1: float64(i),\n\t\t\t\tCol2: float64(i + 1),\n\t\t\t}\n\t\t\tringValue := 
[]types.Point{\n\t\t\t\t{\n\t\t\t\t\tCol1: float64(i),\n\t\t\t\t\tCol2: float64(i + 1),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tCol1: float64(i + 2),\n\t\t\t\t\tCol2: float64(i + 3),\n\t\t\t\t},\n\t\t\t}\n\t\t\tpolygonValue := [][]types.Point{\n\t\t\t\t{\n\t\t\t\t\t{\n\t\t\t\t\t\tCol1: float64(i),\n\t\t\t\t\t\tCol2: float64(i + 1),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tCol1: float64(i + 2),\n\t\t\t\t\t\tCol2: float64(i + 3),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t{\n\t\t\t\t\t\tCol1: float64(i),\n\t\t\t\t\t\tCol2: float64(i + 1),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tCol1: float64(i + 2),\n\t\t\t\t\t\tCol2: float64(i + 3),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\tmultiPolygonValue := [][][]types.Point{\n\t\t\t\t{\n\t\t\t\t\t{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tCol1: float64(i),\n\t\t\t\t\t\t\tCol2: float64(i + 1),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tCol1: float64(i + 2),\n\t\t\t\t\t\t\tCol2: float64(i + 3),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tCol1: float64(i),\n\t\t\t\t\t\t\tCol2: float64(i + 1),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tCol1: float64(i + 2),\n\t\t\t\t\t\t\tCol2: float64(i + 3),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\tcolPoint.Append(pointValue)\n\t\t\tpointInsert = append(pointInsert, pointValue)\n\t\t\tcolRing.Append(ringValue)\n\t\t\tringInsert = append(ringInsert, ringValue)\n\t\t\tcolPolygon.Append(polygonValue)\n\t\t\tpolygonInsert = append(polygonInsert, polygonValue)\n\t\t\tcolMultiPolygon.Append(multiPolygonValue)\n\t\t\tmultiPolygonInsert = append(multiPolygonInsert, multiPolygonValue)\n\t\t}\n\n\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s (\n\t\t\t\tpoint,\n\t\t\t\tring,\n\t\t\t\tpolygon,\n\t\t\t\tmultiPolygon\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\tcolPoint,\n\t\t\tcolRing,\n\t\t\tcolPolygon,\n\t\t\tcolMultiPolygon,\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// example read all\n\n\tcolPointRead := 
column.NewPoint()\n\tcolRingRead := column.NewPoint().Array()\n\tcolPolygonRead := column.NewPoint().Array().Array()\n\tcolMultiPolygonRead := column.NewPoint().Array().Array().Array()\n\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\tpoint,\n\tring,\n\tpolygon,\n\tmultiPolygon\n\tFROM test_%[1]s`, tableName),\n\t\tcolPointRead,\n\t\tcolRingRead,\n\t\tcolPolygonRead,\n\t\tcolMultiPolygonRead,\n\t)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar pointData []types.Point\n\tvar ringData [][]types.Point\n\tvar polygonData [][][]types.Point\n\tvar multiPolygonData [][][][]types.Point\n\n\tfor selectStmt.Next() {\n\t\tpointData = colPointRead.Read(pointData)\n\t\tringData = colRingRead.Read(ringData)\n\t\tpolygonData = colPolygonRead.Read(polygonData)\n\t\tmultiPolygonData = colMultiPolygonRead.Read(multiPolygonData)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\n\tassert.Equal(t, pointInsert, pointData)\n\tassert.Equal(t, ringInsert, ringData)\n\tassert.Equal(t, polygonInsert, polygonData)\n\tassert.Equal(t, multiPolygonInsert, multiPolygonData)\n}\n"
  },
  {
    "path": "column/tuples_template/tuple.go.tmpl",
    "content": "package column\n\nimport (\n\t\"unsafe\"\n)\n\ntype tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }} any] struct {\n    {{- range $val := iterate .Numbrer \"1\" }}\n    Col{{ $val }} T{{ $val }}{{end }}\n}\n\n// Tuple{{.Numbrer}} is a column of Tuple(T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}) ClickHouse data type\ntype Tuple{{.Numbrer}}[T ~struct {\n\t {{- range $val := iterate .Numbrer \"1\" }}\n    Col{{ $val }} T{{ $val }}{{end }}\n}{{- range $val := iterate .Numbrer \"1\" }}, T{{ $val }}{{end }} any] struct {\n\tTuple\n    {{- range $val := iterate .Numbrer \"1\" }}\n    col{{ $val }} Column[T{{ $val }}]{{end }}\n}\n\n// NewTuple{{.Numbrer}} create a new tuple of Tuple(T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}) ClickHouse data type\nfunc NewTuple{{.Numbrer}}[T ~struct {\n\t {{- range $val := iterate .Numbrer \"1\" }}\n    Col{{ $val }} T{{ $val }}{{end }}\n}{{- range $val := iterate .Numbrer \"1\" }}, T{{ $val }}{{end }} any](\n{{- range $val := iterate .Numbrer \"1\" }}\ncolumn{{ $val }} Column[T{{ $val }}],{{end }}\n) *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer \"1\" }} ,T{{$val}}{{end}}] {\n\treturn &Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer \"1\" }} ,T{{$val}}{{end}}]{\n\t\tTuple: Tuple{\n\t\t\tcolumns: []ColumnBasic{\n            {{- range $val := iterate .Numbrer \"1\" }}\n                column{{ $val }},{{end }}\n            },\n\t\t},\n\t\t {{- range $val := iterate .Numbrer \"1\" }}\n        col{{ $val }}: column{{ $val }},{{end }}\n\t}\n}\n\n// NewNested{{.Numbrer}} create a new nested of Nested(T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}) ClickHouse data type\n//\n// this is actually an alias for NewTuple{{.Numbrer}}(T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}).Array()\nfunc NewNested{{.Numbrer}}[T ~struct {\n\t {{- range $val := iterate .Numbrer \"1\" }}\n    Col{{ $val }} 
T{{ $val }}{{end }}\n}{{- range $val := iterate .Numbrer \"1\" }}, T{{ $val }}{{end }} any](\n{{- range $val := iterate .Numbrer \"1\" }}\ncolumn{{ $val }} Column[T{{ $val }}],{{end }}\n) *Array[T] {\n\treturn NewTuple{{.Numbrer}}[T](\n    {{- range $val := iterate .Numbrer \"1\" }}\n    column{{ $val }},{{end}}\n    ).Array()\n}\n\n// Data get all the data in current block as a slice.\nfunc (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer \"1\" }} ,T{{$val}}{{end}}]) Data() []T {\n\tval := make([]T, c.NumRow())\n\tfor i := 0; i < c.NumRow(); i++ {\n\t\tval[i] = T(tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}]{\n         {{- range $val := iterate .Numbrer \"1\" }}\n            Col{{ $val }}: c.col{{ $val }}.Row(i),{{end }}\n\t\t})\n\t}\n\treturn val\n}\n\n// Read reads all the data in current block and append to the input.\nfunc (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer \"1\" }} ,T{{$val}}{{end}}]) Read(value []T) []T {\n\tvalTuple := *(*[]tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}])(unsafe.Pointer(&value))\n\tif cap(valTuple)-len(valTuple) >= c.NumRow() {\n\t\tvalTuple = valTuple[:len(value)+c.NumRow()]\n\t} else {\n\t\tvalTuple = append(valTuple, make([]tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}], c.NumRow())...)\n\t}\n\n\tval := valTuple[len(valTuple)-c.NumRow():]\n\tfor i := 0; i < c.NumRow(); i++ {\n        {{- range $val := iterate .Numbrer \"1\" }}\n            val[i].Col{{ $val }} = c.col{{ $val }}.Row(i){{end }}\n\t}\n\treturn *(*[]T)(unsafe.Pointer(&valTuple))\n}\n\n// Row return the value of given row.\n// NOTE: Row number start from zero\nfunc (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer \"1\" }} ,T{{$val}}{{end}}]) Row(row int) T {\n\treturn T(tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}]{\n\t\t{{- range $val := iterate .Numbrer \"1\" 
}}\n            Col{{ $val }}: c.col{{ $val }}.Row(row),{{end }}\n\t})\n}\n\n// Append value for insert\nfunc (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer \"1\" }} ,T{{$val}}{{end}}]) Append(v ...T) {\n\tfor _, v := range v {\n        t := tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer \"2\" }}, T{{ $val }}{{end }}](v)\n        {{- range $val := iterate .Numbrer \"1\" }}\n        c.col{{ $val }}.Append(t.Col{{ $val }}){{end }}\n\t}\n}\n\n// Array return a Array type for this column\nfunc (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer \"1\" }} ,T{{$val}}{{end}}]) Array() *Array[T] {\n\treturn NewArray[T](c)\n}\n"
  },
  {
    "path": "column/tuples_template/tuple2.json",
    "content": "{\n    \"Numbrer\": \"2\"\n}"
  },
  {
    "path": "column/tuples_template/tuple3.json",
    "content": "{\n    \"Numbrer\": \"3\"\n}"
  },
  {
    "path": "column/tuples_template/tuple4.json",
    "content": "{\n    \"Numbrer\": \"4\"\n}"
  },
  {
    "path": "column/tuples_template/tuple5.json",
    "content": "{\n    \"Numbrer\": \"5\"\n}"
  },
  {
    "path": "column/tuples_test.go",
    "content": "package column_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/types\"\n)\n\nfunc TestTuples(t *testing.T) {\n\ttableName := \"tuples\"\n\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := chconn.Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(),\n\t\tfmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),\n\t)\n\trequire.NoError(t, err)\n\tset := chconn.Settings{\n\t\t{\n\t\t\tName:  \"allow_suspicious_low_cardinality_types\",\n\t\t\tValue: \"true\",\n\t\t},\n\t}\n\n\terr = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (\n\t\t%[1]s1 Tuple(Int64),\n\t\t%[1]s1_array Array(Tuple(Int64)),\n\t\t%[1]s2 Tuple(Int64, Int64),\n\t\t%[1]s2_array Array(Tuple(Int64, Int64)),\n\t\t%[1]s3 Tuple(Int64, Int64, Int64),\n\t\t%[1]s3_array Array(Tuple(Int64, Int64, Int64)),\n\t\t%[1]s4 Tuple(Int64, Int64, Int64, Int64),\n\t\t%[1]s4_array Array(Tuple(Int64, Int64, Int64, Int64)),\n\t\t%[1]s5 Tuple(Int64, Int64, Int64, Int64, Int64),\n\t\t%[1]s5_array Array(Tuple(Int64, Int64, Int64, Int64, Int64))\n\t\t) Engine=Memory`, tableName), &chconn.QueryOptions{\n\t\tSettings: set,\n\t})\n\n\trequire.NoError(t, err)\n\n\tcol1 := column.NewTuple1[int64](column.New[int64]())\n\tcol1Array := column.NewTuple1[int64](column.New[int64]()).Array()\n\ttype Tuple2 types.Tuple2[int64, int64]\n\tcol2 := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]())\n\tcol2Array := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]()).Array()\n\ttype Tuple3 types.Tuple3[int64, int64, int64]\n\tcol3 := column.NewTuple3[Tuple3, int64, int64, int64](column.New[int64](), 
column.New[int64](), column.New[int64]())\n\tcol3Array := column.NewTuple3[Tuple3, int64, int64, int64](column.New[int64](), column.New[int64](), column.New[int64]()).Array()\n\ttype Tuple4 types.Tuple4[int64, int64, int64, int64]\n\tcol4 := column.NewTuple4[\n\t\tTuple4,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t)\n\tcol4Array := column.NewTuple4[\n\t\tTuple4,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t).Array()\n\ttype Tuple5 types.Tuple5[\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t]\n\tcol5 := column.NewTuple5[\n\t\tTuple5,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t)\n\tcol5Array := column.NewTuple5[\n\t\tTuple5,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t).Array()\n\n\tvar col1Insert []int64\n\tvar col1ArrayInsert [][]int64\n\tvar col2Insert []Tuple2\n\tvar col2ArrayInsert [][]Tuple2\n\tvar col3Insert []Tuple3\n\tvar col3ArrayInsert [][]Tuple3\n\tvar col4Insert []Tuple4\n\tvar col4ArrayInsert [][]Tuple4\n\tvar col5Insert []Tuple5\n\tvar col5ArrayInsert [][]Tuple5\n\n\tfor insertN := 0; insertN < 2; insertN++ {\n\t\trows := 10\n\t\tfor i := 0; i < rows; i++ {\n\t\t\tcol1.Append(int64(i))\n\t\t\tcol1Insert = append(col1Insert, int64(i))\n\t\t\tcol1Array.Append([]int64{int64(i), int64(i + 1)})\n\t\t\tcol1ArrayInsert = append(col1ArrayInsert, []int64{int64(i), int64(i + 1)})\n\t\t\tcol2.Append(Tuple2{int64(i), int64(i + 1)})\n\t\t\tcol2Insert = append(col2Insert, Tuple2{int64(i), int64(i + 
1)})\n\t\t\tcol2Array.Append([]Tuple2{{int64(i), int64(i + 1)}, {int64(i + 2), int64(i + 3)}})\n\t\t\tcol2ArrayInsert = append(col2ArrayInsert, []Tuple2{{int64(i), int64(i + 1)}, {int64(i + 2), int64(i + 3)}})\n\t\t\tcol3.Append(Tuple3{int64(i), int64(i + 1), int64(i + 2)})\n\t\t\tcol3Insert = append(col3Insert, Tuple3{int64(i), int64(i + 1), int64(i + 2)})\n\t\t\tcol3Array.Append([]Tuple3{\n\t\t\t\t{int64(i), int64(i + 1), int64(i + 2)},\n\t\t\t\t{int64(i + 3), int64(i + 4), int64(i + 5)},\n\t\t\t})\n\t\t\tcol3ArrayInsert = append(col3ArrayInsert, []Tuple3{\n\t\t\t\t{int64(i), int64(i + 1), int64(i + 2)},\n\t\t\t\t{int64(i + 3), int64(i + 4), int64(i + 5)},\n\t\t\t})\n\t\t\tcol4.Append(Tuple4{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)})\n\t\t\tcol4Insert = append(col4Insert, Tuple4{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)})\n\t\t\tcol4Array.Append([]Tuple4{\n\t\t\t\t{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)},\n\t\t\t\t{int64(i + 4), int64(i + 5), int64(i + 6), int64(i + 7)},\n\t\t\t})\n\t\t\tcol4ArrayInsert = append(col4ArrayInsert, []Tuple4{\n\t\t\t\t{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)},\n\t\t\t\t{int64(i + 4), int64(i + 5), int64(i + 6), int64(i + 7)},\n\t\t\t})\n\t\t\tcol5.Append(Tuple5{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)})\n\t\t\tcol5Insert = append(col5Insert, Tuple5{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)})\n\t\t\tcol5Array.Append([]Tuple5{\n\t\t\t\t{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)},\n\t\t\t\t{int64(i + 5), int64(i + 6), int64(i + 7), int64(i + 8), int64(i + 9)},\n\t\t\t})\n\t\t\tcol5ArrayInsert = append(col5ArrayInsert, []Tuple5{\n\t\t\t\t{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)},\n\t\t\t\t{int64(i + 5), int64(i + 6), int64(i + 7), int64(i + 8), int64(i + 9)},\n\t\t\t})\n\t\t}\n\n\t\terr = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO\n\t\t\ttest_%[1]s 
(\n\t\t\t\t%[1]s1,\n\t\t\t\t%[1]s1_array,\n\t\t\t\t%[1]s2,\n\t\t\t\t%[1]s2_array,\n\t\t\t\t%[1]s3,\n\t\t\t\t%[1]s3_array,\n\t\t\t\t%[1]s4,\n\t\t\t\t%[1]s4_array,\n\t\t\t\t%[1]s5,\n\t\t\t\t%[1]s5_array\n\t\t\t)\n\t\tVALUES`, tableName),\n\t\t\tcol1, col1Array, col2, col2Array, col3, col3Array, col4, col4Array, col5, col5Array,\n\t\t)\n\t\trequire.NoError(t, err)\n\t}\n\n\t// example read all\n\tcol1Read := column.NewTuple1[int64](column.New[int64]())\n\tcol1ArrayRead := column.NewTuple1[int64](column.New[int64]()).Array()\n\tcol2Read := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]())\n\tcol2ArrayRead := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]()).Array()\n\tcol3Read := column.NewTuple3[\n\t\tTuple3,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t)\n\tcol3ArrayRead := column.NewTuple3[\n\t\tTuple3,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t).Array()\n\tcol4Read := column.NewTuple4[\n\t\tTuple4,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t)\n\tcol4ArrayRead := column.NewTuple4[\n\t\tTuple4,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t).Array()\n\tcol5Read := column.NewTuple5[\n\t\tTuple5,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t)\n\tcol5ArrayRead := 
column.NewTuple5[\n\t\tTuple5,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t\tint64,\n\t](\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t\tcolumn.New[int64](),\n\t).Array()\n\tselectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s1,\n\t\t%[1]s1_array,\n\t\t%[1]s2,\n\t\t%[1]s2_array,\n\t\t%[1]s3,\n\t\t%[1]s3_array,\n\t\t%[1]s4,\n\t\t%[1]s4_array,\n\t\t%[1]s5,\n\t\t%[1]s5_array\n\n\tFROM test_%[1]s`, tableName),\n\t\tcol1Read, col1ArrayRead, col2Read, col2ArrayRead, col3Read, col3ArrayRead, col4Read, col4ArrayRead, col5Read, col5ArrayRead)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar col1ReadData []int64\n\tvar col1ArrayReadData [][]int64\n\tvar col2ReadData []Tuple2\n\tvar col2ArrayReadData [][]Tuple2\n\tvar col3ReadData []Tuple3\n\tvar col3ArrayReadData [][]Tuple3\n\tvar col4ReadData []Tuple4\n\tvar col4ArrayReadData [][]Tuple4\n\tvar col5ReadData []Tuple5\n\tvar col5ArrayReadData [][]Tuple5\n\n\tfor selectStmt.Next() {\n\t\tcol1ReadData = col1Read.Read(col1ReadData)\n\t\tcol1ArrayReadData = col1ArrayRead.Read(col1ArrayReadData)\n\t\tcol2ReadData = col2Read.Read(col2ReadData)\n\t\tcol2ArrayReadData = col2ArrayRead.Read(col2ArrayReadData)\n\t\tcol3ReadData = col3Read.Read(col3ReadData)\n\t\tcol3ArrayReadData = col3ArrayRead.Read(col3ArrayReadData)\n\t\tcol4ReadData = col4Read.Read(col4ReadData)\n\t\tcol4ArrayReadData = col4ArrayRead.Read(col4ArrayReadData)\n\t\tcol5ReadData = col5Read.Read(col5ReadData)\n\t\tcol5ArrayReadData = col5ArrayRead.Read(col5ArrayReadData)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n\n\tassert.Equal(t, col1Insert, col1ReadData)\n\tassert.Equal(t, col1ArrayInsert, col1ArrayReadData)\n\tassert.Equal(t, col2Insert, col2ReadData)\n\tassert.Equal(t, col2ArrayInsert, col2ArrayReadData)\n\tassert.Equal(t, col3Insert, col3ReadData)\n\tassert.Equal(t, col3ArrayInsert, 
col3ArrayReadData)\n\tassert.Equal(t, col4Insert, col4ReadData)\n\tassert.Equal(t, col4ArrayInsert, col4ArrayReadData)\n\tassert.Equal(t, col5Insert, col5ReadData)\n\tassert.Equal(t, col5ArrayInsert, col5ArrayReadData)\n\n\t// example read row\n\n\tselectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT\n\t\t%[1]s1,\n\t\t%[1]s1_array,\n\t\t%[1]s2,\n\t\t%[1]s2_array,\n\t\t%[1]s3,\n\t\t%[1]s3_array,\n\t\t%[1]s4,\n\t\t%[1]s4_array,\n\t\t%[1]s5,\n\t\t%[1]s5_array\n\n\tFROM test_%[1]s`, tableName),\n\t\tcol1Read, col1ArrayRead, col2Read, col2ArrayRead, col3Read, col3ArrayRead, col4Read, col4ArrayRead, col5Read, col5ArrayRead)\n\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tcol1ReadData = col1ReadData[:0]\n\tcol1ArrayReadData = col1ArrayReadData[:0]\n\tcol2ReadData = col2ReadData[:0]\n\tcol2ArrayReadData = col2ArrayReadData[:0]\n\tcol3ReadData = col3ReadData[:0]\n\tcol3ArrayReadData = col3ArrayReadData[:0]\n\tcol4ReadData = col4ReadData[:0]\n\tcol4ArrayReadData = col4ArrayReadData[:0]\n\tcol5ReadData = col5ReadData[:0]\n\tcol5ArrayReadData = col5ArrayReadData[:0]\n\n\tfor selectStmt.Next() {\n\t\tfor i := 0; i < selectStmt.RowsInBlock(); i++ {\n\t\t\tcol1ReadData = append(col1ReadData, col1Read.Row(i))\n\t\t\tcol1ArrayReadData = append(col1ArrayReadData, col1ArrayRead.Row(i))\n\t\t\tcol2ReadData = append(col2ReadData, col2Read.Row(i))\n\t\t\tcol2ArrayReadData = append(col2ArrayReadData, col2ArrayRead.Row(i))\n\t\t\tcol3ReadData = append(col3ReadData, col3Read.Row(i))\n\t\t\tcol3ArrayReadData = append(col3ArrayReadData, col3ArrayRead.Row(i))\n\t\t\tcol4ReadData = append(col4ReadData, col4Read.Row(i))\n\t\t\tcol4ArrayReadData = append(col4ArrayReadData, col4ArrayRead.Row(i))\n\t\t\tcol5ReadData = append(col5ReadData, col5Read.Row(i))\n\t\t\tcol5ArrayReadData = append(col5ArrayReadData, col5ArrayRead.Row(i))\n\t\t}\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\tselectStmt.Close()\n\n\tassert.Equal(t, col1Insert, 
col1ReadData)\n\tassert.Equal(t, col1ArrayInsert, col1ArrayReadData)\n\tassert.Equal(t, col2Insert, col2ReadData)\n\tassert.Equal(t, col2ArrayInsert, col2ArrayReadData)\n\tassert.Equal(t, col3Insert, col3ReadData)\n\tassert.Equal(t, col3ArrayInsert, col3ArrayReadData)\n\tassert.Equal(t, col4Insert, col4ReadData)\n\tassert.Equal(t, col4ArrayInsert, col4ArrayReadData)\n\tassert.Equal(t, col5Insert, col5ReadData)\n\tassert.Equal(t, col5ArrayInsert, col5ArrayReadData)\n}\n"
  },
  {
    "path": "config.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\"\n\t\"net/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst defaultUsername = \"default\"\nconst defaultDatabase = \"default\"\nconst defaultDBPort = \"9000\"\nconst defaultClientName = \"chx\"\n\n// Method is compression codec.\ntype CompressMethod byte\n\n// Possible compression methods.\nconst (\n\tCompressNone     CompressMethod = 0x00\n\tCompressChecksum CompressMethod = 0x02\n\tCompressLZ4      CompressMethod = 0x82\n\tCompressZSTD     CompressMethod = 0x90\n)\n\n// AfterConnectFunc is called after ValidateConnect. It can be used to set up the connection (e.g. Set session variables\n// or prepare statements). If this returns an error the connection attempt fails.\ntype AfterConnectFunc func(ctx context.Context, conn Conn) error\n\n// ValidateConnectFunc is called during a connection attempt after a successful authentication with the ClickHouse server.\n// It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next\n// fallback config is tried. This allows implementing high availability behavior.\ntype ValidateConnectFunc func(ctx context.Context, conn Conn) error\n\n// Config is the settings used to establish a connection to a ClickHouse server. It must be created by ParseConfig and\n// then it can be modified. A manually initialized Config will cause ConnectConfig to panic.\ntype Config struct {\n\tHost              string // host (e.g. localhost)\n\tPort              uint16\n\tDatabase          string\n\tUser              string\n\tPassword          string\n\tClientName        string\n\tTLSConfig         *tls.Config // nil disables TLS\n\tConnectTimeout    time.Duration\n\tDialFunc          DialFunc   // e.g. net.Dialer.DialContext\n\tLookupFunc        LookupFunc // e.g. net.Resolver.LookupHost\n\tReaderFunc        ReaderFunc // e.g. 
bufio.Reader\n\tCompress          CompressMethod\n\tQuotaKey          string\n\tWriterFunc        WriterFunc\n\tMinReadBufferSize int\n\t// Run-time parameters to set on connection as session default values\n\tRuntimeParams map[string]string\n\n\tFallbacks []*FallbackConfig\n\n\t// ValidateConnect is called during a connection attempt after a successful authentication with the ClickHouse server.\n\t// It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next\n\t// fallback config is tried. This allows implementing high availability behavior.\n\tValidateConnect ValidateConnectFunc\n\n\t// AfterConnect is called after ValidateConnect. It can be used to set up the connection (e.g. Set session variables\n\t// or prepare statements). If this returns an error the connection attempt fails.\n\tAfterConnect AfterConnectFunc\n\n\tcreatedByParseConfig bool // Used to enforce created by ParseConfig rule.\n\n\t// Original connection string that was parsed into config.\n\tconnString string\n}\n\n// Copy returns a deep copy of the config that is safe to use and modify.\n// The only exception is the TLSConfig field:\n// according to the tls.Config docs it must not be modified after creation.\nfunc (c *Config) Copy() *Config {\n\tnewConf := new(Config)\n\t*newConf = *c\n\tif newConf.TLSConfig != nil {\n\t\tnewConf.TLSConfig = c.TLSConfig.Clone()\n\t}\n\tif newConf.RuntimeParams != nil {\n\t\tnewConf.RuntimeParams = make(map[string]string, len(c.RuntimeParams))\n\t\tfor k, v := range c.RuntimeParams {\n\t\t\tnewConf.RuntimeParams[k] = v\n\t\t}\n\t}\n\tif newConf.Fallbacks != nil {\n\t\tnewConf.Fallbacks = make([]*FallbackConfig, len(c.Fallbacks))\n\t\tfor i, fallback := range c.Fallbacks {\n\t\t\tnewFallback := new(FallbackConfig)\n\t\t\t*newFallback = *fallback\n\t\t\tif newFallback.TLSConfig != nil {\n\t\t\t\tnewFallback.TLSConfig = fallback.TLSConfig.Clone()\n\t\t\t}\n\t\t\tnewConf.Fallbacks[i] = 
newFallback\n\t\t}\n\t}\n\treturn newConf\n}\n\n// ConnString returns the original connection string used to connect to the ClickHouse server.\nfunc (c *Config) ConnString() string { return c.connString }\n\n// FallbackConfig is additional settings to attempt a connection with when the primary Config fails to establish a\n// network connection. It is used for TLS fallback such as sslmode=prefer and high availability (HA) connections.\ntype FallbackConfig struct {\n\tHost      string // host (e.g. localhost)\n\tPort      uint16\n\tTLSConfig *tls.Config // nil disables TLS\n}\n\n// NetworkAddress converts a ClickHouse host and port into network and address suitable for use with\n// net.Dial.\nfunc NetworkAddress(host string, port uint16) (network, address string) {\n\tnetwork = \"tcp\"\n\taddress = net.JoinHostPort(host, strconv.Itoa(int(port)))\n\treturn\n}\n\n// ParseConfig builds a []*Config with default values and use CH* Env.\n//\n//\t# Example DSN\n//\tuser=vahid password=secret host=ch.example.com port=5432 dbname=mydb sslmode=verify-ca\n//\n//\t# Example URL\n//\tclickhouse://vahid:secret@ch.example.com:9440/mydb?sslmode=verify-ca\n//\n// ParseConfig supports specifying multiple hosts in similar manner to libpq. Host and port may include comma separated\n// values that will be tried in order. This can be used as part of a high availability system.\n//\n//\t# Example URL\n//\tclickhouse://vahid:secret@foo.example.com:9000,bar.example.com:9000/mydb\n//\n// ParseConfig currently recognizes the following environment variable and their parameter key word equivalents passed\n// via database URL or DSN:\n//\n//\tCHHOST\n//\tCHPORT\n//\tCHDATABASE\n//\tCHUSER\n//\tCHPASSWORD\n//\tCHCLIENTNAME\n//\tCHCONNECT_TIMEOUT\n//\tCHSSLMODE\n//\tCHSSLKEY\n//\tCHSSLCERT\n//\tCHSSLROOTCERT\n//\n// Important Security Notes:\n//\n// ParseConfig tries to match libpq behavior with regard to CHSSLMODE. 
This includes defaulting to \"prefer\" behavior if\n// not set.\n//\n// See http://www.postgresql.org/docs/12/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION for details on what level of\n// security each sslmode provides.\n//\n// The sslmode \"prefer\" (the default), sslmode \"allow\", and multiple hosts are implemented via the Fallbacks field of\n// the Config struct. If TLSConfig is manually changed it will not affect the fallbacks. For example, in the case of\n// sslmode \"prefer\" this means it will first try the main Config settings which use TLS, then it will try the fallback\n// which does not use TLS. This can lead to an unexpected unencrypted connection if the main TLS config is manually\n// changed later but the unencrypted fallback is present. Ensure there are no stale fallbacks when manually setting\n// TLSConfig.\n//\n// If a host name resolves into multiple addresses chconn will only try the first.\n//\n// In addition, ParseConfig accepts the following options:\n//\n//\t\tmin_read_buffer_size\n//\t\t\tThe minimum size of the internal read buffer. Default 8192.\n//\t\tcompress\n//\t\t\tcompression method. 
empty string or \"checksum\" or \"lz4\" or \"zstd\".\n//\t     in the \"checksum\" chconn checks the checksum and not use any compress method.\n//\t\tquota_key\n//\t\t\tthe quota key.\nfunc ParseConfig(connString string) (*Config, error) {\n\tdefaultSettings := defaultSettings()\n\tenvSettings := parseEnvSettings()\n\n\tconnStringSettings := make(map[string]string)\n\tif connString != \"\" {\n\t\tvar err error\n\t\t// connString may be a database URL or a DSN\n\t\tif strings.HasPrefix(connString, \"clickhouse://\") {\n\t\t\tconnStringSettings, err = parseURLSettings(connString)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &parseConfigError{connString: connString, msg: \"failed to parse as URL\", err: err}\n\t\t\t}\n\t\t} else {\n\t\t\tconnStringSettings, err = parseDSNSettings(connString)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &parseConfigError{connString: connString, msg: \"failed to parse as DSN\", err: err}\n\t\t\t}\n\t\t}\n\t}\n\n\tsettings := mergeSettings(defaultSettings, envSettings, connStringSettings)\n\tminReadBufferSize, err := strconv.Atoi(settings[\"min_read_buffer_size\"])\n\tif err != nil {\n\t\treturn nil, &parseConfigError{connString: connString, msg: \"cannot parse min_read_buffer_size\", err: err}\n\t}\n\n\tconfig := &Config{\n\t\tcreatedByParseConfig: true,\n\t\tDatabase:             settings[\"database\"],\n\t\tUser:                 settings[\"user\"],\n\t\tPassword:             settings[\"password\"],\n\t\tRuntimeParams:        make(map[string]string),\n\t\tClientName:           settings[\"client_name\"],\n\t\tMinReadBufferSize:    minReadBufferSize,\n\t\tconnString:           connString,\n\t}\n\n\tswitch settings[\"compress\"] {\n\tcase \"checksum\":\n\t\tconfig.Compress = CompressChecksum\n\tcase \"lz4\":\n\t\tconfig.Compress = CompressLZ4\n\tcase \"zstd\":\n\t\tconfig.Compress = CompressZSTD\n\t}\n\n\tconfig.QuotaKey = settings[\"quota_key\"]\n\n\tif connectTimeoutSetting, present := settings[\"connect_timeout\"]; present 
{\n\t\tconnectTimeout, err := parseConnectTimeoutSetting(connectTimeoutSetting)\n\t\tif err != nil {\n\t\t\treturn nil, &parseConfigError{connString: connString, msg: \"invalid connect_timeout\", err: err}\n\t\t}\n\t\tconfig.ConnectTimeout = connectTimeout\n\t\tconfig.DialFunc = makeConnectTimeoutDialFunc(connectTimeout)\n\t} else {\n\t\tdefaultDialer := makeDefaultDialer()\n\t\tconfig.DialFunc = defaultDialer.DialContext\n\t}\n\n\tconfig.LookupFunc = makeDefaultResolver().LookupHost\n\n\tnotRuntimeParams := map[string]struct{}{\n\t\t\"host\":                 {},\n\t\t\"port\":                 {},\n\t\t\"database\":             {},\n\t\t\"user\":                 {},\n\t\t\"password\":             {},\n\t\t\"connect_timeout\":      {},\n\t\t\"sslmode\":              {},\n\t\t\"client_name\":          {},\n\t\t\"min_read_buffer_size\": {},\n\t\t\"sslkey\":               {},\n\t\t\"sslcert\":              {},\n\t\t\"sslrootcert\":          {},\n\t\t\"compress\":             {},\n\t\t\"quota_key\":            {},\n\t}\n\n\tfor k, v := range settings {\n\t\tif _, present := notRuntimeParams[k]; present {\n\t\t\tcontinue\n\t\t}\n\t\tconfig.RuntimeParams[k] = v\n\t}\n\n\tfallbacks := []*FallbackConfig{}\n\n\thosts := strings.Split(settings[\"host\"], \",\")\n\tports := strings.Split(settings[\"port\"], \",\")\n\n\tfor i, host := range hosts {\n\t\tvar portStr string\n\t\tif i < len(ports) {\n\t\t\tportStr = ports[i]\n\t\t} else {\n\t\t\tportStr = ports[0]\n\t\t}\n\n\t\tport, err := parsePort(portStr)\n\t\tif err != nil {\n\t\t\treturn nil, &parseConfigError{connString: connString, msg: \"invalid port\", err: err}\n\t\t}\n\n\t\tvar tlsConfigs []*tls.Config\n\n\t\ttlsConfigs, err = configTLS(settings, host)\n\n\t\tif err != nil {\n\t\t\treturn nil, &parseConfigError{connString: connString, msg: \"failed to configure TLS\", err: err}\n\t\t}\n\n\t\tfor _, tlsConfig := range tlsConfigs {\n\t\t\tfallbacks = append(fallbacks, &FallbackConfig{\n\t\t\t\tHost:      
host,\n\t\t\t\tPort:      port,\n\t\t\t\tTLSConfig: tlsConfig,\n\t\t\t})\n\t\t}\n\t}\n\n\tconfig.Host = fallbacks[0].Host\n\tconfig.Port = fallbacks[0].Port\n\tconfig.TLSConfig = fallbacks[0].TLSConfig\n\tconfig.Fallbacks = fallbacks[1:]\n\n\treturn config, nil\n}\n\nfunc defaultSettings() map[string]string {\n\tsettings := make(map[string]string)\n\n\tsettings[\"host\"] = \"localhost\"\n\tsettings[\"port\"] = defaultDBPort\n\tsettings[\"user\"] = defaultUsername\n\tsettings[\"database\"] = defaultDatabase\n\tsettings[\"client_name\"] = defaultClientName\n\tsettings[\"min_read_buffer_size\"] = \"8192\"\n\n\treturn settings\n}\n\nfunc mergeSettings(settingSets ...map[string]string) map[string]string {\n\tsettings := make(map[string]string)\n\n\tfor _, s2 := range settingSets {\n\t\tfor k, v := range s2 {\n\t\t\tsettings[k] = v\n\t\t}\n\t}\n\n\treturn settings\n}\n\nfunc parseEnvSettings() map[string]string {\n\tsettings := make(map[string]string)\n\n\tnameMap := map[string]string{\n\t\t\"CHHOST\":            \"host\",\n\t\t\"CHPORT\":            \"port\",\n\t\t\"CHDATABASE\":        \"database\",\n\t\t\"CHUSER\":            \"user\",\n\t\t\"CHPASSWORD\":        \"password\",\n\t\t\"CHCLIENTNAME\":      \"client_name\",\n\t\t\"CHCONNECT_TIMEOUT\": \"connect_timeout\",\n\t\t\"CHSSLMODE\":         \"sslmode\",\n\t\t\"CHSSLKEY\":          \"sslkey\",\n\t\t\"CHSSLCERT\":         \"sslcert\",\n\t\t\"CHSSLROOTCERT\":     \"sslrootcert\",\n\t}\n\n\tfor envname, realname := range nameMap {\n\t\tvalue := os.Getenv(envname)\n\t\tif value != \"\" {\n\t\t\tsettings[realname] = value\n\t\t}\n\t}\n\n\treturn settings\n}\n\nfunc parseURLSettings(connString string) (map[string]string, error) {\n\tsettings := make(map[string]string)\n\n\turlConn, err := url.Parse(connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif urlConn.User != nil {\n\t\tsettings[\"user\"] = urlConn.User.Username()\n\t\tif password, present := urlConn.User.Password(); present 
{\n\t\t\tsettings[\"password\"] = password\n\t\t}\n\t}\n\n\t// Handle multiple host:port's in url.Host by splitting them into host,host,host and port,port,port.\n\tvar hosts []string\n\tvar ports []string\n\tfor _, host := range strings.Split(urlConn.Host, \",\") {\n\t\tif host == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif isIPOnly(host) {\n\t\t\thosts = append(hosts, strings.Trim(host, \"[]\"))\n\t\t\tcontinue\n\t\t}\n\t\th, p, err := net.SplitHostPort(host)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to split host:port in '%s', err: %w\", host, err)\n\t\t}\n\t\tif h != \"\" {\n\t\t\thosts = append(hosts, h)\n\t\t}\n\t\tif p != \"\" {\n\t\t\tports = append(ports, p)\n\t\t}\n\t}\n\tif len(hosts) > 0 {\n\t\tsettings[\"host\"] = strings.Join(hosts, \",\")\n\t}\n\tif len(ports) > 0 {\n\t\tsettings[\"port\"] = strings.Join(ports, \",\")\n\t}\n\n\tdatabase := strings.TrimLeft(urlConn.Path, \"/\")\n\tif database != \"\" {\n\t\tsettings[\"database\"] = database\n\t}\n\n\tnameMap := map[string]string{\n\t\t\"dbname\": \"database\",\n\t}\n\n\tfor k, v := range urlConn.Query() {\n\t\tif k2, present := nameMap[k]; present {\n\t\t\tk = k2\n\t\t}\n\n\t\tsettings[k] = v[0]\n\t}\n\n\treturn settings, nil\n}\n\nfunc isIPOnly(host string) bool {\n\treturn net.ParseIP(strings.Trim(host, \"[]\")) != nil || !strings.Contains(host, \":\")\n}\n\nvar asciiSpace = [256]uint8{'\\t': 1, '\\n': 1, '\\v': 1, '\\f': 1, '\\r': 1, ' ': 1}\n\nfunc parseDSNSettings(s string) (map[string]string, error) {\n\tsettings := make(map[string]string)\n\n\tnameMap := map[string]string{\n\t\t\"dbname\": \"database\",\n\t}\n\n\tfor len(s) > 0 {\n\t\tvar key, val string\n\t\teqIdx := strings.IndexRune(s, '=')\n\t\tif eqIdx < 0 {\n\t\t\treturn nil, ErrInvalidDSN\n\t\t}\n\n\t\tkey = strings.Trim(s[:eqIdx], \" \\t\\n\\r\\v\\f\")\n\t\ts = strings.TrimLeft(s[eqIdx+1:], \" \\t\\n\\r\\v\\f\")\n\t\tif s == \"\" {\n\t\t} else if s[0] != '\\'' {\n\t\t\tend := 0\n\t\t\tfor ; end < len(s); end++ {\n\t\t\t\tif 
asciiSpace[s[end]] == 1 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif s[end] == '\\\\' {\n\t\t\t\t\tend++\n\t\t\t\t\tif end == len(s) {\n\t\t\t\t\t\treturn nil, ErrInvalidBackSlash\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tval = strings.ReplaceAll(strings.ReplaceAll(s[:end], \"\\\\\\\\\", \"\\\\\"), \"\\\\'\", \"'\")\n\t\t\tif end == len(s) {\n\t\t\t\ts = \"\"\n\t\t\t} else {\n\t\t\t\ts = s[end+1:]\n\t\t\t}\n\t\t} else { // quoted string\n\t\t\ts = s[1:]\n\t\t\tend := 0\n\t\t\tfor ; end < len(s); end++ {\n\t\t\t\tif s[end] == '\\'' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif s[end] == '\\\\' {\n\t\t\t\t\tend++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif end == len(s) {\n\t\t\t\treturn nil, ErrInvalidquoted\n\t\t\t}\n\t\t\tval = strings.ReplaceAll(strings.ReplaceAll(s[:end], \"\\\\\\\\\", \"\\\\\"), \"\\\\'\", \"'\")\n\t\t\tif end == len(s) {\n\t\t\t\ts = \"\"\n\t\t\t} else {\n\t\t\t\ts = s[end+1:]\n\t\t\t}\n\t\t}\n\n\t\tif k, ok := nameMap[key]; ok {\n\t\t\tkey = k\n\t\t}\n\n\t\tif key == \"\" {\n\t\t\treturn nil, ErrInvalidDSN\n\t\t}\n\n\t\tsettings[key] = val\n\t}\n\n\treturn settings, nil\n}\n\n// configTLS uses libpq's TLS parameters to construct  []*tls.Config. 
It is\n// necessary to allow returning multiple TLS configs as sslmode \"allow\" and\n// \"prefer\" allow fallback.\n//\n//nolint:funlen,gocyclo\nfunc configTLS(settings map[string]string, thisHost string) ([]*tls.Config, error) {\n\thost := thisHost\n\tsslmode := settings[\"sslmode\"]\n\tsslrootcert := settings[\"sslrootcert\"]\n\tsslcert := settings[\"sslcert\"]\n\tsslkey := settings[\"sslkey\"]\n\n\t// in clickhouse default non tls connection accepted and  tls connection listen on another port\n\tif sslmode == \"\" || sslmode == \"disable\" {\n\t\treturn []*tls.Config{nil}, nil\n\t}\n\n\t//nolint:gosec // it change by config\n\ttlsConfig := &tls.Config{}\n\n\tswitch sslmode {\n\tcase \"disable\":\n\t\treturn []*tls.Config{nil}, nil\n\tcase \"allow\", \"prefer\":\n\t\ttlsConfig.InsecureSkipVerify = true\n\tcase \"require\":\n\t\tif sslrootcert != \"\" {\n\t\t\tgoto nextCase\n\t\t}\n\t\ttlsConfig.InsecureSkipVerify = true\n\t\tbreak\n\tnextCase:\n\t\tfallthrough\n\tcase \"verify-ca\":\n\t\t// Don't perform the default certificate verification because it\n\t\t// will verify the hostname. Instead, verify the server's\n\t\t// certificate chain ourselves in VerifyPeerCertificate and\n\t\t// ignore the server name. 
This emulates libpq's verify-ca\n\t\t// behavior.\n\t\t//\n\t\t// See https://github.com/golang/go/issues/21971#issuecomment-332693931\n\t\t// and https://pkg.go.dev/crypto/tls?tab=doc#example-Config-VerifyPeerCertificate\n\t\t// for more info.\n\t\ttlsConfig.InsecureSkipVerify = true\n\t\ttlsConfig.VerifyPeerCertificate = func(certificates [][]byte, _ [][]*x509.Certificate) error {\n\t\t\tcerts := make([]*x509.Certificate, len(certificates))\n\t\t\tfor i, asn1Data := range certificates {\n\t\t\t\tcert, err := x509.ParseCertificate(asn1Data)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to parse certificate from server: %w\", err)\n\t\t\t\t}\n\t\t\t\tcerts[i] = cert\n\t\t\t}\n\n\t\t\t// Leave DNSName empty to skip hostname verification.\n\t\t\topts := x509.VerifyOptions{\n\t\t\t\tRoots:         tlsConfig.RootCAs,\n\t\t\t\tIntermediates: x509.NewCertPool(),\n\t\t\t}\n\t\t\t// Skip the first cert because it's the leaf. All others\n\t\t\t// are intermediates.\n\t\t\tfor _, cert := range certs[1:] {\n\t\t\t\topts.Intermediates.AddCert(cert)\n\t\t\t}\n\t\t\t_, err := certs[0].Verify(opts)\n\t\t\treturn err\n\t\t}\n\tcase \"verify-full\":\n\t\ttlsConfig.ServerName = host\n\tdefault:\n\t\treturn nil, ErrSSLModeInvalid\n\t}\n\n\tif sslrootcert != \"\" {\n\t\tcaCertPool := x509.NewCertPool()\n\n\t\tcaPath := sslrootcert\n\t\tcaCert, err := os.ReadFile(caPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to read CA file: %w\", err)\n\t\t}\n\n\t\tif !caCertPool.AppendCertsFromPEM(caCert) {\n\t\t\treturn nil, ErrAddCA\n\t\t}\n\n\t\ttlsConfig.RootCAs = caCertPool\n\t\ttlsConfig.ClientCAs = caCertPool\n\t}\n\n\tif (sslcert != \"\" && sslkey == \"\") || (sslcert == \"\" && sslkey != \"\") {\n\t\treturn nil, ErrMissCertRequirement\n\t}\n\n\tif sslcert != \"\" && sslkey != \"\" {\n\t\tcert, err := tls.LoadX509KeyPair(sslcert, sslkey)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to read cert: %w\", 
err)\n\t\t}\n\n\t\ttlsConfig.Certificates = []tls.Certificate{cert}\n\t}\n\n\tswitch sslmode {\n\tcase \"allow\":\n\t\treturn []*tls.Config{nil, tlsConfig}, nil\n\tcase \"prefer\":\n\t\treturn []*tls.Config{tlsConfig, nil}, nil\n\tcase \"require\", \"verify-ca\", \"verify-full\":\n\t\treturn []*tls.Config{tlsConfig}, nil\n\tdefault:\n\t\tpanic(\"BUG: bad sslmode should already have been caught\")\n\t}\n}\n\nfunc parsePort(s string) (uint16, error) {\n\tport, err := strconv.ParseUint(s, 10, 16)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif port < 1 || port > math.MaxUint16 {\n\t\treturn 0, ErrPortInvalid\n\t}\n\treturn uint16(port), nil\n}\n\nfunc makeDefaultDialer() *net.Dialer {\n\treturn &net.Dialer{KeepAlive: 5 * time.Minute}\n}\n\nfunc makeDefaultResolver() *net.Resolver {\n\treturn net.DefaultResolver\n}\n\nfunc parseConnectTimeoutSetting(s string) (time.Duration, error) {\n\ttimeout, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif timeout < 0 {\n\t\treturn 0, ErrNegativeTimeout\n\t}\n\treturn time.Duration(timeout) * time.Second, nil\n}\n\nfunc makeConnectTimeoutDialFunc(timeout time.Duration) DialFunc {\n\td := makeDefaultDialer()\n\td.Timeout = timeout\n\treturn d.DialContext\n}\n"
  },
  {
    "path": "config_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nvar parseConfigTests = []struct {\n\tname       string\n\tconnString string\n\tconfig     *Config\n}{\n\t// Test all sslmodes\n\t{\n\t\tname:       \"sslmode not set (disable)\",\n\t\tconnString: \"clickhouse://vahid:secret@localhost:9000/mydb\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tClientName:    defaultClientName,\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"sslmode disable\",\n\t\tconnString: \"clickhouse://vahid:secret@localhost:9000/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"localhost\",\n\t\t\tClientName:    defaultClientName,\n\t\t\tPort:          9000,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"sslmode allow\",\n\t\tconnString: \"clickhouse://vahid:secret@localhost:9000/mydb?sslmode=allow\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tFallbacks: []*FallbackConfig{\n\t\t\t\t{\n\t\t\t\t\tHost: \"localhost\",\n\t\t\t\t\tPort: 9000,\n\t\t\t\t\tTLSConfig: &tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"sslmode prefer\",\n\t\tconnString: 
\"clickhouse://vahid:secret@localhost:9000/mydb?sslmode=prefer\",\n\t\tconfig: &Config{\n\n\t\t\tUser:       \"vahid\",\n\t\t\tPassword:   \"secret\",\n\t\t\tHost:       \"localhost\",\n\t\t\tPort:       9000,\n\t\t\tDatabase:   \"mydb\",\n\t\t\tClientName: defaultClientName,\n\t\t\tTLSConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tFallbacks: []*FallbackConfig{\n\t\t\t\t{\n\t\t\t\t\tHost:      \"localhost\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"sslmode require\",\n\t\tconnString: \"clickhouse://vahid:secret@localhost:9000/mydb?sslmode=require\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tClientName:    defaultClientName,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tTLSConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"sslmode verify-ca\",\n\t\tconnString: \"clickhouse://vahid:secret@localhost:9000/mydb?sslmode=verify-ca\",\n\t\tconfig: &Config{\n\t\t\tUser:       \"vahid\",\n\t\t\tPassword:   \"secret\",\n\t\t\tHost:       \"localhost\",\n\t\t\tPort:       9000,\n\t\t\tClientName: defaultClientName,\n\t\t\tDatabase:   \"mydb\",\n\t\t\tTLSConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"sslmode verify-full\",\n\t\tconnString: \"clickhouse://vahid:secret@localhost:9000/mydb?sslmode=verify-full\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     &tls.Config{ServerName: \"localhost\"},\n\t\t\tRuntimeParams: 
map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"database url everything\",\n\t\tconnString: \"clickhouse://vahid:secret@localhost:9000/mydb?sslmode=disable&client_name=chxtest&extradata=test&connect_timeout=5\",\n\t\tconfig: &Config{\n\t\t\tUser:           \"vahid\",\n\t\t\tPassword:       \"secret\",\n\t\t\tHost:           \"localhost\",\n\t\t\tPort:           9000,\n\t\t\tDatabase:       \"mydb\",\n\t\t\tTLSConfig:      nil,\n\t\t\tConnectTimeout: 5 * time.Second,\n\t\t\tClientName:     \"chxtest\",\n\t\t\tRuntimeParams: map[string]string{\n\t\t\t\t\"extradata\": \"test\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"database url missing password\",\n\t\tconnString: \"clickhouse://vahid@localhost:9000/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"database url missing user and password\",\n\t\tconnString: \"clickhouse://localhost:9000/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          defaultUsername,\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"database url missing port\",\n\t\tconnString: \"clickhouse://vahid:secret@localhost:9000/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"database url clickhouse protocol\",\n\t\tconnString: 
\"clickhouse://vahid@localhost:9000/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"database url IPv4 with port\",\n\t\tconnString: \"clickhouse://vahid@127.0.0.1:5433/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tHost:          \"127.0.0.1\",\n\t\t\tClientName:    defaultClientName,\n\t\t\tPort:          5433,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"database url IPv6 with port\",\n\t\tconnString: \"clickhouse://vahid@[2001:db8::1]:5433/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tHost:          \"2001:db8::1\",\n\t\t\tPort:          5433,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"database url IPv6 no port\",\n\t\tconnString: \"clickhouse://vahid@[2001:db8::1]/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tHost:          \"2001:db8::1\",\n\t\t\tPort:          9000,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tClientName:    defaultClientName,\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN everything\",\n\t\tconnString: \"user=vahid password=secret host=localhost port=9000 dbname=mydb sslmode=disable client_name=chxtest connect_timeout=5\",\n\t\tconfig: &Config{\n\t\t\tUser:           \"vahid\",\n\t\t\tPassword:       \"secret\",\n\t\t\tHost:           \"localhost\",\n\t\t\tPort:           9000,\n\t\t\tDatabase:       \"mydb\",\n\t\t\tTLSConfig:      nil,\n\t\t\tClientName:     
\"chxtest\",\n\t\t\tConnectTimeout: 5 * time.Second,\n\t\t\tRuntimeParams:  map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN with escaped single quote\",\n\t\tconnString: \"user=vahid\\\\'s password=secret host=localhost port=9000 dbname=mydb sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid's\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN with escaped backslash\",\n\t\tconnString: \"user=vahid password=sooper\\\\\\\\secret host=localhost port=9000 dbname=mydb sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"sooper\\\\secret\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN with single quoted values\",\n\t\tconnString: \"user='vahid' host='localhost' dbname='mydb' sslmode='disable'\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN with single quoted value with escaped single quote\",\n\t\tconnString: \"user='vahid\\\\'s' host='localhost' dbname='mydb' sslmode='disable'\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid's\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN with empty single 
quoted value\",\n\t\tconnString: \"user='vahid' password='' host='localhost' dbname='mydb' sslmode='disable'\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN with space between key and value\",\n\t\tconnString: \"user = 'vahid' password = '' host = 'localhost' dbname = 'mydb' sslmode='disable'\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tHost:          \"localhost\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t},\n\t},\n\t{\n\t\tname:       \"URL multiple hosts\",\n\t\tconnString: \"clickhouse://vahid:secret@foo,bar,baz/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"foo\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tFallbacks: []*FallbackConfig{\n\t\t\t\t{\n\t\t\t\t\tHost:      \"bar\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tHost:      \"baz\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"URL multiple hosts and ports\",\n\t\tconnString: \"clickhouse://vahid:secret@foo:1,bar:2,baz:3/mydb?sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"foo\",\n\t\t\tPort:          1,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tFallbacks: 
[]*FallbackConfig{\n\t\t\t\t{\n\t\t\t\t\tHost:      \"bar\",\n\t\t\t\t\tPort:      2,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tHost:      \"baz\",\n\t\t\t\t\tPort:      3,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN multiple hosts one port\",\n\t\tconnString: \"user=vahid password=secret host=foo,bar,baz port=9000 dbname=mydb sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"foo\",\n\t\t\tPort:          9000,\n\t\t\tClientName:    defaultClientName,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tFallbacks: []*FallbackConfig{\n\t\t\t\t{\n\t\t\t\t\tHost:      \"bar\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tHost:      \"baz\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"DSN multiple hosts multiple ports\",\n\t\tconnString: \"user=vahid password=secret host=foo,bar,baz port=1,2,3 dbname=mydb sslmode=disable\",\n\t\tconfig: &Config{\n\t\t\tUser:          \"vahid\",\n\t\t\tPassword:      \"secret\",\n\t\t\tHost:          \"foo\",\n\t\t\tPort:          1,\n\t\t\tDatabase:      \"mydb\",\n\t\t\tTLSConfig:     nil,\n\t\t\tClientName:    defaultClientName,\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tFallbacks: []*FallbackConfig{\n\t\t\t\t{\n\t\t\t\t\tHost:      \"bar\",\n\t\t\t\t\tPort:      2,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tHost:      \"baz\",\n\t\t\t\t\tPort:      3,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"multiple hosts and fallback tsl\",\n\t\tconnString: \"user=vahid password=secret host=foo,bar,baz dbname=mydb sslmode=prefer\",\n\t\tconfig: &Config{\n\t\t\tUser:       \"vahid\",\n\t\t\tPassword:   \"secret\",\n\t\t\tHost:       \"foo\",\n\t\t\tPort:       
9000,\n\t\t\tDatabase:   \"mydb\",\n\t\t\tClientName: defaultClientName,\n\t\t\tTLSConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tFallbacks: []*FallbackConfig{\n\t\t\t\t{\n\t\t\t\t\tHost:      \"foo\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tHost: \"bar\",\n\t\t\t\t\tPort: 9000,\n\t\t\t\t\tTLSConfig: &tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t}},\n\t\t\t\t{\n\t\t\t\t\tHost:      \"bar\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tHost: \"baz\",\n\t\t\t\t\tPort: 9000,\n\t\t\t\t\tTLSConfig: &tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t}},\n\t\t\t\t{\n\t\t\t\t\tHost:      \"baz\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tname:       \"enable compress\",\n\t\tconnString: \"user=vahid password=secret host=foo,bar,baz dbname=mydb sslmode=prefer compress=checksum\",\n\t\tconfig: &Config{\n\t\t\tUser:       \"vahid\",\n\t\t\tPassword:   \"secret\",\n\t\t\tHost:       \"foo\",\n\t\t\tPort:       9000,\n\t\t\tDatabase:   \"mydb\",\n\t\t\tCompress:   CompressChecksum,\n\t\t\tClientName: defaultClientName,\n\t\t\tTLSConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t\tRuntimeParams: map[string]string{},\n\t\t\tFallbacks: []*FallbackConfig{\n\t\t\t\t{\n\t\t\t\t\tHost:      \"foo\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tHost: \"bar\",\n\t\t\t\t\tPort: 9000,\n\t\t\t\t\tTLSConfig: &tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t}},\n\t\t\t\t{\n\t\t\t\t\tHost:      \"bar\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tHost: \"baz\",\n\t\t\t\t\tPort: 9000,\n\t\t\t\t\tTLSConfig: &tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t\t}},\n\t\t\t\t{\n\t\t\t\t\tHost:      
\"baz\",\n\t\t\t\t\tPort:      9000,\n\t\t\t\t\tTLSConfig: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestParseConfig(t *testing.T) {\n\tt.Parallel()\n\n\tfor i, tt := range parseConfigTests {\n\t\tconfig, err := ParseConfig(tt.connString)\n\t\tif !assert.Nilf(t, err, \"Test %d (%s)\", i, tt.name) {\n\t\t\tcontinue\n\t\t}\n\n\t\tassertConfigsEqual(t, tt.config, config, fmt.Sprintf(\"Test %d (%s)\", i, tt.name))\n\t}\n}\n\nfunc TestParseConfigDSNWithTrailingEmptyEqualDoesNotPanic(t *testing.T) {\n\t_, err := ParseConfig(\"host= user= password= port= database=\")\n\trequire.NoError(t, err)\n}\n\nfunc TestParseConfigDSNLeadingEqual(t *testing.T) {\n\t_, err := ParseConfig(\"= user=vahid\")\n\trequire.Error(t, err)\n}\n\nfunc TestParseConfigDSNTrailingBackslash(t *testing.T) {\n\t_, err := ParseConfig(`x=x\\`)\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"invalid backslash\")\n}\n\nfunc TestConfigCopyReturnsEqualConfig(t *testing.T) {\n\tconnString := \"clickhouse://vahid:secret@localhost:9000/mydb?client_name=chxtest&connect_timeout=5\"\n\toriginal, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tcopied := original.Copy()\n\tassertConfigsEqual(t, original, copied, \"Test Config.Copy() returns equal config\")\n}\n\nfunc TestConfigCopyOriginalConfigDidNotChange(t *testing.T) {\n\tconnString := \"host=localhost,localhost2 port=9000,9000 database=mydb  client_name=chxtest connect_timeout=5\"\n\toriginal, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tcopied := original.Copy()\n\tassertConfigsEqual(t, original, copied, \"Test Config.Copy() returns equal config\")\n\n\tcopied.Port = uint16(5433)\n\tcopied.RuntimeParams[\"foo\"] = \"bar\"\n\n\tassert.Equal(t, uint16(9000), original.Port)\n\tassert.Equal(t, \"\", original.RuntimeParams[\"foo\"])\n}\n\nfunc TestConfigCopyCanBeUsedToConnect(t *testing.T) {\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\toriginal, err := 
ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tcopied := original.Copy()\n\tassert.NotPanics(t, func() {\n\t\t_, err = ConnectConfig(context.Background(), copied)\n\t})\n\tassert.NoError(t, err)\n}\n\nfunc assertConfigsEqual(t *testing.T, expected, actual *Config, testName string) {\n\tif !assert.NotNil(t, expected) {\n\t\treturn\n\t}\n\tif !assert.NotNil(t, actual) {\n\t\treturn\n\t}\n\n\tassert.Equalf(t, expected.Host, actual.Host, \"%s - Host\", testName)\n\tassert.Equalf(t, expected.Database, actual.Database, \"%s - Database\", testName)\n\tassert.Equalf(t, expected.Port, actual.Port, \"%s - Port\", testName)\n\tassert.Equalf(t, expected.User, actual.User, \"%s - User\", testName)\n\tassert.Equalf(t, expected.Password, actual.Password, \"%s - Password\", testName)\n\tassert.Equalf(t, expected.ConnectTimeout, actual.ConnectTimeout, \"%s - ConnectTimeout\", testName)\n\tassert.Equalf(t, expected.ClientName, actual.ClientName, \"%s - Client Name\", testName)\n\tassert.Equalf(t, expected.RuntimeParams, actual.RuntimeParams, \"%s - RuntimeParams\", testName)\n\n\t// Can't test function equality, so just test that they are set or not.\n\tassert.Equalf(t, expected.ValidateConnect == nil, actual.ValidateConnect == nil, \"%s - ValidateConnect\", testName)\n\tassert.Equalf(t, expected.AfterConnect == nil, actual.AfterConnect == nil, \"%s - AfterConnect\", testName)\n\n\tif assert.Equalf(t, expected.TLSConfig == nil, actual.TLSConfig == nil, \"%s - TLSConfig\", testName) {\n\t\tif expected.TLSConfig != nil {\n\t\t\tassert.Equalf(t,\n\t\t\t\texpected.TLSConfig.InsecureSkipVerify,\n\t\t\t\tactual.TLSConfig.InsecureSkipVerify,\n\t\t\t\t\"%s - TLSConfig InsecureSkipVerify\",\n\t\t\t\ttestName,\n\t\t\t)\n\t\t\tassert.Equalf(t,\n\t\t\t\texpected.TLSConfig.ServerName,\n\t\t\t\tactual.TLSConfig.ServerName,\n\t\t\t\t\"%s - TLSConfig ServerName\",\n\t\t\t\ttestName,\n\t\t\t)\n\t\t}\n\t}\n\n\tif assert.Equalf(t, len(expected.Fallbacks), len(actual.Fallbacks), \"%s - 
Fallbacks\", testName) {\n\t\tfor i := range expected.Fallbacks {\n\t\t\tassert.Equalf(t, expected.Fallbacks[i].Host, actual.Fallbacks[i].Host, \"%s - Fallback %d - Host\", testName, i)\n\t\t\tassert.Equalf(t, expected.Fallbacks[i].Port, actual.Fallbacks[i].Port, \"%s - Fallback %d - Port\", testName, i)\n\n\t\t\tif assert.Equalf(t,\n\t\t\t\texpected.Fallbacks[i].TLSConfig == nil,\n\t\t\t\tactual.Fallbacks[i].TLSConfig == nil,\n\t\t\t\t\"%s - Fallback %d - TLSConfig\",\n\t\t\t\ttestName,\n\t\t\t\ti,\n\t\t\t) {\n\t\t\t\tif expected.Fallbacks[i].TLSConfig != nil {\n\t\t\t\t\tassert.Equalf(t,\n\t\t\t\t\t\texpected.Fallbacks[i].TLSConfig.InsecureSkipVerify,\n\t\t\t\t\t\tactual.Fallbacks[i].TLSConfig.InsecureSkipVerify,\n\t\t\t\t\t\t\"%s - Fallback %d - TLSConfig InsecureSkipVerify\", testName,\n\t\t\t\t\t)\n\t\t\t\t\tassert.Equalf(t,\n\t\t\t\t\t\texpected.Fallbacks[i].TLSConfig.ServerName,\n\t\t\t\t\t\tactual.Fallbacks[i].TLSConfig.ServerName,\n\t\t\t\t\t\t\"%s - Fallback %d - TLSConfig ServerName\",\n\t\t\t\t\t\ttestName,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestParseConfigEnv(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tenvvars map[string]string\n\t\tconfig  *Config\n\t}{\n\t\t{\n\t\t\t// not testing no environment at all as that would use default host and that can vary.\n\t\t\tname:    \"CHHOST only\",\n\t\t\tenvvars: map[string]string{\"CHHOST\": \"123.123.123.123\"},\n\t\t\tconfig: &Config{\n\t\t\t\tUser:          defaultUsername,\n\t\t\t\tHost:          \"123.123.123.123\",\n\t\t\t\tPort:          9000,\n\t\t\t\tClientName:    defaultClientName,\n\t\t\t\tDatabase:      defaultDatabase,\n\t\t\t\tTLSConfig:     nil,\n\t\t\t\tRuntimeParams: map[string]string{},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"All non-TLS environment\",\n\t\t\tenvvars: map[string]string{\n\t\t\t\t\"CHHOST\":            \"123.123.123.123\",\n\t\t\t\t\"CHPORT\":            \"7777\",\n\t\t\t\t\"CHDATABASE\":        \"foo\",\n\t\t\t\t\"CHUSER\":            
\"bar\",\n\t\t\t\t\"CHPASSWORD\":        \"baz\",\n\t\t\t\t\"CHCONNECT_TIMEOUT\": \"10\",\n\t\t\t\t\"CHSSLMODE\":         \"disable\",\n\t\t\t\t\"CHCLIENTNAME\":      \"chxtest\",\n\t\t\t},\n\t\t\tconfig: &Config{\n\t\t\t\tHost:           \"123.123.123.123\",\n\t\t\t\tPort:           7777,\n\t\t\t\tDatabase:       \"foo\",\n\t\t\t\tUser:           \"bar\",\n\t\t\t\tPassword:       \"baz\",\n\t\t\t\tConnectTimeout: 10 * time.Second,\n\t\t\t\tTLSConfig:      nil,\n\t\t\t\tClientName:     \"chxtest\",\n\t\t\t\tRuntimeParams:  map[string]string{},\n\t\t\t},\n\t\t},\n\t}\n\tchEnvvars := []string{\"CHHOST\", \"CHPORT\", \"CHDATABASE\", \"CHUSER\", \"CHPASSWORD\", \"CHCLIENTNAME\", \"CHSSLMODE\", \"CHCONNECT_TIMEOUT\"}\n\n\tsavedEnv := make(map[string]string)\n\tfor _, n := range chEnvvars {\n\t\tsavedEnv[n] = os.Getenv(n)\n\t}\n\n\tdefer func() {\n\t\tfor k, v := range savedEnv {\n\t\t\terr := os.Setenv(k, v)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Unable to restore environment: %v\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor i, tt := range tests {\n\t\tfor _, n := range chEnvvars {\n\t\t\terr := os.Unsetenv(n)\n\t\t\trequire.NoError(t, err)\n\t\t}\n\n\t\tfor k, v := range tt.envvars {\n\t\t\terr := os.Setenv(k, v)\n\t\t\trequire.NoError(t, err)\n\t\t}\n\n\t\tconfig, err := ParseConfig(\"\")\n\t\tif !assert.Nilf(t, err, \"Test %d (%s)\", i, tt.name) {\n\t\t\tcontinue\n\t\t}\n\n\t\tassertConfigsEqual(t, tt.config, config, fmt.Sprintf(\"Test %d (%s)\", i, tt.name))\n\t}\n}\n\nfunc TestParseConfigError(t *testing.T) {\n\tt.Parallel()\n\n\tcontent := []byte(\"invalid tls\")\n\ttmpInvalidTLS, err := os.CreateTemp(\"\", \"invalidtls\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer os.Remove(tmpInvalidTLS.Name()) // clean up\n\n\tif _, err := tmpInvalidTLS.Write(content); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := tmpInvalidTLS.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tparseConfigErrorTests := []struct {\n\t\tname       string\n\t\tconnString string\n\t\terr  
      string\n\t\terrUnwarp  string\n\t}{\n\t\t{\n\t\t\tname:       \"invalid url\",\n\t\t\tconnString: \"clickhouse://invalid\\t\",\n\t\t\terr:        \"cannot parse `clickhouse://invalid\\t`: failed to parse as URL (parse \\\"clickhouse://invalid\\\\t\\\": net/url: invalid control character in URL)\", //nolint:lll //can't change line lengh\n\t\t}, {\n\t\t\tname:       \"invalid port\",\n\t\t\tconnString: \"port=invalid\",\n\t\t\terrUnwarp:  \"strconv.ParseUint: parsing \\\"invalid\\\": invalid syntax\",\n\t\t}, {\n\t\t\tname:       \"invalid port range\",\n\t\t\tconnString: \"port=0\",\n\t\t\terr:        \"cannot parse `port=0`: invalid port (outside range)\",\n\t\t}, {\n\t\t\tname:       \"invalid connect_timeout\",\n\t\t\tconnString: \"connect_timeout=200g\",\n\t\t\terr:        \"cannot parse `connect_timeout=200g`: invalid connect_timeout (strconv.ParseInt: parsing \\\"200g\\\": invalid syntax)\",\n\t\t}, {\n\t\t\tname:       \"negative connect_timeout\",\n\t\t\tconnString: \"connect_timeout=-100\",\n\t\t\terr:        \"cannot parse `connect_timeout=-100`: invalid connect_timeout (negative timeout)\",\n\t\t}, {\n\t\t\tname:       \"negative sslmode\",\n\t\t\tconnString: \"sslmode=invalid\",\n\t\t\terr:        \"cannot parse `sslmode=invalid`: failed to configure TLS (sslmode is invalid)\",\n\t\t}, {\n\t\t\tname:       \"fail load sslrootcert\",\n\t\t\tconnString: \"sslrootcert=invalid_address sslmode=prefer\",\n\t\t\terr:        \"cannot parse `sslrootcert=invalid_address sslmode=prefer`: failed to configure TLS (unable to read CA file: open invalid_address: no such file or directory)\", //nolint:lll //can't change line lengh\n\t\t}, {\n\t\t\tname:       \"invalid sslrootcert\",\n\t\t\tconnString: \"sslrootcert=\" + tmpInvalidTLS.Name() + \" sslmode=prefer\",\n\t\t\terr:        \"cannot parse `sslrootcert=\" + tmpInvalidTLS.Name() + \" sslmode=prefer`: failed to configure TLS (unable to add CA to cert pool)\", //nolint:lll //can't change line lengh\n\t\t}, 
{\n\t\t\tname:       \"not provide both sslcert and sskkey\",\n\t\t\tconnString: \"sslcert=invalid_address sslmode=prefer\",\n\t\t\terr:        \"cannot parse `sslcert=invalid_address sslmode=prefer`: failed to configure TLS (both \\\"sslcert\\\" and \\\"sslkey\\\" are required)\", //nolint:lll //can't change line lengh\n\t\t}, {\n\t\t\tname:       \"invalid sslcert\",\n\t\t\tconnString: \"sslcert=invalid_address sslkey=invalid_address sslmode=prefer\",\n\t\t\terr:        \"cannot parse `sslcert=invalid_address sslkey=invalid_address sslmode=prefer`: failed to configure TLS (unable to read cert: open invalid_address: no such file or directory)\", //nolint:lll //can't change line lengh\n\t\t},\n\t}\n\n\tfor i, tt := range parseConfigErrorTests {\n\t\t_, err := ParseConfig(tt.connString)\n\t\tif !assert.Errorf(t, err, \"Test %d (%s)\", i, tt.name) {\n\t\t\tcontinue\n\t\t}\n\t\tif tt.err != \"\" {\n\t\t\tif !assert.EqualError(t, err, tt.err, \"Test %d (%s)\", i, tt.name) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tif !assert.EqualErrorf(t, errors.Unwrap(err), tt.errUnwarp, \"Test %d (%s)\", i, tt.name) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "doc.go",
    "content": "// Package chconn is a low-level Clickhouse database driver.\n/*\nchconn is a pure Go driver for [ClickHouse] that use Native protocol\nchconn aims to be low-level, fast, and performant.\n\nIf you have any suggestion or comment, please feel free to open an issue on this tutorial's GitHub page!\n*/\npackage chconn\n"
  },
  {
    "path": "doc_test.go",
    "content": "package chconn_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/chpool\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc Example() {\n\tconn, err := chpool.New(os.Getenv(\"DATABASE_URL\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer conn.Close()\n\n\t// to check if the connection is alive\n\terr = conn.Ping(context.Background())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = conn.Exec(context.Background(), `DROP TABLE IF EXISTS example_table`)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = conn.Exec(context.Background(), `CREATE TABLE  example_table (\n\t\tuint64 UInt64,\n\t\tuint64_nullable Nullable(UInt64)\n\t) Engine=Memory`)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcol1 := column.New[uint64]()\n\tcol2 := column.New[uint64]().Nullable()\n\trows := 1_000_0000 // One hundred million rows- insert in 10 times\n\tnumInsert := 10\n\tcol1.SetWriteBufferSize(rows)\n\tcol2.SetWriteBufferSize(rows)\n\tstartInsert := time.Now()\n\tfor i := 0; i < numInsert; i++ {\n\t\tcol1.Reset()\n\t\tcol2.Reset()\n\t\tfor y := 0; y < rows; y++ {\n\t\t\tcol1.Append(uint64(i))\n\t\t\tif i%2 == 0 {\n\t\t\t\tcol2.Append(uint64(i))\n\t\t\t} else {\n\t\t\t\tcol2.AppendNil()\n\t\t\t}\n\t\t}\n\n\t\tctxInsert, cancelInsert := context.WithTimeout(context.Background(), time.Second*30)\n\t\t// insert data\n\t\terr = conn.Insert(ctxInsert, \"INSERT INTO example_table (uint64,uint64_nullable) VALUES\", col1, col2)\n\t\tif err != nil {\n\t\t\tcancelInsert()\n\t\t\tpanic(err)\n\t\t}\n\t\tcancelInsert()\n\t}\n\tfmt.Println(\"inserted 10M rows in \", time.Since(startInsert))\n\n\t// select data\n\tcol1Read := column.New[uint64]()\n\tcol2Read := column.New[uint64]().Nullable()\n\n\tctxSelect, cancelSelect := context.WithTimeout(context.Background(), time.Second*30)\n\tdefer cancelSelect()\n\n\tstartSelect := time.Now()\n\tselectStmt, err := conn.Select(ctxSelect, \"SELECT uint64,uint64_nullable FROM 
 example_table\", col1Read, col2Read)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// make sure the stmt close after select. but it's not necessary\n\tdefer selectStmt.Close()\n\n\tvar col1Data []uint64\n\tvar col2DataNil []bool\n\tvar col2Data []uint64\n\t// read data block by block\n\t// for more information about block, see: https://clickhouse.com/docs/en/development/architecture/#block\n\tfor selectStmt.Next() {\n\t\tcol1Data = col1Data[:0]\n\t\tcol1Data = col1Read.Read(col1Data)\n\n\t\tcol2DataNil = col2DataNil[:0]\n\t\tcol2DataNil = col2Read.ReadNil(col2DataNil)\n\n\t\tcol2Data = col2Data[:0]\n\t\tcol2Data = col2Read.Read(col2Data)\n\t}\n\n\t// check errors\n\tif selectStmt.Err() != nil {\n\t\tpanic(selectStmt.Err())\n\t}\n\tfmt.Println(\"selected 10M rows in \", time.Since(startSelect))\n}\n"
  },
  {
    "path": "errors.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\n// ErrNegativeTimeout when negative timeout provided\nvar ErrNegativeTimeout = errors.New(\"negative timeout\")\n\n// ErrPortInvalid when privide out of range port\nvar ErrPortInvalid = errors.New(\"outside range\")\n\n// ErrSSLModeInvalid when privide invalid ssl mode\nvar ErrSSLModeInvalid = errors.New(\"sslmode is invalid\")\n\n// ErrAddCA when can't add ca\nvar ErrAddCA = errors.New(\"unable to add CA to cert pool\")\n\n// ErrMissCertRequirement when sslcert or sslkey not provided\nvar ErrMissCertRequirement = errors.New(`both \"sslcert\" and \"sslkey\" are required`)\n\n// ErrInvalidDSN for invalid dsn\nvar ErrInvalidDSN = errors.New(\"invalid dsn\")\n\n// ErrInvalidBackSlash invalid backslash in dsn\nvar ErrInvalidBackSlash = errors.New(\"invalid backslash\")\n\n// ErrInvalidquoted invalid quoted in dsn\nvar ErrInvalidquoted = errors.New(\"unterminated quoted string in connection info string\")\n\n// ErrIPNotFound when can't found ip in connecting\nvar ErrIPNotFound = errors.New(\"ip addr wasn't found\")\n\n// ChError represents an error reported by the Clickhouse server\ntype ChError struct {\n\tCode       ChErrorType\n\tName       string\n\tMessage    string\n\tStackTrace string\n\terr        error\n}\n\nfunc (e *ChError) read(r *readerwriter.Reader) error {\n\tvar (\n\t\terr       error\n\t\thasNested uint8\n\t\terrCode   int32\n\t)\n\tif errCode, err = r.Int32(); err != nil {\n\t\treturn &readError{\"ChError: read code\", err}\n\t}\n\te.Code = ChErrorType(errCode)\n\tif e.Name, err = r.String(); err != nil {\n\t\treturn &readError{\"ChError: read name\", err}\n\t}\n\tif e.Message, err = r.String(); err != nil {\n\t\treturn &readError{\"ChError: read message\", err}\n\t}\n\te.Message = strings.TrimSpace(strings.TrimPrefix(e.Message, 
e.Name+\":\"))\n\tif e.StackTrace, err = r.String(); err != nil {\n\t\treturn &readError{\"ChError: read StackTrace\", err}\n\t}\n\tif hasNested, err = r.ReadByte(); err != nil {\n\t\treturn &readError{\"ChError: read hasNested\", err}\n\t}\n\tif hasNested == 1 {\n\t\tnestedErr := &ChError{}\n\t\tif err := nestedErr.read(r); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\te.err = nestedErr\n\t}\n\treturn nil\n}\n\n// Unwrap returns the underlying error\nfunc (e *ChError) Unwrap() error {\n\treturn e.err\n}\n\n// Error return string error\nfunc (e *ChError) Error() string {\n\tif e.err == nil {\n\t\treturn fmt.Sprintf(\" %s (%d): %s\", e.Name, e.Code, e.Message)\n\t}\n\treturn fmt.Sprintf(\" %s (%d): %s (%s)\", e.Name, e.Code, e.Message, e.err)\n}\n\n// preferContextOverNetTimeoutError returns ctx.Err() if ctx.Err() is present and err is a net.Error with Timeout() ==\n// true. Otherwise returns err.\nfunc preferContextOverNetTimeoutError(ctx context.Context, err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\tvar timeoutError net.Error\n\terrors.As(err, &timeoutError)\n\tif timeoutError != nil && timeoutError.Timeout() &&\n\t\tctx.Err() != nil {\n\t\treturn &errTimeout{\n\t\t\tmainError: err,\n\t\t\terr:       ctx.Err(),\n\t\t}\n\t}\n\treturn err\n}\n\n// errTimeout occurs when an error was caused by a timeout. 
Specifically, it wraps an error which is\n// context.Canceled, context.DeadlineExceeded, or an implementer of net.Error where Timeout() is true.\ntype errTimeout struct {\n\terr       error\n\tmainError error\n}\n\nfunc (e *errTimeout) Error() string {\n\tif e.mainError == nil {\n\t\treturn fmt.Sprintf(\"timeout: %s\", e.err.Error())\n\t}\n\treturn fmt.Sprintf(\"timeout: %s - %s\", e.err.Error(), e.mainError.Error())\n}\n\nfunc (e *errTimeout) Unwrap() error {\n\treturn e.err\n}\n\ntype contextAlreadyDoneError struct {\n\terr error\n}\n\nfunc (e *contextAlreadyDoneError) Error() string {\n\treturn fmt.Sprintf(\"context already done: %s\", e.err.Error())\n}\n\nfunc (e *contextAlreadyDoneError) Unwrap() error {\n\treturn e.err\n}\n\n// newContextAlreadyDoneError double-wraps a context error in `contextAlreadyDoneError` and `errTimeout`.\nfunc newContextAlreadyDoneError(ctx context.Context) (err error) {\n\treturn &errTimeout{\n\t\terr: &contextAlreadyDoneError{err: ctx.Err()},\n\t}\n}\n\ntype unexpectedPacket struct {\n\texpected string\n\tactual   interface{}\n}\n\nfunc (e *unexpectedPacket) Error() string {\n\treturn fmt.Sprintf(\"Unexpected packet from server (expected %s got %#v)\", e.expected, e.actual)\n}\n\ntype notImplementedPacket struct {\n\tpacket uint64\n}\n\nfunc (e *notImplementedPacket) Error() string {\n\treturn fmt.Sprintf(\"packet not implemented: %d\", e.packet)\n}\n\ntype connectError struct {\n\tconfig *Config\n\tmsg    string\n\terr    error\n}\n\nfunc (e *connectError) Error() string {\n\tsb := &strings.Builder{}\n\tfmt.Fprintf(sb, \"failed to connect to `host=%s user=%s database=%s`: %s\", e.config.Host, e.config.User, e.config.Database, e.msg)\n\tif e.err != nil {\n\t\tfmt.Fprintf(sb, \" (%s)\", e.err.Error())\n\t}\n\treturn sb.String()\n}\n\nfunc (e *connectError) Unwrap() error {\n\treturn e.err\n}\n\ntype connLockError struct {\n\tstatus string\n}\n\nfunc (e *connLockError) Error() string {\n\treturn e.status\n}\n\ntype parseConfigError 
struct {\n\tconnString string\n\tmsg        string\n\terr        error\n}\n\nfunc (e *parseConfigError) Error() string {\n\tconnString := redactPW(e.connString)\n\tif e.err == nil {\n\t\treturn fmt.Sprintf(\"cannot parse `%s`: %s\", connString, e.msg)\n\t}\n\treturn fmt.Sprintf(\"cannot parse `%s`: %s (%s)\", connString, e.msg, e.err.Error())\n}\n\nfunc (e *parseConfigError) Unwrap() error {\n\treturn e.err\n}\n\ntype readError struct {\n\tmsg string\n\terr error\n}\n\nfunc (e *readError) Error() string {\n\treturn fmt.Sprintf(\"%s (%s)\", e.msg, e.err.Error())\n}\n\nfunc (e *readError) Unwrap() error {\n\treturn e.err\n}\n\ntype writeError struct {\n\tmsg string\n\terr error\n}\n\nfunc (e *writeError) Error() string {\n\treturn fmt.Sprintf(\"%s (%s)\", e.msg, e.err.Error())\n}\n\nfunc (e *writeError) Unwrap() error {\n\treturn e.err\n}\n\nfunc redactPW(connString string) string {\n\tif strings.HasPrefix(connString, \"clickhouse://\") {\n\t\tif u, err := url.Parse(connString); err == nil {\n\t\t\treturn redactURL(u)\n\t\t}\n\t}\n\tquotedDSN := regexp.MustCompile(`password='[^']*'`)\n\tconnString = quotedDSN.ReplaceAllLiteralString(connString, \"password=xxxxx\")\n\tplainDSN := regexp.MustCompile(`password=[^ ]*`)\n\tconnString = plainDSN.ReplaceAllLiteralString(connString, \"password=xxxxx\")\n\tbrokenURL := regexp.MustCompile(`:[^:@]+?@`)\n\tconnString = brokenURL.ReplaceAllLiteralString(connString, \":xxxxxx@\")\n\treturn connString\n}\n\nfunc redactURL(u *url.URL) string {\n\tif u == nil {\n\t\treturn \"\"\n\t}\n\tif _, pwSet := u.User.Password(); pwSet {\n\t\tu.User = url.UserPassword(u.User.Username(), \"xxxxx\")\n\t}\n\treturn u.String()\n}\n\n// InsertError represents an error when insert error\ntype InsertError struct {\n\terr        error\n\tremoteAddr net.Addr\n}\n\n// Error return string error\nfunc (e *InsertError) Error() string {\n\treturn fmt.Sprintf(\"failed to insert data: remoteAddr: %s - %s\", e.remoteAddr.String(), e.err.Error())\n}\n\n// Unwrap 
returns the underlying error\nfunc (e *InsertError) Unwrap() error {\n\treturn e.err\n}\n\n// ColumnNumberReadError represents an error when read more or less column\ntype ColumnNumberReadError struct {\n\tRead      int\n\tAvailable uint64\n}\n\nfunc (e *ColumnNumberReadError) Error() string {\n\treturn fmt.Sprintf(\"read %d column(s), but available %d column(s)\", e.Read, e.Available)\n}\n\n// ColumnNumberWriteError represents an error when number of write column is not equal to number of query column\ntype ColumnNumberWriteError struct {\n\tWriteColumn int\n\tNeedColumn  uint64\n}\n\nfunc (e *ColumnNumberWriteError) Error() string {\n\treturn fmt.Sprintf(\"write %d column(s) but insert query needs %d column(s)\", e.WriteColumn, e.NeedColumn)\n}\n\n// NumberWriteError represents an error when number rows of columns is not equal\ntype NumberWriteError struct {\n\tFirstNumRow int\n\tNumRow      int\n\tColumn      string\n\tFirstColumn string\n}\n\nfunc (e *NumberWriteError) Error() string {\n\treturn fmt.Sprintf(\"%q has %d rows but %q column has %d rows\", e.FirstColumn, e.FirstNumRow, e.Column, e.NumRow)\n}\n\n// ColumnNotFoundError represents an error when column not found (when try to reorder columns)\ntype ColumnNotFoundError struct {\n\tColumn string\n}\n\nfunc (e *ColumnNotFoundError) Error() string {\n\treturn fmt.Sprintf(\"the input columns do not contain column %q. The column name must be set using the `SetName` method\", e.Column)\n}\n"
  },
  {
    "path": "errors_ch_code.go",
    "content": "package chconn\n\ntype ChErrorType int32\n\nconst (\n\tChErrorOk                                           ChErrorType = 0    // OK\n\tChErrorUnsupportedMethod                            ChErrorType = 1    // UNSUPPORTED_METHOD\n\tChErrorUnsupportedParameter                         ChErrorType = 2    // UNSUPPORTED_PARAMETER\n\tChErrorUnexpectedEndOfFile                          ChErrorType = 3    // UNEXPECTED_END_OF_FILE\n\tChErrorExpectedEndOfFile                            ChErrorType = 4    // EXPECTED_END_OF_FILE\n\tChErrorCannotParseText                              ChErrorType = 6    // CANNOT_PARSE_TEXT\n\tChErrorIncorrectNumberOfColumns                     ChErrorType = 7    // INCORRECT_NUMBER_OF_COLUMNS\n\tChErrorThereIsNoColumn                              ChErrorType = 8    // THERE_IS_NO_COLUMN\n\tChErrorSizesOfColumnsDoesntMatch                    ChErrorType = 9    // SIZES_OF_COLUMNS_DOESNT_MATCH\n\tChErrorNotFoundColumnInBlock                        ChErrorType = 10   // NOT_FOUND_COLUMN_IN_BLOCK\n\tChErrorPositionOutOfBound                           ChErrorType = 11   // POSITION_OUT_OF_BOUND\n\tChErrorParameterOutOfBound                          ChErrorType = 12   // PARAMETER_OUT_OF_BOUND\n\tChErrorSizesOfColumnsInTupleDoesntMatch             ChErrorType = 13   // SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH\n\tChErrorDuplicateColumn                              ChErrorType = 15   // DUPLICATE_COLUMN\n\tChErrorNoSuchColumnInTable                          ChErrorType = 16   // NO_SUCH_COLUMN_IN_TABLE\n\tChErrorDelimiterInStringLiteralDoesntMatch          ChErrorType = 17   // DELIMITER_IN_STRING_LITERAL_DOESNT_MATCH\n\tChErrorCannotInsertElementIntoConstantColumn        ChErrorType = 18   // CANNOT_INSERT_ELEMENT_INTO_CONSTANT_COLUMN\n\tChErrorSizeOfFixedStringDoesntMatch                 ChErrorType = 19   // SIZE_OF_FIXED_STRING_DOESNT_MATCH\n\tChErrorNumberOfColumnsDoesntMatch                   ChErrorType = 20   // 
NUMBER_OF_COLUMNS_DOESNT_MATCH\n\tChErrorCannotReadAllDataFromTabSeparatedInput       ChErrorType = 21   // CANNOT_READ_ALL_DATA_FROM_TAB_SEPARATED_INPUT\n\tChErrorCannotParseAllValueFromTabSeparatedInput     ChErrorType = 22   // CANNOT_PARSE_ALL_VALUE_FROM_TAB_SEPARATED_INPUT\n\tChErrorCannotReadFromIstream                        ChErrorType = 23   // CANNOT_READ_FROM_ISTREAM\n\tChErrorCannotWriteToOstream                         ChErrorType = 24   // CANNOT_WRITE_TO_OSTREAM\n\tChErrorCannotParseEscapeSequence                    ChErrorType = 25   // CANNOT_PARSE_ESCAPE_SEQUENCE\n\tChErrorCannotParseQuotedString                      ChErrorType = 26   // CANNOT_PARSE_QUOTED_STRING\n\tChErrorCannotParseInputAssertionFailed              ChErrorType = 27   // CANNOT_PARSE_INPUT_ASSERTION_FAILED\n\tChErrorCannotPrintFloatOrDoubleNumber               ChErrorType = 28   // CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER\n\tChErrorCannotPrintInteger                           ChErrorType = 29   // CANNOT_PRINT_INTEGER\n\tChErrorCannotReadSizeOfCompressedChunk              ChErrorType = 30   // CANNOT_READ_SIZE_OF_COMPRESSED_CHUNK\n\tChErrorCannotReadCompressedChunk                    ChErrorType = 31   // CANNOT_READ_COMPRESSED_CHUNK\n\tChErrorAttemptToReadAfterEOF                        ChErrorType = 32   // ATTEMPT_TO_READ_AFTER_EOF\n\tChErrorCannotReadAllData                            ChErrorType = 33   // CANNOT_READ_ALL_DATA\n\tChErrorTooManyArgumentsForFunction                  ChErrorType = 34   // TOO_MANY_ARGUMENTS_FOR_FUNCTION\n\tChErrorTooFewArgumentsForFunction                   ChErrorType = 35   // TOO_FEW_ARGUMENTS_FOR_FUNCTION\n\tChErrorBadArguments                                 ChErrorType = 36   // BAD_ARGUMENTS\n\tChErrorUnknownElementInAst                          ChErrorType = 37   // UNKNOWN_ELEMENT_IN_AST\n\tChErrorCannotParseDate                              ChErrorType = 38   // CANNOT_PARSE_DATE\n\tChErrorTooLargeSizeCompressed                       
ChErrorType = 39   // TOO_LARGE_SIZE_COMPRESSED\n\tChErrorChecksumDoesntMatch                          ChErrorType = 40   // CHECKSUM_DOESNT_MATCH\n\tChErrorCannotParseDatetime                          ChErrorType = 41   // CANNOT_PARSE_DATETIME\n\tChErrorNumberOfArgumentsDoesntMatch                 ChErrorType = 42   // NUMBER_OF_ARGUMENTS_DOESNT_MATCH\n\tChErrorIllegalTypeOfArgument                        ChErrorType = 43   // ILLEGAL_TYPE_OF_ARGUMENT\n\tChErrorIllegalColumn                                ChErrorType = 44   // ILLEGAL_COLUMN\n\tChErrorIllegalNumberOfResultColumns                 ChErrorType = 45   // ILLEGAL_NUMBER_OF_RESULT_COLUMNS\n\tChErrorUnknownFunction                              ChErrorType = 46   // UNKNOWN_FUNCTION\n\tChErrorUnknownIdentifier                            ChErrorType = 47   // UNKNOWN_IDENTIFIER\n\tChErrorNotImplemented                               ChErrorType = 48   // NOT_IMPLEMENTED\n\tChErrorLogicalError                                 ChErrorType = 49   // LOGICAL_ERROR\n\tChErrorUnknownType                                  ChErrorType = 50   // UNKNOWN_TYPE\n\tChErrorEmptyListOfColumnsQueried                    ChErrorType = 51   // EMPTY_LIST_OF_COLUMNS_QUERIED\n\tChErrorColumnQueriedMoreThanOnce                    ChErrorType = 52   // COLUMN_QUERIED_MORE_THAN_ONCE\n\tChErrorTypeMismatch                                 ChErrorType = 53   // TYPE_MISMATCH\n\tChErrorStorageDoesntAllowParameters                 ChErrorType = 54   // STORAGE_DOESNT_ALLOW_PARAMETERS\n\tChErrorStorageRequiresParameter                     ChErrorType = 55   // STORAGE_REQUIRES_PARAMETER\n\tChErrorUnknownStorage                               ChErrorType = 56   // UNKNOWN_STORAGE\n\tChErrorTableAlreadyExists                           ChErrorType = 57   // TABLE_ALREADY_EXISTS\n\tChErrorTableMetadataAlreadyExists                   ChErrorType = 58   // TABLE_METADATA_ALREADY_EXISTS\n\tChErrorIllegalTypeOfColumnForFilter                 
ChErrorType = 59   // ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER\n\tChErrorUnknownTable                                 ChErrorType = 60   // UNKNOWN_TABLE\n\tChErrorOnlyFilterColumnInBlock                      ChErrorType = 61   // ONLY_FILTER_COLUMN_IN_BLOCK\n\tChErrorSyntaxError                                  ChErrorType = 62   // SYNTAX_ERROR\n\tChErrorUnknownAggregateFunction                     ChErrorType = 63   // UNKNOWN_AGGREGATE_FUNCTION\n\tChErrorCannotReadAggregateFunctionFromText          ChErrorType = 64   // CANNOT_READ_AGGREGATE_FUNCTION_FROM_TEXT\n\tChErrorCannotWriteAggregateFunctionAsText           ChErrorType = 65   // CANNOT_WRITE_AGGREGATE_FUNCTION_AS_TEXT\n\tChErrorNotAColumn                                   ChErrorType = 66   // NOT_A_COLUMN\n\tChErrorIllegalKeyOfAggregation                      ChErrorType = 67   // ILLEGAL_KEY_OF_AGGREGATION\n\tChErrorCannotGetSizeOfField                         ChErrorType = 68   // CANNOT_GET_SIZE_OF_FIELD\n\tChErrorArgumentOutOfBound                           ChErrorType = 69   // ARGUMENT_OUT_OF_BOUND\n\tChErrorCannotConvertType                            ChErrorType = 70   // CANNOT_CONVERT_TYPE\n\tChErrorCannotWriteAfterEndOfBuffer                  ChErrorType = 71   // CANNOT_WRITE_AFTER_END_OF_BUFFER\n\tChErrorCannotParseNumber                            ChErrorType = 72   // CANNOT_PARSE_NUMBER\n\tChErrorUnknownFormat                                ChErrorType = 73   // UNKNOWN_FORMAT\n\tChErrorCannotReadFromFileDescriptor                 ChErrorType = 74   // CANNOT_READ_FROM_FILE_DESCRIPTOR\n\tChErrorCannotWriteToFileDescriptor                  ChErrorType = 75   // CANNOT_WRITE_TO_FILE_DESCRIPTOR\n\tChErrorCannotOpenFile                               ChErrorType = 76   // CANNOT_OPEN_FILE\n\tChErrorCannotCloseFile                              ChErrorType = 77   // CANNOT_CLOSE_FILE\n\tChErrorUnknownTypeOfQuery                           ChErrorType = 78   // 
UNKNOWN_TYPE_OF_QUERY\n\tChErrorIncorrectFileName                            ChErrorType = 79   // INCORRECT_FILE_NAME\n\tChErrorIncorrectQuery                               ChErrorType = 80   // INCORRECT_QUERY\n\tChErrorUnknownDatabase                              ChErrorType = 81   // UNKNOWN_DATABASE\n\tChErrorDatabaseAlreadyExists                        ChErrorType = 82   // DATABASE_ALREADY_EXISTS\n\tChErrorDirectoryDoesntExist                         ChErrorType = 83   // DIRECTORY_DOESNT_EXIST\n\tChErrorDirectoryAlreadyExists                       ChErrorType = 84   // DIRECTORY_ALREADY_EXISTS\n\tChErrorFormatIsNotSuitableForInput                  ChErrorType = 85   // FORMAT_IS_NOT_SUITABLE_FOR_INPUT\n\tChErrorReceivedErrorFromRemoteIoServer              ChErrorType = 86   // RECEIVED_ERROR_FROM_REMOTE_IO_SERVER\n\tChErrorCannotSeekThroughFile                        ChErrorType = 87   // CANNOT_SEEK_THROUGH_FILE\n\tChErrorCannotTruncateFile                           ChErrorType = 88   // CANNOT_TRUNCATE_FILE\n\tChErrorUnknownCompressionMethod                     ChErrorType = 89   // UNKNOWN_COMPRESSION_METHOD\n\tChErrorEmptyListOfColumnsPassed                     ChErrorType = 90   // EMPTY_LIST_OF_COLUMNS_PASSED\n\tChErrorSizesOfMarksFilesAreInconsistent             ChErrorType = 91   // SIZES_OF_MARKS_FILES_ARE_INCONSISTENT\n\tChErrorEmptyDataPassed                              ChErrorType = 92   // EMPTY_DATA_PASSED\n\tChErrorUnknownAggregatedDataVariant                 ChErrorType = 93   // UNKNOWN_AGGREGATED_DATA_VARIANT\n\tChErrorCannotMergeDifferentAggregatedDataVariants   ChErrorType = 94   // CANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTS\n\tChErrorCannotReadFromSocket                         ChErrorType = 95   // CANNOT_READ_FROM_SOCKET\n\tChErrorCannotWriteToSocket                          ChErrorType = 96   // CANNOT_WRITE_TO_SOCKET\n\tChErrorCannotReadAllDataFromChunkedInput            ChErrorType = 97   // 
CANNOT_READ_ALL_DATA_FROM_CHUNKED_INPUT\n\tChErrorCannotWriteToEmptyBlockOutputStream          ChErrorType = 98   // CANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAM\n\tChErrorUnknownPacketFromClient                      ChErrorType = 99   // UNKNOWN_PACKET_FROM_CLIENT\n\tChErrorUnknownPacketFromServer                      ChErrorType = 100  // UNKNOWN_PACKET_FROM_SERVER\n\tChErrorUnexpectedPacketFromClient                   ChErrorType = 101  // UNEXPECTED_PACKET_FROM_CLIENT\n\tChErrorUnexpectedPacketFromServer                   ChErrorType = 102  // UNEXPECTED_PACKET_FROM_SERVER\n\tChErrorReceivedDataForWrongQueryID                  ChErrorType = 103  // RECEIVED_DATA_FOR_WRONG_QUERY_ID\n\tChErrorTooSmallBufferSize                           ChErrorType = 104  // TOO_SMALL_BUFFER_SIZE\n\tChErrorCannotReadHistory                            ChErrorType = 105  // CANNOT_READ_HISTORY\n\tChErrorCannotAppendHistory                          ChErrorType = 106  // CANNOT_APPEND_HISTORY\n\tChErrorFileDoesntExist                              ChErrorType = 107  // FILE_DOESNT_EXIST\n\tChErrorNoDataToInsert                               ChErrorType = 108  // NO_DATA_TO_INSERT\n\tChErrorCannotBlockSignal                            ChErrorType = 109  // CANNOT_BLOCK_SIGNAL\n\tChErrorCannotUnblockSignal                          ChErrorType = 110  // CANNOT_UNBLOCK_SIGNAL\n\tChErrorCannotManipulateSigset                       ChErrorType = 111  // CANNOT_MANIPULATE_SIGSET\n\tChErrorCannotWaitForSignal                          ChErrorType = 112  // CANNOT_WAIT_FOR_SIGNAL\n\tChErrorThereIsNoSession                             ChErrorType = 113  // THERE_IS_NO_SESSION\n\tChErrorCannotClockGettime                           ChErrorType = 114  // CANNOT_CLOCK_GETTIME\n\tChErrorUnknownSetting                               ChErrorType = 115  // UNKNOWN_SETTING\n\tChErrorThereIsNoDefaultValue                        ChErrorType = 116  // THERE_IS_NO_DEFAULT_VALUE\n\tChErrorIncorrectData            
                    ChErrorType = 117  // INCORRECT_DATA\n\tChErrorEngineRequired                               ChErrorType = 119  // ENGINE_REQUIRED\n\tChErrorCannotInsertValueOfDifferentSizeIntoTuple    ChErrorType = 120  // CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE\n\tChErrorUnsupportedJoinKeys                          ChErrorType = 121  // UNSUPPORTED_JOIN_KEYS\n\tChErrorIncompatibleColumns                          ChErrorType = 122  // INCOMPATIBLE_COLUMNS\n\tChErrorUnknownTypeOfAstNode                         ChErrorType = 123  // UNKNOWN_TYPE_OF_AST_NODE\n\tChErrorIncorrectElementOfSet                        ChErrorType = 124  // INCORRECT_ELEMENT_OF_SET\n\tChErrorIncorrectResultOfScalarSubquery              ChErrorType = 125  // INCORRECT_RESULT_OF_SCALAR_SUBQUERY\n\tChErrorCannotGetReturnType                          ChErrorType = 126  // CANNOT_GET_RETURN_TYPE\n\tChErrorIllegalIndex                                 ChErrorType = 127  // ILLEGAL_INDEX\n\tChErrorTooLargeArraySize                            ChErrorType = 128  // TOO_LARGE_ARRAY_SIZE\n\tChErrorFunctionIsSpecial                            ChErrorType = 129  // FUNCTION_IS_SPECIAL\n\tChErrorCannotReadArrayFromText                      ChErrorType = 130  // CANNOT_READ_ARRAY_FROM_TEXT\n\tChErrorTooLargeStringSize                           ChErrorType = 131  // TOO_LARGE_STRING_SIZE\n\tChErrorAggregateFunctionDoesntAllowParameters       ChErrorType = 133  // AGGREGATE_FUNCTION_DOESNT_ALLOW_PARAMETERS\n\tChErrorParametersToAggregateFunctionsMustBeLiterals ChErrorType = 134  // PARAMETERS_TO_AGGREGATE_FUNCTIONS_MUST_BE_LITERALS\n\tChErrorZeroArrayOrTupleIndex                        ChErrorType = 135  // ZERO_ARRAY_OR_TUPLE_INDEX\n\tChErrorUnknownElementInConfig                       ChErrorType = 137  // UNKNOWN_ELEMENT_IN_CONFIG\n\tChErrorExcessiveElementInConfig                     ChErrorType = 138  // EXCESSIVE_ELEMENT_IN_CONFIG\n\tChErrorNoElementsInConfig                           
ChErrorType = 139  // NO_ELEMENTS_IN_CONFIG\n\tChErrorAllRequestedColumnsAreMissing                ChErrorType = 140  // ALL_REQUESTED_COLUMNS_ARE_MISSING\n\tChErrorSamplingNotSupported                         ChErrorType = 141  // SAMPLING_NOT_SUPPORTED\n\tChErrorNotFoundNode                                 ChErrorType = 142  // NOT_FOUND_NODE\n\tChErrorFoundMoreThanOneNode                         ChErrorType = 143  // FOUND_MORE_THAN_ONE_NODE\n\tChErrorFirstDateIsBiggerThanLastDate                ChErrorType = 144  // FIRST_DATE_IS_BIGGER_THAN_LAST_DATE\n\tChErrorUnknownOverflowMode                          ChErrorType = 145  // UNKNOWN_OVERFLOW_MODE\n\tChErrorQuerySectionDoesntMakeSense                  ChErrorType = 146  // QUERY_SECTION_DOESNT_MAKE_SENSE\n\tChErrorNotFoundFunctionElementForAggregate          ChErrorType = 147  // NOT_FOUND_FUNCTION_ELEMENT_FOR_AGGREGATE\n\tChErrorNotFoundRelationElementForCondition          ChErrorType = 148  // NOT_FOUND_RELATION_ELEMENT_FOR_CONDITION\n\tChErrorNotFoundRhsElementForCondition               ChErrorType = 149  // NOT_FOUND_RHS_ELEMENT_FOR_CONDITION\n\tChErrorEmptyListOfAttributesPassed                  ChErrorType = 150  // EMPTY_LIST_OF_ATTRIBUTES_PASSED\n\tChErrorIndexOfColumnInSortClauseIsOutOfRange        ChErrorType = 151  // INDEX_OF_COLUMN_IN_SORT_CLAUSE_IS_OUT_OF_RANGE\n\tChErrorUnknownDirectionOfSorting                    ChErrorType = 152  // UNKNOWN_DIRECTION_OF_SORTING\n\tChErrorIllegalDivision                              ChErrorType = 153  // ILLEGAL_DIVISION\n\tChErrorAggregateFunctionNotApplicable               ChErrorType = 154  // AGGREGATE_FUNCTION_NOT_APPLICABLE\n\tChErrorUnknownRelation                              ChErrorType = 155  // UNKNOWN_RELATION\n\tChErrorDictionariesWasNotLoaded                     ChErrorType = 156  // DICTIONARIES_WAS_NOT_LOADED\n\tChErrorIllegalOverflowMode                          ChErrorType = 157  // ILLEGAL_OVERFLOW_MODE\n\tChErrorTooManyRows                  
                ChErrorType = 158  // TOO_MANY_ROWS\n\tChErrorTimeoutExceeded                              ChErrorType = 159  // TIMEOUT_EXCEEDED\n\tChErrorTooSlow                                      ChErrorType = 160  // TOO_SLOW\n\tChErrorTooManyColumns                               ChErrorType = 161  // TOO_MANY_COLUMNS\n\tChErrorTooDeepSubqueries                            ChErrorType = 162  // TOO_DEEP_SUBQUERIES\n\tChErrorTooDeepPipeline                              ChErrorType = 163  // TOO_DEEP_PIPELINE\n\tChErrorReadonly                                     ChErrorType = 164  // READONLY\n\tChErrorTooManyTemporaryColumns                      ChErrorType = 165  // TOO_MANY_TEMPORARY_COLUMNS\n\tChErrorTooManyTemporaryNonConstColumns              ChErrorType = 166  // TOO_MANY_TEMPORARY_NON_CONST_COLUMNS\n\tChErrorTooDeepAst                                   ChErrorType = 167  // TOO_DEEP_AST\n\tChErrorTooBigAst                                    ChErrorType = 168  // TOO_BIG_AST\n\tChErrorBadTypeOfField                               ChErrorType = 169  // BAD_TYPE_OF_FIELD\n\tChErrorBadGet                                       ChErrorType = 170  // BAD_GET\n\tChErrorCannotCreateDirectory                        ChErrorType = 172  // CANNOT_CREATE_DIRECTORY\n\tChErrorCannotAllocateMemory                         ChErrorType = 173  // CANNOT_ALLOCATE_MEMORY\n\tChErrorCyclicAliases                                ChErrorType = 174  // CYCLIC_ALIASES\n\tChErrorChunkNotFound                                ChErrorType = 176  // CHUNK_NOT_FOUND\n\tChErrorDuplicateChunkName                           ChErrorType = 177  // DUPLICATE_CHUNK_NAME\n\tChErrorMultipleAliasesForExpression                 ChErrorType = 178  // MULTIPLE_ALIASES_FOR_EXPRESSION\n\tChErrorMultipleExpressionsForAlias                  ChErrorType = 179  // MULTIPLE_EXPRESSIONS_FOR_ALIAS\n\tChErrorThereIsNoProfile                             ChErrorType = 180  // 
THERE_IS_NO_PROFILE\n\tChErrorIllegalFinal                                 ChErrorType = 181  // ILLEGAL_FINAL\n\tChErrorIllegalPrewhere                              ChErrorType = 182  // ILLEGAL_PREWHERE\n\tChErrorUnexpectedExpression                         ChErrorType = 183  // UNEXPECTED_EXPRESSION\n\tChErrorIllegalAggregation                           ChErrorType = 184  // ILLEGAL_AGGREGATION\n\tChErrorUnsupportedMyisamBlockType                   ChErrorType = 185  // UNSUPPORTED_MYISAM_BLOCK_TYPE\n\tChErrorUnsupportedCollationLocale                   ChErrorType = 186  // UNSUPPORTED_COLLATION_LOCALE\n\tChErrorCollationComparisonFailed                    ChErrorType = 187  // COLLATION_COMPARISON_FAILED\n\tChErrorUnknownAction                                ChErrorType = 188  // UNKNOWN_ACTION\n\tChErrorTableMustNotBeCreatedManually                ChErrorType = 189  // TABLE_MUST_NOT_BE_CREATED_MANUALLY\n\tChErrorSizesOfArraysDoesntMatch                     ChErrorType = 190  // SIZES_OF_ARRAYS_DOESNT_MATCH\n\tChErrorSetSizeLimitExceeded                         ChErrorType = 191  // SET_SIZE_LIMIT_EXCEEDED\n\tChErrorUnknownUser                                  ChErrorType = 192  // UNKNOWN_USER\n\tChErrorWrongPassword                                ChErrorType = 193  // WRONG_PASSWORD\n\tChErrorRequiredPassword                             ChErrorType = 194  // REQUIRED_PASSWORD\n\tChErrorIPAddressNotAllowed                          ChErrorType = 195  // IP_ADDRESS_NOT_ALLOWED\n\tChErrorUnknownAddressPatternType                    ChErrorType = 196  // UNKNOWN_ADDRESS_PATTERN_TYPE\n\tChErrorServerRevisionIsTooOld                       ChErrorType = 197  // SERVER_REVISION_IS_TOO_OLD\n\tChErrorDNSError                                     ChErrorType = 198  // DNS_ERROR\n\tChErrorUnknownQuota                                 ChErrorType = 199  // UNKNOWN_QUOTA\n\tChErrorQuotaDoesntAllowKeys                         ChErrorType = 200  // 
QUOTA_DOESNT_ALLOW_KEYS\n\tChErrorQuotaExpired                                 ChErrorType = 201  // QUOTA_EXPIRED\n\tChErrorTooManySimultaneousQueries                   ChErrorType = 202  // TOO_MANY_SIMULTANEOUS_QUERIES\n\tChErrorNoFreeConnection                             ChErrorType = 203  // NO_FREE_CONNECTION\n\tChErrorCannotFsync                                  ChErrorType = 204  // CANNOT_FSYNC\n\tChErrorNestedTypeTooDeep                            ChErrorType = 205  // NESTED_TYPE_TOO_DEEP\n\tChErrorAliasRequired                                ChErrorType = 206  // ALIAS_REQUIRED\n\tChErrorAmbiguousIdentifier                          ChErrorType = 207  // AMBIGUOUS_IDENTIFIER\n\tChErrorEmptyNestedTable                             ChErrorType = 208  // EMPTY_NESTED_TABLE\n\tChErrorSocketTimeout                                ChErrorType = 209  // SOCKET_TIMEOUT\n\tChErrorNetworkError                                 ChErrorType = 210  // NETWORK_ERROR\n\tChErrorEmptyQuery                                   ChErrorType = 211  // EMPTY_QUERY\n\tChErrorUnknownLoadBalancing                         ChErrorType = 212  // UNKNOWN_LOAD_BALANCING\n\tChErrorUnknownTotalsMode                            ChErrorType = 213  // UNKNOWN_TOTALS_MODE\n\tChErrorCannotStatvfs                                ChErrorType = 214  // CANNOT_STATVFS\n\tChErrorNotAnAggregate                               ChErrorType = 215  // NOT_AN_AGGREGATE\n\tChErrorQueryWithSameIDIsAlreadyRunning              ChErrorType = 216  // QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING\n\tChErrorClientHasConnectedToWrongPort                ChErrorType = 217  // CLIENT_HAS_CONNECTED_TO_WRONG_PORT\n\tChErrorTableIsDropped                               ChErrorType = 218  // TABLE_IS_DROPPED\n\tChErrorDatabaseNotEmpty                             ChErrorType = 219  // DATABASE_NOT_EMPTY\n\tChErrorDuplicateInterserverIoEndpoint               ChErrorType = 220  // 
DUPLICATE_INTERSERVER_IO_ENDPOINT\n\tChErrorNoSuchInterserverIoEndpoint                  ChErrorType = 221  // NO_SUCH_INTERSERVER_IO_ENDPOINT\n\tChErrorAddingReplicaToNonEmptyTable                 ChErrorType = 222  // ADDING_REPLICA_TO_NON_EMPTY_TABLE\n\tChErrorUnexpectedAstStructure                       ChErrorType = 223  // UNEXPECTED_AST_STRUCTURE\n\tChErrorReplicaIsAlreadyActive                       ChErrorType = 224  // REPLICA_IS_ALREADY_ACTIVE\n\tChErrorNoZookeeper                                  ChErrorType = 225  // NO_ZOOKEEPER\n\tChErrorNoFileInDataPart                             ChErrorType = 226  // NO_FILE_IN_DATA_PART\n\tChErrorUnexpectedFileInDataPart                     ChErrorType = 227  // UNEXPECTED_FILE_IN_DATA_PART\n\tChErrorBadSizeOfFileInDataPart                      ChErrorType = 228  // BAD_SIZE_OF_FILE_IN_DATA_PART\n\tChErrorQueryIsTooLarge                              ChErrorType = 229  // QUERY_IS_TOO_LARGE\n\tChErrorNotFoundExpectedDataPart                     ChErrorType = 230  // NOT_FOUND_EXPECTED_DATA_PART\n\tChErrorTooManyUnexpectedDataParts                   ChErrorType = 231  // TOO_MANY_UNEXPECTED_DATA_PARTS\n\tChErrorNoSuchDataPart                               ChErrorType = 232  // NO_SUCH_DATA_PART\n\tChErrorBadDataPartName                              ChErrorType = 233  // BAD_DATA_PART_NAME\n\tChErrorNoReplicaHasPart                             ChErrorType = 234  // NO_REPLICA_HAS_PART\n\tChErrorDuplicateDataPart                            ChErrorType = 235  // DUPLICATE_DATA_PART\n\tChErrorAborted                                      ChErrorType = 236  // ABORTED\n\tChErrorNoReplicaNameGiven                           ChErrorType = 237  // NO_REPLICA_NAME_GIVEN\n\tChErrorFormatVersionTooOld                          ChErrorType = 238  // FORMAT_VERSION_TOO_OLD\n\tChErrorCannotMunmap                                 ChErrorType = 239  // CANNOT_MUNMAP\n\tChErrorCannotMremap                                 ChErrorType = 
240  // CANNOT_MREMAP\n\tChErrorMemoryLimitExceeded                          ChErrorType = 241  // MEMORY_LIMIT_EXCEEDED\n\tChErrorTableIsReadOnly                              ChErrorType = 242  // TABLE_IS_READ_ONLY\n\tChErrorNotEnoughSpace                               ChErrorType = 243  // NOT_ENOUGH_SPACE\n\tChErrorUnexpectedZookeeperError                     ChErrorType = 244  // UNEXPECTED_ZOOKEEPER_ERROR\n\tChErrorCorruptedData                                ChErrorType = 246  // CORRUPTED_DATA\n\tChErrorIncorrectMark                                ChErrorType = 247  // INCORRECT_MARK\n\tChErrorInvalidPartitionValue                        ChErrorType = 248  // INVALID_PARTITION_VALUE\n\tChErrorNotEnoughBlockNumbers                        ChErrorType = 250  // NOT_ENOUGH_BLOCK_NUMBERS\n\tChErrorNoSuchReplica                                ChErrorType = 251  // NO_SUCH_REPLICA\n\tChErrorTooManyParts                                 ChErrorType = 252  // TOO_MANY_PARTS\n\tChErrorReplicaIsAlreadyExist                        ChErrorType = 253  // REPLICA_IS_ALREADY_EXIST\n\tChErrorNoActiveReplicas                             ChErrorType = 254  // NO_ACTIVE_REPLICAS\n\tChErrorTooManyRetriesToFetchParts                   ChErrorType = 255  // TOO_MANY_RETRIES_TO_FETCH_PARTS\n\tChErrorPartitionAlreadyExists                       ChErrorType = 256  // PARTITION_ALREADY_EXISTS\n\tChErrorPartitionDoesntExist                         ChErrorType = 257  // PARTITION_DOESNT_EXIST\n\tChErrorUnionAllResultStructuresMismatch             ChErrorType = 258  // UNION_ALL_RESULT_STRUCTURES_MISMATCH\n\tChErrorClientOutputFormatSpecified                  ChErrorType = 260  // CLIENT_OUTPUT_FORMAT_SPECIFIED\n\tChErrorUnknownBlockInfoField                        ChErrorType = 261  // UNKNOWN_BLOCK_INFO_FIELD\n\tChErrorBadCollation                                 ChErrorType = 262  // BAD_COLLATION\n\tChErrorCannotCompileCode                            ChErrorType = 263  // 
CANNOT_COMPILE_CODE\n\tChErrorIncompatibleTypeOfJoin                       ChErrorType = 264  // INCOMPATIBLE_TYPE_OF_JOIN\n\tChErrorNoAvailableReplica                           ChErrorType = 265  // NO_AVAILABLE_REPLICA\n\tChErrorMismatchReplicasDataSources                  ChErrorType = 266  // MISMATCH_REPLICAS_DATA_SOURCES\n\tChErrorStorageDoesntSupportParallelReplicas         ChErrorType = 267  // STORAGE_DOESNT_SUPPORT_PARALLEL_REPLICAS\n\tChErrorCpuidError                                   ChErrorType = 268  // CPUID_ERROR\n\tChErrorInfiniteLoop                                 ChErrorType = 269  // INFINITE_LOOP\n\tChErrorCannotCompress                               ChErrorType = 270  // CANNOT_COMPRESS\n\tChErrorCannotDecompress                             ChErrorType = 271  // CANNOT_DECOMPRESS\n\tChErrorCannotIoSubmit                               ChErrorType = 272  // CANNOT_IO_SUBMIT\n\tChErrorCannotIoGetevents                            ChErrorType = 273  // CANNOT_IO_GETEVENTS\n\tChErrorAioReadError                                 ChErrorType = 274  // AIO_READ_ERROR\n\tChErrorAioWriteError                                ChErrorType = 275  // AIO_WRITE_ERROR\n\tChErrorIndexNotUsed                                 ChErrorType = 277  // INDEX_NOT_USED\n\tChErrorAllConnectionTriesFailed                     ChErrorType = 279  // ALL_CONNECTION_TRIES_FAILED\n\tChErrorNoAvailableData                              ChErrorType = 280  // NO_AVAILABLE_DATA\n\tChErrorDictionaryIsEmpty                            ChErrorType = 281  // DICTIONARY_IS_EMPTY\n\tChErrorIncorrectIndex                               ChErrorType = 282  // INCORRECT_INDEX\n\tChErrorUnknownDistributedProductMode                ChErrorType = 283  // UNKNOWN_DISTRIBUTED_PRODUCT_MODE\n\tChErrorWrongGlobalSubquery                          ChErrorType = 284  // WRONG_GLOBAL_SUBQUERY\n\tChErrorTooFewLiveReplicas                           ChErrorType = 285  // 
TOO_FEW_LIVE_REPLICAS\n\tChErrorUnsatisfiedQuorumForPreviousWrite            ChErrorType = 286  // UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE\n\tChErrorUnknownFormatVersion                         ChErrorType = 287  // UNKNOWN_FORMAT_VERSION\n\tChErrorDistributedInJoinSubqueryDenied              ChErrorType = 288  // DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED\n\tChErrorReplicaIsNotInQuorum                         ChErrorType = 289  // REPLICA_IS_NOT_IN_QUORUM\n\tChErrorLimitExceeded                                ChErrorType = 290  // LIMIT_EXCEEDED\n\tChErrorDatabaseAccessDenied                         ChErrorType = 291  // DATABASE_ACCESS_DENIED\n\tChErrorMongodbCannotAuthenticate                    ChErrorType = 293  // MONGODB_CANNOT_AUTHENTICATE\n\tChErrorInvalidBlockExtraInfo                        ChErrorType = 294  // INVALID_BLOCK_EXTRA_INFO\n\tChErrorReceivedEmptyData                            ChErrorType = 295  // RECEIVED_EMPTY_DATA\n\tChErrorNoRemoteShardFound                           ChErrorType = 296  // NO_REMOTE_SHARD_FOUND\n\tChErrorShardHasNoConnections                        ChErrorType = 297  // SHARD_HAS_NO_CONNECTIONS\n\tChErrorCannotPipe                                   ChErrorType = 298  // CANNOT_PIPE\n\tChErrorCannotFork                                   ChErrorType = 299  // CANNOT_FORK\n\tChErrorCannotDlsym                                  ChErrorType = 300  // CANNOT_DLSYM\n\tChErrorCannotCreateChildProcess                     ChErrorType = 301  // CANNOT_CREATE_CHILD_PROCESS\n\tChErrorChildWasNotExitedNormally                    ChErrorType = 302  // CHILD_WAS_NOT_EXITED_NORMALLY\n\tChErrorCannotSelect                                 ChErrorType = 303  // CANNOT_SELECT\n\tChErrorCannotWaitpid                                ChErrorType = 304  // CANNOT_WAITPID\n\tChErrorTableWasNotDropped                           ChErrorType = 305  // TABLE_WAS_NOT_DROPPED\n\tChErrorTooDeepRecursion                             ChErrorType = 306  // 
TOO_DEEP_RECURSION\n\tChErrorTooManyBytes                                 ChErrorType = 307  // TOO_MANY_BYTES\n\tChErrorUnexpectedNodeInZookeeper                    ChErrorType = 308  // UNEXPECTED_NODE_IN_ZOOKEEPER\n\tChErrorFunctionCannotHaveParameters                 ChErrorType = 309  // FUNCTION_CANNOT_HAVE_PARAMETERS\n\tChErrorInvalidShardWeight                           ChErrorType = 317  // INVALID_SHARD_WEIGHT\n\tChErrorInvalidConfigParameter                       ChErrorType = 318  // INVALID_CONFIG_PARAMETER\n\tChErrorUnknownStatusOfInsert                        ChErrorType = 319  // UNKNOWN_STATUS_OF_INSERT\n\tChErrorValueIsOutOfRangeOfDataType                  ChErrorType = 321  // VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE\n\tChErrorBarrierTimeout                               ChErrorType = 335  // BARRIER_TIMEOUT\n\tChErrorUnknownDatabaseEngine                        ChErrorType = 336  // UNKNOWN_DATABASE_ENGINE\n\tChErrorDdlGuardIsActive                             ChErrorType = 337  // DDL_GUARD_IS_ACTIVE\n\tChErrorUnfinished                                   ChErrorType = 341  // UNFINISHED\n\tChErrorMetadataMismatch                             ChErrorType = 342  // METADATA_MISMATCH\n\tChErrorSupportIsDisabled                            ChErrorType = 344  // SUPPORT_IS_DISABLED\n\tChErrorTableDiffersTooMuch                          ChErrorType = 345  // TABLE_DIFFERS_TOO_MUCH\n\tChErrorCannotConvertCharset                         ChErrorType = 346  // CANNOT_CONVERT_CHARSET\n\tChErrorCannotLoadConfig                             ChErrorType = 347  // CANNOT_LOAD_CONFIG\n\tChErrorCannotInsertNullInOrdinaryColumn             ChErrorType = 349  // CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN\n\tChErrorIncompatibleSourceTables                     ChErrorType = 350  // INCOMPATIBLE_SOURCE_TABLES\n\tChErrorAmbiguousTableName                           ChErrorType = 351  // AMBIGUOUS_TABLE_NAME\n\tChErrorAmbiguousColumnName                          ChErrorType = 352  
// AMBIGUOUS_COLUMN_NAME\n\tChErrorIndexOfPositionalArgumentIsOutOfRange        ChErrorType = 353  // INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE\n\tChErrorZlibInflateFailed                            ChErrorType = 354  // ZLIB_INFLATE_FAILED\n\tChErrorZlibDeflateFailed                            ChErrorType = 355  // ZLIB_DEFLATE_FAILED\n\tChErrorBadLambda                                    ChErrorType = 356  // BAD_LAMBDA\n\tChErrorReservedIdentifierName                       ChErrorType = 357  // RESERVED_IDENTIFIER_NAME\n\tChErrorIntoOutfileNotAllowed                        ChErrorType = 358  // INTO_OUTFILE_NOT_ALLOWED\n\tChErrorTableSizeExceedsMaxDropSizeLimit             ChErrorType = 359  // TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT\n\tChErrorCannotCreateCharsetConverter                 ChErrorType = 360  // CANNOT_CREATE_CHARSET_CONVERTER\n\tChErrorSeekPositionOutOfBound                       ChErrorType = 361  // SEEK_POSITION_OUT_OF_BOUND\n\tChErrorCurrentWriteBufferIsExhausted                ChErrorType = 362  // CURRENT_WRITE_BUFFER_IS_EXHAUSTED\n\tChErrorCannotCreateIoBuffer                         ChErrorType = 363  // CANNOT_CREATE_IO_BUFFER\n\tChErrorReceivedErrorTooManyRequests                 ChErrorType = 364  // RECEIVED_ERROR_TOO_MANY_REQUESTS\n\tChErrorSizesOfNestedColumnsAreInconsistent          ChErrorType = 366  // SIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENT\n\tChErrorTooManyFetches                               ChErrorType = 367  // TOO_MANY_FETCHES\n\tChErrorAllReplicasAreStale                          ChErrorType = 369  // ALL_REPLICAS_ARE_STALE\n\tChErrorDataTypeCannotBeUsedInTables                 ChErrorType = 370  // DATA_TYPE_CANNOT_BE_USED_IN_TABLES\n\tChErrorInconsistentClusterDefinition                ChErrorType = 371  // INCONSISTENT_CLUSTER_DEFINITION\n\tChErrorSessionNotFound                              ChErrorType = 372  // SESSION_NOT_FOUND\n\tChErrorSessionIsLocked                              ChErrorType = 373  // 
SESSION_IS_LOCKED\n\tChErrorInvalidSessionTimeout                        ChErrorType = 374  // INVALID_SESSION_TIMEOUT\n\tChErrorCannotDlopen                                 ChErrorType = 375  // CANNOT_DLOPEN\n\tChErrorCannotParseUUID                              ChErrorType = 376  // CANNOT_PARSE_UUID\n\tChErrorIllegalSyntaxForDataType                     ChErrorType = 377  // ILLEGAL_SYNTAX_FOR_DATA_TYPE\n\tChErrorDataTypeCannotHaveArguments                  ChErrorType = 378  // DATA_TYPE_CANNOT_HAVE_ARGUMENTS\n\tChErrorUnknownStatusOfDistributedDdlTask            ChErrorType = 379  // UNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASK\n\tChErrorCannotKill                                   ChErrorType = 380  // CANNOT_KILL\n\tChErrorHTTPLengthRequired                           ChErrorType = 381  // HTTP_LENGTH_REQUIRED\n\tChErrorCannotLoadCatboostModel                      ChErrorType = 382  // CANNOT_LOAD_CATBOOST_MODEL\n\tChErrorCannotApplyCatboostModel                     ChErrorType = 383  // CANNOT_APPLY_CATBOOST_MODEL\n\tChErrorPartIsTemporarilyLocked                      ChErrorType = 384  // PART_IS_TEMPORARILY_LOCKED\n\tChErrorMultipleStreamsRequired                      ChErrorType = 385  // MULTIPLE_STREAMS_REQUIRED\n\tChErrorNoCommonType                                 ChErrorType = 386  // NO_COMMON_TYPE\n\tChErrorDictionaryAlreadyExists                      ChErrorType = 387  // DICTIONARY_ALREADY_EXISTS\n\tChErrorCannotAssignOptimize                         ChErrorType = 388  // CANNOT_ASSIGN_OPTIMIZE\n\tChErrorInsertWasDeduplicated                        ChErrorType = 389  // INSERT_WAS_DEDUPLICATED\n\tChErrorCannotGetCreateTableQuery                    ChErrorType = 390  // CANNOT_GET_CREATE_TABLE_QUERY\n\tChErrorExternalLibraryError                         ChErrorType = 391  // EXTERNAL_LIBRARY_ERROR\n\tChErrorQueryIsProhibited                            ChErrorType = 392  // QUERY_IS_PROHIBITED\n\tChErrorThereIsNoQuery                               
ChErrorType = 393  // THERE_IS_NO_QUERY\n\tChErrorQueryWasCancelled                            ChErrorType = 394  // QUERY_WAS_CANCELED\n\tChErrorFunctionThrowIfValueIsNonZero                ChErrorType = 395  // FUNCTION_THROW_IF_VALUE_IS_NON_ZERO\n\tChErrorTooManyRowsOrBytes                           ChErrorType = 396  // TOO_MANY_ROWS_OR_BYTES\n\tChErrorQueryIsNotSupportedInMaterializedView        ChErrorType = 397  // QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW\n\tChErrorUnknownMutationCommand                       ChErrorType = 398  // UNKNOWN_MUTATION_COMMAND\n\tChErrorFormatIsNotSuitableForOutput                 ChErrorType = 399  // FORMAT_IS_NOT_SUITABLE_FOR_OUTPUT\n\tChErrorCannotStat                                   ChErrorType = 400  // CANNOT_STAT\n\tChErrorFeatureIsNotEnabledAtBuildTime               ChErrorType = 401  // FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME\n\tChErrorCannotIosetup                                ChErrorType = 402  // CANNOT_IOSETUP\n\tChErrorInvalidJoinOnExpression                      ChErrorType = 403  // INVALID_JOIN_ON_EXPRESSION\n\tChErrorBadOdbcConnectionString                      ChErrorType = 404  // BAD_ODBC_CONNECTION_STRING\n\tChErrorPartitionSizeExceedsMaxDropSizeLimit         ChErrorType = 405  // PARTITION_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT\n\tChErrorTopAndLimitTogether                          ChErrorType = 406  // TOP_AND_LIMIT_TOGETHER\n\tChErrorDecimalOverflow                              ChErrorType = 407  // DECIMAL_OVERFLOW\n\tChErrorBadRequestParameter                          ChErrorType = 408  // BAD_REQUEST_PARAMETER\n\tChErrorExternalExecutableNotFound                   ChErrorType = 409  // EXTERNAL_EXECUTABLE_NOT_FOUND\n\tChErrorExternalServerIsNotResponding                ChErrorType = 410  // EXTERNAL_SERVER_IS_NOT_RESPONDING\n\tChErrorPthreadError                                 ChErrorType = 411  // PTHREAD_ERROR\n\tChErrorNetlinkError                                 ChErrorType = 412  // 
NETLINK_ERROR\n\tChErrorCannotSetSignalHandler                       ChErrorType = 413  // CANNOT_SET_SIGNAL_HANDLER\n\tChErrorAllReplicasLost                              ChErrorType = 415  // ALL_REPLICAS_LOST\n\tChErrorReplicaStatusChanged                         ChErrorType = 416  // REPLICA_STATUS_CHANGED\n\tChErrorExpectedAllOrAny                             ChErrorType = 417  // EXPECTED_ALL_OR_ANY\n\tChErrorUnknownJoin                                  ChErrorType = 418  // UNKNOWN_JOIN\n\tChErrorMultipleAssignmentsToColumn                  ChErrorType = 419  // MULTIPLE_ASSIGNMENTS_TO_COLUMN\n\tChErrorCannotUpdateColumn                           ChErrorType = 420  // CANNOT_UPDATE_COLUMN\n\tChErrorCannotAddDifferentAggregateStates            ChErrorType = 421  // CANNOT_ADD_DIFFERENT_AGGREGATE_STATES\n\tChErrorUnsupportedURIScheme                         ChErrorType = 422  // UNSUPPORTED_URI_SCHEME\n\tChErrorCannotGettimeofday                           ChErrorType = 423  // CANNOT_GETTIMEOFDAY\n\tChErrorCannotLink                                   ChErrorType = 424  // CANNOT_LINK\n\tChErrorSystemError                                  ChErrorType = 425  // SYSTEM_ERROR\n\tChErrorCannotCompileRegexp                          ChErrorType = 427  // CANNOT_COMPILE_REGEXP\n\tChErrorUnknownLogLevel                              ChErrorType = 428  // UNKNOWN_LOG_LEVEL\n\tChErrorFailedToGetpwuid                             ChErrorType = 429  // FAILED_TO_GETPWUID\n\tChErrorMismatchingUsersForProcessAndData            ChErrorType = 430  // MISMATCHING_USERS_FOR_PROCESS_AND_DATA\n\tChErrorIllegalSyntaxForCodecType                    ChErrorType = 431  // ILLEGAL_SYNTAX_FOR_CODEC_TYPE\n\tChErrorUnknownCodec                                 ChErrorType = 432  // UNKNOWN_CODEC\n\tChErrorIllegalCodecParameter                        ChErrorType = 433  // ILLEGAL_CODEC_PARAMETER\n\tChErrorCannotParseProtobufSchema                    ChErrorType = 434  // 
CANNOT_PARSE_PROTOBUF_SCHEMA\n\tChErrorNoColumnSerializedToRequiredProtobufField    ChErrorType = 435  // NO_COLUMN_SERIALIZED_TO_REQUIRED_PROTOBUF_FIELD\n\tChErrorProtobufBadCast                              ChErrorType = 436  // PROTOBUF_BAD_CAST\n\tChErrorProtobufFieldNotRepeated                     ChErrorType = 437  // PROTOBUF_FIELD_NOT_REPEATED\n\tChErrorDataTypeCannotBePromoted                     ChErrorType = 438  // DATA_TYPE_CANNOT_BE_PROMOTED\n\tChErrorCannotScheduleTask                           ChErrorType = 439  // CANNOT_SCHEDULE_TASK\n\tChErrorInvalidLimitExpression                       ChErrorType = 440  // INVALID_LIMIT_EXPRESSION\n\tChErrorCannotParseDomainValueFromString             ChErrorType = 441  // CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING\n\tChErrorBadDatabaseForTemporaryTable                 ChErrorType = 442  // BAD_DATABASE_FOR_TEMPORARY_TABLE\n\tChErrorNoColumnsSerializedToProtobufFields          ChErrorType = 443  // NO_COLUMNS_SERIALIZED_TO_PROTOBUF_FIELDS\n\tChErrorUnknownProtobufFormat                        ChErrorType = 444  // UNKNOWN_PROTOBUF_FORMAT\n\tChErrorCannotMprotect                               ChErrorType = 445  // CANNOT_MPROTECT\n\tChErrorFunctionNotAllowed                           ChErrorType = 446  // FUNCTION_NOT_ALLOWED\n\tChErrorHyperscanCannotScanText                      ChErrorType = 447  // HYPERSCAN_CANNOT_SCAN_TEXT\n\tChErrorBrotliReadFailed                             ChErrorType = 448  // BROTLI_READ_FAILED\n\tChErrorBrotliWriteFailed                            ChErrorType = 449  // BROTLI_WRITE_FAILED\n\tChErrorBadTTLExpression                             ChErrorType = 450  // BAD_TTL_EXPRESSION\n\tChErrorBadTTLFile                                   ChErrorType = 451  // BAD_TTL_FILE\n\tChErrorSettingConstraintViolation                   ChErrorType = 452  // SETTING_CONSTRAINT_VIOLATION\n\tChErrorMysqlClientInsufficientCapabilities          ChErrorType = 453  // 
MYSQL_CLIENT_INSUFFICIENT_CAPABILITIES\n\tChErrorOpensslError                                 ChErrorType = 454  // OPENSSL_ERROR\n\tChErrorSuspiciousTypeForLowCardinality              ChErrorType = 455  // SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY\n\tChErrorUnknownQueryParameter                        ChErrorType = 456  // UNKNOWN_QUERY_PARAMETER\n\tChErrorBadQueryParameter                            ChErrorType = 457  // BAD_QUERY_PARAMETER\n\tChErrorCannotUnlink                                 ChErrorType = 458  // CANNOT_UNLINK\n\tChErrorCannotSetThreadPriority                      ChErrorType = 459  // CANNOT_SET_THREAD_PRIORITY\n\tChErrorCannotCreateTimer                            ChErrorType = 460  // CANNOT_CREATE_TIMER\n\tChErrorCannotSetTimerPeriod                         ChErrorType = 461  // CANNOT_SET_TIMER_PERIOD\n\tChErrorCannotDeleteTimer                            ChErrorType = 462  // CANNOT_DELETE_TIMER\n\tChErrorCannotFcntl                                  ChErrorType = 463  // CANNOT_FCNTL\n\tChErrorCannotParseElf                               ChErrorType = 464  // CANNOT_PARSE_ELF\n\tChErrorCannotParseDwarf                             ChErrorType = 465  // CANNOT_PARSE_DWARF\n\tChErrorInsecurePath                                 ChErrorType = 466  // INSECURE_PATH\n\tChErrorCannotParseBool                              ChErrorType = 467  // CANNOT_PARSE_BOOL\n\tChErrorCannotPthreadAttr                            ChErrorType = 468  // CANNOT_PTHREAD_ATTR\n\tChErrorViolatedConstraint                           ChErrorType = 469  // VIOLATED_CONSTRAINT\n\tChErrorQueryIsNotSupportedInLiveView                ChErrorType = 470  // QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW\n\tChErrorInvalidSettingValue                          ChErrorType = 471  // INVALID_SETTING_VALUE\n\tChErrorReadonlySetting                              ChErrorType = 472  // READONLY_SETTING\n\tChErrorDeadlockAvoided                              ChErrorType = 473  // 
DEADLOCK_AVOIDED\n\tChErrorInvalidTemplateFormat                        ChErrorType = 474  // INVALID_TEMPLATE_FORMAT\n\tChErrorInvalidWithFillExpression                    ChErrorType = 475  // INVALID_WITH_FILL_EXPRESSION\n\tChErrorWithTiesWithoutOrderBy                       ChErrorType = 476  // WITH_TIES_WITHOUT_ORDER_BY\n\tChErrorInvalidUsageOfInput                          ChErrorType = 477  // INVALID_USAGE_OF_INPUT\n\tChErrorUnknownPolicy                                ChErrorType = 478  // UNKNOWN_POLICY\n\tChErrorUnknownDisk                                  ChErrorType = 479  // UNKNOWN_DISK\n\tChErrorUnknownProtocol                              ChErrorType = 480  // UNKNOWN_PROTOCOL\n\tChErrorPathAccessDenied                             ChErrorType = 481  // PATH_ACCESS_DENIED\n\tChErrorDictionaryAccessDenied                       ChErrorType = 482  // DICTIONARY_ACCESS_DENIED\n\tChErrorTooManyRedirects                             ChErrorType = 483  // TOO_MANY_REDIRECTS\n\tChErrorInternalRedisError                           ChErrorType = 484  // INTERNAL_REDIS_ERROR\n\tChErrorScalarAlreadyExists                          ChErrorType = 485  // SCALAR_ALREADY_EXISTS\n\tChErrorCannotGetCreateDictionaryQuery               ChErrorType = 487  // CANNOT_GET_CREATE_DICTIONARY_QUERY\n\tChErrorUnknownDictionary                            ChErrorType = 488  // UNKNOWN_DICTIONARY\n\tChErrorIncorrectDictionaryDefinition                ChErrorType = 489  // INCORRECT_DICTIONARY_DEFINITION\n\tChErrorCannotFormatDatetime                         ChErrorType = 490  // CANNOT_FORMAT_DATETIME\n\tChErrorUnacceptableURL                              ChErrorType = 491  // UNACCEPTABLE_URL\n\tChErrorAccessEntityNotFound                         ChErrorType = 492  // ACCESS_ENTITY_NOT_FOUND\n\tChErrorAccessEntityAlreadyExists                    ChErrorType = 493  // ACCESS_ENTITY_ALREADY_EXISTS\n\tChErrorAccessEntityFoundDuplicates                  ChErrorType = 494  // 
ACCESS_ENTITY_FOUND_DUPLICATES\n\tChErrorAccessStorageReadonly                        ChErrorType = 495  // ACCESS_STORAGE_READONLY\n\tChErrorQuotaRequiresClientKey                       ChErrorType = 496  // QUOTA_REQUIRES_CLIENT_KEY\n\tChErrorAccessDenied                                 ChErrorType = 497  // ACCESS_DENIED\n\tChErrorLimitByWithTiesIsNotSupported                ChErrorType = 498  // LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED\n\tChErrorS3Error                                      ChErrorType = 499  // S3_ERROR\n\tChErrorAzureBlobStorageError                        ChErrorType = 500  // AZURE_BLOB_STORAGE_ERROR\n\tChErrorCannotCreateDatabase                         ChErrorType = 501  // CANNOT_CREATE_DATABASE\n\tChErrorCannotSigqueue                               ChErrorType = 502  // CANNOT_SIGQUEUE\n\tChErrorAggregateFunctionThrow                       ChErrorType = 503  // AGGREGATE_FUNCTION_THROW\n\tChErrorFileAlreadyExists                            ChErrorType = 504  // FILE_ALREADY_EXISTS\n\tChErrorCannotDeleteDirectory                        ChErrorType = 505  // CANNOT_DELETE_DIRECTORY\n\tChErrorUnexpectedErrorCode                          ChErrorType = 506  // UNEXPECTED_ERROR_CODE\n\tChErrorUnableToSkipUnusedShards                     ChErrorType = 507  // UNABLE_TO_SKIP_UNUSED_SHARDS\n\tChErrorUnknownAccessType                            ChErrorType = 508  // UNKNOWN_ACCESS_TYPE\n\tChErrorInvalidGrant                                 ChErrorType = 509  // INVALID_GRANT\n\tChErrorCacheDictionaryUpdateFail                    ChErrorType = 510  // CACHE_DICTIONARY_UPDATE_FAIL\n\tChErrorUnknownRole                                  ChErrorType = 511  // UNKNOWN_ROLE\n\tChErrorSetNonGrantedRole                            ChErrorType = 512  // SET_NON_GRANTED_ROLE\n\tChErrorUnknownPartType                              ChErrorType = 513  // UNKNOWN_PART_TYPE\n\tChErrorAccessStorageForInsertionNotFound            ChErrorType = 514  // 
ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND\n\tChErrorIncorrectAccessEntityDefinition              ChErrorType = 515  // INCORRECT_ACCESS_ENTITY_DEFINITION\n\tChErrorAuthenticationFailed                         ChErrorType = 516  // AUTHENTICATION_FAILED\n\tChErrorCannotAssignAlter                            ChErrorType = 517  // CANNOT_ASSIGN_ALTER\n\tChErrorCannotCommitOffset                           ChErrorType = 518  // CANNOT_COMMIT_OFFSET\n\tChErrorNoRemoteShardAvailable                       ChErrorType = 519  // NO_REMOTE_SHARD_AVAILABLE\n\tChErrorCannotDetachDictionaryAsTable                ChErrorType = 520  // CANNOT_DETACH_DICTIONARY_AS_TABLE\n\tChErrorAtomicRenameFail                             ChErrorType = 521  // ATOMIC_RENAME_FAIL\n\tChErrorUnknownRowPolicy                             ChErrorType = 523  // UNKNOWN_ROW_POLICY\n\tChErrorAlterOfColumnIsForbidden                     ChErrorType = 524  // ALTER_OF_COLUMN_IS_FORBIDDEN\n\tChErrorIncorrectDiskIndex                           ChErrorType = 525  // INCORRECT_DISK_INDEX\n\tChErrorNoSuitableFunctionImplementation             ChErrorType = 527  // NO_SUITABLE_FUNCTION_IMPLEMENTATION\n\tChErrorCassandraInternalError                       ChErrorType = 528  // CASSANDRA_INTERNAL_ERROR\n\tChErrorNotALeader                                   ChErrorType = 529  // NOT_A_LEADER\n\tChErrorCannotConnectRabbitmq                        ChErrorType = 530  // CANNOT_CONNECT_RABBITMQ\n\tChErrorCannotFstat                                  ChErrorType = 531  // CANNOT_FSTAT\n\tChErrorLdapError                                    ChErrorType = 532  // LDAP_ERROR\n\tChErrorInconsistentReservations                     ChErrorType = 533  // INCONSISTENT_RESERVATIONS\n\tChErrorNoReservationsProvided                       ChErrorType = 534  // NO_RESERVATIONS_PROVIDED\n\tChErrorUnknownRaidType                              ChErrorType = 535  // UNKNOWN_RAID_TYPE\n\tChErrorCannotRestoreFromFieldDump                   
ChErrorType = 536  // CANNOT_RESTORE_FROM_FIELD_DUMP\n\tChErrorIllegalMysqlVariable                         ChErrorType = 537  // ILLEGAL_MYSQL_VARIABLE\n\tChErrorMysqlSyntaxError                             ChErrorType = 538  // MYSQL_SYNTAX_ERROR\n\tChErrorCannotBindRabbitmqExchange                   ChErrorType = 539  // CANNOT_BIND_RABBITMQ_EXCHANGE\n\tChErrorCannotDeclareRabbitmqExchange                ChErrorType = 540  // CANNOT_DECLARE_RABBITMQ_EXCHANGE\n\tChErrorCannotCreateRabbitmqQueueBinding             ChErrorType = 541  // CANNOT_CREATE_RABBITMQ_QUEUE_BINDING\n\tChErrorCannotRemoveRabbitmqExchange                 ChErrorType = 542  // CANNOT_REMOVE_RABBITMQ_EXCHANGE\n\tChErrorUnknownMysqlDatatypesSupportLevel            ChErrorType = 543  // UNKNOWN_MYSQL_DATATYPES_SUPPORT_LEVEL\n\tChErrorRowAndRowsTogether                           ChErrorType = 544  // ROW_AND_ROWS_TOGETHER\n\tChErrorFirstAndNextTogether                         ChErrorType = 545  // FIRST_AND_NEXT_TOGETHER\n\tChErrorNoRowDelimiter                               ChErrorType = 546  // NO_ROW_DELIMITER\n\tChErrorInvalidRaidType                              ChErrorType = 547  // INVALID_RAID_TYPE\n\tChErrorUnknownVolume                                ChErrorType = 548  // UNKNOWN_VOLUME\n\tChErrorDataTypeCannotBeUsedInKey                    ChErrorType = 549  // DATA_TYPE_CANNOT_BE_USED_IN_KEY\n\tChErrorConditionalTreeParentNotFound                ChErrorType = 550  // CONDITIONAL_TREE_PARENT_NOT_FOUND\n\tChErrorIllegalProjectionManipulator                 ChErrorType = 551  // ILLEGAL_PROJECTION_MANIPULATOR\n\tChErrorUnrecognizedArguments                        ChErrorType = 552  // UNRECOGNIZED_ARGUMENTS\n\tChErrorLzmaStreamEncoderFailed                      ChErrorType = 553  // LZMA_STREAM_ENCODER_FAILED\n\tChErrorLzmaStreamDecoderFailed                      ChErrorType = 554  // LZMA_STREAM_DECODER_FAILED\n\tChErrorRocksdbError                                 ChErrorType = 555  // 
ROCKSDB_ERROR\n\tChErrorSyncMysqlUserAccessErro                      ChErrorType = 556  // SYNC_MYSQL_USER_ACCESS_ERRO\n\tChErrorUnknownUnion                                 ChErrorType = 557  // UNKNOWN_UNION\n\tChErrorExpectedAllOrDistinct                        ChErrorType = 558  // EXPECTED_ALL_OR_DISTINCT\n\tChErrorInvalidGrpcQueryInfo                         ChErrorType = 559  // INVALID_GRPC_QUERY_INFO\n\tChErrorZstdEncoderFailed                            ChErrorType = 560  // ZSTD_ENCODER_FAILED\n\tChErrorZstdDecoderFailed                            ChErrorType = 561  // ZSTD_DECODER_FAILED\n\tChErrorTldListNotFound                              ChErrorType = 562  // TLD_LIST_NOT_FOUND\n\tChErrorCannotReadMapFromText                        ChErrorType = 563  // CANNOT_READ_MAP_FROM_TEXT\n\tChErrorInterserverSchemeDoesntMatch                 ChErrorType = 564  // INTERSERVER_SCHEME_DOESNT_MATCH\n\tChErrorTooManyPartitions                            ChErrorType = 565  // TOO_MANY_PARTITIONS\n\tChErrorCannotRmdir                                  ChErrorType = 566  // CANNOT_RMDIR\n\tChErrorDuplicatedPartUuids                          ChErrorType = 567  // DUPLICATED_PART_UUIDS\n\tChErrorRaftError                                    ChErrorType = 568  // RAFT_ERROR\n\tChErrorMultipleColumnsSerializedToSameProtobufField ChErrorType = 569  // MULTIPLE_COLUMNS_SERIALIZED_TO_SAME_PROTOBUF_FIELD\n\tChErrorDataTypeIncompatibleWithProtobufField        ChErrorType = 570  // DATA_TYPE_INCOMPATIBLE_WITH_PROTOBUF_FIELD\n\tChErrorDatabaseReplicationFailed                    ChErrorType = 571  // DATABASE_REPLICATION_FAILED\n\tChErrorTooManyQueryPlanOptimizations                ChErrorType = 572  // TOO_MANY_QUERY_PLAN_OPTIMIZATIONS\n\tChErrorEpollError                                   ChErrorType = 573  // EPOLL_ERROR\n\tChErrorDistributedTooManyPendingBytes               ChErrorType = 574  // DISTRIBUTED_TOO_MANY_PENDING_BYTES\n\tChErrorUnknownSnapshot                     
         ChErrorType = 575  // UNKNOWN_SNAPSHOT\n\tChErrorKerberosError                                ChErrorType = 576  // KERBEROS_ERROR\n\tChErrorInvalidShardID                               ChErrorType = 577  // INVALID_SHARD_ID\n\tChErrorInvalidFormatInsertQueryWithData             ChErrorType = 578  // INVALID_FORMAT_INSERT_QUERY_WITH_DATA\n\tChErrorIncorrectPartType                            ChErrorType = 579  // INCORRECT_PART_TYPE\n\tChErrorCannotSetRoundingMode                        ChErrorType = 580  // CANNOT_SET_ROUNDING_MODE\n\tChErrorTooLargeDistributedDepth                     ChErrorType = 581  // TOO_LARGE_DISTRIBUTED_DEPTH\n\tChErrorNoSuchProjectionInTable                      ChErrorType = 582  // NO_SUCH_PROJECTION_IN_TABLE\n\tChErrorIllegalProjection                            ChErrorType = 583  // ILLEGAL_PROJECTION\n\tChErrorProjectionNotUsed                            ChErrorType = 584  // PROJECTION_NOT_USED\n\tChErrorCannotParseYaml                              ChErrorType = 585  // CANNOT_PARSE_YAML\n\tChErrorCannotCreateFile                             ChErrorType = 586  // CANNOT_CREATE_FILE\n\tChErrorConcurrentAccessNotSupported                 ChErrorType = 587  // CONCURRENT_ACCESS_NOT_SUPPORTED\n\tChErrorDistributedBrokenBatchInfo                   ChErrorType = 588  // DISTRIBUTED_BROKEN_BATCH_INFO\n\tChErrorDistributedBrokenBatchFiles                  ChErrorType = 589  // DISTRIBUTED_BROKEN_BATCH_FILES\n\tChErrorCannotSysconf                                ChErrorType = 590  // CANNOT_SYSCONF\n\tChErrorSqliteEngineError                            ChErrorType = 591  // SQLITE_ENGINE_ERROR\n\tChErrorDataEncryptionError                          ChErrorType = 592  // DATA_ENCRYPTION_ERROR\n\tChErrorZeroCopyReplicationError                     ChErrorType = 593  // ZERO_COPY_REPLICATION_ERROR\n\tChErrorBzip2StreamDecoderFailed                     ChErrorType = 594  // BZIP2_STREAM_DECODER_FAILED\n\tChErrorBzip2StreamEncoderFailed  
                   ChErrorType = 595  // BZIP2_STREAM_ENCODER_FAILED\n\tChErrorIntersectOrExceptResultStructuresMismatch    ChErrorType = 596  // INTERSECT_OR_EXCEPT_RESULT_STRUCTURES_MISMATCH\n\tChErrorNoSuchErrorCode                              ChErrorType = 597  // NO_SUCH_ERROR_CODE\n\tChErrorBackupAlreadyExists                          ChErrorType = 598  // BACKUP_ALREADY_EXISTS\n\tChErrorBackupNotFound                               ChErrorType = 599  // BACKUP_NOT_FOUND\n\tChErrorBackupVersionNotSupported                    ChErrorType = 600  // BACKUP_VERSION_NOT_SUPPORTED\n\tChErrorBackupDamaged                                ChErrorType = 601  // BACKUP_DAMAGED\n\tChErrorNoBaseBackup                                 ChErrorType = 602  // NO_BASE_BACKUP\n\tChErrorWrongBaseBackup                              ChErrorType = 603  // WRONG_BASE_BACKUP\n\tChErrorBackupEntryAlreadyExists                     ChErrorType = 604  // BACKUP_ENTRY_ALREADY_EXISTS\n\tChErrorBackupEntryNotFound                          ChErrorType = 605  // BACKUP_ENTRY_NOT_FOUND\n\tChErrorBackupIsEmpty                                ChErrorType = 606  // BACKUP_IS_EMPTY\n\tChErrorBackupElementDuplicate                       ChErrorType = 607  // BACKUP_ELEMENT_DUPLICATE\n\tChErrorCannotRestoreTable                           ChErrorType = 608  // CANNOT_RESTORE_TABLE\n\tChErrorFunctionAlreadyExists                        ChErrorType = 609  // FUNCTION_ALREADY_EXISTS\n\tChErrorCannotDropFunction                           ChErrorType = 610  // CANNOT_DROP_FUNCTION\n\tChErrorCannotCreateRecursiveFunction                ChErrorType = 611  // CANNOT_CREATE_RECURSIVE_FUNCTION\n\tChErrorObjectAlreadyStoredOnDisk                    ChErrorType = 612  // OBJECT_ALREADY_STORED_ON_DISK\n\tChErrorObjectWasNotStoredOnDisk                     ChErrorType = 613  // OBJECT_WAS_NOT_STORED_ON_DISK\n\tChErrorPostgresqlConnectionFailure                  ChErrorType = 614  // 
POSTGRESQL_CONNECTION_FAILURE\n\tChErrorCannotAdvise                                 ChErrorType = 615  // CANNOT_ADVISE\n\tChErrorUnknownReadMethod                            ChErrorType = 616  // UNKNOWN_READ_METHOD\n\tChErrorLz4EncoderFailed                             ChErrorType = 617  // LZ4_ENCODER_FAILED\n\tChErrorLz4DecoderFailed                             ChErrorType = 618  // LZ4_DECODER_FAILED\n\tChErrorPostgresqlReplicationInternalError           ChErrorType = 619  // POSTGRESQL_REPLICATION_INTERNAL_ERROR\n\tChErrorQueryNotAllowed                              ChErrorType = 620  // QUERY_NOT_ALLOWED\n\tChErrorCannotNormalizeString                        ChErrorType = 621  // CANNOT_NORMALIZE_STRING\n\tChErrorCannotParseCapnProtoSchema                   ChErrorType = 622  // CANNOT_PARSE_CAPN_PROTO_SCHEMA\n\tChErrorCapnProtoBadCast                             ChErrorType = 623  // CAPN_PROTO_BAD_CAST\n\tChErrorBadFileType                                  ChErrorType = 624  // BAD_FILE_TYPE\n\tChErrorIoSetupError                                 ChErrorType = 625  // IO_SETUP_ERROR\n\tChErrorCannotSkipUnknownField                       ChErrorType = 626  // CANNOT_SKIP_UNKNOWN_FIELD\n\tChErrorBackupEngineNotFound                         ChErrorType = 627  // BACKUP_ENGINE_NOT_FOUND\n\tChErrorOffsetFetchWithoutOrderBy                    ChErrorType = 628  // OFFSET_FETCH_WITHOUT_ORDER_BY\n\tChErrorHTTPRangeNotSatisfiable                      ChErrorType = 629  // HTTP_RANGE_NOT_SATISFIABLE\n\tChErrorHaveDependentObjects                         ChErrorType = 630  // HAVE_DEPENDENT_OBJECTS\n\tChErrorUnknownFileSize                              ChErrorType = 631  // UNKNOWN_FILE_SIZE\n\tChErrorUnexpectedDataAfterParsedValue               ChErrorType = 632  // UNEXPECTED_DATA_AFTER_PARSED_VALUE\n\tChErrorQueryIsNotSupportedInWindowView              ChErrorType = 633  // QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW\n\tChErrorMongodbError                                
 ChErrorType = 634  // MONGODB_ERROR\n\tChErrorCannotPoll                                   ChErrorType = 635  // CANNOT_POLL\n\tChErrorCannotExtractTableStructure                  ChErrorType = 636  // CANNOT_EXTRACT_TABLE_STRUCTURE\n\tChErrorInvalidTableOverride                         ChErrorType = 637  // INVALID_TABLE_OVERRIDE\n\tChErrorSnappyUncompressFailed                       ChErrorType = 638  // SNAPPY_UNCOMPRESS_FAILED\n\tChErrorSnappyCompressFailed                         ChErrorType = 639  // SNAPPY_COMPRESS_FAILED\n\tChErrorNoHivemetastore                              ChErrorType = 640  // NO_HIVEMETASTORE\n\tChErrorCannotAppendToFile                           ChErrorType = 641  // CANNOT_APPEND_TO_FILE\n\tChErrorCannotPackArchive                            ChErrorType = 642  // CANNOT_PACK_ARCHIVE\n\tChErrorCannotUnpackArchive                          ChErrorType = 643  // CANNOT_UNPACK_ARCHIVE\n\tChErrorKeeperException                              ChErrorType = 999  // KEEPER_EXCEPTION\n\tChErrorPocoException                                ChErrorType = 1000 // POCO_EXCEPTION\n\tChErrorStdException                                 ChErrorType = 1001 // STD_EXCEPTION\n\tChErrorUnknownException                             ChErrorType = 1002 // UNKNOWN_EXCEPTION\n)\n"
  },
  {
    "path": "errors_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestChErrorReadError(t *testing.T) {\n\tstartValidReader := 14\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"ChError: read code\",\n\t\t\twantErr:     \"ChError: read code\",\n\t\t\tnumberValid: startValidReader,\n\t\t}, {\n\t\t\tname:        \"ChError: read name\",\n\t\t\twantErr:     \"ChError: read name\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t}, {\n\t\t\tname:        \"ChError: read message\",\n\t\t\twantErr:     \"ChError: read message\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t}, {\n\t\t\tname:        \"ChError: read StackTrace\",\n\t\t\twantErr:     \"ChError: read StackTrace\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t}, {\n\t\t\tname:        \"ChError: read hasNested\",\n\t\t\twantErr:     \"ChError: read hasNested\",\n\t\t\tnumberValid: startValidReader + 8,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\terr = c.Exec(context.Background(), \"SELECT * FROM invalid_table LIMIT 5;\")\n\t\t\trequire.Error(t, err)\n\t\t\treadErr, ok := err.(*readError)\n\t\t\trequire.True(t, ok)\n\t\t\trequire.Equal(t, readErr.msg, tt.wantErr)\n\t\t\trequire.EqualError(t, readErr.Unwrap(), \"timeout\")\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc NewParseConfigError(conn, msg string, err error) 
error {\n\treturn &parseConfigError{\n\t\tconnString: conn,\n\t\tmsg:        msg,\n\t\terr:        err,\n\t}\n}\n\nfunc TestConfigError(t *testing.T) {\n\ttests := []struct {\n\t\tname        string\n\t\terr         error\n\t\texpectedMsg string\n\t}{\n\t\t{\n\t\t\tname:        \"url with password\",\n\t\t\terr:         NewParseConfigError(\"clickhouse://foo:password@host\", \"msg\", nil),\n\t\t\texpectedMsg: \"cannot parse `clickhouse://foo:xxxxx@host`: msg\",\n\t\t},\n\t\t{\n\t\t\tname:        \"dsn with password unquoted\",\n\t\t\terr:         NewParseConfigError(\"host=host password=password user=user\", \"msg\", nil),\n\t\t\texpectedMsg: \"cannot parse `host=host password=xxxxx user=user`: msg\",\n\t\t},\n\t\t{\n\t\t\tname:        \"dsn with password quoted\",\n\t\t\terr:         NewParseConfigError(\"host=host password='pass word' user=user\", \"msg\", nil),\n\t\t\texpectedMsg: \"cannot parse `host=host password=xxxxx user=user`: msg\",\n\t\t},\n\t\t{\n\t\t\tname:        \"weird url\",\n\t\t\terr:         NewParseConfigError(\"clickhouse://foo::pasword@host:1:\", \"msg\", nil),\n\t\t\texpectedMsg: \"cannot parse `clickhouse://foo:xxxxx@host:1:`: msg\",\n\t\t},\n\t\t{\n\t\t\tname:        \"weird url with slash in password\",\n\t\t\terr:         NewParseConfigError(\"clickhouse://user:pass/word@host:5432/db_name\", \"msg\", nil),\n\t\t\texpectedMsg: \"cannot parse `clickhouse://user:xxxxxx@host:5432/db_name`: msg\",\n\t\t},\n\t\t{\n\t\t\tname:        \"url without password\",\n\t\t\terr:         NewParseConfigError(\"clickhouse://other@host/db\", \"msg\", nil),\n\t\t\texpectedMsg: \"cannot parse `clickhouse://other@host/db`: msg\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\ttt := tt\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tassert.EqualError(t, tt.err, tt.expectedMsg)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/vahid-sohrabloo/chconn/v2\n\ngo 1.18\n\nrequire (\n\tgithub.com/go-faster/city v1.0.1\n\tgithub.com/google/uuid v1.3.0\n\tgithub.com/jackc/puddle/v2 v2.1.2\n\tgithub.com/klauspost/compress v1.15.15\n\tgithub.com/pierrec/lz4/v4 v4.1.17\n\tgithub.com/stretchr/testify v1.8.1\n)\n\nrequire (\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.0 // indirect\n\tgo.uber.org/atomic v1.10.0 // indirect\n\tgolang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=\ngithub.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=\ngithub.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=\ngithub.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/jackc/puddle/v2 v2.1.2 h1:0f7vaaXINONKTsxYDn4otOAiJanX/BMeAtY//BXqzlg=\ngithub.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels=\ngithub.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=\ngithub.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=\ngithub.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=\ngithub.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngo.uber.org/atomic v1.10.0 
h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=\ngo.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=\ngolang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 h1:ZrnxWX62AgTKOSagEqxvb3ffipvEDX2pl7E1TdqLqIc=\ngolang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "helper_test.go",
    "content": "package chconn\n\nimport (\n\t\"io\"\n\t\"time\"\n)\n\ntype readErrorHelper struct {\n\tnumberValid int\n\terr         error\n\tr           io.Reader\n\tcount       int\n}\n\nfunc (r *readErrorHelper) Read(p []byte) (int, error) {\n\tr.count++\n\tif r.count > r.numberValid {\n\t\treturn 0, r.err\n\t}\n\treturn r.r.Read(p)\n}\n\ntype writerErrorHelper struct {\n\tnumberValid int\n\terr         error\n\tw           io.Writer\n\tcount       int\n}\n\nfunc (w *writerErrorHelper) Write(p []byte) (int, error) {\n\tw.count++\n\tif w.count > w.numberValid {\n\t\treturn 0, w.err\n\t}\n\treturn w.w.Write(p)\n}\n\ntype writerSlowHelper struct {\n\tw     io.Writer\n\tsleep time.Duration\n}\n\nfunc (w *writerSlowHelper) Write(p []byte) (int, error) {\n\ttime.Sleep(w.sleep)\n\treturn w.w.Write(p)\n}\n"
  },
  {
    "path": "insert.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\n// InsertStmt is a interface for insert stream statement\ntype InsertStmt interface {\n\t// Write write a columns (a block of data) to the clickhouse server\n\t// after each write you need to reset the columns. it will not reset automatically\n\tWrite(ctx context.Context, columns ...column.ColumnBasic) error\n\t// Flush flush the data to the clickhouse server and close the statement\n\tFlush(ctx context.Context) error\n\t// Close close the statement and release the connection\n\t// close will be called automatically after Flush\n\tClose()\n}\n\ntype insertStmt struct {\n\tblock        *block\n\tconn         *conn\n\tquery        string\n\tqueryOptions *QueryOptions\n\tclientInfo   *ClientInfo\n\thasError     bool\n\tclosed       bool\n\tfinishInsert bool\n}\n\nfunc (s *insertStmt) Flush(ctx context.Context) error {\n\tdefer s.Close()\n\ts.finishInsert = true\n\n\tif ctx != context.Background() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn newContextAlreadyDoneError(ctx)\n\t\tdefault:\n\t\t}\n\t\ts.conn.contextWatcher.Watch(ctx)\n\t\tdefer s.conn.contextWatcher.Unwatch()\n\t}\n\n\terr := s.conn.sendEmptyBlock()\n\n\tif err != nil {\n\t\ts.hasError = true\n\t\treturn &InsertError{\n\t\t\terr:        err,\n\t\t\tremoteAddr: s.conn.RawConn().RemoteAddr(),\n\t\t}\n\t}\n\n\tvar res interface{}\n\tfor {\n\t\tres, err = s.conn.receiveAndProcessData(emptyOnProgress)\n\n\t\tif err != nil {\n\t\t\ts.hasError = true\n\t\t\treturn err\n\t\t}\n\n\t\tif res == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif profile, ok := res.(*Profile); ok {\n\t\t\tif s.queryOptions.OnProfile != nil {\n\t\t\t\ts.queryOptions.OnProfile(profile)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif progress, ok := res.(*Progress); ok {\n\t\t\tif s.queryOptions.OnProgress != nil {\n\t\t\t\ts.queryOptions.OnProgress(progress)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif profileEvent, ok := 
res.(*ProfileEvent); ok {\n\t\t\tif s.queryOptions.OnProfileEvent != nil {\n\t\t\t\ts.queryOptions.OnProfileEvent(profileEvent)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\ts.hasError = true\n\t\treturn &unexpectedPacket{expected: \"serverData\", actual: res}\n\t}\n}\n\n// Close close the statement and release the connection\n// If Next is called and returns false and there are no further blocks,\n// the Rows are closed automatically and it will suffice to check the result of Err.\n// Close is idempotent and does not affect the result of Err.\nfunc (s *insertStmt) Close() {\n\ts.conn.reader.SetCompress(false)\n\tif !s.closed {\n\t\ts.closed = true\n\t\ts.conn.contextWatcher.Unwatch()\n\t\ts.conn.unlock()\n\t\tif s.hasError || !s.finishInsert {\n\t\t\ts.conn.Close()\n\t\t}\n\t}\n}\n\nfunc (s *insertStmt) Write(ctx context.Context, columns ...column.ColumnBasic) error {\n\tif int(s.block.NumColumns) != len(columns) {\n\t\treturn &InsertError{\n\t\t\terr: &ColumnNumberWriteError{\n\t\t\t\tWriteColumn: len(columns),\n\t\t\t\tNeedColumn:  s.block.NumColumns,\n\t\t\t},\n\t\t\tremoteAddr: s.conn.RawConn().RemoteAddr(),\n\t\t}\n\t}\n\n\tvar err error\n\tif len(columns[0].Name()) != 0 {\n\t\tcolumns, err = s.block.reorderColumns(columns)\n\t\tif err != nil {\n\t\t\ts.hasError = true\n\t\t\treturn &InsertError{\n\t\t\t\terr:        err,\n\t\t\t\tremoteAddr: s.conn.RawConn().RemoteAddr(),\n\t\t\t}\n\t\t}\n\t}\n\tfor i, col := range columns {\n\t\tcol.SetType(s.block.Columns[i].ChType)\n\t\tif errValidate := col.Validate(); errValidate != nil {\n\t\t\ts.hasError = true\n\t\t\treturn errValidate\n\t\t}\n\t}\n\n\tif ctx != context.Background() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn newContextAlreadyDoneError(ctx)\n\t\tdefault:\n\t\t}\n\t\ts.conn.contextWatcher.Watch(ctx)\n\t\tdefer s.conn.contextWatcher.Unwatch()\n\t}\n\n\terr = s.conn.sendData(s.block, columns[0].NumRow())\n\tif err != nil {\n\t\ts.hasError = true\n\t\treturn &InsertError{\n\t\t\terr:        
err,\n\t\t\tremoteAddr: s.conn.RawConn().RemoteAddr(),\n\t\t}\n\t}\n\n\terr = s.block.writeColumnsBuffer(s.conn, columns...)\n\tif err != nil {\n\t\ts.hasError = true\n\t\treturn &InsertError{\n\t\t\terr:        err,\n\t\t\tremoteAddr: s.conn.RawConn().RemoteAddr(),\n\t\t}\n\t}\n\tfor _, col := range columns {\n\t\tcol.Reset()\n\t}\n\treturn nil\n}\n\n// Insert send query for insert and commit columns\nfunc (ch *conn) Insert(ctx context.Context, query string, columns ...column.ColumnBasic) error {\n\treturn ch.InsertWithOption(ctx, query, nil, columns...)\n}\n\n// Insert send query for insert and prepare insert stmt with setting option\nfunc (ch *conn) InsertWithOption(\n\tctx context.Context,\n\tquery string,\n\tqueryOptions *QueryOptions,\n\tcolumns ...column.ColumnBasic) error {\n\tstmt, err := ch.InsertStreamWithOption(ctx, query, queryOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif stmt == nil {\n\t\tch.reader.SetCompress(false)\n\t\tch.contextWatcher.Unwatch()\n\t\tch.unlock()\n\t\treturn nil\n\t}\n\tdefer stmt.Close()\n\terr = stmt.Write(ctx, columns...)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = stmt.Flush(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, col := range columns {\n\t\tcol.Reset()\n\t}\n\treturn nil\n}\n\nfunc (ch *conn) InsertStream(ctx context.Context, query string) (InsertStmt, error) {\n\treturn ch.InsertStreamWithOption(ctx, query, nil)\n}\n\n// Insert send query for insert and prepare insert stmt with setting option\nfunc (ch *conn) InsertStreamWithOption(\n\tctx context.Context,\n\tquery string,\n\tqueryOptions *QueryOptions) (InsertStmt, error) {\n\terr := ch.lock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar hasError bool\n\tdefer func() {\n\t\tif hasError {\n\t\t\tch.Close()\n\t\t}\n\t}()\n\n\tif ctx != context.Background() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, newContextAlreadyDoneError(ctx)\n\t\tdefault:\n\t\t}\n\t\tch.contextWatcher.Watch(ctx)\n\t\tdefer 
ch.contextWatcher.Unwatch()\n\t}\n\n\tif queryOptions == nil {\n\t\tqueryOptions = emptyQueryOptions\n\t}\n\n\terr = ch.sendQueryWithOption(query, queryOptions.QueryID, queryOptions.Settings, queryOptions.Parameters)\n\tif err != nil {\n\t\thasError = true\n\t\treturn nil, preferContextOverNetTimeoutError(ctx, err)\n\t}\n\tvar blockData *block\n\tfor {\n\t\tvar res interface{}\n\t\tres, err = ch.receiveAndProcessData(emptyOnProgress)\n\t\tif err != nil {\n\t\t\thasError = true\n\t\t\treturn nil, preferContextOverNetTimeoutError(ctx, err)\n\t\t}\n\t\tif b, ok := res.(*block); ok {\n\t\t\tblockData = b\n\t\t\tbreak\n\t\t}\n\n\t\tif profile, ok := res.(*Profile); ok {\n\t\t\tif queryOptions.OnProfile != nil {\n\t\t\t\tqueryOptions.OnProfile(profile)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif progress, ok := res.(*Progress); ok {\n\t\t\tif queryOptions.OnProgress != nil {\n\t\t\t\tqueryOptions.OnProgress(progress)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif profileEvent, ok := res.(*ProfileEvent); ok {\n\t\t\tif queryOptions.OnProfileEvent != nil {\n\t\t\t\tqueryOptions.OnProfileEvent(profileEvent)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif res == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\thasError = true\n\t\treturn nil, &unexpectedPacket{expected: \"serverData\", actual: res}\n\t}\n\n\terr = blockData.readColumns(ch)\n\tif err != nil {\n\t\thasError = true\n\t\treturn nil, preferContextOverNetTimeoutError(ctx, err)\n\t}\n\n\ts := &insertStmt{\n\t\tconn:         ch,\n\t\tquery:        query,\n\t\tblock:        blockData,\n\t\tqueryOptions: queryOptions,\n\t\tclientInfo:   nil,\n\t}\n\n\treturn s, nil\n}\n"
  },
  {
    "path": "insert_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\nfunc TestInsertError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\t// test lock error\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\tc.(*conn).status = connStatusUninitialized\n\terr = c.Insert(context.Background(), \"insert into system.numbers VALUES\")\n\trequire.EqualError(t, err, \"conn uninitialized\")\n\trequire.EqualError(t, c.(*conn).lock(), \"conn uninitialized\")\n\tc.Close()\n\n\t// test write block info error\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerErrorHelper{\n\t\t\terr:         errors.New(\"timeout\"),\n\t\t\tw:           w,\n\t\t\tnumberValid: 1,\n\t\t}\n\t}\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Insert(context.Background(), \"insert into system.numbers VALUES\")\n\trequire.EqualError(t, err, \"write block info (timeout)\")\n\tassert.True(t, c.IsClosed())\n\n\t// test insert server error\n\tconfig.WriterFunc = nil\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Insert(context.Background(), \"insert into system.numbers VALUES\")\n\trequire.EqualError(t, err, \" DB::Exception (48): Method write is not supported by storage SystemNumbers\")\n\tassert.True(t, c.IsClosed())\n\n\t// test read column error\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_error`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_error (\n\t\t\t\tint8  
Int8\n\t\t\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\treturn &readErrorHelper{\n\t\t\terr:         errors.New(\"timeout\"),\n\t\t\tr:           r,\n\t\t\tnumberValid: 27,\n\t\t}\n\t}\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\terr = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_error (\n\t\t\t\tint8\n\t\t\t) VALUES`)\n\trequire.EqualError(t, err, \"block: read column name (timeout)\")\n\tassert.True(t, c.IsClosed())\n\n\tconfig, err = ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\terr = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_error (\n\t\t\t\tint8\n\t\t\t) VALUES`)\n\trequire.EqualError(t, errors.Unwrap(err), \"write 0 column(s) but insert query needs 1 column(s)\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestInsertCtxError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\terr = c.Insert(ctx, `INSERT INTO clickhouse_test_insert_error (\n\t\t\t\tint8\n\t\t\t) VALUES`)\n\trequire.EqualError(t, err, \"timeout: context already done: context canceled\")\n\tassert.False(t, c.IsClosed())\n\n\tconfig, err = ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerSlowHelper{\n\t\t\tw:     w,\n\t\t\tsleep: time.Second,\n\t\t}\n\t}\n\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)\n\tdefer cancel()\n\terr = c.Insert(ctx, `INSERT INTO clickhouse_test_insert_error (\n\t\tint8\n\t) 
VALUES`)\n\trequire.EqualError(t, errors.Unwrap(err), \"context deadline exceeded\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestInsertMoreColumnsError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_error_column`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_error_column (\n\t\tint8  Int8\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\terr = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_error_column (\n\t\t\tint8\n\t\t) VALUES`, column.New[int8](), column.New[int8]())\n\tremoteAddr := c.RawConn().RemoteAddr().String()\n\trequire.EqualError(t, err, \"failed to insert data: remoteAddr: \"+remoteAddr+\" - write 2 column(s) but insert query needs 1 column(s)\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestInsertMoreRowsError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_error_rows`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_error_rows (\n\t\tint8  Int8,\n\t\tint16 Int16\n\t) Engine=Memory`)\n\trequire.NoError(t, err)\n\n\tcol1 := column.New[int8]()\n\tcol2 := column.New[int16]()\n\tcol1.Append(1)\n\tcol1.Append(2)\n\tcol2.Append(2)\n\terr = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_error_rows (\n\t\t\tint8,\n\t\t\tint16\n\t\t) VALUES`, col1, col2)\n\tremoteAddr := c.RawConn().RemoteAddr().String()\n\trequire.EqualError(t, err, 
\"failed to insert data: remoteAddr: \"+remoteAddr+\" - \\\"int8\\\" has 2 rows but \\\"int16\\\" column has 1 rows\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestInsert(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_insert`)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(), `CREATE TABLE test_insert (\n\t\t\t\tint8 Int8,\n\t\t\t\tint16 Int16,\n\t\t\t\tint32 Int32\n\t\t\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tcol8 := column.New[int8]()\n\tcol8.SetName([]byte(\"int8\"))\n\tcol16 := column.New[int16]()\n\tcol16.SetName([]byte(\"int16\"))\n\tcol32 := column.New[int32]()\n\tcol32.SetName([]byte(\"int32\"))\n\n\tvar col8Insert []int8\n\tvar col16Insert []int16\n\tvar col32Insert []int32\n\n\trows := 10\n\tfor i := 0; i < rows; i++ {\n\t\tcol8.Append(int8(i))\n\t\tcol16.Append(int16(i))\n\t\tcol32.Append(int32(i))\n\t\tcol8Insert = append(col8Insert, int8(i))\n\t\tcol16Insert = append(col16Insert, int16(i))\n\t\tcol32Insert = append(col32Insert, int32(i))\n\t}\n\n\t// send in invalid order to test sorted columns by name\n\terr = conn.InsertWithOption(context.Background(), `INSERT INTO test_insert (int8,int16,int32) VALUES`,\n\t\t&QueryOptions{\n\t\t\tOnProgress: func(progress *Progress) {\n\n\t\t\t},\n\t\t\tOnProfileEvent: func(pe *ProfileEvent) {\n\n\t\t\t},\n\t\t\tOnProfile: func(p *Profile) {\n\n\t\t\t},\n\t\t},\n\t\tcol32, col16, col8)\n\n\trequire.NoError(t, err)\n\n\t// example read al\n\tcol8Read := column.New[int8]()\n\tcol16Read := column.New[int16]()\n\tcol32Read := column.New[int32]()\n\tselectStmt, err := conn.Select(context.Background(), `SELECT int8,int16,int32 FROM test_insert`, col8Read, col16Read, col32Read)\n\trequire.NoError(t, err)\n\trequire.True(t, conn.IsBusy())\n\n\tvar col8Data []int8\n\tvar col16Data 
[]int16\n\tvar col32Data []int32\n\n\tfor selectStmt.Next() {\n\t\tcol8Data = col8Read.Read(col8Data)\n\t\tcol16Data = col16Read.Read(col16Data)\n\t\tcol32Data = col32Read.Read(col32Data)\n\t}\n\n\trequire.NoError(t, selectStmt.Err())\n\tassert.Equal(t, col8Insert, col8Data)\n\tassert.Equal(t, col16Insert, col16Data)\n\tassert.Equal(t, col32Insert, col32Data)\n\n\tselectStmt.Close()\n\n\tconn.RawConn().Close()\n}\n\nfunc TestInsertNotFoundColumn(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_insert_not_found_column`)\n\trequire.NoError(t, err)\n\n\terr = conn.Exec(context.Background(), `CREATE TABLE test_insert_not_found_column (\n\t\t\t\tint8 Int8\n\t\t\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tcol8 := column.New[int8]()\n\tcol8.SetName([]byte(\"not_found\"))\n\n\trows := 10\n\tfor i := 0; i < rows; i++ {\n\t\tcol8.Append(int8(i))\n\t}\n\n\t// send in invalid order to test sorted columns by name\n\terr = conn.Insert(context.Background(), `INSERT INTO test_insert_not_found_column (int8) VALUES`, col8)\n\n\trequire.Equal(\n\t\tt,\n\t\terrors.Unwrap(err).Error(),\n\t\t\"the input columns do not contain column \\\"int8\\\". 
The column name must be set using the `SetName` method\")\n\n\tconn.RawConn().Close()\n}\n\nfunc TestCompressInsert(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname         string\n\t\tcompressType CompressMethod\n\t}{\n\t\t{\n\t\t\tname:         \"none\",\n\t\t\tcompressType: CompressNone,\n\t\t},\n\t\t{\n\t\t\tname:         \"lz4\",\n\t\t\tcompressType: CompressLZ4,\n\t\t},\n\t\t{\n\t\t\tname:         \"zstd\",\n\t\t\tcompressType: CompressZSTD,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\t\t\tparseConfig, err := ParseConfig(connString)\n\t\t\trequire.NoError(t, err)\n\t\t\tparseConfig.Compress = tt.compressType\n\t\t\tconn, err := ConnectConfig(context.Background(), parseConfig)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_insert_compress`)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = conn.Exec(context.Background(), `CREATE TABLE test_insert_compress (\n\t\t\t\tint8 Int8\n\t\t\t) Engine=Memory`)\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcol := column.New[int8]()\n\n\t\t\tvar colInsert []int8\n\n\t\t\trows := 1000\n\t\t\tfor i := 0; i < rows; i++ {\n\t\t\t\tval := int8(i)\n\n\t\t\t\tcol.Append(val)\n\t\t\t\tcolInsert = append(colInsert, val)\n\t\t\t}\n\n\t\t\terr = conn.Insert(context.Background(), `INSERT INTO test_insert_compress (int8) VALUES`, col)\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// example read all\n\t\t\tcolRead := column.New[int8]()\n\t\t\tselectStmt, err := conn.Select(context.Background(), `SELECT int8 FROM test_insert_compress`, colRead)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.True(t, conn.IsBusy())\n\n\t\t\tvar colData []int8\n\n\t\t\tfor selectStmt.Next() {\n\t\t\t\tcolData = colRead.Read(colData)\n\t\t\t}\n\n\t\t\tassert.Equal(t, colInsert, colData)\n\t\t\trequire.NoError(t, 
selectStmt.Err())\n\n\t\t\tselectStmt.Close()\n\n\t\t\tconn.RawConn().Close()\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error (\n\t\tint8  Int8\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write header\",\n\t\t\twantErr:     \"block: write header block data for column int8 (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write block data\",\n\t\t\twantErr:     \"block: write block data for column int8 (timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.New[int8]()\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error (int8) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnErrorCompress(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := 
ParseConfig(connString)\n\tconfig.Compress = CompressLZ4\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_compress`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_compress (\n\t\tint8  Int8\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write header\",\n\t\t\twantErr:     \"write block info (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"flush block info\",\n\t\t\twantErr:     \"flush block info (timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"flush data\",\n\t\t\twantErr:     \"block: flush block data (timeout)\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.New[int8]()\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_compress (int8) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnDataError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), 
config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_lc`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_lc (\n\t\tcol  LowCardinality(String)\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tstartValidReader := 3\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"write header\",\n\t\t\twantErr:     \"block: write header block data for column col (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"write stype\",\n\t\t\twantErr:     \"block: write block data for column col (error writing stype: timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"write dictionarySize\",\n\t\t\twantErr:     \"block: write block data for column col (error writing dictionarySize: timeout)\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"write dictionary\",\n\t\t\twantErr:     \"block: write block data for column col (error writing dictionary: timeout)\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"write keys len\",\n\t\t\twantErr:     \"block: write block data for column col (error writing keys len: timeout)\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t},\n\t\t{\n\t\t\tname:        \"write indices\",\n\t\t\twantErr:     \"block: write block data for column col (error writing indices: timeout)\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t\t{\n\t\t\tname:        \"write block info\",\n\t\t\twantErr:     \"write block info (timeout)\",\n\t\t\tnumberValid: startValidReader + 6,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\t\t\treturn &writerErrorHelper{\n\t\t\t\t\terr:         
errors.New(\"timeout\"),\n\t\t\t\t\tw:           w,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\t\t\tc, err = ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.NewString().LowCardinality()\n\t\t\tcol.Append(\"test\")\n\t\t\terr = c.Insert(context.Background(),\n\t\t\t\t\"insert into clickhouse_test_insert_column_error_lc (col) VALUES\",\n\t\t\t\tcol,\n\t\t\t)\n\t\t\trequire.EqualError(t, errors.Unwrap(err), tt.wantErr)\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n\nfunc TestInsertColumnDataErrorValidate(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_l_validate`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_l_validate (\n\t\tcol  LowCardinality(String)\n\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tcol := column.NewString()\n\tcol.Append(\"test\")\n\terr = c.Insert(context.Background(),\n\t\t\"insert into clickhouse_test_insert_column_error_l_validate (col) VALUES\",\n\t\tcol,\n\t)\n\trequire.EqualError(t, err, \"mismatch column type: ClickHouse Type: LowCardinality(String), column types: String\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestInsertSelectStmt(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\t// test read column error\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\terr = c.Exec(context.Background(), `DROP TABLE IF EXISTS 
clickhouse_test_insert_select`)\n\trequire.NoError(t, err)\n\n\terr = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_select (\n\t\t\t\tnumber  Int64\n\t\t\t) Engine=Memory`)\n\n\trequire.NoError(t, err)\n\n\terr = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_select (\n\t\tnumber\n\t\t\t) select number from system.numbers limit 10`)\n\trequire.NoError(t, err)\n\n\tcolRead := column.New[int64]()\n\tselectStmt, err := c.Select(context.Background(), `SELECT number FROM clickhouse_test_insert_select`, colRead)\n\trequire.NoError(t, err)\n\n\tvar colData []int64\n\n\tfor selectStmt.Next() {\n\t\tcolData = colRead.Read(colData)\n\t}\n\tassert.Equal(t, []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, colData)\n\trequire.NoError(t, selectStmt.Err())\n}\n"
  },
  {
    "path": "internal/ctxwatch/context_watcher.go",
    "content": "package ctxwatch\n\nimport (\n\t\"context\"\n\t\"sync\"\n)\n\n// ContextWatcher watches a context and performs an action when the context is canceled. It can watch one context at a\n// time.\ntype ContextWatcher struct {\n\tonCancel             func()\n\tonUnwatchAfterCancel func()\n\tunwatchChan          chan struct{}\n\n\tlock              sync.Mutex\n\twatchInProgress   bool\n\tonCancelWasCalled bool\n}\n\n// NewContextWatcher returns a ContextWatcher. onCancel will be called when a watched context is canceled.\n// OnUnwatchAfterCancel will be called when Unwatch is called and the watched context had already been canceled and\n// onCancel called.\nfunc NewContextWatcher(onCancel, onUnwatchAfterCancel func()) *ContextWatcher {\n\tcw := &ContextWatcher{\n\t\tonCancel:             onCancel,\n\t\tonUnwatchAfterCancel: onUnwatchAfterCancel,\n\t\tunwatchChan:          make(chan struct{}),\n\t}\n\n\treturn cw\n}\n\n// Watch starts watching ctx. If ctx is canceled then the onCancel function passed to NewContextWatcher will be called.\nfunc (cw *ContextWatcher) Watch(ctx context.Context) {\n\tcw.lock.Lock()\n\tdefer cw.lock.Unlock()\n\n\tif cw.watchInProgress {\n\t\tpanic(\"Watch already in progress\")\n\t}\n\n\tcw.onCancelWasCalled = false\n\n\tif ctx.Done() != nil {\n\t\tcw.watchInProgress = true\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tcw.onCancel()\n\t\t\t\tcw.onCancelWasCalled = true\n\t\t\t\t<-cw.unwatchChan\n\t\t\tcase <-cw.unwatchChan:\n\t\t\t}\n\t\t}()\n\t} else {\n\t\tcw.watchInProgress = false\n\t}\n}\n\n// Unwatch stops watching the previously watched context. 
If the onCancel function passed to NewContextWatcher was\n// called then onUnwatchAfterCancel will also be called.\nfunc (cw *ContextWatcher) Unwatch() {\n\tcw.lock.Lock()\n\tdefer cw.lock.Unlock()\n\n\tif cw.watchInProgress {\n\t\tcw.unwatchChan <- struct{}{}\n\t\tif cw.onCancelWasCalled {\n\t\t\tcw.onUnwatchAfterCancel()\n\t\t}\n\t\tcw.watchInProgress = false\n\t}\n}\n"
  },
  {
    "path": "internal/ctxwatch/context_watcher_test.go",
    "content": "package ctxwatch_test\n\nimport (\n\t\"context\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/ctxwatch\"\n)\n\nfunc TestContextWatcherContextCancelled(t *testing.T) {\n\tcanceledChan := make(chan struct{})\n\tcleanupCalled := false\n\tcw := ctxwatch.NewContextWatcher(func() {\n\t\tcanceledChan <- struct{}{}\n\t}, func() {\n\t\tcleanupCalled = true\n\t})\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tcw.Watch(ctx)\n\tcancel()\n\n\tselect {\n\tcase <-canceledChan:\n\tcase <-time.NewTimer(time.Second).C:\n\t\tt.Fatal(\"Timed out waiting for cancel func to be called\")\n\t}\n\n\tcw.Unwatch()\n\n\trequire.True(t, cleanupCalled, \"Cleanup func was not called\")\n}\n\nfunc TestContextWatcherUnwatchdBeforeContextCancelled(t *testing.T) {\n\tcw := ctxwatch.NewContextWatcher(func() {\n\t\tt.Error(\"cancel func should not have been called\")\n\t}, func() {\n\t\tt.Error(\"cleanup func should not have been called\")\n\t})\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tcw.Watch(ctx)\n\tcw.Unwatch()\n\tcancel()\n}\n\nfunc TestContextWatcherMultipleWatchPanics(t *testing.T) {\n\tcw := ctxwatch.NewContextWatcher(func() {}, func() {})\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tcw.Watch(ctx)\n\n\tctx2, cancel2 := context.WithCancel(context.Background())\n\tdefer cancel2()\n\trequire.Panics(t, func() { cw.Watch(ctx2) }, \"Expected panic when Watch called multiple times\")\n}\n\nfunc TestContextWatcherUnwatchWhenNotWatchingIsSafe(t *testing.T) {\n\tcw := ctxwatch.NewContextWatcher(func() {}, func() {})\n\tcw.Unwatch() // unwatch when not / never watching\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tcw.Watch(ctx)\n\tcw.Unwatch()\n\tcw.Unwatch() // double unwatch\n}\n\nfunc TestContextWatcherUnwatchIsConcurrencySafe(t *testing.T) {\n\tcw := 
ctxwatch.NewContextWatcher(func() {}, func() {})\n\n\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer cancel()\n\tcw.Watch(ctx)\n\n\tgo cw.Unwatch()\n\tgo cw.Unwatch()\n\n\t<-ctx.Done()\n}\n\n//nolint:govet\nfunc TestContextWatcherStress(t *testing.T) {\n\tvar cancelFuncCalls int64\n\tvar cleanupFuncCalls int64\n\n\tcw := ctxwatch.NewContextWatcher(func() {\n\t\tatomic.AddInt64(&cancelFuncCalls, 1)\n\t}, func() {\n\t\tatomic.AddInt64(&cleanupFuncCalls, 1)\n\t})\n\n\tcycleCount := 100000\n\n\tfor i := 0; i < cycleCount; i++ {\n\t\t//nolint:govet\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tcw.Watch(ctx)\n\t\tif i%2 == 0 {\n\t\t\tcancel()\n\t\t}\n\n\t\t// Without time.Sleep, cw.Unwatch will almost always run before the cancel func which means cancel will never happen.\n\t\t// This gives us a better mix.\n\t\tif i%3 == 0 {\n\t\t\ttime.Sleep(time.Nanosecond)\n\t\t}\n\n\t\tcw.Unwatch()\n\t\tif i%2 == 1 {\n\t\t\tcancel()\n\t\t}\n\t}\n\n\tactualCancelFuncCalls := atomic.LoadInt64(&cancelFuncCalls)\n\tactualCleanupFuncCalls := atomic.LoadInt64(&cleanupFuncCalls)\n\n\tif actualCancelFuncCalls == 0 {\n\t\tt.Fatal(\"actualCancelFuncCalls == 0\")\n\t}\n\n\tmaxCancelFuncCalls := int64(cycleCount) / 2\n\tif actualCancelFuncCalls > maxCancelFuncCalls {\n\t\tt.Errorf(\"cancel func calls should be no more than %d but was %d\", actualCancelFuncCalls, maxCancelFuncCalls)\n\t}\n\n\tif actualCancelFuncCalls != actualCleanupFuncCalls {\n\t\tt.Errorf(\"cancel func calls (%d) should be equal to cleanup func calls (%d) but was not\", actualCancelFuncCalls, actualCleanupFuncCalls)\n\t}\n}\n\nfunc BenchmarkContextWatcherUncancellable(b *testing.B) {\n\tcw := ctxwatch.NewContextWatcher(func() {}, func() {})\n\n\tfor i := 0; i < b.N; i++ {\n\t\tcw.Watch(context.Background())\n\t\tcw.Unwatch()\n\t}\n}\n\nfunc BenchmarkContextWatcherCancelled(b *testing.B) {\n\tcw := ctxwatch.NewContextWatcher(func() {}, func() {})\n\n\tfor i := 0; i < 
b.N; i++ {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tcw.Watch(ctx)\n\t\tcancel()\n\t\tcw.Unwatch()\n\t}\n}\n\nfunc BenchmarkContextWatcherCancellable(b *testing.B) {\n\tcw := ctxwatch.NewContextWatcher(func() {}, func() {})\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tcw.Watch(ctx)\n\t\tcw.Unwatch()\n\t}\n}\n"
  },
  {
    "path": "internal/helper/features.go",
    "content": "package helper\n\nconst (\n\tDbmsMinRevisionWithClientInfo                   = 54032\n\tDbmsMinRevisionWithServerTimezone               = 54058\n\tDbmsMinRevisionWithQuotaKeyInClientInfo         = 54060\n\tDbmsMinRevisionWithServerDisplayName            = 54372\n\tDbmsMinRevisionWithVersionPatch                 = 54401\n\tDbmsMinRevisionWithClientWriteInfo              = 54420\n\tDbmsMinRevisionWithSettingsSerializedAsStrings  = 54429\n\tDbmsMinRevisionWithInterServerSecret            = 54441\n\tDbmsMinRevisionWithOpenTelemetry                = 54442\n\tDbmsMinProtocolVersionWithDistributedDepth      = 54448\n\tDbmsMinProtocolVersionWithInitialQueryStartTime = 54449\n\tDbmsMinProtocolVersionWithParallelReplicas      = 54453\n\tDbmsMinProtocolWithCustomSerialization          = 54454\n\tDbmsMinProtocolWithQuotaKey                     = 54458\n\tDbmsMinProtocolWithParameters                   = 54459\n\tDbmsMinProtocolWithServerQueryTimeInProgress    = 54460\n)\n"
  },
  {
    "path": "internal/helper/strs.go",
    "content": "package helper\n\nconst (\n\tTupleStr    = \"Tuple(\"\n\tLenTupleStr = len(TupleStr)\n\tPointStr    = \"Point\"\n)\n\nvar PointMainTypeStr = []byte(\"Tuple(Float64, Float64)\")\n\nconst PolygonStr = \"Polygon\"\n\nvar PolygonMainTypeStr = []byte(\"Array(Array(Tuple(Float64, Float64)))\")\n\nconst MultiPolygonStr = \"MultiPolygon\"\n\nvar MultiPolygonMainTypeStr = []byte(\"Array(Array(Array(Tuple(Float64, Float64))))\")\n\nconst (\n\tArrayStr          = \"Array(\"\n\tLenArrayStr       = len(ArrayStr)\n\tArrayTypeStr      = \"Array(<type>)\"\n\tNestedStr         = \"Nested(\"\n\tLenNestedStr      = len(NestedStr)\n\tNestedToArrayTube = \"Array(Nested(\"\n\tRingStr           = \"Ring\"\n)\n\nvar RingMainTypeStr = []byte(\"Array(Tuple(Float64, Float64))\")\n\nconst (\n\tEnum8Str              = \"Enum8(\"\n\tEnum8StrLen           = len(Enum8Str)\n\tEnum16Str             = \"Enum16(\"\n\tEnum16StrLen          = len(Enum16Str)\n\tDateTimeStr           = \"DateTime(\"\n\tDateTimeStrLen        = len(DateTimeStr)\n\tDateTime64Str         = \"DateTime64(\"\n\tDateTime64StrLen      = len(DateTime64Str)\n\tDecimalStr            = \"Decimal(\"\n\tDecimalStrLen         = len(DecimalStr)\n\tFixedStringStr        = \"FixedString(\"\n\tFixedStringStrLen     = len(FixedStringStr)\n\tSimpleAggregateStr    = \"SimpleAggregateFunction(\"\n\tSimpleAggregateStrLen = len(SimpleAggregateStr)\n)\n\nconst (\n\tLowCardinalityStr             = \"LowCardinality(\"\n\tLenLowCardinalityStr          = len(LowCardinalityStr)\n\tLowCardinalityTypeStr         = \"LowCardinality(<type>)\"\n\tLowCardinalityNullableStr     = \"LowCardinality(Nullable(\"\n\tLenLowCardinalityNullableStr  = len(LowCardinalityNullableStr)\n\tLowCardinalityNullableTypeStr = \"LowCardinality(Nullable(<type>))\"\n)\n\nconst (\n\tMapStr     = \"Map(\"\n\tLenMapStr  = len(MapStr)\n\tMapTypeStr = \"Map(<key>, <value>)\"\n)\n\nconst (\n\tNullableStr     = \"Nullable(\"\n\tLenNullableStr  = 
len(NullableStr)\n\tNullableTypeStr = \"Nullable(<type>)\"\n)\n\nconst (\n\tStringStr = \"String\"\n)\n"
  },
  {
    "path": "internal/helper/validator.go",
    "content": "package helper\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\nfunc IsEnum8(chType []byte) bool {\n\treturn len(chType) > Enum8StrLen && (string(chType[:Enum8StrLen]) == Enum8Str)\n}\n\nfunc ExtractEnum(data []byte) (intToStringMap map[int16]string, stringToIntMap map[string]int16, err error) {\n\tenums := bytes.Split(data, []byte(\", \"))\n\tintToStringMap = make(map[int16]string)\n\tstringToIntMap = make(map[string]int16)\n\tfor _, enum := range enums {\n\t\tparts := bytes.SplitN(enum, []byte(\" = \"), 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn nil, nil, fmt.Errorf(\"invalid enum: %s\", enum)\n\t\t}\n\n\t\tid, err := strconv.ParseInt(string(parts[1]), 10, 8)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"invalid enum id: %s\", parts[1])\n\t\t}\n\n\t\tval := string(parts[0][1 : len(parts[0])-1])\n\t\tintToStringMap[int16(id)] = val\n\t\tstringToIntMap[val] = int16(id)\n\t}\n\treturn intToStringMap, stringToIntMap, nil\n}\n\nfunc IsEnum16(chType []byte) bool {\n\treturn len(chType) > Enum16StrLen && (string(chType[:Enum16StrLen]) == Enum16Str)\n}\n\nfunc IsDateTimeWithParam(chType []byte) bool {\n\treturn len(chType) > DateTimeStrLen && (string(chType[:DateTimeStrLen]) == DateTimeStr)\n}\n\nfunc IsDateTime64(chType []byte) bool {\n\treturn len(chType) > DateTime64StrLen && (string(chType[:DateTime64StrLen]) == DateTime64Str)\n}\n\nfunc IsFixedString(chType []byte) bool {\n\treturn len(chType) > FixedStringStrLen && (string(chType[:FixedStringStrLen]) == FixedStringStr)\n}\n\nfunc IsDecimal(chType []byte) bool {\n\treturn len(chType) > DecimalStrLen && (string(chType[:DecimalStrLen]) == DecimalStr)\n}\n\nfunc IsRing(chType []byte) bool {\n\treturn string(chType) == RingStr\n}\n\nfunc IsMultiPolygon(chType []byte) bool {\n\treturn string(chType) == MultiPolygonStr\n}\n\nfunc IsNested(chType []byte) bool {\n\treturn len(chType) > LenNestedStr && string(chType[:LenNestedStr]) == NestedStr\n}\n\nfunc NestedToArrayType(chType []byte) 
[]byte {\n\tif IsNested(chType) {\n\t\tnewChType := make([]byte, 0, len(chType)-LenNestedStr+LenArrayStr+LenTupleStr+1)\n\t\tnewChType = append(newChType, \"Array(Tuple(\"...)\n\t\tnewChType = append(newChType, chType[LenNestedStr:]...)\n\t\tnewChType = append(newChType, ')')\n\t\treturn newChType\n\t}\n\treturn chType\n}\n\nfunc IsArray(chType []byte) bool {\n\treturn len(chType) > LenArrayStr && string(chType[:LenArrayStr]) == ArrayStr\n}\n\nfunc IsPolygon(chType []byte) bool {\n\treturn string(chType) == PolygonStr\n}\n\nfunc IsString(chType []byte) bool {\n\treturn string(chType) == StringStr\n}\n\nfunc IsLowCardinality(chType []byte) bool {\n\treturn len(chType) > LenLowCardinalityStr && string(chType[:LenLowCardinalityStr]) == LowCardinalityStr\n}\n\nfunc IsNullableLowCardinality(chType []byte) bool {\n\treturn len(chType) > LenLowCardinalityNullableStr &&\n\t\tstring(chType[:LenLowCardinalityNullableStr]) == LowCardinalityNullableStr\n}\n\nfunc IsMap(chType []byte) bool {\n\treturn len(chType) > LenMapStr && string(chType[:LenMapStr]) == MapStr\n}\n\nfunc IsNullable(chType []byte) bool {\n\treturn len(chType) > LenNullableStr && string(chType[:LenNullableStr]) == NullableStr\n}\n\nfunc IsPoint(chType []byte) bool {\n\treturn string(chType) == PointStr\n}\n\nfunc IsTuple(chType []byte) bool {\n\treturn len(chType) > LenTupleStr && string(chType[:LenTupleStr]) == TupleStr\n}\n\ntype ColumnData struct {\n\tChType, Name []byte\n}\n\nfunc TypesInParentheses(b []byte) ([]ColumnData, error) {\n\tvar columns []ColumnData\n\tvar openFunc int\n\tvar hasBacktick bool\n\tcur := 0\n\tfor i, char := range b {\n\t\tif char == '`' {\n\t\t\tif !hasBacktick {\n\t\t\t\thasBacktick = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif b[i-1] != '\\\\' {\n\t\t\t\thasBacktick = false\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif hasBacktick {\n\t\t\tcontinue\n\t\t}\n\t\tif char == ',' {\n\t\t\tif openFunc == 0 {\n\t\t\t\tcolData, err := SplitNameType(b[cur:i])\n\t\t\t\tif err != nil 
{\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tcolumns = append(columns, colData)\n\t\t\t\t//  add 2 to skip the ', '\n\t\t\t\tcur = i + 2\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif char == '(' {\n\t\t\topenFunc++\n\t\t\tcontinue\n\t\t}\n\t\tif char == ')' {\n\t\t\topenFunc--\n\t\t\tcontinue\n\t\t}\n\t}\n\tcolData, err := SplitNameType(b[cur:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append(columns, colData), nil\n}\n\nfunc SplitNameType(b []byte) (ColumnData, error) {\n\t// for example: `date f` Array(String)\n\tif b[0] == '`' {\n\t\tb = b[1:]\n\t\tfor i, char := range b {\n\t\t\tif char == '`' && b[i-1] != '\\\\' {\n\t\t\t\treturn ColumnData{\n\t\t\t\t\tName:   b[1 : i+1],\n\t\t\t\t\tChType: b[i+2:],\n\t\t\t\t}, nil\n\t\t\t}\n\t\t}\n\t\treturn ColumnData{}, fmt.Errorf(\"cannot find closing backtick in %s\", b)\n\t}\n\tfor i, char := range b {\n\t\tif char == '(' {\n\t\t\tbreak\n\t\t}\n\t\tif char == ' ' {\n\t\t\treturn ColumnData{\n\t\t\t\tName:   b[1 : i+1],\n\t\t\t\tChType: b[i+1:],\n\t\t\t}, nil\n\t\t}\n\t}\n\treturn ColumnData{\n\t\tChType: b,\n\t}, nil\n}\n\nfunc FilterSimpleAggregate(chType []byte) []byte {\n\tif len(chType) <= SimpleAggregateStrLen || (string(chType[:SimpleAggregateStrLen]) != SimpleAggregateStr) {\n\t\treturn chType\n\t}\n\tchType = chType[SimpleAggregateStrLen:]\n\tfor i, v := range chType {\n\t\tif v == ',' {\n\t\t\treturn chType[i+2 : len(chType)-1]\n\t\t}\n\t}\n\tpanic(\"Cannot found nested type of \" + string(chType))\n}\n"
  },
  {
    "path": "internal/readerwriter/compress_reader.go",
    "content": "package readerwriter\n\n// copy from https://github.com/ClickHouse/ch-go/blob/4cde4e4bec24211c0bcdc6f385f4212d0ad522d9/compress/reader.go\n// some changes to compatible with chconn\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/go-faster/city\"\n\t\"github.com/klauspost/compress/zstd\"\n\t\"github.com/pierrec/lz4/v4\"\n)\n\ntype invalidCompressErr struct {\n\tmethod CompressMethod\n}\n\nfunc (e *invalidCompressErr) Error() string {\n\treturn fmt.Sprintf(\"unknown compression method: 0x%02x \", e.method)\n}\n\ntype compressReader struct {\n\treader io.Reader\n\tdata   []byte\n\tpos    int64\n\traw    []byte\n\theader []byte\n\tzstd   *zstd.Decoder\n}\n\n// NewCompressReader wrap the io.Reader\nfunc NewCompressReader(r io.Reader) io.Reader {\n\treturn &compressReader{\n\t\tzstd:   nil, // lazily initialized\n\t\treader: r,\n\t\theader: make([]byte, headerSize),\n\t}\n}\n\nfunc (r *compressReader) Read(buf []byte) (n int, err error) {\n\tif r.pos >= int64(len(r.data)) {\n\t\tif err := r.readBlock(); err != nil {\n\t\t\treturn 0, fmt.Errorf(\"read block: %w\", err)\n\t\t}\n\t}\n\tn = copy(buf, r.data[r.pos:])\n\tr.pos += int64(n)\n\treturn n, nil\n}\n\n// readBlock reads next compressed data into raw and decompresses into data.\nfunc (r *compressReader) readBlock() error {\n\tr.pos = 0\n\n\t_ = r.header[headerSize-1]\n\tif _, err := io.ReadFull(r.reader, r.header); err != nil {\n\t\treturn fmt.Errorf(\"read header: %w\", err)\n\t}\n\n\tvar (\n\t\trawSize  = int(binary.LittleEndian.Uint32(r.header[hRawSize:])) - compressHeaderSize\n\t\tdataSize = int(binary.LittleEndian.Uint32(r.header[hDataSize:]))\n\t)\n\tif dataSize < 0 || dataSize > maxDataSize {\n\t\treturn fmt.Errorf(\"data size should be %d < %d < %d\", 0, dataSize, maxDataSize)\n\t}\n\tif rawSize < 0 || rawSize > maxBlockSize {\n\t\treturn fmt.Errorf(\"raw size should be %d < %d < %d\", 0, rawSize, maxBlockSize)\n\t}\n\n\tr.data = append(r.data[:0], make([]byte, 
dataSize)...)\n\tr.raw = append(r.raw[:0], r.header...)\n\tr.raw = append(r.raw, make([]byte, rawSize)...)\n\t_ = r.raw[:rawSize+headerSize-1]\n\n\tif _, err := io.ReadFull(r.reader, r.raw[headerSize:]); err != nil {\n\t\treturn fmt.Errorf(\"read raw: %w\", err)\n\t}\n\thGot := city.U128{\n\t\tLow:  binary.LittleEndian.Uint64(r.raw[0:8]),\n\t\tHigh: binary.LittleEndian.Uint64(r.raw[8:16]),\n\t}\n\th := city.CH128(r.raw[hMethod:])\n\tif hGot != h {\n\t\treturn &CorruptedDataErr{\n\t\t\tActual:    h,\n\t\t\tReference: hGot,\n\t\t\tRawSize:   rawSize,\n\t\t\tDataSize:  dataSize,\n\t\t}\n\t}\n\t//nolint:exhaustive\n\tswitch m := CompressMethod(r.header[hMethod]); m {\n\tcase CompressLZ4:\n\t\tn, err := lz4.UncompressBlock(r.raw[headerSize:], r.data)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"lz4 decompress: %w\", err)\n\t\t}\n\t\tif n != dataSize {\n\t\t\treturn fmt.Errorf(\"unexpected uncompressed data size: %d (actual) != %d (got in header)\",\n\t\t\t\tn, dataSize,\n\t\t\t)\n\t\t}\n\tcase CompressZSTD:\n\t\tif r.zstd == nil {\n\t\t\t// Lazily initializing to prevent spawning goroutines in NewReader.\n\t\t\t// See https://github.com/golang/go/issues/47056#issuecomment-997436820\n\t\t\tzstdReader, err := zstd.NewReader(nil,\n\t\t\t\tzstd.WithDecoderConcurrency(1),\n\t\t\t\tzstd.WithDecoderLowmem(true),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"zstd new: %w\", err)\n\t\t\t}\n\t\t\tr.zstd = zstdReader\n\t\t}\n\t\tdata, err := r.zstd.DecodeAll(r.raw[headerSize:], r.data[:0])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"zstd decompress: %w\", err)\n\t\t}\n\t\tif len(data) != dataSize {\n\t\t\treturn fmt.Errorf(\"unexpected uncompressed data size: %d (actual) != %d (got in header)\",\n\t\t\t\tlen(data), dataSize,\n\t\t\t)\n\t\t}\n\t\tr.data = data\n\tcase CompressChecksum:\n\t\tcopy(r.data, r.raw[headerSize:])\n\tdefault:\n\t\treturn &invalidCompressErr{m}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "internal/readerwriter/compress_writer.go",
    "content": "package readerwriter\n\n// copy from https://github.com/ClickHouse/ch-go/blob/4cde4e4bec24211c0bcdc6f385f4212d0ad522d9/compress/writer.go\n// some changes to compatible with chconn\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/go-faster/city\"\n\t\"github.com/klauspost/compress/zstd\"\n\t\"github.com/pierrec/lz4/v4\"\n)\n\ntype compressWriter struct {\n\twriter io.Writer\n\t// data uncompressed\n\tdata []byte\n\t// data position\n\tpos int\n\t// data compressed\n\tzdata []byte\n\t// compression method\n\tmethod CompressMethod\n\n\tlz4  *lz4.Compressor\n\tzstd *zstd.Encoder\n}\n\n// NewCompressWriter wrap the io.Writer\nfunc NewCompressWriter(w io.Writer, method byte) io.Writer {\n\tp := &compressWriter{\n\t\twriter: w,\n\t\tmethod: CompressMethod(method),\n\t\tdata:   make([]byte, maxBlockSize),\n\t}\n\treturn p\n}\n\nfunc (cw *compressWriter) Write(buf []byte) (int, error) {\n\tvar n int\n\tfor len(buf) > 0 {\n\t\t// Accumulate the data to be compressed.\n\t\tm := copy(cw.data[cw.pos:], buf)\n\t\tcw.pos += m\n\t\tbuf = buf[m:]\n\t\tif cw.pos == len(cw.data) {\n\t\t\terr := cw.Flush()\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t}\n\t\tn += m\n\t}\n\n\treturn n, nil\n}\n\n// Compress buf into Data.\nfunc (cw *compressWriter) Flush() error {\n\tif cw.pos == 0 {\n\t\treturn nil\n\t}\n\tmaxSize := lz4.CompressBlockBound(len(cw.data[:cw.pos]))\n\tcw.zdata = append(cw.zdata[:0], make([]byte, maxSize+headerSize)...)\n\t_ = cw.zdata[:headerSize]\n\tcw.zdata[hMethod] = byte(cw.method)\n\n\tvar n int\n\t//nolint:exhaustive\n\tswitch cw.method {\n\tcase CompressLZ4:\n\t\tif cw.lz4 == nil {\n\t\t\tcw.lz4 = &lz4.Compressor{}\n\t\t}\n\t\tcompressedSize, err := cw.lz4.CompressBlock(cw.data[:cw.pos], cw.zdata[headerSize:])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"lz4 compress error: %v\", err)\n\t\t}\n\t\tn = compressedSize\n\tcase CompressZSTD:\n\t\tif cw.zstd == nil {\n\t\t\tzw, err := 
zstd.NewWriter(nil,\n\t\t\t\tzstd.WithEncoderLevel(zstd.SpeedDefault),\n\t\t\t\tzstd.WithEncoderConcurrency(1),\n\t\t\t\tzstd.WithLowerEncoderMem(true),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"zstd new error: %v\", err)\n\t\t\t}\n\t\t\tcw.zstd = zw\n\t\t}\n\t\tcw.zdata = cw.zstd.EncodeAll(cw.data[:cw.pos], cw.zdata[:headerSize])\n\t\tn = len(cw.zdata) - headerSize\n\tcase CompressChecksum:\n\t\tn = copy(cw.zdata[headerSize:], cw.data[:cw.pos])\n\t}\n\n\tcw.zdata = cw.zdata[:n+headerSize]\n\n\tbinary.LittleEndian.PutUint32(cw.zdata[hRawSize:], uint32(n+compressHeaderSize))\n\tbinary.LittleEndian.PutUint32(cw.zdata[hDataSize:], uint32(cw.pos))\n\th := city.CH128(cw.zdata[hMethod:])\n\tbinary.LittleEndian.PutUint64(cw.zdata[0:8], h.Low)\n\tbinary.LittleEndian.PutUint64(cw.zdata[8:16], h.High)\n\n\t_, err := cw.writer.Write(cw.zdata)\n\tcw.pos = 0\n\treturn err\n}\n"
  },
  {
    "path": "internal/readerwriter/consts.go",
    "content": "package readerwriter\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/go-faster/city\"\n)\n\n// Method is compression codec.\ntype CompressMethod byte\n\nconst (\n\t// ChecksumSize is 128bits for cityhash102 checksum\n\tChecksumSize = 16\n\t// CompressHeaderSize magic + compressed_size + uncompressed_size\n\tCompressHeaderSize = 1 + 4 + 4\n\n\t// HeaderSize for compress header\n\tHeaderSize = ChecksumSize + CompressHeaderSize\n\t// BlockMaxSize 1MB\n\tBlockMaxSize = 1024 * 1024 * 128\n)\n\n// Possible compression methods.\nconst (\n\tCompressNone     CompressMethod = 0x00\n\tCompressChecksum CompressMethod = 0x02\n\tCompressLZ4      CompressMethod = 0x82\n\tCompressZSTD     CompressMethod = 0x90\n)\n\n// Constants for compression encoding.\n//\n// See https://go-faster.org/docs/clickhouse/compression for reference.\nconst (\n\tchecksumSize       = 16\n\tcompressHeaderSize = 1 + 4 + 4\n\theaderSize         = checksumSize + compressHeaderSize\n\n\t// Limiting total data/block size to protect from possible OOM.\n\tmaxDataSize  = 1024 * 1024 * 2 // 2MB\n\tmaxBlockSize = maxDataSize\n\n\thRawSize  = 17\n\thDataSize = 21\n\thMethod   = 16\n)\n\n// CorruptedDataErr means that provided hash mismatch with calculated.\ntype CorruptedDataErr struct {\n\tActual    city.U128\n\tReference city.U128\n\tRawSize   int\n\tDataSize  int\n}\n\nfunc (c *CorruptedDataErr) Error() string {\n\treturn fmt.Sprintf(\"corrupted data: %d (actual), %d (reference), compressed size: %d, data size: %d\",\n\t\tc.Actual.High, c.Reference.High, c.RawSize, c.DataSize,\n\t)\n}\n"
  },
  {
    "path": "internal/readerwriter/reader.go",
    "content": "package readerwriter\n\nimport (\n\t\"encoding/binary\"\n\t\"io\"\n)\n\n// Reader is a helper to read data from reader\ntype Reader struct {\n\tmainReader     io.Reader\n\tinput          io.Reader\n\tcompressReader io.Reader\n\tscratch        [binary.MaxVarintLen64]byte\n}\n\n// NewReader get new Reader\nfunc NewReader(input io.Reader) *Reader {\n\treturn &Reader{\n\t\tinput:      input,\n\t\tmainReader: input,\n\t}\n}\n\n// SetCompress set compress statusp\nfunc (r *Reader) SetCompress(c bool) {\n\tif c {\n\t\tif r.compressReader == nil {\n\t\t\tr.compressReader = NewCompressReader(r.mainReader)\n\t\t}\n\t\tr.input = r.compressReader\n\t\treturn\n\t}\n\tr.input = r.mainReader\n}\n\n// Uvarint read variable uint64 value\nfunc (r *Reader) Uvarint() (uint64, error) {\n\treturn binary.ReadUvarint(r)\n}\n\n// Int32 read Int32 value\nfunc (r *Reader) Int32() (int32, error) {\n\tv, err := r.Uint32()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn int32(v), nil\n}\n\n// Uint32 read Uint32 value\nfunc (r *Reader) Uint32() (uint32, error) {\n\tif _, err := io.ReadFull(r.input, r.scratch[:4]); err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.LittleEndian.Uint32(r.scratch[:4]), nil\n}\n\n// Uint64 read Uint64 value\nfunc (r *Reader) Uint64() (uint64, error) {\n\tif _, err := io.ReadFull(r.input, r.scratch[:8]); err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.LittleEndian.Uint64(r.scratch[:8]), nil\n}\n\n// FixedString read FixedString value\nfunc (r *Reader) FixedString(strlen int) ([]byte, error) {\n\tbuf := make([]byte, strlen)\n\n\t_, err := io.ReadFull(r, buf)\n\treturn buf, err\n}\n\n// String read String value\nfunc (r *Reader) String() (string, error) {\n\tstrlen, err := r.Uvarint()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstr, err := r.FixedString(int(strlen))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(str), nil\n}\n\n// ByteString read string  value as []byte\nfunc (r *Reader) ByteString() ([]byte, error) 
{\n\tstrlen, err := r.Uvarint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif strlen == 0 {\n\t\treturn []byte{}, nil\n\t}\n\treturn r.FixedString(int(strlen))\n}\n\n// ReadByte read a single byte\nfunc (r *Reader) ReadByte() (byte, error) {\n\tif _, err := r.input.Read(r.scratch[:1]); err != nil {\n\t\treturn 0, err\n\t}\n\treturn r.scratch[0], nil\n}\n\n// Read  implement Read\nfunc (r *Reader) Read(buf []byte) (int, error) {\n\treturn io.ReadFull(r.input, buf)\n}\n"
  },
  {
    "path": "internal/readerwriter/writer.go",
    "content": "package readerwriter\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"io\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\n// Writer is a helper to write data into bytes.Buffer\ntype Writer struct {\n\toutput  *bytes.Buffer\n\tscratch [binary.MaxVarintLen64]byte\n}\n\n// NewWriter get new writer\nfunc NewWriter() *Writer {\n\treturn &Writer{\n\t\toutput: &bytes.Buffer{},\n\t}\n}\n\n// Uvarint write a variable uint64 value into writer\nfunc (w *Writer) Uvarint(v uint64) {\n\tln := binary.PutUvarint(w.scratch[:binary.MaxVarintLen64], v)\n\tw.Write(w.scratch[:ln])\n}\n\n// Int32 write Int32 value\nfunc (w *Writer) Int32(v int32) {\n\tw.Uint32(uint32(v))\n}\n\n// Int64 write Int64 value\nfunc (w *Writer) Int64(v int64) {\n\tw.Uint64(uint64(v))\n}\n\n// Uint8 write Uint8 value\nfunc (w *Writer) Uint8(v uint8) {\n\tw.output.WriteByte(v)\n}\n\n// Uint32 write Uint32 value\nfunc (w *Writer) Uint32(v uint32) {\n\tw.scratch[0] = byte(v)\n\tw.scratch[1] = byte(v >> 8)\n\tw.scratch[2] = byte(v >> 16)\n\tw.scratch[3] = byte(v >> 24)\n\tw.Write(w.scratch[:4])\n}\n\n// Uint64 write Uint64 value\nfunc (w *Writer) Uint64(v uint64) {\n\tw.scratch[0] = byte(v)\n\tw.scratch[1] = byte(v >> 8)\n\tw.scratch[2] = byte(v >> 16)\n\tw.scratch[3] = byte(v >> 24)\n\tw.scratch[4] = byte(v >> 32)\n\tw.scratch[5] = byte(v >> 40)\n\tw.scratch[6] = byte(v >> 48)\n\tw.scratch[7] = byte(v >> 56)\n\tw.Write(w.scratch[:8])\n}\n\n// String write string\nfunc (w *Writer) String(v string) {\n\tstr := str2Bytes(v)\n\tw.Uvarint(uint64(len(str)))\n\tw.Write(str)\n}\n\n// ByteString write []byte\nfunc (w *Writer) ByteString(v []byte) {\n\tw.Uvarint(uint64(len(v)))\n\tw.Write(v)\n}\n\n// Write write raw []byte data\nfunc (w *Writer) Write(b []byte) {\n\tw.output.Write(b)\n}\n\n// WriteTo implement WriteTo\nfunc (w *Writer) WriteTo(wt io.Writer) (int64, error) {\n\treturn w.output.WriteTo(wt)\n}\n\n// Reset reset all data\nfunc (w *Writer) Reset() {\n\tw.output.Reset()\n}\n\n// Output get raw 
*bytes.Buffer\nfunc (w *Writer) Output() *bytes.Buffer {\n\treturn w.output\n}\n\nfunc str2Bytes(str string) []byte {\n\theader := (*reflect.SliceHeader)(unsafe.Pointer(&str))\n\theader.Len = len(str)\n\theader.Cap = header.Len\n\treturn *(*[]byte)(unsafe.Pointer(header))\n}\n"
  },
  {
    "path": "ping.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n)\n\ntype pong struct{}\n\n// Check that connection to the server is alive.\nfunc (ch *conn) Ping(ctx context.Context) error {\n\tif ctx != context.Background() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn newContextAlreadyDoneError(ctx)\n\t\tdefault:\n\t\t}\n\t\tch.contextWatcher.Watch(ctx)\n\t\tdefer ch.contextWatcher.Unwatch()\n\t}\n\tch.writer.Uvarint(clientPing)\n\tvar hasError bool\n\tdefer func() {\n\t\tif hasError {\n\t\t\tch.Close()\n\t\t}\n\t}()\n\tif _, err := ch.writer.WriteTo(ch.writerTo); err != nil {\n\t\thasError = true\n\t\treturn &writeError{\"ping: write packet type\", preferContextOverNetTimeoutError(ctx, err)}\n\t}\n\n\tres, err := ch.receiveAndProcessData(emptyOnProgress)\n\tif err != nil {\n\t\thasError = true\n\t\treturn preferContextOverNetTimeoutError(ctx, err)\n\t}\n\tif _, ok := res.(*pong); !ok {\n\t\thasError = true\n\t\treturn &unexpectedPacket{expected: \"serverPong\", actual: res}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "ping_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestPing(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconn, err := Connect(context.Background(), connString)\n\trequire.NoError(t, err)\n\trequire.NoError(t, conn.Ping(context.Background()))\n\tconn.Close()\n}\n\nfunc TestPingWriteError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerErrorHelper{\n\t\t\terr:         errors.New(\"timeout\"),\n\t\t\tw:           w,\n\t\t\tnumberValid: 1,\n\t\t}\n\t}\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\terr = c.Ping(context.Background())\n\trequire.EqualError(t, err, \"ping: write packet type (timeout)\")\n\trequire.EqualError(t, errors.Unwrap(err), \"timeout\")\n\n\tassert.True(t, c.IsClosed())\n\n\tconfig.WriterFunc = nil\n\n\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\treturn &readErrorHelper{\n\t\t\terr:         errors.New(\"timeout\"),\n\t\t\tr:           r,\n\t\t\tnumberValid: 13,\n\t\t}\n\t}\n\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\trequire.EqualError(t, c.Ping(context.Background()), \"packet: read packet type (timeout)\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestPingCtxError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\n\terr = c.Ping(ctx)\n\trequire.EqualError(t, err, \"timeout: context 
already done: context canceled\")\n\trequire.EqualError(t, errors.Unwrap(err), \"context already done: context canceled\")\n\n\tassert.False(t, c.IsClosed())\n\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerSlowHelper{\n\t\t\tw:     w,\n\t\t\tsleep: time.Second,\n\t\t}\n\t}\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)\n\tdefer cancel()\n\terr = c.Ping(ctx)\n\trequire.EqualError(t, errors.Unwrap(errors.Unwrap(err)), \"context deadline exceeded\")\n\tassert.True(t, c.IsClosed())\n}\n"
  },
  {
    "path": "profile.go",
    "content": "package chconn\n\n// Profile detail of profile select query\ntype Profile struct {\n\tRows                      uint64\n\tBlocks                    uint64\n\tBytes                     uint64\n\tRowsBeforeLimit           uint64\n\tAppliedLimit              uint8\n\tCalculatedRowsBeforeLimit uint8\n}\n\nfunc newProfile() *Profile {\n\treturn &Profile{}\n}\n\nfunc (p *Profile) read(ch *conn) (err error) {\n\tif p.Rows, err = ch.reader.Uvarint(); err != nil {\n\t\treturn &readError{\"profile: read Rows\", err}\n\t}\n\tif p.Blocks, err = ch.reader.Uvarint(); err != nil {\n\t\treturn &readError{\"profile: read Blocks\", err}\n\t}\n\tif p.Bytes, err = ch.reader.Uvarint(); err != nil {\n\t\treturn &readError{\"profile: read Bytes\", err}\n\t}\n\n\tif p.AppliedLimit, err = ch.reader.ReadByte(); err != nil {\n\t\treturn &readError{\"profile: read AppliedLimit\", err}\n\t}\n\tif p.RowsBeforeLimit, err = ch.reader.Uvarint(); err != nil {\n\t\treturn &readError{\"profile: read RowsBeforeLimit\", err}\n\t}\n\tif p.CalculatedRowsBeforeLimit, err = ch.reader.ReadByte(); err != nil {\n\t\treturn &readError{\"profile: read CalculatedRowsBeforeLimit\", err}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "profile_event.go",
    "content": "package chconn\n\nimport (\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n)\n\n// Profile detail of profile select query\ntype ProfileEvent struct {\n\tHost     *column.String\n\tTime     *column.Base[uint32]\n\tThreadID *column.Base[uint64]\n\tType     *column.Base[int8]\n\tName     *column.String\n\tValue    *column.Base[int64]\n}\n\nfunc newProfileEvent() *ProfileEvent {\n\treturn &ProfileEvent{\n\t\tHost:     column.NewString(),\n\t\tTime:     column.New[uint32](),\n\t\tThreadID: column.New[uint64](),\n\t\tType:     column.New[int8](),\n\t\tName:     column.NewString(),\n\t\tValue:    column.New[int64](),\n\t}\n}\n\nfunc (p ProfileEvent) read(c *conn) error {\n\treturn c.block.readColumnsData(c, true, p.Host, p.Time, p.ThreadID, p.Type, p.Name, p.Value)\n}\n"
  },
  {
    "path": "profile_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n)\n\nfunc TestProfileReadError(t *testing.T) {\n\tstartValidReader := 43\n\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tif c.ServerInfo().Revision >= helper.DbmsMinProtocolWithServerQueryTimeInProgress {\n\t\t// todo we need to fix this for clickhouse 22.10 and above\n\t\treturn\n\t}\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"profile: read Rows\",\n\t\t\twantErr:     \"profile: read Rows\",\n\t\t\tnumberValid: startValidReader,\n\t\t}, {\n\t\t\tname:        \"profile: read Blocks\",\n\t\t\twantErr:     \"profile: read Blocks\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t}, {\n\t\t\tname:        \"profile: read Bytes\",\n\t\t\twantErr:     \"profile: read Bytes\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t}, {\n\t\t\tname:        \"profile: read AppliedLimit\",\n\t\t\twantErr:     \"profile: read AppliedLimit\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t}, {\n\t\t\tname:        \"profile: read RowsBeforeLimit\",\n\t\t\twantErr:     \"profile: read RowsBeforeLimit\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t}, {\n\t\t\tname:        \"profile: read CalculatedRowsBeforeLimit\",\n\t\t\twantErr:     \"profile: read CalculatedRowsBeforeLimit\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif c.ServerInfo().Revision >= 
helper.DbmsMinProtocolWithServerQueryTimeInProgress {\n\t\t\t\ttt.numberValid++\n\t\t\t}\n\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tcol := column.New[uint64]()\n\t\t\tstmt, err := c.Select(context.Background(), \"SELECT * FROM system.numbers LIMIT 1;\", col)\n\t\t\trequire.NoError(t, err)\n\t\t\tfor stmt.Next() {\n\t\t\t}\n\t\t\trequire.Error(t, stmt.Err())\n\t\t\treadErr, ok := stmt.Err().(*readError)\n\t\t\trequire.True(t, ok)\n\t\t\tfmt.Println(\"readErr.msg:\", readErr.msg)\n\t\t\trequire.Equal(t, tt.wantErr, readErr.msg)\n\t\t\trequire.EqualError(t, readErr.Unwrap(), \"timeout\")\n\t\t\tassert.True(t, c.IsClosed())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "progress.go",
    "content": "package chconn\n\nimport \"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\n// Progress details of progress select query\ntype Progress struct {\n\tReadRows     uint64\n\tReadBytes    uint64\n\tTotalRows    uint64\n\tWriterRows   uint64\n\tWrittenBytes uint64\n\tElapsedNS    uint64\n}\n\nfunc newProgress() *Progress {\n\treturn &Progress{}\n}\n\nfunc (p *Progress) read(ch *conn) (err error) {\n\tif p.ReadRows, err = ch.reader.Uvarint(); err != nil {\n\t\treturn &readError{\"progress: read ReadRows\", err}\n\t}\n\tif p.ReadBytes, err = ch.reader.Uvarint(); err != nil {\n\t\treturn &readError{\"progress: read ReadBytes\", err}\n\t}\n\n\tif p.TotalRows, err = ch.reader.Uvarint(); err != nil {\n\t\treturn &readError{\"progress: read TotalRows\", err}\n\t}\n\n\tif ch.serverInfo.Revision >= helper.DbmsMinRevisionWithClientWriteInfo {\n\t\tif p.WriterRows, err = ch.reader.Uvarint(); err != nil {\n\t\t\treturn &readError{\"progress: read WriterRows\", err}\n\t\t}\n\t\tif p.WrittenBytes, err = ch.reader.Uvarint(); err != nil {\n\t\t\treturn &readError{\"progress: read WrittenBytes\", err}\n\t\t}\n\t}\n\tif ch.serverInfo.Revision >= helper.DbmsMinProtocolWithServerQueryTimeInProgress {\n\t\tif p.ElapsedNS, err = ch.reader.Uvarint(); err != nil {\n\t\t\treturn &readError{\"progress: read ElapsedNS\", err}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "select_stmt.go",
    "content": "package chconn\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/types\"\n)\n\n// Select executes a query and return select stmt.\n// NOTE: only use for select query\nfunc (ch *conn) Select(ctx context.Context, query string, columns ...column.ColumnBasic) (SelectStmt, error) {\n\treturn ch.SelectWithOption(ctx, query, nil, columns...)\n}\n\n// Select executes a query with the the query options and return select stmt.\n// NOTE: only use for select query\nfunc (ch *conn) SelectWithOption(\n\tctx context.Context,\n\tquery string,\n\tqueryOptions *QueryOptions,\n\tcolumns ...column.ColumnBasic,\n) (SelectStmt, error) {\n\terr := ch.lock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar hasError bool\n\tdefer func() {\n\t\tif hasError {\n\t\t\tch.Close()\n\t\t}\n\t}()\n\n\tif ctx != context.Background() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, newContextAlreadyDoneError(ctx)\n\t\tdefault:\n\t\t}\n\t\tch.contextWatcher.Watch(ctx)\n\t}\n\n\tif queryOptions == nil {\n\t\tqueryOptions = emptyQueryOptions\n\t}\n\n\terr = ch.sendQueryWithOption(query, queryOptions.QueryID, queryOptions.Settings, queryOptions.Parameters)\n\tif err != nil {\n\t\thasError = true\n\t\treturn nil, preferContextOverNetTimeoutError(ctx, err)\n\t}\n\ts := &selectStmt{\n\t\tconn:           ch,\n\t\tquery:          query,\n\t\tqueryOptions:   queryOptions,\n\t\tclientInfo:     nil,\n\t\tctx:            ctx,\n\t\tcolumnsForRead: columns,\n\t}\n\tres, err := s.conn.receiveAndProcessData(nil)\n\tif err != nil {\n\t\ts.lastErr = err\n\t\ts.Close()\n\t\treturn nil, err\n\t}\n\tif block, ok := res.(*block); ok {\n\t\tif block.NumRows == 0 {\n\t\t\terr = s.readEmptyBlock(block)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn s, nil\n\t\t}\n\t}\n\treturn nil, 
&unexpectedPacket{expected: \"serverData with zero len\", actual: res}\n}\n\n// SelectStmt is a interface for select statement\ntype SelectStmt interface {\n\t// Next read the next block of data for reading.\n\t// It returns true on success, or false if there is no next result row or an error happened while preparing it.\n\t// Err should be consulted to distinguish between the two cases.\n\tNext() bool\n\t// Err returns the error, if any, that was encountered during iteration.\n\t// Err may be called after an explicit or implicit Close.\n\tErr() error\n\t// RowsInBlock return number of rows in this current block\n\tRowsInBlock() int\n\t// Columns return the columns of this select statement.\n\tColumns() []column.ColumnBasic\n\t// Close close the statement and release the connection\n\t// If Next is called and returns false and there are no further blocks,\n\t// the Rows are closed automatically and it will suffice to check the result of Err.\n\t// Close is idempotent and does not affect the result of Err.\n\tClose()\n}\n\ntype selectStmt struct {\n\tblock          *block\n\tconn           *conn\n\tquery          string\n\tqueryOptions   *QueryOptions\n\tclientInfo     *ClientInfo\n\tlastErr        error\n\tclosed         bool\n\tcolumnsForRead []column.ColumnBasic\n\tctx            context.Context\n\tfinishSelect   bool\n\tvalidateData   bool\n}\n\nvar _ SelectStmt = &selectStmt{}\n\nfunc (s *selectStmt) readEmptyBlock(b *block) error {\n\terr := b.readColumns(s.conn)\n\tif err != nil {\n\t\ts.lastErr = err\n\t\ts.Close()\n\t\treturn err\n\t}\n\tif len(s.columnsForRead) == 0 {\n\t\ts.columnsForRead, err = s.getColumnsByChType(b)\n\t\tif err != nil {\n\t\t\ts.lastErr = err\n\t\t\ts.Close()\n\t\t\treturn err\n\t\t}\n\t} else if len(s.columnsForRead[0].Name()) != 0 {\n\t\ts.columnsForRead, err = b.reorderColumns(s.columnsForRead)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *selectStmt) Next() bool {\n\t// protect after close\n\tif 
s.closed {\n\t\treturn false\n\t}\n\ts.conn.reader.SetCompress(false)\n\tres, err := s.conn.receiveAndProcessData(nil)\n\tif err != nil {\n\t\ts.lastErr = err\n\t\ts.Close()\n\t\treturn false\n\t}\n\n\tif block, ok := res.(*block); ok {\n\t\tif block.NumRows == 0 {\n\t\t\terr = s.readEmptyBlock(block)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn s.Next()\n\t\t}\n\t\ts.block = block\n\n\t\tneedValidateData := !s.validateData\n\t\ts.validateData = false\n\t\tif needValidateData {\n\t\t\tif errValidate := s.validate(); errValidate != nil {\n\t\t\t\ts.lastErr = errValidate\n\t\t\t\ts.Close()\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\terr = block.readColumnsData(s.conn, needValidateData, s.columnsForRead...)\n\t\tif err != nil {\n\t\t\ts.lastErr = preferContextOverNetTimeoutError(s.ctx, err)\n\t\t\ts.Close()\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\tif profile, ok := res.(*Profile); ok {\n\t\tif s.queryOptions.OnProfile != nil {\n\t\t\ts.queryOptions.OnProfile(profile)\n\t\t}\n\t\treturn s.Next()\n\t}\n\tif progress, ok := res.(*Progress); ok {\n\t\tif s.queryOptions.OnProgress != nil {\n\t\t\ts.queryOptions.OnProgress(progress)\n\t\t}\n\t\treturn s.Next()\n\t}\n\n\tif profileEvent, ok := res.(*ProfileEvent); ok {\n\t\tif s.queryOptions.OnProfileEvent != nil {\n\t\t\ts.queryOptions.OnProfileEvent(profileEvent)\n\t\t}\n\t\treturn s.Next()\n\t}\n\n\tif res == nil {\n\t\ts.finishSelect = true\n\t\ts.columnsForRead = nil\n\t\ts.Close()\n\t\treturn false\n\t}\n\n\ts.lastErr = &unexpectedPacket{expected: \"serverData\", actual: res}\n\ts.Close()\n\treturn false\n}\n\nfunc (s *selectStmt) validate() error {\n\tif int(s.block.NumColumns) != len(s.columnsForRead) {\n\t\treturn &ColumnNumberReadError{\n\t\t\tRead:      len(s.columnsForRead),\n\t\t\tAvailable: s.block.NumColumns,\n\t\t}\n\t}\n\treturn nil\n}\n\n// RowsInBlock return number of rows in this current block\nfunc (s *selectStmt) RowsInBlock() int {\n\treturn 
int(s.block.NumRows)\n}\n\n// Err returns the error, if any, that was encountered during iteration.\n// Err may be called after an explicit or implicit Close.\nfunc (s *selectStmt) Err() error {\n\treturn preferContextOverNetTimeoutError(s.ctx, s.lastErr)\n}\n\n// Close close the statement and release the connection\n// If Next is called and returns false and there are no further blocks,\n// the Rows are closed automatically and it will suffice to check the result of Err.\n// Close is idempotent and does not affect the result of Err.\nfunc (s *selectStmt) Close() {\n\ts.conn.reader.SetCompress(false)\n\tif !s.closed {\n\t\ts.closed = true\n\t\ts.conn.contextWatcher.Unwatch()\n\t\ts.conn.unlock()\n\t\tif s.Err() != nil || !s.finishSelect {\n\t\t\ts.conn.Close()\n\t\t}\n\t}\n}\n\nfunc (s *selectStmt) Columns() []column.ColumnBasic {\n\treturn s.columnsForRead\n}\n\nfunc (s *selectStmt) getColumnsByChType(b *block) ([]column.ColumnBasic, error) {\n\tcolumns := make([]column.ColumnBasic, len(b.Columns))\n\tfor i, col := range b.Columns {\n\t\tcolumnByType, err := s.columnByType(col.ChType, 0, false, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcolumnByType.SetName(col.Name)\n\t\tcolumnByType.SetType(col.ChType)\n\t\terr = columnByType.Validate()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcolumns[i] = columnByType\n\t}\n\treturn columns, nil\n}\n\n//nolint:funlen,gocyclo\nfunc (s *selectStmt) columnByType(chType []byte, arrayLevel int, nullable, lc bool) (column.ColumnBasic, error) {\n\tswitch {\n\tcase string(chType) == \"Bool\":\n\t\treturn column.New[bool]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"Int8\" || helper.IsEnum8(chType):\n\t\treturn column.New[int8]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"Int16\" || helper.IsEnum16(chType):\n\t\treturn column.New[int16]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"Int32\":\n\t\treturn column.New[int32]().Elem(arrayLevel, 
nullable, lc), nil\n\tcase string(chType) == \"Int64\":\n\t\treturn column.New[int64]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"Int128\":\n\t\treturn column.New[types.Int128]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"Int256\":\n\t\treturn column.New[types.Int256]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"UInt8\":\n\t\treturn column.New[uint8]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"UInt16\":\n\t\treturn column.New[uint16]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"UInt32\":\n\t\treturn column.New[uint32]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"UInt64\":\n\t\treturn column.New[uint64]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"UInt128\":\n\t\treturn column.New[types.Uint128]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"UInt256\":\n\t\treturn column.New[types.Uint256]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"Float32\":\n\t\treturn column.New[float32]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"Float64\":\n\t\treturn column.New[float64]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"String\":\n\t\treturn column.NewString().Elem(arrayLevel, nullable, lc), nil\n\tcase helper.IsFixedString(chType):\n\t\tstrLen, err := strconv.Atoi(string(chType[helper.FixedStringStrLen : len(chType)-1]))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid fixed string length: %s: %w\", string(chType), err)\n\t\t}\n\t\treturn getFixedType(strLen, arrayLevel, nullable, lc)\n\tcase string(chType) == \"Date\":\n\t\tif !s.queryOptions.UseGoTime {\n\t\t\treturn column.New[types.Date]().Elem(arrayLevel, nullable, lc), nil\n\t\t}\n\t\treturn column.NewDate[types.Date]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"Date32\":\n\t\tif !s.queryOptions.UseGoTime {\n\t\t\treturn column.New[types.Date32]().Elem(arrayLevel, 
nullable, lc), nil\n\t\t}\n\t\treturn column.NewDate[types.Date32]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"DateTime\" || helper.IsDateTimeWithParam(chType):\n\t\tif !s.queryOptions.UseGoTime {\n\t\t\treturn column.New[types.DateTime]().Elem(arrayLevel, nullable, lc), nil\n\t\t}\n\t\tvar params [][]byte\n\t\tif bytes.HasPrefix(chType, []byte(\"DateTime(\")) {\n\t\t\tparams = bytes.Split(chType[len(\"DateTime(\"):len(chType)-1], []byte(\", \"))\n\t\t}\n\t\tcol := column.NewDate[types.DateTime]()\n\t\tif len(params) > 0 && len(params[0]) >= 3 {\n\t\t\tif loc, err := time.LoadLocation(string(params[0][1 : len(params[0])-1])); err == nil {\n\t\t\t\tcol.SetLocation(loc)\n\t\t\t} else if loc, err := time.LoadLocation(s.conn.serverInfo.Timezone); err == nil {\n\t\t\t\tcol.SetLocation(loc)\n\t\t\t}\n\t\t}\n\t\treturn col.Elem(arrayLevel, nullable, lc), nil\n\tcase helper.IsDateTime64(chType):\n\t\tif !s.queryOptions.UseGoTime {\n\t\t\treturn column.New[types.DateTime64]().Elem(arrayLevel, nullable, lc), nil\n\t\t}\n\t\tparams := bytes.Split(chType[helper.DateTime64StrLen:len(chType)-1], []byte(\", \"))\n\t\tif len(params) == 0 {\n\t\t\tpanic(\"DateTime64 invalid params\")\n\t\t}\n\t\tprecision, err := strconv.Atoi(string(params[0]))\n\t\tif err != nil {\n\t\t\tpanic(\"DateTime64 invalid precision: \" + err.Error())\n\t\t}\n\t\tcol := column.NewDate[types.DateTime64]()\n\t\tcol.SetPrecision(precision)\n\t\tif len(params) > 1 && len(params[1]) >= 3 {\n\t\t\tif loc, err := time.LoadLocation(string(params[1][1 : len(params[1])-1])); err == nil {\n\t\t\t\tcol.SetLocation(loc)\n\t\t\t} else if loc, err := time.LoadLocation(s.conn.serverInfo.Timezone); err == nil {\n\t\t\t\tcol.SetLocation(loc)\n\t\t\t}\n\t\t}\n\t\treturn col.Elem(arrayLevel, nullable, lc), nil\n\n\tcase helper.IsDecimal(chType):\n\t\tparams := bytes.Split(chType[helper.DecimalStrLen:len(chType)-1], []byte(\", \"))\n\t\tprecision, _ := strconv.Atoi(string(params[0]))\n\n\t\tif precision 
<= 9 {\n\t\t\treturn column.New[types.Decimal32]().Elem(arrayLevel, nullable, lc), nil\n\t\t}\n\t\tif precision <= 18 {\n\t\t\treturn column.New[types.Decimal64]().Elem(arrayLevel, nullable, lc), nil\n\t\t}\n\t\tif precision <= 38 {\n\t\t\treturn column.New[types.Decimal128]().Elem(arrayLevel, nullable, lc), nil\n\t\t}\n\t\tif precision <= 76 {\n\t\t\treturn column.New[types.Decimal256]().Elem(arrayLevel, nullable, lc), nil\n\t\t}\n\t\tpanic(\"Decimal invalid precision: \" + string(chType))\n\n\tcase string(chType) == \"UUID\":\n\t\treturn column.New[types.UUID]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"IPv4\":\n\t\treturn column.New[types.IPv4]().Elem(arrayLevel, nullable, lc), nil\n\tcase string(chType) == \"IPv6\":\n\t\treturn column.New[types.IPv6]().Elem(arrayLevel, nullable, lc), nil\n\n\tcase helper.IsNullable(chType):\n\t\treturn s.columnByType(chType[helper.LenNullableStr:len(chType)-1], arrayLevel, true, lc)\n\n\tcase bytes.HasPrefix(chType, []byte(\"SimpleAggregateFunction(\")):\n\t\treturn s.columnByType(helper.FilterSimpleAggregate(chType), arrayLevel, nullable, lc)\n\tcase helper.IsArray(chType):\n\t\tif arrayLevel == 3 {\n\t\t\treturn nil, fmt.Errorf(\"max array level is 3\")\n\t\t}\n\t\tif nullable {\n\t\t\treturn nil, fmt.Errorf(\"array is not allowed in nullable\")\n\t\t}\n\t\tif lc {\n\t\t\treturn nil, fmt.Errorf(\"LowCardinality is not allowed in nullable\")\n\t\t}\n\t\treturn s.columnByType(chType[helper.LenArrayStr:len(chType)-1], arrayLevel+1, nullable, lc)\n\tcase helper.IsLowCardinality(chType):\n\t\treturn s.columnByType(chType[helper.LenLowCardinalityStr:len(chType)-1], arrayLevel, nullable, true)\n\tcase helper.IsTuple(chType):\n\t\tcolumnsTuple, err := helper.TypesInParentheses(chType[helper.LenTupleStr : len(chType)-1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"tuple invalid types: %w\", err)\n\t\t}\n\t\tcolumns := make([]column.ColumnBasic, len(columnsTuple))\n\t\tfor i, c := range columnsTuple 
{\n\t\t\tcol, err := s.columnByType(c.ChType, 0, false, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcol.SetName(c.Name)\n\t\t\tcolumns[i] = col\n\t\t}\n\t\treturn column.NewTuple(columns...).Elem(arrayLevel), nil\n\tcase helper.IsMap(chType):\n\t\tcolumnsMap, err := helper.TypesInParentheses(chType[helper.LenMapStr : len(chType)-1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"map invalid types: %w\", err)\n\t\t}\n\t\tif len(columnsMap) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"map must have 2 columns\")\n\t\t}\n\t\tcolumns := make([]column.ColumnBasic, len(columnsMap))\n\t\tfor i, col := range columnsMap {\n\t\t\tcol, err := s.columnByType(col.ChType, arrayLevel, nullable, lc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcolumns[i] = col\n\t\t}\n\t\treturn column.NewMapBase(columns[0], columns[1]), nil\n\tcase helper.IsNested(chType):\n\t\treturn s.columnByType(helper.NestedToArrayType(chType), arrayLevel, nullable, lc)\n\t}\n\treturn nil, fmt.Errorf(\"unknown type: %s\", chType)\n}\n\n//nolint:funlen,gocyclo\nfunc getFixedType(fixedLen, arrayLevel int, nullable, lc bool) (column.ColumnBasic, error) {\n\tswitch fixedLen {\n\tcase 1:\n\t\treturn column.New[[1]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 2:\n\t\treturn column.New[[2]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 3:\n\t\treturn column.New[[3]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 4:\n\t\treturn column.New[[4]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 5:\n\t\treturn column.New[[5]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 6:\n\t\treturn column.New[[6]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 7:\n\t\treturn column.New[[7]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 8:\n\t\treturn column.New[[8]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 9:\n\t\treturn column.New[[9]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 10:\n\t\treturn column.New[[10]byte]().Elem(arrayLevel, nullable, lc), 
nil\n\tcase 11:\n\t\treturn column.New[[11]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 12:\n\t\treturn column.New[[12]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 13:\n\t\treturn column.New[[13]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 14:\n\t\treturn column.New[[14]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 15:\n\t\treturn column.New[[15]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 16:\n\t\treturn column.New[[16]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 17:\n\t\treturn column.New[[17]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 18:\n\t\treturn column.New[[18]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 19:\n\t\treturn column.New[[19]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 20:\n\t\treturn column.New[[20]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 21:\n\t\treturn column.New[[21]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 22:\n\t\treturn column.New[[22]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 23:\n\t\treturn column.New[[23]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 24:\n\t\treturn column.New[[24]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 25:\n\t\treturn column.New[[25]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 26:\n\t\treturn column.New[[26]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 27:\n\t\treturn column.New[[27]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 28:\n\t\treturn column.New[[28]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 29:\n\t\treturn column.New[[29]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 30:\n\t\treturn column.New[[30]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 31:\n\t\treturn column.New[[31]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 32:\n\t\treturn column.New[[32]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 33:\n\t\treturn column.New[[33]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 34:\n\t\treturn column.New[[34]byte]().Elem(arrayLevel, nullable, lc), 
nil\n\tcase 35:\n\t\treturn column.New[[35]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 36:\n\t\treturn column.New[[36]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 37:\n\t\treturn column.New[[37]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 38:\n\t\treturn column.New[[38]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 39:\n\t\treturn column.New[[39]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 40:\n\t\treturn column.New[[40]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 41:\n\t\treturn column.New[[41]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 42:\n\t\treturn column.New[[42]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 43:\n\t\treturn column.New[[43]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 44:\n\t\treturn column.New[[44]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 45:\n\t\treturn column.New[[45]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 46:\n\t\treturn column.New[[46]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 47:\n\t\treturn column.New[[47]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 48:\n\t\treturn column.New[[48]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 49:\n\t\treturn column.New[[49]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 50:\n\t\treturn column.New[[50]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 51:\n\t\treturn column.New[[51]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 52:\n\t\treturn column.New[[52]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 53:\n\t\treturn column.New[[53]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 54:\n\t\treturn column.New[[54]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 55:\n\t\treturn column.New[[55]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 56:\n\t\treturn column.New[[56]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 57:\n\t\treturn column.New[[57]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 58:\n\t\treturn column.New[[58]byte]().Elem(arrayLevel, nullable, lc), 
nil\n\tcase 59:\n\t\treturn column.New[[59]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 60:\n\t\treturn column.New[[60]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 61:\n\t\treturn column.New[[61]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 62:\n\t\treturn column.New[[62]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 63:\n\t\treturn column.New[[63]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 64:\n\t\treturn column.New[[64]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 65:\n\t\treturn column.New[[65]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 66:\n\t\treturn column.New[[66]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 67:\n\t\treturn column.New[[67]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 68:\n\t\treturn column.New[[68]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 69:\n\t\treturn column.New[[69]byte]().Elem(arrayLevel, nullable, lc), nil\n\tcase 70:\n\t\treturn column.New[[70]byte]().Elem(arrayLevel, nullable, lc), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"fixed length %d is not supported\", fixedLen)\n}\n"
  },
  {
    "path": "select_stmt_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/column\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/types\"\n)\n\nfunc TestSelectError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\tc.(*conn).status = connStatusUninitialized\n\tres, err := c.Select(context.Background(), \"select * from system.numbers limit 5\")\n\trequire.Nil(t, res)\n\trequire.EqualError(t, err, \"conn uninitialized\")\n\trequire.EqualError(t, c.(*conn).lock(), \"conn uninitialized\")\n\tc.Close()\n\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerErrorHelper{\n\t\t\terr:         errors.New(\"timeout\"),\n\t\t\tw:           w,\n\t\t\tnumberValid: 1,\n\t\t}\n\t}\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tres, err = c.Select(context.Background(), \"select * from system.numbers limit 5\")\n\trequire.EqualError(t, err, \"write block info (timeout)\")\n\trequire.Nil(t, res)\n\tassert.True(t, c.IsClosed())\n\n\tconfig.WriterFunc = nil\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tcolNumber := column.New[int64]()\n\tres, err = c.Select(context.Background(), \"select number,toNullable(number) from system.numbers limit 5\", colNumber)\n\trequire.NoError(t, err)\n\tfor res.Next() {\n\t}\n\tassert.False(t, res.Next())\n\trequire.EqualError(t, res.Err(), \"read 1 column(s), but available 2 column(s)\")\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestSelectCtxError(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := 
os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\tres, err := c.Select(ctx, \"select * from system.numbers limit 1\")\n\trequire.EqualError(t, err, \"timeout: context already done: context canceled\")\n\trequire.Nil(t, res)\n\tassert.False(t, c.IsClosed())\n\n\tconfig.WriterFunc = func(w io.Writer) io.Writer {\n\t\treturn &writerSlowHelper{\n\t\t\tw:     w,\n\t\t\tsleep: time.Second,\n\t\t}\n\t}\n\tc, err = ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\tctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)\n\tdefer cancel()\n\tres, err = c.Select(ctx, \"select * from system.numbers\")\n\trequire.EqualError(t, errors.Unwrap(err), \"context deadline exceeded\")\n\trequire.Nil(t, res)\n\tassert.True(t, c.IsClosed())\n}\n\nfunc TestSelectProgress(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\tcolSleep := column.New[uint8]()\n\tcolNumber := column.New[uint64]()\n\tres, err := c.SelectWithOption(context.Background(),\n\t\t\"SELECT sleep(1), * FROM system.numbers LIMIT 1\",\n\t\t&QueryOptions{\n\t\t\tOnProgress: func(p *Progress) {\n\t\t\t},\n\t\t\tOnProfile: func(p *Profile) {\n\t\t\t},\n\t\t\tOnProfileEvent: func(p *ProfileEvent) {\n\n\t\t\t},\n\t\t},\n\t\tcolSleep,\n\t\tcolNumber,\n\t)\n\trequire.NotNil(t, res)\n\trequire.NoError(t, err)\n\n\tfor res.Next() {\n\t}\n\trequire.NoError(t, res.Err())\n\n\tc.Close()\n}\n\nfunc TestSelectParameters(t *testing.T) {\n\tt.Parallel()\n\n\tconnString := os.Getenv(\"CHX_TEST_TCP_CONN_STRING\")\n\n\tconfig, err := ParseConfig(connString)\n\trequire.NoError(t, 
err)\n\n\tc, err := ConnectConfig(context.Background(), config)\n\trequire.NoError(t, err)\n\n\tcolA := column.New[int32]()\n\tcolAS := column.New[int32]().Array()\n\tcolB := column.NewString()\n\tcolBS := column.NewString().Array()\n\tcolC := column.NewDate[types.DateTime]()\n\tcolD := column.NewMap[string, uint8](column.NewString(), column.New[uint8]())\n\tcolE := column.New[uint32]()\n\tcolES := column.New[uint32]().Array()\n\tcolF32 := column.New[float32]()\n\tcolF32S := column.New[float32]().Array()\n\tcolF64 := column.New[float64]()\n\tcolF64S := column.New[float64]().Array()\n\n\tres, err := c.SelectWithOption(context.Background(),\n\t\t`SELECT {a: Int32},\n\t\t\t\t{as: Array(Int32)},\n\t\t\t\t{b: String},\n\t\t\t\t{bs: Array(String)},\n\t\t\t\t{c: DateTime},\n\t\t\t\t{d: Map(String, UInt8)},\n\t\t\t\t{e: UInt32},\n\t\t\t\t{es: Array(UInt32)},\n\t\t\t\t{f32: Float32},\n\t\t\t\t{f64: Float64},\n\t\t\t\t{f32s: Array(Float32)},\n\t\t\t\t{f64s: Array(Float64)}\n\t\t\t\t`,\n\t\t&QueryOptions{\n\t\t\tParameters: NewParameters(\n\t\t\t\tIntParameter(\"a\", 13),\n\t\t\t\tIntSliceParameter(\"as\", []int32{-15, -16}),\n\t\t\t\tStringParameter(\"b\", \"str'\"),\n\t\t\t\tStringSliceParameter(\"bs\", []string{\"str\", \"str2\\\\'\"}),\n\t\t\t\tStringParameter(\"c\", \"2022-08-04 18:30:53\"),\n\t\t\t\tStringParameter(\"d\", `{'a': 1, 'b': 2}`),\n\t\t\t\tUintParameter(\"e\", uint64(14)),\n\t\t\t\tUintSliceParameter(\"es\", []uint32{15, 16}),\n\t\t\t\tFloat32Parameter(\"f32\", float32(1.5)),\n\t\t\t\tFloat64Parameter(\"f64\", float64(1.8)),\n\t\t\t\tFloat32SliceParameter(\"f32s\", []float32{1.5, 1.6}),\n\t\t\t\tFloat64SliceParameter(\"f64s\", []float64{1.8, 1.9}),\n\t\t\t),\n\t\t},\n\t\tcolA,\n\t\tcolAS,\n\t\tcolB,\n\t\tcolBS,\n\t\tcolC,\n\t\tcolD,\n\t\tcolE,\n\t\tcolES,\n\t\tcolF32,\n\t\tcolF64,\n\t\tcolF32S,\n\t\tcolF64S,\n\t)\n\n\tif err != nil && err.Error() == \"parameters are not supported by the server\" {\n\t\tt.SkipNow()\n\t}\n\trequire.NoError(t, 
err)\n\trequire.NotNil(t, res)\n\n\tfor res.Next() {\n\t}\n\trequire.NoError(t, res.Err())\n\trequire.Len(t, colA.Data(), 1)\n\trequire.Len(t, colAS.Data(), 1)\n\trequire.Len(t, colB.Data(), 1)\n\trequire.Len(t, colBS.Data(), 1)\n\trequire.Len(t, colC.Data(), 1)\n\trequire.Len(t, colD.Data(), 1)\n\trequire.Len(t, colE.Data(), 1)\n\trequire.Len(t, colES.Data(), 1)\n\tassert.Equal(t, int32(13), colA.Data()[0])\n\tassert.Equal(t, []int32{-15, -16}, colAS.Data()[0])\n\tassert.Equal(t, \"str'\", colB.Data()[0])\n\tassert.Equal(t, []string{\"str\", \"str2\\\\'\"}, colBS.Data()[0])\n\tassert.Equal(t, \"2022-08-04 18:30:53\", colC.Data()[0].Format(\"2006-01-02 15:04:05\"))\n\tassert.Equal(t, map[string]uint8{\n\t\t\"a\": 1,\n\t\t\"b\": 2,\n\t}, colD.Data()[0])\n\tassert.Equal(t, uint32(14), colE.Data()[0])\n\tassert.Equal(t, []uint32{15, 16}, colES.Data()[0])\n\tassert.Equal(t, float32(1.5), colF32.Data()[0])\n\tassert.Equal(t, float64(1.8), colF64.Data()[0])\n\tassert.Equal(t, []float32{1.5, 1.6}, colF32S.Data()[0])\n\tassert.Equal(t, []float64{1.8, 1.9}, colF64S.Data()[0])\n\n\tc.Close()\n}\n\nfunc TestSelectProgressError(t *testing.T) {\n\tstartValidReader := 33\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t\tminRevision uint64\n\t}{\n\t\t{\n\t\t\tname:        \"read ReadRows\",\n\t\t\twantErr:     \"progress: read ReadRows (timeout)\",\n\t\t\tnumberValid: startValidReader,\n\t\t},\n\t\t{\n\t\t\tname:        \"read ReadBytes\",\n\t\t\twantErr:     \"progress: read ReadBytes (timeout)\",\n\t\t\tnumberValid: startValidReader + 1,\n\t\t},\n\t\t{\n\t\t\tname:        \"read TotalRows \",\n\t\t\twantErr:     \"progress: read TotalRows (timeout)\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t},\n\t\t{\n\t\t\tname:        \"read WriterRows\",\n\t\t\twantErr:     \"progress: read WriterRows (timeout)\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t},\n\t\t{\n\t\t\tname:        \"read WrittenBytes\",\n\t\t\twantErr:     
\"progress: read WrittenBytes (timeout)\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t},\n\t\t{\n\t\t\tname:        \"read ElapsedNS\",\n\t\t\twantErr:     \"progress: read ElapsedNS (timeout)\",\n\t\t\tnumberValid: startValidReader + 5,\n\t\t\tminRevision: helper.DbmsMinProtocolWithServerQueryTimeInProgress,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc, err := ConnectConfig(context.Background(), config)\n\t\t\trequire.NoError(t, err)\n\t\t\tif c.ServerInfo().Revision < tt.minRevision {\n\t\t\t\tc.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcolSleep := column.New[uint8]()\n\t\t\tcolNumber := column.New[uint64]()\n\t\t\tres, err := c.SelectWithOption(context.Background(),\n\t\t\t\t\"SELECT sleep(1), * FROM system.numbers LIMIT 1\",\n\t\t\t\t&QueryOptions{\n\t\t\t\t\tOnProgress: func(p *Progress) {\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcolSleep,\n\t\t\t\tcolNumber,\n\t\t\t)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\n\t\t\tfor res.Next() {\n\t\t\t}\n\n\t\t\tassert.EqualError(t, res.Err(), tt.wantErr)\n\t\t})\n\t}\n}\n\nfunc TestGetFixedColumnType(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tlen  int\n\t\tcol  column.ColumnBasic\n\t}{\n\t\t{\n\t\t\tname: \"fixed 1\",\n\t\t\tlen:  1,\n\t\t\tcol:  column.New[[1]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 2\",\n\t\t\tlen:  2,\n\t\t\tcol:  column.New[[2]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 3\",\n\t\t\tlen:  3,\n\t\t\tcol:  column.New[[3]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 4\",\n\t\t\tlen:  4,\n\t\t\tcol:  column.New[[4]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 5\",\n\t\t\tlen:  5,\n\t\t\tcol:  
column.New[[5]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 6\",\n\t\t\tlen:  6,\n\t\t\tcol:  column.New[[6]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 7\",\n\t\t\tlen:  7,\n\t\t\tcol:  column.New[[7]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 8\",\n\t\t\tlen:  8,\n\t\t\tcol:  column.New[[8]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 9\",\n\t\t\tlen:  9,\n\t\t\tcol:  column.New[[9]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 10\",\n\t\t\tlen:  10,\n\t\t\tcol:  column.New[[10]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 11\",\n\t\t\tlen:  11,\n\t\t\tcol:  column.New[[11]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 12\",\n\t\t\tlen:  12,\n\t\t\tcol:  column.New[[12]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 13\",\n\t\t\tlen:  13,\n\t\t\tcol:  column.New[[13]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 14\",\n\t\t\tlen:  14,\n\t\t\tcol:  column.New[[14]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 15\",\n\t\t\tlen:  15,\n\t\t\tcol:  column.New[[15]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 16\",\n\t\t\tlen:  16,\n\t\t\tcol:  column.New[[16]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 17\",\n\t\t\tlen:  17,\n\t\t\tcol:  column.New[[17]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 18\",\n\t\t\tlen:  18,\n\t\t\tcol:  column.New[[18]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 19\",\n\t\t\tlen:  19,\n\t\t\tcol:  column.New[[19]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 20\",\n\t\t\tlen:  20,\n\t\t\tcol:  column.New[[20]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 21\",\n\t\t\tlen:  21,\n\t\t\tcol:  column.New[[21]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 22\",\n\t\t\tlen:  22,\n\t\t\tcol:  column.New[[22]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 23\",\n\t\t\tlen:  23,\n\t\t\tcol:  column.New[[23]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 24\",\n\t\t\tlen:  24,\n\t\t\tcol:  column.New[[24]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 25\",\n\t\t\tlen:  25,\n\t\t\tcol:  column.New[[25]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 26\",\n\t\t\tlen:  
26,\n\t\t\tcol:  column.New[[26]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 27\",\n\t\t\tlen:  27,\n\t\t\tcol:  column.New[[27]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 28\",\n\t\t\tlen:  28,\n\t\t\tcol:  column.New[[28]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 29\",\n\t\t\tlen:  29,\n\t\t\tcol:  column.New[[29]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 30\",\n\t\t\tlen:  30,\n\t\t\tcol:  column.New[[30]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 31\",\n\t\t\tlen:  31,\n\t\t\tcol:  column.New[[31]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 32\",\n\t\t\tlen:  32,\n\t\t\tcol:  column.New[[32]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 33\",\n\t\t\tlen:  33,\n\t\t\tcol:  column.New[[33]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 34\",\n\t\t\tlen:  34,\n\t\t\tcol:  column.New[[34]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 35\",\n\t\t\tlen:  35,\n\t\t\tcol:  column.New[[35]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 36\",\n\t\t\tlen:  36,\n\t\t\tcol:  column.New[[36]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 37\",\n\t\t\tlen:  37,\n\t\t\tcol:  column.New[[37]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 38\",\n\t\t\tlen:  38,\n\t\t\tcol:  column.New[[38]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 39\",\n\t\t\tlen:  39,\n\t\t\tcol:  column.New[[39]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 40\",\n\t\t\tlen:  40,\n\t\t\tcol:  column.New[[40]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 41\",\n\t\t\tlen:  41,\n\t\t\tcol:  column.New[[41]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 42\",\n\t\t\tlen:  42,\n\t\t\tcol:  column.New[[42]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 43\",\n\t\t\tlen:  43,\n\t\t\tcol:  column.New[[43]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 44\",\n\t\t\tlen:  44,\n\t\t\tcol:  column.New[[44]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 45\",\n\t\t\tlen:  45,\n\t\t\tcol:  column.New[[45]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 46\",\n\t\t\tlen:  46,\n\t\t\tcol:  column.New[[46]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 
47\",\n\t\t\tlen:  47,\n\t\t\tcol:  column.New[[47]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 48\",\n\t\t\tlen:  48,\n\t\t\tcol:  column.New[[48]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 49\",\n\t\t\tlen:  49,\n\t\t\tcol:  column.New[[49]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 50\",\n\t\t\tlen:  50,\n\t\t\tcol:  column.New[[50]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 51\",\n\t\t\tlen:  51,\n\t\t\tcol:  column.New[[51]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 52\",\n\t\t\tlen:  52,\n\t\t\tcol:  column.New[[52]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 53\",\n\t\t\tlen:  53,\n\t\t\tcol:  column.New[[53]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 54\",\n\t\t\tlen:  54,\n\t\t\tcol:  column.New[[54]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 55\",\n\t\t\tlen:  55,\n\t\t\tcol:  column.New[[55]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 56\",\n\t\t\tlen:  56,\n\t\t\tcol:  column.New[[56]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 57\",\n\t\t\tlen:  57,\n\t\t\tcol:  column.New[[57]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 58\",\n\t\t\tlen:  58,\n\t\t\tcol:  column.New[[58]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 59\",\n\t\t\tlen:  59,\n\t\t\tcol:  column.New[[59]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 60\",\n\t\t\tlen:  60,\n\t\t\tcol:  column.New[[60]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 61\",\n\t\t\tlen:  61,\n\t\t\tcol:  column.New[[61]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 62\",\n\t\t\tlen:  62,\n\t\t\tcol:  column.New[[62]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 63\",\n\t\t\tlen:  63,\n\t\t\tcol:  column.New[[63]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 64\",\n\t\t\tlen:  64,\n\t\t\tcol:  column.New[[64]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 65\",\n\t\t\tlen:  65,\n\t\t\tcol:  column.New[[65]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 66\",\n\t\t\tlen:  66,\n\t\t\tcol:  column.New[[66]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 67\",\n\t\t\tlen:  67,\n\t\t\tcol:  
column.New[[67]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 68\",\n\t\t\tlen:  68,\n\t\t\tcol:  column.New[[68]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 69\",\n\t\t\tlen:  69,\n\t\t\tcol:  column.New[[69]byte](),\n\t\t},\n\t\t{\n\t\t\tname: \"fixed 70\",\n\t\t\tlen:  70,\n\t\t\tcol:  column.New[[70]byte](),\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tf, err := getFixedType(tt.len, 0, false, false)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.IsType(t, f, tt.col)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "server_info.go",
    "content": "package chconn\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/helper\"\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\n// ServerInfo detail of server info\ntype ServerInfo struct {\n\tName               string\n\tRevision           uint64\n\tMinorVersion       uint64\n\tMajorVersion       uint64\n\tServerDisplayName  string\n\tServerVersionPatch uint64\n\tTimezone           string\n}\n\nfunc (srv *ServerInfo) read(r *readerwriter.Reader) (err error) {\n\tif srv.Name, err = r.String(); err != nil {\n\t\treturn &readError{\"ServerInfo: could not read server name\", err}\n\t}\n\tif srv.MajorVersion, err = r.Uvarint(); err != nil {\n\t\treturn &readError{\"ServerInfo: could not read server major version\", err}\n\t}\n\tif srv.MinorVersion, err = r.Uvarint(); err != nil {\n\t\treturn &readError{\"ServerInfo: could not read server minor version\", err}\n\t}\n\tif srv.Revision, err = r.Uvarint(); err != nil {\n\t\treturn &readError{\"ServerInfo: could not read server revision\", err}\n\t}\n\tif srv.Revision >= helper.DbmsMinRevisionWithServerTimezone {\n\t\tif srv.Timezone, err = r.String(); err != nil {\n\t\t\treturn &readError{\"ServerInfo: could not read server timezone\", err}\n\t\t}\n\t}\n\tif srv.Revision >= helper.DbmsMinRevisionWithServerDisplayName {\n\t\tif srv.ServerDisplayName, err = r.String(); err != nil {\n\t\t\treturn &readError{\"ServerInfo: could not read server display name\", err}\n\t\t}\n\t}\n\tif srv.Revision >= helper.DbmsMinRevisionWithVersionPatch {\n\t\tif srv.ServerVersionPatch, err = r.Uvarint(); err != nil {\n\t\t\treturn &readError{\"ServerInfo: could not read server version patch\", err}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (srv *ServerInfo) String() string {\n\treturn fmt.Sprintf(\"%s %d.%d.%d (%s) %s %d\",\n\t\tsrv.Name,\n\t\tsrv.MajorVersion,\n\t\tsrv.MinorVersion,\n\t\tsrv.Revision,\n\t\tsrv.Timezone,\n\t\tsrv.ServerDisplayName,\n\t\tsrv.ServerVersionPatch)\n}\n\n// 
ServerInfo get server info\nfunc (ch *conn) ServerInfo() *ServerInfo {\n\treturn ch.serverInfo\n}\n"
  },
  {
    "path": "server_info_test.go",
    "content": "package chconn\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestServerInfoError(t *testing.T) {\n\tstartValidReader := 1\n\n\ttests := []struct {\n\t\tname        string\n\t\twantErr     string\n\t\tnumberValid int\n\t}{\n\t\t{\n\t\t\tname:        \"server name\",\n\t\t\twantErr:     \"ServerInfo: could not read server name\",\n\t\t\tnumberValid: startValidReader,\n\t\t}, {\n\t\t\tname:        \"server major version\",\n\t\t\twantErr:     \"ServerInfo: could not read server major version\",\n\t\t\tnumberValid: startValidReader + 2,\n\t\t}, {\n\t\t\tname:        \"server minor version\",\n\t\t\twantErr:     \"ServerInfo: could not read server minor version\",\n\t\t\tnumberValid: startValidReader + 3,\n\t\t}, {\n\t\t\tname:        \"server revision\",\n\t\t\twantErr:     \"ServerInfo: could not read server revision\",\n\t\t\tnumberValid: startValidReader + 4,\n\t\t}, {\n\t\t\tname:        \"server timezone\",\n\t\t\twantErr:     \"ServerInfo: could not read server timezone\",\n\t\t\tnumberValid: startValidReader + 7,\n\t\t}, {\n\t\t\tname:        \"server display name\",\n\t\t\twantErr:     \"ServerInfo: could not read server display name\",\n\t\t\tnumberValid: startValidReader + 9,\n\t\t}, {\n\t\t\tname:        \"server version patch\",\n\t\t\twantErr:     \"ServerInfo: could not read server version patch\",\n\t\t\tnumberValid: startValidReader + 11,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconfig, err := ParseConfig(os.Getenv(\"CHX_TEST_TCP_CONN_STRING\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tconfig.ReaderFunc = func(r io.Reader) io.Reader {\n\t\t\t\treturn &readErrorHelper{\n\t\t\t\t\terr:         errors.New(\"timeout\"),\n\t\t\t\t\tr:           r,\n\t\t\t\t\tnumberValid: tt.numberValid,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_, err = ConnectConfig(context.Background(), config)\n\t\t\trequire.Error(t, err)\n\t\t\treadErr, 
ok := err.(*readError)\n\t\t\trequire.True(t, ok)\n\t\t\trequire.Equal(t, readErr.msg, tt.wantErr)\n\t\t\trequire.EqualError(t, readErr.Unwrap(), \"timeout\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "settings.go",
    "content": "package chconn\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter\"\n)\n\n// Setting is a setting for the clickhouse query.\n//\n// The list of setting is here: https://clickhouse.com/docs/en/operations/settings/settings/\n// Some of settings doesn't have effect. for example `http_zlib_compression_level`\n// because chconn use TCP connection to send data not HTTP.\ntype Setting struct {\n\tName, Value                 string\n\tImportant, Custom, Obsolete bool\n}\n\nconst (\n\tsettingFlagImportant = 0x01\n\tsettingFlagCustom    = 0x02\n\tsettingFlagObsolete  = 0x04\n)\n\n// Settings is a list of settings for the clickhouse query.\ntype Settings []Setting\n\nfunc (st Setting) write(w *readerwriter.Writer) {\n\tw.String(st.Name)\n\n\tvar flag uint8\n\tif st.Important {\n\t\tflag |= settingFlagImportant\n\t}\n\tif st.Custom {\n\t\tflag |= settingFlagCustom\n\t}\n\tif st.Obsolete {\n\t\tflag |= settingFlagObsolete\n\t}\n\tw.Uint8(flag)\n\n\tw.String(st.Value)\n}\n\nfunc (s Settings) write(w *readerwriter.Writer) {\n\tfor _, st := range s {\n\t\tst.write(w)\n\t}\n}\n\n// Parameters is a list of params for the clickhouse query.\ntype Parameters struct {\n\tparams []Setting\n}\n\ntype Parameter func() Setting\n\nfunc NewParameters(input ...Parameter) *Parameters {\n\tparams := make([]Setting, len(input))\n\tfor i, p := range input {\n\t\tparams[i] = p()\n\t}\n\treturn &Parameters{\n\t\tparams: params,\n\t}\n}\n\n// IntParameter get int query parameter.\nfunc IntParameter[T ~int | ~int8 | ~int16 | ~int32 | ~int64](name string, v T) Parameter {\n\treturn func() Setting {\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + strconv.FormatInt(int64(v), 10) + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\n// IntSliceParameter get int query parameter.\nfunc IntSliceParameter[T ~int | ~int8 | ~int16 | ~int32 | ~int64](name string, v []T) Parameter {\n\treturn func() Setting {\n\t\tvar b 
strings.Builder\n\t\tb.WriteString(\"[\")\n\t\tfor i, v := range v {\n\t\t\tif i > 0 {\n\t\t\t\tb.WriteString(\",\")\n\t\t\t}\n\t\t\tb.WriteString(strconv.FormatInt(int64(v), 10))\n\t\t}\n\t\tb.WriteString(\"]\")\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + b.String() + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\n// UintParameter get uint query parameter.\nfunc UintParameter[T ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64](name string, v T) Parameter {\n\treturn func() Setting {\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + strconv.FormatUint(uint64(v), 10) + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\n// UintSliceParameter get uint slice query parameter.\nfunc UintSliceParameter[T ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64](name string, v []T) Parameter {\n\treturn func() Setting {\n\t\tvar b strings.Builder\n\t\tb.WriteString(\"[\")\n\t\tfor i, v := range v {\n\t\t\tif i > 0 {\n\t\t\t\tb.WriteString(\",\")\n\t\t\t}\n\t\t\tb.WriteString(strconv.FormatUint(uint64(v), 10))\n\t\t}\n\t\tb.WriteString(\"]\")\n\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + b.String() + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\n// Float32Parameter get float32 query parameter.\nfunc Float32Parameter[T ~float32](name string, v T) Parameter {\n\treturn func() Setting {\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + strconv.FormatFloat(float64(v), 'f', -1, 32) + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\n// Float32SliceParameter get float32 slice query parameter.\nfunc Float32SliceParameter[T ~float32](name string, v []T) Parameter {\n\treturn func() Setting {\n\t\tvar b strings.Builder\n\t\tb.WriteString(\"[\")\n\t\tfor i, v := range v {\n\t\t\tif i > 0 {\n\t\t\t\tb.WriteString(\",\")\n\t\t\t}\n\t\t\tb.WriteString(strconv.FormatFloat(float64(v), 'f', -1, 32))\n\t\t}\n\t\tb.WriteString(\"]\")\n\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + b.String() + \"'\",\n\t\t\tCustom: 
true,\n\t\t}\n\t}\n}\n\n// Float64Parameter get float64 query parameter.\nfunc Float64Parameter[T ~float64](name string, v T) Parameter {\n\treturn func() Setting {\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + strconv.FormatFloat(float64(v), 'f', -1, 64) + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\n// Float64SliceParameter get float64 slice query parameter.\nfunc Float64SliceParameter[T ~float64](name string, v []T) Parameter {\n\treturn func() Setting {\n\t\tvar b strings.Builder\n\t\tb.WriteString(\"[\")\n\t\tfor i, v := range v {\n\t\t\tif i > 0 {\n\t\t\t\tb.WriteString(\",\")\n\t\t\t}\n\t\t\tb.WriteString(strconv.FormatFloat(float64(v), 'f', -1, 64))\n\t\t}\n\t\tb.WriteString(\"]\")\n\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + b.String() + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\nfunc addSlashes(str string) string {\n\tvar tmpRune []rune\n\tfor _, ch := range str {\n\t\tswitch ch {\n\t\tcase '\\\\', '\\'':\n\t\t\ttmpRune = append(tmpRune, '\\\\', ch)\n\t\tdefault:\n\t\t\ttmpRune = append(tmpRune, ch)\n\t\t}\n\t}\n\treturn string(tmpRune)\n}\n\n// StringParameter get string query parameter.\nfunc StringParameter(name, v string) Parameter {\n\treturn func() Setting {\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + addSlashes(v) + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\n// StringSliceParameter get string array query parameter.\nfunc StringSliceParameter(name string, v []string) Parameter {\n\treturn func() Setting {\n\t\tvar b strings.Builder\n\t\tb.WriteString(\"[\")\n\t\tfor i, v := range v {\n\t\t\tif i > 0 {\n\t\t\t\tb.WriteString(\",\")\n\t\t\t}\n\t\t\tb.WriteString(\"'\" + addSlashes(v) + \"'\")\n\t\t}\n\t\tb.WriteString(\"]\")\n\t\treturn Setting{\n\t\t\tName:   name,\n\t\t\tValue:  \"'\" + addSlashes(b.String()) + \"'\",\n\t\t\tCustom: true,\n\t\t}\n\t}\n}\n\nfunc (p *Parameters) Params() []Setting {\n\treturn p.params\n}\n\nfunc (p *Parameters) hasParam() bool {\n\treturn p != nil && 
len(p.params) > 0\n}\n\nfunc (p *Parameters) write(w *readerwriter.Writer) {\n\tif p == nil {\n\t\treturn\n\t}\n\tfor _, st := range p.params {\n\t\tst.write(w)\n\t}\n}\n"
  },
  {
    "path": "sqlbuilder/injection.go",
    "content": "// sqlbuilder is a builder for SQL statements for clickhouse.\n// copy from https://github.com/huandu/go-sqlbuilder\n// change for chconn\npackage sqlbuilder\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n)\n\n// injection is a helper type to manage injected SQLs in all builders.\ntype injection struct {\n\tmarkerSQLs map[injectionMarker][]string\n}\n\ntype injectionMarker int\n\n// newInjection creates a new injection.\nfunc newInjection() *injection {\n\treturn &injection{\n\t\tmarkerSQLs: map[injectionMarker][]string{},\n\t}\n}\n\n// SQL adds sql to injection's sql list.\n// All sqls inside injection is ordered by marker in ascending order.\nfunc (injection *injection) SQL(marker injectionMarker, sql string) {\n\tinjection.markerSQLs[marker] = append(injection.markerSQLs[marker], sql)\n}\n\n// WriteTo joins all SQL strings at the same marker value with blank (\" \")\n// and writes the joined value to buf.\nfunc (injection *injection) WriteTo(buf *bytes.Buffer, marker injectionMarker) {\n\tsqls := injection.markerSQLs[marker]\n\tempty := buf.Len() == 0\n\n\tif len(sqls) == 0 {\n\t\treturn\n\t}\n\n\tif !empty {\n\t\tbuf.WriteByte(' ')\n\t}\n\n\ts := strings.Join(sqls, \" \")\n\tbuf.WriteString(s)\n\n\tif empty {\n\t\tbuf.WriteByte(' ')\n\t}\n}\n"
  },
  {
    "path": "sqlbuilder/select.go",
    "content": "// sqlbuilder is a builder for SQL statements for clickhouse.\n// copy from https://github.com/huandu/go-sqlbuilder\n// change for chconn\npackage sqlbuilder\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n)\n\nconst (\n\tselectMarkerInit injectionMarker = iota\n\tselectMarkerAfterSelect\n\tselectMarkerAfterFrom\n\tselectMarkerAfterArrayJoin\n\tselectMarkerAfterJoin\n\tselectMarkerAfterPreWhere\n\tselectMarkerAfterWhere\n\tselectMarkerAfterGroupBy\n\tselectMarkerAfterOrderBy\n\tselectMarkerAfterLimit\n\tselectMarkerAfterFor\n)\n\n// JoinOption is the option in JOIN.\ntype JoinOption string\n\n// Join options.\nconst (\n\tInnerJoin      JoinOption = \"INNER\"\n\tLeftJoin       JoinOption = \"LEFT\"\n\tLeftOuterJoin  JoinOption = \"LEFT OUTER\"\n\tLeftSemiJoin   JoinOption = \"LEFT SEMI\"\n\tLeftAntiJoin   JoinOption = \"LEFT ANTI\"\n\tRightJoin      JoinOption = \"RIGHT\"\n\tRightOuterJoin JoinOption = \"RIGHT OUTER\"\n\tRightSemiJoin  JoinOption = \"RIGHT SEMI\"\n\tRightAntiJoin  JoinOption = \"RIGHT ANTI\"\n\tFullJoin       JoinOption = \"FULL\"\n\tFullOuterJoin  JoinOption = \"FULL OUTER\"\n\tCrossJoin      JoinOption = \"CROSS\"\n)\n\nfunc NewSelectBuilder() *SelectBuilder {\n\treturn &SelectBuilder{\n\t\tlimit:     -1,\n\t\toffset:    -1,\n\t\tinjection: newInjection(),\n\t}\n}\n\n// SelectBuilder is a builder to build SELECT.\ntype SelectBuilder struct {\n\tparameters    []chconn.Parameter\n\tdistinct      bool\n\tfinal         bool\n\ttables        []string\n\tselectCols    []string\n\tleftArrayJoin bool\n\tarrayJoin     []string\n\tjoinOptions   []JoinOption\n\tjoinTables    []string\n\tjoinExprs     [][]string\n\twhereExprs    []string\n\tpreWhereExprs []string\n\thavingExprs   []string\n\tgroupByCols   []string\n\torderByCols   []string\n\tlimit         int\n\toffset        int\n\n\tinjection *injection\n\tmarker    injectionMarker\n}\n\n// var _ Builder = 
new(SelectBuilder)\n\n// Select sets columns in SELECT.\nfunc Select(col ...string) *SelectBuilder {\n\treturn NewSelectBuilder().Select(col...)\n}\n\n// Select sets columns in SELECT.\nfunc (sb *SelectBuilder) Select(col ...string) *SelectBuilder {\n\tsb.selectCols = col\n\tsb.marker = selectMarkerAfterSelect\n\treturn sb\n}\n\n// Select add columns in SELECT.\nfunc (sb *SelectBuilder) Column(col ...string) *SelectBuilder {\n\tsb.selectCols = append(sb.selectCols, col...)\n\tsb.marker = selectMarkerAfterSelect\n\treturn sb\n}\n\n// Distinct marks this SELECT as DISTINCT.\nfunc (sb *SelectBuilder) Distinct() *SelectBuilder {\n\tsb.distinct = true\n\tsb.marker = selectMarkerAfterSelect\n\treturn sb\n}\n\n// Final marks this SELECT as FINAL.\nfunc (sb *SelectBuilder) Final() *SelectBuilder {\n\tsb.final = true\n\tsb.marker = selectMarkerAfterSelect\n\treturn sb\n}\n\n// From sets table names in SELECT.\nfunc (sb *SelectBuilder) From(table ...string) *SelectBuilder {\n\tsb.tables = table\n\tsb.marker = selectMarkerAfterFrom\n\treturn sb\n}\n\n// arrayJoin sets expressions of Array Join in SELECT.\n//\n// It builds a ARRAY JOIN expression like\n//\n//\tArray JOIN onExpr[0], onExpr[1] ...\nfunc (sb *SelectBuilder) ArrayJoin(onExpr ...string) *SelectBuilder {\n\tsb.marker = selectMarkerAfterArrayJoin\n\tsb.arrayJoin = append(sb.arrayJoin, onExpr...)\n\treturn sb\n}\n\n// LeftArrayJoin marks this SELECT as LEFT ARRAY JOIN.\nfunc (sb *SelectBuilder) LeftArrayJoin() *SelectBuilder {\n\tsb.leftArrayJoin = true\n\treturn sb\n}\n\n// Join sets expressions of JOIN in SELECT.\n//\n// It builds a JOIN expression like\n//\n//\tJOIN table ON onExpr[0] AND onExpr[1] ...\nfunc (sb *SelectBuilder) Join(table string, onExpr ...string) *SelectBuilder {\n\tsb.marker = selectMarkerAfterJoin\n\treturn sb.JoinWithOption(\"\", table, onExpr...)\n}\n\n// JoinWithOption sets expressions of JOIN with an option.\n//\n// It builds a JOIN expression like\n//\n//\toption JOIN table ON onExpr[0] AND 
onExpr[1] ...\n//\n// Here is a list of supported options.\n//   - FullJoin: FULL JOIN\n//   - FullOuterJoin: FULL OUTER JOIN\n//   - InnerJoin: INNER JOIN\n//   - LeftJoin: LEFT JOIN\n//   - LeftOuterJoin: LEFT OUTER JOIN\n//   - RightJoin: RIGHT JOIN\n//   - RightOuterJoin: RIGHT OUTER JOIN\nfunc (sb *SelectBuilder) JoinWithOption(option JoinOption, table string, onExpr ...string) *SelectBuilder {\n\tsb.joinOptions = append(sb.joinOptions, option)\n\tsb.joinTables = append(sb.joinTables, table)\n\tsb.joinExprs = append(sb.joinExprs, onExpr)\n\tsb.marker = selectMarkerAfterJoin\n\treturn sb\n}\n\n// Where sets expressions of WHERE in SELECT.\nfunc (sb *SelectBuilder) Where(andExpr ...string) *SelectBuilder {\n\tsb.whereExprs = append(sb.whereExprs, andExpr...)\n\tsb.marker = selectMarkerAfterWhere\n\treturn sb\n}\n\n// PreWhere sets expressions of PREWHERE in SELECT.\nfunc (sb *SelectBuilder) PreWhere(andExpr ...string) *SelectBuilder {\n\tsb.marker = selectMarkerAfterPreWhere\n\tsb.preWhereExprs = append(sb.preWhereExprs, andExpr...)\n\treturn sb\n}\n\nfunc (sb *SelectBuilder) Parameters(p chconn.Parameter) *SelectBuilder {\n\tsb.parameters = append(sb.parameters, p)\n\treturn sb\n}\n\n// Having sets expressions of HAVING in SELECT.\nfunc (sb *SelectBuilder) Having(andExpr ...string) *SelectBuilder {\n\tsb.havingExprs = append(sb.havingExprs, andExpr...)\n\tsb.marker = selectMarkerAfterGroupBy\n\treturn sb\n}\n\n// GroupBy sets columns of GROUP BY in SELECT.\nfunc (sb *SelectBuilder) GroupBy(col ...string) *SelectBuilder {\n\tsb.groupByCols = append(sb.groupByCols, col...)\n\tsb.marker = selectMarkerAfterGroupBy\n\treturn sb\n}\n\n// OrderBy sets columns of ORDER BY in SELECT.\nfunc (sb *SelectBuilder) OrderBy(col ...string) *SelectBuilder {\n\tsb.orderByCols = append(sb.orderByCols, col...)\n\tsb.marker = selectMarkerAfterOrderBy\n\treturn sb\n}\n\n// Limit sets the LIMIT in SELECT.\nfunc (sb *SelectBuilder) Limit(limit int) *SelectBuilder {\n\tsb.limit = 
limit\n\tsb.marker = selectMarkerAfterLimit\n\treturn sb\n}\n\n// Offset sets the LIMIT offset in SELECT.\nfunc (sb *SelectBuilder) Offset(offset int) *SelectBuilder {\n\tsb.offset = offset\n\tsb.marker = selectMarkerAfterLimit\n\treturn sb\n}\n\n// As returns an AS expression.\nfunc As(name, alias string) string {\n\treturn fmt.Sprintf(\"%s AS %s\", name, alias)\n}\n\n// String returns the compiled SELECT string.\nfunc (sb *SelectBuilder) String() string {\n\ts, _ := sb.Build()\n\treturn s\n}\n\n// Build returns compiled SELECT string and args.\n// They can be used in `Select` directly.\nfunc (sb *SelectBuilder) Build() (sql string, params *chconn.Parameters) {\n\tbuf := &bytes.Buffer{}\n\tsb.injection.WriteTo(buf, selectMarkerInit)\n\tbuf.WriteString(\"SELECT \")\n\n\tif sb.distinct {\n\t\tbuf.WriteString(\"DISTINCT \")\n\t}\n\n\tbuf.WriteString(strings.Join(sb.selectCols, \", \"))\n\tsb.injection.WriteTo(buf, selectMarkerAfterSelect)\n\n\tbuf.WriteString(\" FROM \")\n\tbuf.WriteString(strings.Join(sb.tables, \", \"))\n\tsb.injection.WriteTo(buf, selectMarkerAfterFrom)\n\n\tif sb.final {\n\t\tbuf.WriteString(\" FINAL\")\n\t}\n\n\tif len(sb.arrayJoin) > 0 {\n\t\tif sb.leftArrayJoin {\n\t\t\tbuf.WriteString(\" LEFT\")\n\t\t}\n\t\tbuf.WriteString(\" ARRAY JOIN \")\n\t\tbuf.WriteString(strings.Join(sb.arrayJoin, \" , \"))\n\t\tsb.injection.WriteTo(buf, selectMarkerAfterArrayJoin)\n\t}\n\n\tfor i := range sb.joinTables {\n\t\tif option := sb.joinOptions[i]; option != \"\" {\n\t\t\tbuf.WriteByte(' ')\n\t\t\tbuf.WriteString(string(option))\n\t\t}\n\n\t\tbuf.WriteString(\" JOIN \")\n\t\tbuf.WriteString(sb.joinTables[i])\n\n\t\tif exprs := sb.joinExprs[i]; len(exprs) > 0 {\n\t\t\tbuf.WriteString(\" ON \")\n\t\t\tbuf.WriteString(strings.Join(sb.joinExprs[i], \" AND \"))\n\t\t}\n\t}\n\n\tif len(sb.joinTables) > 0 {\n\t\tsb.injection.WriteTo(buf, selectMarkerAfterJoin)\n\t}\n\n\tif len(sb.preWhereExprs) > 0 {\n\t\tbuf.WriteString(\" PREWHERE 
\")\n\t\tbuf.WriteString(strings.Join(sb.preWhereExprs, \" AND \"))\n\t\tsb.injection.WriteTo(buf, selectMarkerAfterPreWhere)\n\t}\n\n\tif len(sb.whereExprs) > 0 {\n\t\tbuf.WriteString(\" WHERE \")\n\t\tbuf.WriteString(strings.Join(sb.whereExprs, \" AND \"))\n\n\t\tsb.injection.WriteTo(buf, selectMarkerAfterWhere)\n\t}\n\n\tif len(sb.groupByCols) > 0 {\n\t\tbuf.WriteString(\" GROUP BY \")\n\t\tbuf.WriteString(strings.Join(sb.groupByCols, \", \"))\n\n\t\tif len(sb.havingExprs) > 0 {\n\t\t\tbuf.WriteString(\" HAVING \")\n\t\t\tbuf.WriteString(strings.Join(sb.havingExprs, \" AND \"))\n\t\t}\n\n\t\tsb.injection.WriteTo(buf, selectMarkerAfterGroupBy)\n\t}\n\n\tif len(sb.orderByCols) > 0 {\n\t\tbuf.WriteString(\" ORDER BY \")\n\t\tbuf.WriteString(strings.Join(sb.orderByCols, \", \"))\n\n\t\tsb.injection.WriteTo(buf, selectMarkerAfterOrderBy)\n\t}\n\tif sb.limit >= 0 {\n\t\tbuf.WriteString(\" LIMIT \")\n\t\tbuf.WriteString(strconv.Itoa(sb.limit))\n\t}\n\n\tif sb.offset >= 0 {\n\t\tbuf.WriteString(\" OFFSET \")\n\t\tbuf.WriteString(strconv.Itoa(sb.offset))\n\t}\n\n\tif sb.limit >= 0 {\n\t\tsb.injection.WriteTo(buf, selectMarkerAfterLimit)\n\t}\n\treturn buf.String(), chconn.NewParameters(sb.parameters...)\n}\n\n// SQL adds an arbitrary sql to current position.\nfunc (sb *SelectBuilder) SQL(sql string) *SelectBuilder {\n\tsb.injection.SQL(sb.marker, sql)\n\treturn sb\n}\n"
  },
  {
    "path": "sqlbuilder/select_test.go",
    "content": "package sqlbuilder\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/vahid-sohrabloo/chconn/v2\"\n)\n\nfunc TestSelectBuilder(t *testing.T) {\n\tsb := Select(\"id\", \"name\", As(\"COUNT(*)\", \"t\")).Distinct()\n\tsb.Column(\"age\", \"birthday\")\n\tsb.From(\"user\").Final()\n\tsb.SQL(\"/* before */\")\n\tsb.ArrayJoin(\"roles\").LeftArrayJoin()\n\tsb.SQL(\"/* after */\")\n\tsb.PreWhere(\"id > 0\")\n\tsb.Where(\n\t\t\"id > {id: Int32}\",\n\t\t\"name LIKE {name: String}\",\n\t)\n\tsb.Parameters(chconn.IntParameter(\"id\", 1))\n\tsb.Parameters(chconn.StringParameter(\"name\", \"vahid\"))\n\tsb.Join(\"contract c\",\n\t\t\"u.id = c.user_id\",\n\t\t\"c.status = {status: Array(Int64)}\",\n\t)\n\tsb.Parameters(chconn.IntSliceParameter(\"status\", []int64{1, 2, 3}))\n\tsb.JoinWithOption(RightOuterJoin, \"person p\",\n\t\t\"u.id = p.user_id\",\n\t\t\"p.surname  = {surname: String}\",\n\t)\n\tsb.Parameters(chconn.StringParameter(\"surname\", \"sohrabloo\"))\n\tsb.GroupBy(\"status\").Having(\"status > 0\")\n\tsb.OrderBy(\"modified_at ASC\", \"created_at DESC\")\n\tsb.Limit(10).Offset(5)\n\n\ts, args := sb.Build()\n\n\tassert.Equal(t, \"SELECT DISTINCT id, name, COUNT(*) AS t, age, birthday /* before */ FROM user FINAL \"+\n\t\t\"LEFT ARRAY JOIN roles /* after */ \"+\n\t\t\"JOIN contract c ON u.id = c.user_id AND c.status = {status: Array(Int64)} \"+\n\t\t\"RIGHT OUTER JOIN person p ON u.id = p.user_id AND p.surname  = {surname: String} \"+\n\t\t\"PREWHERE id > 0 \"+\n\t\t\"WHERE id > {id: Int32} AND name LIKE {name: String} \"+\n\t\t\"GROUP BY status HAVING status > 0 \"+\n\t\t\"ORDER BY modified_at ASC, created_at DESC \"+\n\t\t\"LIMIT 10 OFFSET 5\",\n\t\ts,\n\t)\n\trequire.Len(t, args.Params(), 4)\n\tassert.Equal(t, \"id\", args.Params()[0].Name)\n\tassert.Equal(t, \"'1'\", args.Params()[0].Value)\n\tassert.Equal(t, \"name\", args.Params()[1].Name)\n\tassert.Equal(t, \"'vahid'\", 
args.Params()[1].Value)\n\tassert.Equal(t, \"status\", args.Params()[2].Name)\n\tassert.Equal(t, \"'[1,2,3]'\", args.Params()[2].Value)\n\tassert.Equal(t, \"surname\", args.Params()[3].Name)\n\tassert.Equal(t, \"'sohrabloo'\", args.Params()[3].Value)\n}\n"
  },
  {
    "path": "types/Int256.go",
    "content": "package types\n\nimport (\n\t\"math/big\"\n)\n\n// Note, Zero and Max are functions just to make read-only values.\n// We cannot define constants for structures, and global variables\n// are unacceptable because it will be possible to change them.\n\n// Zero is the lowest possible Int256 value.\nfunc Int256Zero() Int256 {\n\treturn Int256From64(0)\n}\n\n// Max is the largest possible Int256 value.\nfunc Int256Max() Int256 {\n\treturn Int256{\n\t\tLo: Uint128Max(),\n\t\tHi: Int128Max(),\n\t}\n}\n\n// Int256 is an unsigned 256-bit number.\n// All methods are immutable, works just like standard uint64.\ntype Int256 struct {\n\tLo Uint128 // lower 128-bit half\n\tHi Int128  // upper 128-bit half\n}\n\n// From128 converts 128-bit value v to a Int256 value.\n// Upper 128-bit half will be zero.\nfunc Int256From128(v Int128) Int256 {\n\tvar hi Int128\n\tif v.Hi < 0 {\n\t\thi = Int128{Lo: 0, Hi: -1}\n\t\tv = v.Neg()\n\t}\n\treturn Int256{Lo: Uint128{\n\t\tLo: v.Lo,\n\t\tHi: uint64(v.Hi),\n\t}, Hi: hi}\n}\n\n// From64 converts 64-bit value v to a Int256 value.\n// Upper 128-bit half will be zero.\nfunc Int256From64(v int64) Int256 {\n\treturn Int256From128(Int128From64(v))\n}\n\n// FromBig converts *big.Int to 256-bit Int256 value ignoring overflows.\n// If input integer is nil or negative then return Zero.\n// If input integer overflows 256-bit then return Max.\nfunc Int256FromBig(i *big.Int) Int256 {\n\tu, _ := Int256FromBigEx(i)\n\treturn u\n}\n\n// FromBigEx converts *big.Int to 256-bit Int256 value (eXtended version).\n// Provides ok successful flag as a second return value.\n// If input integer is negative or overflows 256-bit then ok=false.\n// If input is nil then zero 256-bit returned.\nfunc Int256FromBigEx(i *big.Int) (Int256, bool) {\n\tswitch {\n\tcase i == nil:\n\t\treturn Int256Zero(), true // assuming nil === 0\n\n\tcase i.BitLen() > 256:\n\t\treturn Int256Max(), false // value overflows 256-bit!\n\t}\n\n\tneg := false\n\tif i.Sign() == -1 
{\n\t\ti = new(big.Int).Neg(i)\n\t\tneg = true\n\t}\n\n\tt := new(big.Int)\n\tlolo := i.Uint64()\n\tlohi := t.Rsh(i, 64).Uint64()\n\thilo := t.Rsh(i, 128).Uint64()\n\thihi := int64(t.Rsh(i, 192).Uint64())\n\tval := Int256{\n\t\tLo: Uint128{Lo: lolo, Hi: lohi},\n\t\tHi: Int128{Lo: hilo, Hi: hihi},\n\t}\n\tif neg {\n\t\tval = val.Neg()\n\t}\n\treturn val, true\n}\n\n// Big returns 256-bit value as a *big.Int.\n//\n//nolint:dupl\nfunc (u Int256) Big() *big.Int {\n\tt := new(big.Int)\n\ti := new(big.Int).SetInt64(u.Hi.Hi)\n\ti = i.Lsh(i, 64)\n\ti = i.Or(i, t.SetUint64(u.Hi.Lo))\n\ti = i.Lsh(i, 64)\n\ti = i.Or(i, t.SetUint64(u.Lo.Hi))\n\ti = i.Lsh(i, 64)\n\ti = i.Or(i, t.SetUint64(u.Lo.Lo))\n\treturn i\n}\n\n// Equals returns true if two 256-bit values are equal.\n// Int256 values can be compared directly with == operator\n// but use of the Equals method is preferred for consistency.\nfunc (u Int256) Equals(v Int256) bool {\n\treturn u.Lo.Equals(v.Lo) && u.Hi.Equals(v.Hi)\n}\n\n// Neg returns the additive inverse of an Int256\nfunc (u Int256) Neg() (z Int256) {\n\tz.Hi = u.Hi.Neg()\n\tz.Lo.Lo = -u.Lo.Lo\n\tz.Lo.Hi = -u.Lo.Hi\n\t// TODO, I'm not sure here.\n\tif z.Lo.Hi > 0 || z.Lo.Lo > 0 {\n\t\tz.Hi.Lo--\n\t}\n\treturn z\n}\n"
  },
  {
    "path": "types/date_type.go",
package types

import (
	"time"
)

// Date is the number of days since the Unix epoch (ClickHouse Date).
type Date uint16

const minDate32 = int32(-25567) // 1900-01-01 00:00:00 +0000 UTC

// Date32 is a signed day count since the Unix epoch (ClickHouse Date32).
type Date32 int32

// DateTime is the number of seconds since the Unix epoch (ClickHouse DateTime).
type DateTime uint32

const minDateTime64 = int64(-2208988800) // 1900-01-01 00:00:00 +0000 UTC

// DateTime64 is a tick count since the Unix epoch whose unit is selected
// by a precision index into precisionFactor.
type DateTime64 int64

// daySeconds is the number of seconds in one day.
const daySeconds = 24 * 60 * 60

// TimeToDate converts t to a day count since the epoch.
// The zone offset is added so the LOCAL calendar date of t is used.
// Times at or before the epoch clamp to 0 (Date is unsigned).
func TimeToDate(t time.Time) Date {
	if t.Unix() <= 0 {
		return 0
	}
	_, offset := t.Zone()
	return Date((t.Unix() + int64(offset)) / daySeconds)
}

// FromTime converts v to a Date. The receiver and precision are unused;
// presumably the signature exists for a shared column-value interface — confirm.
func (d Date) FromTime(v time.Time, precision int) Date {
	return TimeToDate(v)
}

// ToTime returns midnight UTC of the stored day.
// NOTE(review): loc and precision are ignored here (unlike DateTime.ToTime,
// which honors loc) — confirm this asymmetry is intended.
func (d Date) ToTime(loc *time.Location, precision int) time.Time {
	return time.Unix(d.Unix(), 0).UTC()
}

// Unix returns the Unix timestamp (seconds) of the stored day's midnight.
func (d Date) Unix() int64 {
	return daySeconds * int64(d)
}

// Unix returns the Unix timestamp (seconds) of the stored day's midnight.
func (d Date32) Unix() int64 {
	return daySeconds * int64(d)
}

// FromTime converts v to a Date32; receiver and precision are unused.
func (d Date32) FromTime(v time.Time, precision int) Date32 {
	return TimeToDate32(v)
}

// ToTime returns midnight UTC of the stored day; loc and precision are
// ignored, matching Date.ToTime.
func (d Date32) ToTime(loc *time.Location, precision int) time.Time {
	return time.Unix(d.Unix(), 0).UTC()
}

// TimeToDate32 converts t to a signed day count, using the LOCAL calendar
// date of t. Values at or before 1900-01-01 clamp to minDate32.
func TimeToDate32(t time.Time) Date32 {
	_, offset := t.Zone()
	d := int32((t.Unix() + int64(offset)) / daySeconds)
	if d <= minDate32 {
		return Date32(minDate32)
	}

	return Date32(d)
}

// TimeToDateTime converts t to whole seconds since the epoch.
// Times at or before the epoch clamp to 0 (DateTime is unsigned).
func TimeToDateTime(t time.Time) DateTime {
	if t.Unix() <= 0 {
		return 0
	}
	return DateTime(t.Unix())
}

// FromTime converts v to a DateTime; receiver and precision are unused.
func (d DateTime) FromTime(v time.Time, precision int) DateTime {
	return TimeToDateTime(v)
}

// ToTime returns the stored instant in the given location; precision is unused.
func (d DateTime) ToTime(loc *time.Location, precision int) time.Time {
	return time.Unix(int64(d), 0).In(loc)
}

// precisionFactor[p] is the number of nanoseconds per DateTime64 tick at
// precision p (p=0 → seconds, p=9 → nanoseconds).
var precisionFactor = [...]int64{
	1000000000,
	100000000,
	10000000,
	1000000,
	100000,
	10000,
	1000,
	100,
	10,
	1,
}

// TimeToDateTime64 converts t to ticks at the given precision.
// Values at or before 1900-01-01 clamp to minDateTime64 (in seconds —
// note the comparison is against t.Unix(), not the tick count).
func TimeToDateTime64(t time.Time, precision int) DateTime64 {
	if t.Unix() <= minDateTime64 {
		return DateTime64(minDateTime64)
	}
	return DateTime64(t.UnixNano() / precisionFactor[precision])
}

// FromTime converts v to a DateTime64 at the given precision; receiver unused.
func (d DateTime64) FromTime(v time.Time, precision int) DateTime64 {
	return TimeToDateTime64(v, precision)
}

// ToTime returns the stored instant in the given location.
// A zero value maps to the zero time.Time rather than the epoch.
func (d DateTime64) ToTime(loc *time.Location, precision int) time.Time {
	if d == 0 {
		return time.Time{}
	}
	nsec := int64(d) * precisionFactor[precision]
	return time.Unix(nsec/1e9, nsec%1e9).In(loc)
}
  },
  {
    "path": "types/decimal.go",
    "content": "package types\n\n// Decimal32 represents a 32-bit decimal number.\ntype Decimal32 int32\n\n// Decimal64 represents a 64-bit decimal number.\ntype Decimal64 int64\n\n// Decimal128 represents a 128-bit decimal number.\ntype Decimal128 Int128\n\n// Decimal256 represents a 256-bit decimal number.\ntype Decimal256 Int256\n\n// Table of powers of 10 for fast casting from floating types to decimal type\n// representations.\nvar factors10 = []float64{\n\t1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13,\n\t1e14, 1e15, 1e16, 1e17, 1e18,\n}\n\n// Float64 converts decimal number to float64.\nfunc (d Decimal32) Float64(scale int) float64 {\n\treturn float64(d) / factors10[scale]\n}\n\n// Float64 converts decimal number to float64.\nfunc (d Decimal64) Float64(scale int) float64 {\n\treturn float64(d) / factors10[scale]\n}\n\n// Decimal32FromFloat64 converts float64 to decimal32 number.\nfunc Decimal32FromFloat64(f float64, scale int) Decimal32 {\n\treturn Decimal32(f * factors10[scale])\n}\n\n// Decimal64FromFloat64 converts float64 to decimal64 number.\nfunc Decimal64FromFloat64(f float64, scale int) Decimal64 {\n\treturn Decimal64(f * factors10[scale])\n}\n"
  },
  {
    "path": "types/decimal_test.go",
    "content": "package types\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestDecimal(t *testing.T) {\n\td32 := Decimal32(12_234)\n\tassert.Equal(t, d32.Float64(3), float64(12.234))\n\td64 := Decimal64(12_234)\n\tassert.Equal(t, d64.Float64(3), float64(12.234))\n\tassert.Equal(t, Decimal32FromFloat64(12.2334, 3), Decimal32(12233))\n\tassert.Equal(t, Decimal64FromFloat64(12.2334, 3), Decimal64(12233))\n}\n"
  },
  {
    "path": "types/int128.go",
    "content": "package types\n\nimport (\n\t\"math\"\n\t\"math/big\"\n)\n\n// Note, Zero and Max are functions just to make read-only values.\n// We cannot define constants for structures, and global variables\n// are unacceptable because it will be possible to change them.\n\n// Zero is the lowest possible Int128 value.\nfunc Int128Zero() Int128 {\n\treturn Int128From64(0)\n}\n\n// Max is the largest possible Int128 value.\nfunc Int128Max() Int128 {\n\treturn Int128{\n\t\tLo: math.MaxUint64,\n\t\tHi: math.MaxInt64,\n\t}\n}\n\n// Int128 is an unsigned 128-bit number.\n// All methods are immutable, works just like standard uint64.\ntype Int128 struct {\n\tLo uint64 // lower 64-bit half\n\tHi int64  // upper 64-bit half\n}\n\n// Note, there in no New(lo, hi) just not to confuse\n// which half goes first: lower or upper.\n// Use structure initialization Int128{Lo: ..., Hi: ...} instead.\n\n// From64 converts 64-bit value v to a Int128 value.\n// Upper 64-bit half will be zero.\nfunc Int128From64(v int64) Int128 {\n\tvar hi int64\n\tif v < 0 {\n\t\thi = -1\n\t}\n\treturn Int128{Lo: uint64(v), Hi: hi}\n}\n\n// FromBig converts *big.Int to 128-bit Int128 value ignoring overflows.\n// If input integer is nil or negative then return Zero.\n// If input interger overflows 128-bit then return Max.\nfunc Int128FromBig(i *big.Int) Int128 {\n\tu, _ := Int128FromBigEx(i)\n\treturn u\n}\n\n// FromBigEx converts *big.Int to 128-bit Int128 value (eXtended version).\n// Provides ok successful flag as a second return value.\n// If input integer is negative or overflows 128-bit then ok=false.\n// If input is nil then zero 128-bit returned.\nfunc Int128FromBigEx(i *big.Int) (Int128, bool) {\n\tswitch {\n\tcase i == nil:\n\t\treturn Int128Zero(), true // assuming nil === 0\n\tcase i.BitLen() > 128:\n\t\treturn Int128Max(), false // value overflows 128-bit!\n\t}\n\n\tneg := false\n\tif i.Sign() == -1 {\n\t\ti = new(big.Int).Neg(i)\n\t\tneg = true\n\t}\n\n\t// Note, actually result of 
big.Int.Uint64 is undefined\n\t// if stored value is greater than 2^64\n\t// but we assume that it just gets lower 64 bits.\n\tt := new(big.Int)\n\tlo := i.Uint64()\n\thi := int64(t.Rsh(i, 64).Uint64())\n\tval := Int128{\n\t\tLo: lo,\n\t\tHi: hi,\n\t}\n\tif neg {\n\t\treturn val.Neg(), true\n\t}\n\treturn val, true\n}\n\n// Big returns 128-bit value as a *big.Int.\nfunc (u Int128) Big() *big.Int {\n\ti := new(big.Int).SetInt64(u.Hi)\n\n\ti = i.Lsh(i, 64)\n\ti = i.Or(i, new(big.Int).SetUint64(u.Lo))\n\treturn i\n}\n\n// Equals returns true if two 128-bit values are equal.\n// Int128 values can be compared directly with == operator\n// but use of the Equals method is preferred for consistency.\nfunc (u Int128) Equals(v Int128) bool {\n\treturn (u.Lo == v.Lo) && (u.Hi == v.Hi)\n}\n\n// Neg returns the additive inverse of an Int128\nfunc (u Int128) Neg() (z Int128) {\n\tz.Hi = -u.Hi\n\tz.Lo = -u.Lo\n\tif z.Lo > 0 {\n\t\tz.Hi--\n\t}\n\treturn z\n}\n"
  },
  {
    "path": "types/int128_test.go",
    "content": "package types\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// TestUint128 unit tests for various Int128 helpers.\nfunc TestInt128(t *testing.T) {\n\tt.Run(\"FromBig\", func(t *testing.T) {\n\t\tif got := Int128FromBig(nil); !got.Equals(Int128Zero()) {\n\t\t\tt.Fatalf(\"Int128FromBig(nil) does not equal to 0, got %#x\", got)\n\t\t}\n\n\t\tif got := Int128FromBig(new(big.Int).Lsh(big.NewInt(1), 129)); !got.Equals(Int128Max()) {\n\t\t\tt.Fatalf(\"Int128FromBig(2^129) does not equal to Max(), got %#x\", got)\n\t\t}\n\t})\n\tt.Run(\"ToBig\", func(t *testing.T) {\n\t\ti := new(big.Int).SetInt64(-124)\n\t\tassert.Equal(t, Int128FromBig(i).Big().String(), \"-124\")\n\n\t\tint128From64 := Int128From64(-124)\n\t\tassert.Equal(t, int128From64.Big().String(), \"-124\")\n\t})\n}\n"
  },
  {
    "path": "types/int256_test.go",
    "content": "package types\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// TestUint256 unit tests for various Int256 helpers.\nfunc TestInt256(t *testing.T) {\n\tt.Run(\"FromBig\", func(t *testing.T) {\n\t\tif got := Int256FromBig(nil); !got.Equals(Int256Zero()) {\n\t\t\tt.Fatalf(\"FromBig(nil) does not equal to 0, got %#x\", got)\n\t\t}\n\n\t\tif got := Int256FromBig(new(big.Int).Lsh(big.NewInt(1), 257)); !got.Equals(Int256Max()) {\n\t\t\tt.Fatalf(\"FromBig(2^129) does not equal to Max(), got %#x\", got)\n\t\t}\n\t})\n\tt.Run(\"ToBig\", func(t *testing.T) {\n\t\ti := new(big.Int).SetInt64(124)\n\t\tassert.Equal(t, Int256FromBig(i).Big().String(), \"124\")\n\n\t\tint256From64 := Int256From64(124)\n\t\tassert.Equal(t, int256From64.Big().String(), \"124\")\n\t})\n}\n"
  },
  {
    "path": "types/ip_test.go",
    "content": "package types\n\nimport (\n\t\"net/netip\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestIP(t *testing.T) {\n\tipv4 := IPv4FromAddr(netip.AddrFrom4([4]byte{1, 2, 3, 4}))\n\tassert.Equal(t, ipv4.NetIP().As4(), [4]byte{1, 2, 3, 4})\n\tipv6 := IPv6FromAddr(netip.AddrFrom16([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}))\n\tassert.Equal(t, ipv6.NetIP().As16(), [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})\n}\n"
  },
  {
    "path": "types/ipv4.go",
    "content": "package types\n\nimport \"net/netip\"\n\n//\tIPv4 is a compatible type for IPv4 address in clickhouse.\n//\n// clickhouse use Little endian for IPv4. but golang use big endian\ntype IPv4 [4]byte\n\nfunc (ip IPv4) NetIP() netip.Addr {\n\treturn netip.AddrFrom4([4]byte{ip[3], ip[2], ip[1], ip[0]})\n}\n\nfunc IPv4FromAddr(ipAddr netip.Addr) IPv4 {\n\tip := ipAddr.As4()\n\treturn IPv4{ip[3], ip[2], ip[1], ip[0]}\n}\n"
  },
  {
    "path": "types/ipv6.go",
    "content": "package types\n\nimport \"net/netip\"\n\ntype IPv6 [16]byte\n\nfunc (ip IPv6) NetIP() netip.Addr {\n\treturn netip.AddrFrom16(ip)\n}\n\nfunc IPv6FromAddr(ipAddr netip.Addr) IPv6 {\n\treturn IPv6(ipAddr.As16())\n}\n"
  },
  {
    "path": "types/tuple.go",
    "content": "package types\n\ntype Point Tuple2[float64, float64]\n\ntype Tuple2[T1, T2 any] struct {\n\tCol1 T1\n\tCol2 T2\n}\n\ntype Tuple3[T1, T2, T3 any] struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n}\n\ntype Tuple4[T1, T2, T3, T4 any] struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n}\n\ntype Tuple5[T1, T2, T3, T4, T5 any] struct {\n\tCol1 T1\n\tCol2 T2\n\tCol3 T3\n\tCol4 T4\n\tCol5 T5\n}\n"
  },
  {
    "path": "types/uint128.go",
    "content": "package types\n\nimport (\n\t\"math\"\n\t\"math/big\"\n)\n\n// Note, Zero and Max are functions just to make read-only values.\n// We cannot define constants for structures, and global variables\n// are unacceptable because it will be possible to change them.\n\n// Zero is the lowest possible Uint128 value.\nfunc Uint128Zero() Uint128 {\n\treturn Uint128From64(0)\n}\n\n// Max is the largest possible Uint128 value.\nfunc Uint128Max() Uint128 {\n\treturn Uint128{\n\t\tLo: math.MaxUint64,\n\t\tHi: math.MaxUint64,\n\t}\n}\n\n// Uint128 is an unsigned 128-bit number.\n// All methods are immutable, works just like standard uint64.\ntype Uint128 struct {\n\tLo uint64 // lower 64-bit half\n\tHi uint64 // upper 64-bit half\n}\n\n// Note, there in no New(lo, hi) just not to confuse\n// which half goes first: lower or upper.\n// Use structure initialization Uint128{Lo: ..., Hi: ...} instead.\n\n// From64 converts 64-bit value v to a Uint128 value.\n// Upper 64-bit half will be zero.\nfunc Uint128From64(v uint64) Uint128 {\n\treturn Uint128{Lo: v}\n}\n\n// FromBig converts *big.Int to 128-bit Uint128 value ignoring overflows.\n// If input integer is nil or negative then return Zero.\n// If input interger overflows 128-bit then return Max.\nfunc Uint128FromBig(i *big.Int) Uint128 {\n\tu, _ := Uint128FromBigEx(i)\n\treturn u\n}\n\n// FromBigEx converts *big.Int to 128-bit Uint128 value (eXtended version).\n// Provides ok successful flag as a second return value.\n// If input integer is negative or overflows 128-bit then ok=false.\n// If input is nil then zero 128-bit returned.\nfunc Uint128FromBigEx(i *big.Int) (Uint128, bool) {\n\tswitch {\n\tcase i == nil:\n\t\treturn Uint128Zero(), true // assuming nil === 0\n\tcase i.Sign() < 0:\n\t\treturn Uint128Zero(), false // value cannot be negative!\n\tcase i.BitLen() > 128:\n\t\treturn Uint128Max(), false // value overflows 128-bit!\n\t}\n\n\t// Note, actually result of big.Int.Uint64 is undefined\n\t// if stored 
value is greater than 2^64\n\t// but we assume that it just gets lower 64 bits.\n\tt := new(big.Int)\n\tlo := i.Uint64()\n\thi := t.Rsh(i, 64).Uint64()\n\treturn Uint128{\n\t\tLo: lo,\n\t\tHi: hi,\n\t}, true\n}\n\n// Big returns 128-bit value as a *big.Int.\nfunc (u Uint128) Big() *big.Int {\n\ti := new(big.Int).SetUint64(u.Hi)\n\ti = i.Lsh(i, 64)\n\ti = i.Or(i, new(big.Int).SetUint64(u.Lo))\n\treturn i\n}\n\n// Equals returns true if two 128-bit values are equal.\n// Uint128 values can be compared directly with == operator\n// but use of the Equals method is preferred for consistency.\nfunc (u Uint128) Equals(v Uint128) bool {\n\treturn (u.Lo == v.Lo) && (u.Hi == v.Hi)\n}\n"
  },
  {
    "path": "types/uint128_test.go",
    "content": "package types\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// TestUint128 unit tests for various Uint128 helpers.\nfunc TestUint128(t *testing.T) {\n\tt.Run(\"FromBig\", func(t *testing.T) {\n\t\tif got := Uint128FromBig(nil); !got.Equals(Uint128Zero()) {\n\t\t\tt.Fatalf(\"Uint128FromBig(nil) does not equal to 0, got %#x\", got)\n\t\t}\n\n\t\tif got := Uint128FromBig(big.NewInt(-1)); !got.Equals(Uint128Zero()) {\n\t\t\tt.Fatalf(\"Uint128FromBig(-1) does not equal to 0, got %#x\", got)\n\t\t}\n\n\t\tif got := Uint256FromBig(big.NewInt(124)).Big().String(); got != \"124\" {\n\t\t\tt.Fatalf(\"Uint256FromBig(big.NewInt(124)) does not equal to 0, got %#x\", got)\n\t\t}\n\n\t\tif got := Uint128FromBig(new(big.Int).Lsh(big.NewInt(1), 129)); !got.Equals(Uint128Max()) {\n\t\t\tt.Fatalf(\"Uint128FromBig(2^129) does not equal to Max(), got %#x\", got)\n\t\t}\n\t})\n\n\tt.Run(\"ToBig\", func(t *testing.T) {\n\t\ti := new(big.Int).SetInt64(124)\n\t\tassert.Equal(t, Uint256FromBig(i).Big().String(), \"124\")\n\n\t\tUint256From64 := Uint256From64(124)\n\t\tassert.Equal(t, Uint256From64.Big().String(), \"124\")\n\t})\n}\n"
  },
  {
    "path": "types/uint256.go",
    "content": "package types\n\nimport (\n\t\"math/big\"\n)\n\n// Note, Zero and Max are functions just to make read-only values.\n// We cannot define constants for structures, and global variables\n// are unacceptable because it will be possible to change them.\n\n// Zero is the lowest possible Uint256 value.\nfunc Uint256Zero() Uint256 {\n\treturn Uint256From64(0)\n}\n\n// Max is the largest possible Uint256 value.\nfunc Uint256Max() Uint256 {\n\treturn Uint256{\n\t\tLo: Uint128Max(),\n\t\tHi: Uint128Max(),\n\t}\n}\n\n// Uint256 is an unsigned 256-bit number.\n// All methods are immutable, works just like standard uint64.\ntype Uint256 struct {\n\tLo Uint128 // lower 128-bit half\n\tHi Uint128 // upper 128-bit half\n}\n\n// From128 converts 128-bit value v to a Uint256 value.\n// Upper 128-bit half will be zero.\nfunc Uint256From128(v Uint128) Uint256 {\n\treturn Uint256{Lo: v}\n}\n\n// From64 converts 64-bit value v to a Uint256 value.\n// Upper 128-bit half will be zero.\nfunc Uint256From64(v uint64) Uint256 {\n\treturn Uint256From128(Uint128From64(v))\n}\n\n// FromBig converts *big.Int to 256-bit Uint256 value ignoring overflows.\n// If input integer is nil or negative then return Zero.\n// If input interger overflows 256-bit then return Max.\nfunc Uint256FromBig(i *big.Int) Uint256 {\n\tu, _ := Uint256FromBigEx(i)\n\treturn u\n}\n\n// FromBigEx converts *big.Int to 256-bit Uint256 value (eXtended version).\n// Provides ok successful flag as a second return value.\n// If input integer is negative or overflows 256-bit then ok=false.\n// If input is nil then zero 256-bit returned.\nfunc Uint256FromBigEx(i *big.Int) (Uint256, bool) {\n\tswitch {\n\tcase i == nil:\n\t\treturn Uint256Zero(), true // assuming nil === 0\n\tcase i.Sign() < 0:\n\t\treturn Uint256Zero(), false // value cannot be negative!\n\tcase i.BitLen() > 256:\n\t\treturn Uint256Max(), false // value overflows 256-bit!\n\t}\n\n\t// Note, actually result of big.Int.Uint64 is undefined\n\t// if 
stored value is greater than 2^64\n\t// but we assume that it just gets lower 64 bits.\n\tt := new(big.Int)\n\tlolo := i.Uint64()\n\tlohi := t.Rsh(i, 64).Uint64()\n\thilo := t.Rsh(i, 128).Uint64()\n\thihi := t.Rsh(i, 192).Uint64()\n\treturn Uint256{\n\t\tLo: Uint128{Lo: lolo, Hi: lohi},\n\t\tHi: Uint128{Lo: hilo, Hi: hihi},\n\t}, true\n}\n\n// Big returns 256-bit value as a *big.Int.\n//\n//nolint:dupl\nfunc (u Uint256) Big() *big.Int {\n\tt := new(big.Int)\n\ti := new(big.Int).SetUint64(u.Hi.Hi)\n\ti = i.Lsh(i, 64)\n\ti = i.Or(i, t.SetUint64(u.Hi.Lo))\n\ti = i.Lsh(i, 64)\n\ti = i.Or(i, t.SetUint64(u.Lo.Hi))\n\ti = i.Lsh(i, 64)\n\ti = i.Or(i, t.SetUint64(u.Lo.Lo))\n\treturn i\n}\n\n// Equals returns true if two 256-bit values are equal.\n// Uint256 values can be compared directly with == operator\n// but use of the Equals method is preferred for consistency.\nfunc (u Uint256) Equals(v Uint256) bool {\n\treturn u.Lo.Equals(v.Lo) && u.Hi.Equals(v.Hi)\n}\n"
  },
  {
    "path": "types/uint256_test.go",
    "content": "package types\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n)\n\n// TestUint256 unit tests for various Uint256 helpers.\nfunc TestUint256(t *testing.T) {\n\tt.Run(\"FromBig\", func(t *testing.T) {\n\t\tif got := Uint256FromBig(nil); !got.Equals(Uint256Zero()) {\n\t\t\tt.Fatalf(\"Uint256FromBig(nil) does not equal to 0, got %#x\", got)\n\t\t}\n\n\t\tif got := Uint256FromBig(big.NewInt(-1)); !got.Equals(Uint256Zero()) {\n\t\t\tt.Fatalf(\"Uint256FromBig(-1) does not equal to 0, got %#x\", got)\n\t\t}\n\n\t\tif got := Uint256FromBig(big.NewInt(124)).Big().String(); got != \"124\" {\n\t\t\tt.Fatalf(\"Uint256FromBig(big.NewInt(124)) does not equal to 0, got %#x\", got)\n\t\t}\n\n\t\tif got := Uint256FromBig(new(big.Int).Lsh(big.NewInt(1), 257)); !got.Equals(Uint256Max()) {\n\t\t\tt.Fatalf(\"Uint256FromBig(2^129) does not equal to Max(), got %#x\", got)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "types/uuid.go",
    "content": "package types\n\ntype UUID [16]byte\n\nfunc UUIDFromBigEndian(b [16]byte) UUID {\n\tvar val [16]byte\n\tval[0], val[7] = b[7], b[0]\n\tval[1], val[6] = b[6], b[1]\n\tval[2], val[5] = b[5], b[2]\n\tval[3], val[4] = b[4], b[3]\n\tval[8], val[15] = b[15], b[8]\n\tval[9], val[14] = b[14], b[9]\n\tval[10], val[13] = b[13], b[10]\n\tval[11], val[12] = b[12], b[11]\n\treturn val\n}\n\nfunc (u UUID) BigEndian() [16]byte {\n\treturn UUIDFromBigEndian(u)\n}\n"
  },
  {
    "path": "types/uuid_test.go",
    "content": "package types\n\nimport (\n\t\"testing\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestUUID(t *testing.T) {\n\tu := uuid.New()\n\tuuidData := UUIDFromBigEndian(u)\n\tassert.Equal(t, uuidData.BigEndian(), [16]byte(u))\n}\n"
  }
]