Repository: vahid-sohrabloo/chconn Branch: main Commit: 519075190b83 Files: 123 Total size: 603.4 KB Directory structure: gitextract_xi8nbcuh/ ├── .codecov.yml ├── .github/ │ ├── dependabot.yml │ └── workflows/ │ ├── ci.yaml │ └── lint.yaml ├── .gitignore ├── .golangci.yml ├── LICENSE ├── Makefile ├── README.md ├── block.go ├── block_test.go ├── chconn.go ├── chconn_test.go ├── chpool/ │ ├── common_test.go │ ├── conn.go │ ├── insert_stmt.go │ ├── pool.go │ ├── pool_test.go │ ├── select_stmt.go │ └── stat.go ├── client_info.go ├── column/ │ ├── array.go │ ├── array2.go │ ├── array2_nullable.go │ ├── array3.go │ ├── array3_nullable.go │ ├── array_base.go │ ├── array_nullable.go │ ├── base.go │ ├── base_big_cpu.go │ ├── base_little_cpu.go │ ├── base_test.go │ ├── base_validate.go │ ├── bench_test.go │ ├── column_helper.go │ ├── date.go │ ├── date_test.go │ ├── error_test.go │ ├── errors.go │ ├── helper_test.go │ ├── lc.go │ ├── lc_indices.go │ ├── lc_nullable.go │ ├── lc_test.go │ ├── map.go │ ├── map_base.go │ ├── map_nullable.go │ ├── map_test.go │ ├── nested.go │ ├── nested_test.go │ ├── nullable.go │ ├── nullable_test.go │ ├── point.go │ ├── size.go │ ├── string.go │ ├── string_base.go │ ├── string_test.go │ ├── tuple.go │ ├── tuple1.go │ ├── tuple2_gen.go │ ├── tuple3_gen.go │ ├── tuple4_gen.go │ ├── tuple5_gen.go │ ├── tuple_test.go │ ├── tuples_template/ │ │ ├── tuple.go.tmpl │ │ ├── tuple2.json │ │ ├── tuple3.json │ │ ├── tuple4.json │ │ └── tuple5.json │ └── tuples_test.go ├── config.go ├── config_test.go ├── doc.go ├── doc_test.go ├── errors.go ├── errors_ch_code.go ├── errors_test.go ├── go.mod ├── go.sum ├── helper_test.go ├── insert.go ├── insert_test.go ├── internal/ │ ├── ctxwatch/ │ │ ├── context_watcher.go │ │ └── context_watcher_test.go │ ├── helper/ │ │ ├── features.go │ │ ├── strs.go │ │ └── validator.go │ └── readerwriter/ │ ├── compress_reader.go │ ├── compress_writer.go │ ├── consts.go │ ├── reader.go │ └── writer.go ├── ping.go ├── ping_test.go 
├── profile.go ├── profile_event.go ├── profile_test.go ├── progress.go ├── select_stmt.go ├── select_stmt_test.go ├── server_info.go ├── server_info_test.go ├── settings.go ├── sqlbuilder/ │ ├── injection.go │ ├── select.go │ └── select_test.go └── types/ ├── Int256.go ├── date_type.go ├── decimal.go ├── decimal_test.go ├── int128.go ├── int128_test.go ├── int256_test.go ├── ip_test.go ├── ipv4.go ├── ipv6.go ├── tuple.go ├── uint128.go ├── uint128_test.go ├── uint256.go ├── uint256_test.go ├── uuid.go └── uuid_test.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .codecov.yml ================================================ ignore: - "**/main.go" - "./internal/readerwriter/*" coverage: status: project: default: target: 50% threshold: null patch: false changes: false range: 70..95 round: up precision: 1 ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: gomod directory: "/" schedule: interval: daily - package-ecosystem: github-actions directory: "/" schedule: interval: daily ================================================ FILE: .github/workflows/ci.yaml ================================================ name: CI on: push: branches: - master pull_request: jobs: test-coverage: name: Test Coverage runs-on: ubuntu-latest env: VERBOSE: 1 GOFLAGS: -mod=readonly steps: - uses: vahid-sohrabloo/clickhouse-action@v1 with: version: '22.9' - name: Set up Go uses: actions/setup-go@v3 with: go-version: 1.19 - name: Checkout code uses: actions/checkout@v3.3.0 - name: Test run: make test-cover - name: Send coverage uses: codecov/codecov-action@v3 with: file: coverage.out test: name: Test runs-on: ubuntu-latest strategy: matrix: golang-version: [1.18.5, 1.19] clickhouse-version: ['22.11', '22.10', '22.9', '22.8', '22.7', '22.6', '22.5', '22.4'] 
env: VERBOSE: 1 GOFLAGS: -mod=readonly steps: - uses: vahid-sohrabloo/clickhouse-action@v1 with: version: '${{ matrix.clickhouse-version }}' - name: Set up Go uses: actions/setup-go@v3 with: go-version: 1.18.5 - name: Checkout code uses: actions/checkout@v3.3.0 - name: Test run: make test ================================================ FILE: .github/workflows/lint.yaml ================================================ name: golangci-lint on: push: branches: - main pull_request: jobs: lint: name: lint runs-on: ubuntu-latest steps: - name: Set up Go uses: actions/setup-go@v3 with: go-version: 1.19 - name: Checkout code uses: actions/checkout@v3.3.0 - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: version: v1.50 args: --timeout=10m ================================================ FILE: .gitignore ================================================ .envrc bin/ vendor/ build/ coverage.out ================================================ FILE: .golangci.yml ================================================ linters-settings: dupl: threshold: 100 funlen: lines: 130 statements: 60 goconst: min-len: 5 min-occurrences: 3 gocritic: enabled-tags: - diagnostic - experimental - opinionated - performance - style disabled-checks: - dupImport # https://github.com/go-critic/go-critic/issues/845 - ifElseChain - octalLiteral - whyNoLint - wrapperFunc gocyclo: min-complexity: 20 goimports: local-prefixes: github.com/golangci/golangci-lint gomnd: settings: mnd: # don't include the "operation" and "assign" checks: argument,case,condition,return ignored-numbers: 1000000 govet: check-shadowing: false lll: line-length: 140 maligned: suggest-new: true misspell: locale: US nolintlint: allow-leading-space: true # don't require machine-readable nolint directives (i.e. 
with no leading space) allow-unused: false # report any unused nolint directives require-explanation: false # don't require an explanation for nolint directives require-specific: false # don't require nolint directives to be specific about which linter is being skipped linters: disable-all: true enable: # - bodyclose - depguard - dogsled - dupl - errcheck - exportloopref - funlen - gochecknoinits - goconst - gocritic - gocyclo - gofmt - goimports - goprintffuncname - gosec - gosimple - govet - ineffassign - lll - misspell - nakedret # - noctx - nolintlint - staticcheck - stylecheck - typecheck - unconvert # - unparam - unused - whitespace # don't enable: # - asciicheck # - scopelint # - gochecknoglobals # - gocognit # - godot # - godox # - goerr113 # - interfacer # - maligned # - nestif # - prealloc # - testpackage # - revive # - wsl # - gomnd issues: # Excluding configuration per-path, per-linter, per-text and per-source exclude-rules: - path: _test\.go linters: - goconst - dupl - funlen - gocyclo - gosec - goerr113 - maligned - errcheck - path: cmd/chgogen linters: - goconst - funlen - gocyclo - path: _unsafe\.go linters: - dupl - path: main\.go linters: - goconst - gocritic - dupl # todo fix later run: skip-dirs: ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2020 vahid-sohrabloo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: Makefile ================================================ # A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html OS = $(shell uname | tr A-Z a-z) export PATH := $(abspath bin/):${PATH} # Build variables BUILD_DIR ?= build VERSION ?= $(shell git describe --tags --exact-match 2>/dev/null || git symbolic-ref -q --short HEAD) COMMIT_HASH ?= $(shell git rev-parse --short HEAD 2>/dev/null) DATE_FMT = +%FT%T%z ifdef SOURCE_DATE_EPOCH BUILD_DATE ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u -r "$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u "$(DATE_FMT)") else BUILD_DATE ?= $(shell date "$(DATE_FMT)") endif LDFLAGS += -X main.version=${VERSION} -X main.commitHash=${COMMIT_HASH} -X main.buildDate=${BUILD_DATE} export CGO_ENABLED ?= 1 ifeq (${VERBOSE}, 1) ifeq ($(filter -v,${GOARGS}),) GOARGS += -v endif TEST_FORMAT = short-verbose endif # Project variables # Dependency versions GOTESTSUM_VERSION = 1.8.1 GOLANGCI_VERSION = 1.50.0 GOLANG_VERSION = 1.14 # Add the ability to override some variables # Use with care -include override.mk .PHONY: up up: start config.toml ## Set up the development environment .PHONY: down down: clear ## Destroy the development environment docker-compose down --volumes --remove-orphans --rmi local rm -rf var/docker/volumes/* .PHONY: reset reset: down up ## Reset the development environment .PHONY: clear clear: ## Clear the working area and the project rm 
-rf bin/ docker-compose.override.yml: cp docker-compose.override.yml.dist docker-compose.override.yml .PHONY: start start: docker-compose.override.yml ## Start docker development environment @ if [ docker-compose.override.yml -ot docker-compose.override.yml.dist ]; then diff -u docker-compose.override.yml docker-compose.override.yml.dist || (echo "!!! The distributed docker-compose.override.yml example changed. Please update your file accordingly (or at least touch it). !!!" && false); fi docker-compose up -d .PHONY: stop stop: ## Stop docker development environment docker-compose stop config.toml: sed 's/production/development/g; s/debug = false/debug = true/g; s/shutdownTimeout = "15s"/shutdownTimeout = "0s"/g; s/format = "json"/format = "logfmt"/g; s/level = "info"/level = "debug"/g; s/addr = ":10000"/addr = "127.0.0.1:10000"/g; s/httpAddr = ":8000"/httpAddr = "127.0.0.1:8000"/g; s/grpcAddr = ":8001"/grpcAddr = "127.0.0.1:8001"/g' config.toml.dist > config.toml .PHONY: run-% run-%: build-% ${BUILD_DIR}/$* .PHONY: run run: $(patsubst cmd/%,run-%,$(wildcard cmd/*)) ## Build and execute a binary .PHONY: clean clean: ## Clean builds rm -rf ${BUILD_DIR}/ rm -rf cmd/*/pkged.go .PHONY: goversion goversion: ifneq (${IGNORE_GOLANG_VERSION_REQ}, 1) @printf "${GOLANG_VERSION}\n$$(go version | awk '{sub(/^go/, "", $$3);print $$3}')" | sort -t '.' -k 1,1 -k 2,2 -k 3,3 -g | head -1 | grep -q -E "^${GOLANG_VERSION}$$" || (printf "Required Go version is ${GOLANG_VERSION}\nInstalled: `go version`" && exit 1) endif .PHONY: build-% build-%: goversion ifeq (${VERBOSE}, 1) go env endif go build ${GOARGS} -tags "${GOTAGS}" -ldflags "${LDFLAGS}" -o ${BUILD_DIR}/$* ./cmd/$* .PHONY: build build: goversion ## Build all binaries ifeq (${VERBOSE}, 1) go env endif @mkdir -p ${BUILD_DIR} go build ${GOARGS} -tags "${GOTAGS}" -ldflags "${LDFLAGS}" -o ${BUILD_DIR}/ ./cmd/... 
.PHONY: build-release build-release: @${MAKE} LDFLAGS="-w ${LDFLAGS}" GOARGS="${GOARGS} -trimpath" BUILD_DIR="${BUILD_DIR}/release" build .PHONY: build-debug build-debug: ## Build all binaries with remote debugging capabilities @${MAKE} GOARGS="${GOARGS} -gcflags \"all=-N -l\"" BUILD_DIR="${BUILD_DIR}/debug" build .PHONY: check check: test-all lint ## Run tests and linters bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION} @ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum bin/gotestsum-${GOTESTSUM_VERSION}: @mkdir -p bin curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION} TEST_PKGS ?= ./... TEST_REPORT_NAME ?= results.xml .PHONY: test test: TEST_REPORT ?= main test: TEST_FORMAT ?= short test: SHELL = /bin/bash test: bin/gotestsum ## Run tests bin/gotestsum --format ${TEST_FORMAT} -- $(filter-out -v,${GOARGS}) -coverprofile=coverage.out -race -parallel 1 $(if ${TEST_PKGS},${TEST_PKGS},./...) @go tool cover -func=coverage.out @rm coverage.out .PHONY: test-purego test-purego: TEST_REPORT ?= main test-purego: TEST_FORMAT ?= standard-quiet test-purego: SHELL = /bin/bash test-purego: bin/gotestsum ## Run tests bin/gotestsum --format ${TEST_FORMAT} -- $(filter-out -v,${GOARGS}) -coverprofile=coverage.out -race -parallel 1 -tags purego $(if ${TEST_PKGS},${TEST_PKGS},./...) @go tool cover -func=coverage.out @rm coverage.out CVPKG = $(shell go list ./... | grep -v 'chgogen\|generator' | tr '\n' ',') .PHONY: test-cover test-cover: TEST_REPORT ?= main test-cover: TEST_FORMAT ?= standard-quiet test-cover: SHELL = /bin/bash test-cover: bin/gotestsum ## Run tests bin/gotestsum --format ${TEST_FORMAT} -- $(filter-out -v,${GOARGS}) -coverpkg=${CVPKG} -coverprofile=coverage.out -covermode=atomic -parallel 1 $(if ${TEST_PKGS},${TEST_PKGS},./...) 
@go tool cover -func=coverage.out .PHONY: test-all test-all: ## Run all tests @${MAKE} GOARGS="${GOARGS} -run .\* " TEST_REPORT=all test .PHONY: test-integration test-integration: ## Run integration tests @${MAKE} GOARGS="${GOARGS} -run ^TestIntegration\$$\$$" TEST_REPORT=integration test .PHONY: test-functional test-functional: ## Run functional tests @${MAKE} GOARGS="${GOARGS} -run ^TestFunctional\$$\$$" TEST_REPORT=functional test bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint bin/golangci-lint-${GOLANGCI_VERSION}: @mkdir -p bin curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | BINARY=golangci-lint bash -s -- v${GOLANGCI_VERSION} @mv bin/golangci-lint $@ .PHONY: lint lint: bin/golangci-lint ## Run linter bin/golangci-lint run --deadline=20m --concurrency 1 lint-fix: bin/golangci-lint ## Run linter bin/golangci-lint run --deadline=20m --concurrency 1 --fix release-%: TAG_PREFIX = v release-%: ifneq (${DRY}, 1) @sed -e "s/^## \[Unreleased\]$$/## [Unreleased]\\"$$'\n'"\\"$$'\n'"\\"$$'\n'"## [$*] - $$(date +%Y-%m-%d)/g; s|^\[Unreleased\]: \(.*\/compare\/\)\(.*\)...HEAD$$|[Unreleased]: \1${TAG_PREFIX}$*...HEAD\\"$$'\n'"[$*]: \1\2...${TAG_PREFIX}$*|g" CHANGELOG.md > CHANGELOG.md.new @mv CHANGELOG.md.new CHANGELOG.md ifeq (${TAG}, 1) git add CHANGELOG.md git commit -m 'Prepare release $*' git tag -m 'Release $*' ${TAG_PREFIX}$* ifeq (${PUSH}, 1) git push; git push origin ${TAG_PREFIX}$* endif endif endif @echo "Version updated to $*!" 
ifneq (${PUSH}, 1) @echo @echo "Review the changes made by this script then execute the following:" ifneq (${TAG}, 1) @echo @echo "git add CHANGELOG.md && git commit -m 'Prepare release $*' && git tag -m 'Release $*' ${TAG_PREFIX}$*" @echo @echo "Finally, push the changes:" endif @echo @echo "git push; git push origin ${TAG_PREFIX}$*" endif .PHONY: patch patch: ## Release a new patch version @${MAKE} release-$(shell (git describe --abbrev=0 --tags 2> /dev/null || echo "0.0.0") | sed 's/^v//' | awk -F'[ .]' '{print $$1"."$$2"."$$3+1}') .PHONY: minor minor: ## Release a new minor version @${MAKE} release-$(shell (git describe --abbrev=0 --tags 2> /dev/null || echo "0.0.0") | sed 's/^v//' | awk -F'[ .]' '{print $$1"."$$2+1".0"}') .PHONY: major major: ## Release a new major version @${MAKE} release-$(shell (git describe --abbrev=0 --tags 2> /dev/null || echo "0.0.0") | sed 's/^v//' | awk -F'[ .]' '{print $$1+1".0.0"}') .PHONY: list list: ## List all make targets @${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort .PHONY: help .DEFAULT_GOAL := help help: @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' # Variable outputting/exporting rules var-%: ; @echo $($*) varexport-%: ; @echo $*=$($*) ================================================ FILE: README.md ================================================ [![Go Reference](https://pkg.go.dev/badge/github.com/vahid-sohrabloo/chconn/v2.svg)](https://pkg.go.dev/github.com/vahid-sohrabloo/chconn/v2) [![codecov](https://codecov.io/gh/vahid-sohrabloo/chconn/branch/master/graph/badge.svg?token=K3JN6XWFVV)](https://codecov.io/gh/vahid-sohrabloo/chconn) [![Go Report Card](https://goreportcard.com/badge/github.com/vahid-sohrabloo/chconn/v2)](https://goreportcard.com/report/github.com/vahid-sohrabloo/chconn/v2) 
[![Actions Status](https://github.com/vahid-sohrabloo/chconn/workflows/CI/badge.svg)](https://github.com/vahid-sohrabloo/chconn/actions) [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fvahid-sohrabloo%2Fchconn.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fvahid-sohrabloo%2Fchconn?ref=badge_shield) # chconn - ClickHouse low level Driver chconn is a pure generic Go (1.18+) driver for [ClickHouse](https://clickhouse.com/) that use Native protocol chconn aims to be low-level, fast, and performant. For comparison with other libraries, please see https://github.com/vahid-sohrabloo/go-ch-benchmark and https://github.com/go-faster/ch-bench#benchmarks If you have any suggestion or comment, please feel free to open an issue ## Example Usage ```go package main import ( "context" "fmt" "os" "time" "github.com/vahid-sohrabloo/chconn/v2/chpool" "github.com/vahid-sohrabloo/chconn/v2/column" ) func main() { conn, err := chpool.New(os.Getenv("DATABASE_URL")) if err != nil { panic(err) } defer conn.Close() // to check if the connection is alive err = conn.Ping(context.Background()) if err != nil { panic(err) } err = conn.Exec(context.Background(), `DROP TABLE IF EXISTS example_table`) if err != nil { panic(err) } err = conn.Exec(context.Background(), `CREATE TABLE example_table ( uint64 UInt64, uint64_nullable Nullable(UInt64) ) Engine=Memory`) if err != nil { panic(err) } col1 := column.New[uint64]() col2 := column.New[uint64]().Nullable() rows := 1_000_0000 // Ten million rows - insert in 10 times numInsert := 10 col1.SetWriteBufferSize(rows) col2.SetWriteBufferSize(rows) startInsert := time.Now() for i := 0; i < numInsert; i++ { for y := 0; y < rows; y++ { col1.Append(uint64(i)) if i%2 == 0 { col2.Append(uint64(i)) } else { col2.AppendNil() } } ctxInsert, cancelInsert := context.WithTimeout(context.Background(), time.Second*30) // insert data err = conn.Insert(ctxInsert, "INSERT INTO example_table (uint64,uint64_nullable) VALUES", 
col1, col2) if err != nil { cancelInsert() panic(err) } cancelInsert() } fmt.Println("inserted 10M rows in ", time.Since(startInsert)) // select data col1Read := column.New[uint64]() col2Read := column.New[uint64]().Nullable() ctxSelect, cancelSelect := context.WithTimeout(context.Background(), time.Second*30) defer cancelSelect() startSelect := time.Now() selectStmt, err := conn.Select(ctxSelect, "SELECT uint64,uint64_nullable FROM example_table", col1Read, col2Read) if err != nil { panic(err) } // make sure the stmt close after select. but it's not necessary defer selectStmt.Close() var col1Data []uint64 var col2DataNil []bool var col2Data []uint64 // read data block by block // for more information about block, see: https://clickhouse.com/docs/en/development/architecture/#block for selectStmt.Next() { col1Data = col1Data[:0] col1Data = col1Read.Read(col1Data) col2DataNil = col2DataNil[:0] col2DataNil = col2Read.ReadNil(col2DataNil) col2Data = col2Data[:0] col2Data = col2Read.Read(col2Data) } // check errors if selectStmt.Err() != nil { panic(selectStmt.Err()) } fmt.Println("selected 10M rows in ", time.Since(startSelect)) } ``` ``` inserted 10M rows in 1.206666188s selected 10M rows in 880.505004ms ``` **For moe information**, please see the [documentation](https://github.com/vahid-sohrabloo/chconn/wiki) ## Features * Generics (go1.18) for column types * Connection pool with after-connect hook for arbitrary connection setup similar to pgx (thanks @jackc) * Support DSN and Query connection string (thanks @jackc) * Support All ClickHouse data types * Read and write data in column-oriented (like ClickHouse) * Do not use `interface{}` , `reflect` * Batch select and insert * Full TLS connection control * Read raw binary data * Supports profile and progress * database url connection very like pgx (thanks @jackc) * Code generator for Insert * Support LZ4 and ZSTD compression protocol * Support execution telemetry streaming profiles and progress ## Supported types * 
UInt8, UInt16, UInt32, UInt64, UInt128, UInt256 * Int8, Int16, Int32, Int64, Int128, Int256 * Date, Date32, DateTime, DateTime64 * Decimal32, Decimal64, Decimal128, Decimal256 * IPv4, IPv6 * String, FixedString(N) * UUID * Array(T) * Enums * LowCardinality(T) * Map(K, V) * Tuple(T1, T2, ..., Tn) * Nullable(T) * Point, Ring, Polygon, MultiPolygon # Benchmarks the source code of this benchmark here https://github.com/vahid-sohrabloo/go-ch-benchmark ``` name \ time/op chconn chgo go-clickhouse uptrace TestSelect100MUint64-16 150ms 154ms 8019ms 3045ms TestSelect10MString-16 271ms 447ms 969ms 822ms TestInsert10M-16 198ms 514ms 561ms 304ms name \ alloc/op chconn chgo go-clickhouse uptrace TestSelect100MUint64-16 111kB 262kB 3202443kB 800941kB TestSelect10MString-16 1.63MB 1.79MB 1626.51MB 241.03MB TestInsert10M-16 26.0MB 283.7MB 1680.4MB 240.2MB name \ allocs/op chconn chgo go-clickhouse uptrace TestSelect100MUint64-16 35.0 6683.0 200030937.0 100006069.0 TestSelect10MString-16 49.0 1748.0 30011991.0 20001120.0 TestInsert10M-16 26.0 80.0 224.0 50.0 ``` ## License [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fvahid-sohrabloo%2Fchconn.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fvahid-sohrabloo%2Fchconn?ref=badge_large) ================================================ FILE: block.go ================================================ package chconn import ( "bytes" "fmt" "github.com/vahid-sohrabloo/chconn/v2/column" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) // Column contains details of ClickHouse column type chColumn struct { ChType []byte Name []byte } type block struct { Columns []chColumn NumRows uint64 NumColumns uint64 info blockInfo headerWriter *readerwriter.Writer } func newBlock() *block { return &block{ headerWriter: readerwriter.NewWriter(), } } func (block *block) reset() { block.headerWriter.Reset() block.Columns = block.Columns[:0] 
block.NumRows = 0 block.NumColumns = 0 } func (block *block) read(ch *conn) error { if _, err := ch.reader.ByteString(); err != nil { // temporary table return &readError{"block: temporary table", err} } ch.reader.SetCompress(ch.compress) defer ch.reader.SetCompress(false) var err error err = block.info.read(ch.reader) if err != nil { return err } block.NumColumns, err = ch.reader.Uvarint() if err != nil { return &readError{"block: read NumColumns", err} } block.NumRows, err = ch.reader.Uvarint() if err != nil { return &readError{"block: read NumRows", err} } return nil } func (block *block) readColumns(ch *conn) error { ch.reader.SetCompress(ch.compress) defer ch.reader.SetCompress(false) block.Columns = make([]chColumn, block.NumColumns) for i := uint64(0); i < block.NumColumns; i++ { col, err := block.nextColumn(ch) if err != nil { return err } block.Columns[i] = col } return nil } func (block *block) readColumnsData(ch *conn, needValidateData bool, columns ...column.ColumnBasic) error { ch.reader.SetCompress(ch.compress) defer ch.reader.SetCompress(false) for _, col := range columns { err := col.HeaderReader(ch.reader, true, ch.serverInfo.Revision) if err != nil { return fmt.Errorf("read column header: %w", err) } if needValidateData { if errValidate := col.Validate(); errValidate != nil { return fmt.Errorf("validate %q: %w", col.Name(), errValidate) } } err = col.ReadRaw(int(block.NumRows), ch.reader) if err != nil { return fmt.Errorf("read data %q: %w", col.Name(), err) } } return nil } func (block *block) reorderColumns(columns []column.ColumnBasic) ([]column.ColumnBasic, error) { for i, c := range block.Columns { // check if already sorted if bytes.Equal(columns[i].Name(), block.Columns[i].Name) { continue } index, col := findColumn(columns, c.Name) if col == nil { return nil, &ColumnNotFoundError{ Column: string(c.Name), } } columns[index] = columns[i] columns[i] = col } return columns, nil } func findColumn(columns []column.ColumnBasic, name []byte) (int, 
column.ColumnBasic) { for i, col := range columns { if bytes.Equal(col.Name(), name) { return i, col } } return 0, nil } func (block *block) nextColumn(ch *conn) (chColumn, error) { col := chColumn{} var err error if col.Name, err = ch.reader.ByteString(); err != nil { return col, &readError{"block: read column name", err} } if col.ChType, err = ch.reader.ByteString(); err != nil { return col, &readError{"block: read column type", err} } if ch.serverInfo.Revision >= helper.DbmsMinProtocolWithCustomSerialization { customSerialization, err := ch.reader.ReadByte() if err != nil { return col, &readError{"block: read custom serialization", err} } if customSerialization == 1 { return col, &readError{"block: custom serialization not supported", nil} } } return col, nil } func (block *block) writeHeader(ch *conn, numRows int) error { block.info.write(ch.writer) // NumColumns ch.writer.Uvarint(block.NumColumns) // NumRows ch.writer.Uvarint(uint64(numRows)) _, err := ch.writer.WriteTo(ch.writerToCompress) if err != nil { return &writeError{"write block info", err} } err = ch.flushCompress() if err != nil { return &writeError{"flush block info", err} } return nil } func (block *block) writeColumnsBuffer(ch *conn, columns ...column.ColumnBasic) error { numRows := columns[0].NumRow() for i, column := range block.Columns { if numRows != columns[i].NumRow() { return &NumberWriteError{ FirstNumRow: numRows, NumRow: columns[i].NumRow(), Column: string(column.Name), FirstColumn: string(block.Columns[0].Name), } } block.headerWriter.Reset() block.headerWriter.ByteString(column.Name) block.headerWriter.ByteString(column.ChType) if ch.serverInfo.Revision >= helper.DbmsMinProtocolWithCustomSerialization { block.headerWriter.Uint8(0) } columns[i].HeaderWriter(block.headerWriter) if _, err := block.headerWriter.WriteTo(ch.writerToCompress); err != nil { return &writeError{"block: write header block data for column " + string(column.Name), err} } if _, err := 
columns[i].WriteTo(ch.writerToCompress); err != nil { return &writeError{"block: write block data for column " + string(column.Name), err} } } err := ch.flushCompress() if err != nil { return &writeError{"block: flush block data", err} } return nil } type blockInfo struct { field1 uint64 isOverflows uint8 field2 uint64 bucketNum int32 num3 uint64 } func (info *blockInfo) read(r *readerwriter.Reader) error { var err error if info.field1, err = r.Uvarint(); err != nil { return &readError{"blockInfo: read field1", err} } if info.isOverflows, err = r.ReadByte(); err != nil { return &readError{"blockInfo: read isOverflows", err} } if info.field2, err = r.Uvarint(); err != nil { return &readError{"blockInfo: read field2", err} } if info.bucketNum, err = r.Int32(); err != nil { return &readError{"blockInfo: read bucketNum", err} } if info.num3, err = r.Uvarint(); err != nil { return &readError{"blockInfo: read num3", err} } return nil } func (info *blockInfo) write(w *readerwriter.Writer) { w.Uvarint(1) w.Uint8(info.isOverflows) w.Uvarint(2) if info.bucketNum == 0 { info.bucketNum = -1 } w.Int32(info.bucketNum) w.Uvarint(0) } ================================================ FILE: block_test.go ================================================ package chconn import ( "context" "errors" "io" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestBlockReadError(t *testing.T) { startValidReader := 15 tests := []struct { name string wantErr string numberValid int }{ { name: "blockInfo: temporary table", wantErr: "block: temporary table", numberValid: startValidReader - 1, }, { name: "blockInfo: read field1", wantErr: "blockInfo: read field1", numberValid: startValidReader, }, { name: "blockInfo: read isOverflows", wantErr: "blockInfo: read isOverflows", numberValid: startValidReader + 1, }, { name: "blockInfo: read field2", wantErr: "blockInfo: read field2", numberValid: startValidReader + 2, }, { name: "blockInfo: read bucketNum", 
wantErr: "blockInfo: read bucketNum", numberValid: startValidReader + 3, }, { name: "blockInfo: read num3", wantErr: "blockInfo: read num3", numberValid: startValidReader + 4, }, { name: "block: read NumColumns", wantErr: "block: read NumColumns", numberValid: startValidReader + 5, }, { name: "block: read NumRows", wantErr: "block: read NumRows", numberValid: startValidReader + 6, }, { name: "block: read column name", wantErr: "block: read column name", numberValid: startValidReader + 8, }, { name: "block: read column type", wantErr: "block: read column type", numberValid: startValidReader + 10, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: tt.numberValid, } } c, err := ConnectConfig(context.Background(), config) assert.NoError(t, err) stmt, err := c.Select(context.Background(), "SELECT * FROM system.numbers LIMIT 5;") require.Error(t, err) require.Nil(t, stmt) readErr, ok := err.(*readError) require.True(t, ok) require.Equal(t, readErr.msg, tt.wantErr) require.EqualError(t, readErr.Unwrap(), "timeout") assert.True(t, c.IsClosed()) }) } } ================================================ FILE: chconn.go ================================================ package chconn import ( "bufio" "context" "crypto/tls" "errors" "fmt" "io" "net" "strconv" "time" "github.com/vahid-sohrabloo/chconn/v2/column" "github.com/vahid-sohrabloo/chconn/v2/internal/ctxwatch" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) const ( connStatusUninitialized = iota connStatusConnecting connStatusClosed connStatusIdle connStatusBusy ) const ( // Name, version, revision, default DB clientHello = 0 // whether the compression must be used, // query text (without data for INSERTs). 
clientQuery = 1 // A block of data (compressed or not). clientData = 2 // Check that connection to the server is alive. clientPing = 4 ) const ( // Name, version, revision. serverHello = 0 // A block of data (compressed or not). serverData = 1 // The exception during query execution. serverException = 2 // Query execution progress: rows read, bytes read. serverProgress = 3 // Ping response serverPong = 4 // All packets were transmitted serverEndOfStream = 5 // Packet with profiling info. serverProfileInfo = 6 // A block with totals (compressed or not). serverTotals = 7 // A block with minimums and maximums (compressed or not). serverExtremes = 8 // Columns' description for default values calculation serverTableColumns = 11 // list of unique parts ids. //nolint:deadcode,unused,varcheck serverPartUUIDs = 12 // String (UUID) describes a request for which next task is needed //nolint:deadcode,unused,varcheck serverReadTaskRequest = 13 // Packet with profile events from server serverProfileEvents = 14 ) const ( dbmsVersionMajor = 1 dbmsVersionMinor = 0 dbmsVersionPatch = 0 dbmsVersionRevision = 54460 ) type queryProcessingStage uint64 const ( // queryProcessingStageComplete Completely. queryProcessingStageComplete queryProcessingStage = 2 ) // DialFunc is a function that can be used to connect to a ClickHouse server. type DialFunc func(ctx context.Context, network, addr string) (net.Conn, error) // LookupFunc is a function that can be used to lookup IPs addrs from host. type LookupFunc func(ctx context.Context, host string) (addrs []string, err error) // ReaderFunc is a function that can be used get reader for read from server type ReaderFunc func(io.Reader) io.Reader // WriterFunc is a function that can be used to get writer to writer from server // Note: DO NOT use bufio.Writer, chconn doesn't support flush type WriterFunc func(io.Writer) io.Writer // Conn is a low-level Clickhouse connection handle. It is not safe for concurrent usage. 
type Conn interface { // RawConn Get Raw Connection. Do not use unless you know what you want to do RawConn() net.Conn // Close the connection to database Close() error // IsClosed reports if the connection has been closed. IsClosed() bool // IsBusy reports if the connection is busy. IsBusy() bool // ServerInfo get Server info ServerInfo() *ServerInfo // Ping sends a ping to check that the connection to the server is alive. Ping(ctx context.Context) error // Exec executes a query without returning any rows. // NOTE: don't use it for insert and select query Exec(ctx context.Context, query string) error // ExecWithOption executes a query without returning any rows with Query options. // NOTE: don't use it for insert and select query ExecWithOption( ctx context.Context, query string, queryOptions *QueryOptions, ) error // Insert executes a insert query and commit all columns data. // // If the query is successful, the columns buffer will be reset. // // NOTE: only use for insert query Insert(ctx context.Context, query string, columns ...column.ColumnBasic) error // InsertWithOption executes a insert query with the query options and commit all columns data. // // If the query is successful, the columns buffer will be reset. // // NOTE: only use for insert query InsertWithOption(ctx context.Context, query string, queryOptions *QueryOptions, columns ...column.ColumnBasic) error // Insert executes a insert query and return a InsertStmt. // // NOTE: only use for insert query InsertStream(ctx context.Context, query string) (InsertStmt, error) // InsertWithOption executes a insert query with the query options and return a InsertStmt. // // If the query is successful, the columns buffer will be reset. // // NOTE: only use for insert query InsertStreamWithOption( ctx context.Context, query string, queryOptions *QueryOptions) (InsertStmt, error) // Select executes a query and return select stmt. 
// // NOTE: only use for select query Select(ctx context.Context, query string, columns ...column.ColumnBasic) (SelectStmt, error) // Select executes a query with the the query options and return select stmt. // // NOTE: only use for select query SelectWithOption( ctx context.Context, query string, queryOptions *QueryOptions, columns ...column.ColumnBasic, ) (SelectStmt, error) } type writeFlusher interface { io.Writer Flush() error } type conn struct { conn net.Conn // the underlying TCP connection parameterStatuses map[string]string // parameters that have been reported by the server serverInfo *ServerInfo clientInfo *ClientInfo config *Config status byte // One of connStatus* constants writer *readerwriter.Writer writerTo io.Writer writerToCompress io.Writer reader *readerwriter.Reader compress bool contextWatcher *ctxwatch.ContextWatcher block *block profileEvent *ProfileEvent } // Connect establishes a connection to a ClickHouse server using the environment and connString (in URL or DSN format) // to provide configuration. See documentation for ParseConfig for details. ctx can be used to cancel a connect attempt. func Connect(ctx context.Context, connString string) (Conn, error) { config, err := ParseConfig(connString) if err != nil { return nil, err } return ConnectConfig(ctx, config) } // ConnectConfig establishes a connection to a ClickHouse server using config. config must have been constructed with // ParseConfig. ctx can be used to cancel a connect attempt. // // If config.Fallbacks are present they will sequentially be tried in case of error establishing network connection. An // authentication error will terminate the chain of attempts (like libpq: // https://www.postgresql.org/docs/12/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS) and be returned as the error. Otherwise, // if all attempts fail the last error is returned. func ConnectConfig(octx context.Context, config *Config) (c Conn, err error) { // Default values are set in ParseConfig. 
Enforce initial creation by ParseConfig rather than setting defaults from // zero values. if !config.createdByParseConfig { panic("config must be created by ParseConfig") } // Simplify usage by treating primary config and fallbacks the same. fallbackConfigs := []*FallbackConfig{ { Host: config.Host, Port: config.Port, TLSConfig: config.TLSConfig, }, } fallbackConfigs = append(fallbackConfigs, config.Fallbacks...) ctx := octx fallbackConfigs, err = expandWithIPs(ctx, config.LookupFunc, fallbackConfigs) if err != nil { return nil, &connectError{config: config, msg: "hostname resolving error", err: err} } if len(fallbackConfigs) == 0 { return nil, &connectError{config: config, msg: "hostname resolving error", err: ErrIPNotFound} } foundBestServer := false var fallbackConfig *FallbackConfig for _, fc := range fallbackConfigs { // ConnectTimeout restricts the whole connection process. if config.ConnectTimeout != 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout) //nolint:gocritic defer cancel() } else { ctx = octx } c, err = connect(ctx, config, fc) if err == nil { foundBestServer = true break } else if chErr, ok := err.(*ChError); ok { return nil, &connectError{config: config, msg: "server error", err: chErr} } } if !foundBestServer && fallbackConfig != nil { c, err = connect(ctx, config, fallbackConfig) if cherr, ok := err.(*ChError); ok { err = &connectError{config: config, msg: "server error", err: cherr} } } if err != nil { return nil, err // no need to wrap in connectError because it will already be wrapped in all cases except ChError } if config.AfterConnect != nil { err := config.AfterConnect(ctx, c) if err != nil { c.RawConn().Close() return nil, &connectError{config: config, msg: "AfterConnect error", err: err} } } return c, nil } func expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*FallbackConfig) ([]*FallbackConfig, error) { var configs []*FallbackConfig for _, fb := range fallbacks { ips, 
		err := lookupFn(ctx, fb.Host)
		if err != nil {
			return nil, err
		}
		for _, ip := range ips {
			// A lookup result may be either a bare IP or a host:port pair; a
			// port in the result overrides the fallback entry's port.
			splitIP, splitPort, err := net.SplitHostPort(ip)
			if err == nil {
				port, err := strconv.ParseUint(splitPort, 10, 16)
				if err != nil {
					return nil, fmt.Errorf("error parsing port (%s) from lookup: %w", splitPort, err)
				}
				configs = append(configs, &FallbackConfig{
					Host:      splitIP,
					Port:      uint16(port),
					TLSConfig: fb.TLSConfig,
				})
			} else {
				// Bare IP: keep the port of the fallback entry.
				configs = append(configs, &FallbackConfig{
					Host:      ip,
					Port:      fb.Port,
					TLSConfig: fb.TLSConfig,
				})
			}
		}
	}

	return configs, nil
}

// connect dials a single host/port, optionally wraps it in TLS, wires up the
// reader/writer (with optional compression) and performs the protocol hello.
// The sequence of steps is order-sensitive: dial -> TLS -> context watcher ->
// reader/writer setup -> hello -> addendum.
func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig) (Conn, error) {
	c := new(conn)
	c.config = config
	c.compress = config.Compress != CompressNone
	var err error
	network, address := NetworkAddress(fallbackConfig.Host, fallbackConfig.Port)
	c.conn, err = config.DialFunc(ctx, network, address)
	if err != nil {
		// Surface network timeouts as errTimeout so callers can detect them.
		var netErr net.Error
		if errors.As(err, &netErr) && netErr.Timeout() {
			err = &errTimeout{err: err}
		}
		return nil, &connectError{config: config, msg: "dial error", err: err}
	}
	c.parameterStatuses = make(map[string]string)

	if fallbackConfig.TLSConfig != nil {
		c.conn = tls.Client(c.conn, fallbackConfig.TLSConfig)
	}

	c.status = connStatusConnecting
	// On context cancellation the watcher forces a deadline far in the past to
	// interrupt any blocked read/write; un-watching clears the deadline.
	c.contextWatcher = ctxwatch.NewContextWatcher(
		func() {
			c.conn.SetDeadline(time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC)) //nolint:errcheck //no need
		},
		func() {
			c.conn.SetDeadline(time.Time{}) //nolint:errcheck //no need
		},
	)

	if ctx != context.Background() {
		select {
		case <-ctx.Done():
			return nil, newContextAlreadyDoneError(ctx)
		default:
		}
		c.contextWatcher.Watch(ctx)
		defer c.contextWatcher.Unwatch()
	}

	c.writer = readerwriter.NewWriter()
	if config.ReaderFunc != nil {
		c.reader = readerwriter.NewReader(config.ReaderFunc(c.conn))
	} else {
		c.reader = readerwriter.NewReader(bufio.NewReaderSize(c.conn, c.config.MinReadBufferSize))
	}
	if config.WriterFunc != nil {
		c.writerTo = config.WriterFunc(c.conn)
	} else {
		c.writerTo = c.conn
	}
	if c.compress {
		c.writerToCompress = readerwriter.NewCompressWriter(c.writerTo, byte(config.Compress))
	} else {
		c.writerToCompress = c.writerTo
	}

	c.serverInfo = &ServerInfo{}
	err = c.hello()
	if err != nil {
		// NOTE(review): c.conn is not closed on a failed hello; the dialed
		// socket appears to leak here — confirm and consider closing it.
		return nil, preferContextOverNetTimeoutError(ctx, err)
	}
	c.sendAddendum()

	c.block = newBlock()
	c.profileEvent = newProfileEvent()

	c.status = connStatusIdle
	return c, nil
}

// sendAddendum buffers the post-hello addendum (the quota key) for servers
// whose revision supports it.
func (ch *conn) sendAddendum() {
	if ch.serverInfo.Revision >= helper.DbmsMinProtocolWithQuotaKey {
		ch.writer.String(ch.config.QuotaKey)
	}
}

// flushCompress flushes the compressing writer if one is in use; a no-op otherwise.
func (ch *conn) flushCompress() error {
	if w, ok := ch.writerToCompress.(writeFlusher); ok {
		return w.Flush()
	}
	return nil
}

// RawConn Get Raw Connection. Do not use unless you know what you want to do
func (ch *conn) RawConn() net.Conn {
	return ch.conn
}

// hello sends the clientHello handshake (client name/version, database,
// credentials) and validates that the server answered with serverHello
// (detected via a non-zero server revision).
func (ch *conn) hello() error {
	ch.writer.Uvarint(clientHello)
	ch.writer.String(ch.config.ClientName)
	ch.writer.Uvarint(dbmsVersionMajor)
	ch.writer.Uvarint(dbmsVersionMinor)
	ch.writer.Uvarint(dbmsVersionRevision)
	ch.writer.String(ch.config.Database)
	ch.writer.String(ch.config.User)
	ch.writer.String(ch.config.Password)

	if _, err := ch.writer.WriteTo(ch.writerTo); err != nil {
		return fmt.Errorf("write hello: %w", err)
	}

	res, err := ch.receiveAndProcessData(emptyOnProgress)
	if err != nil {
		return err
	}
	if ch.serverInfo.Revision == 0 {
		return &unexpectedPacket{expected: "serverHello", actual: res}
	}
	return nil
}

// IsClosed reports if the connection has been closed.
func (ch *conn) IsClosed() bool {
	return ch.status < connStatusIdle
}

// IsBusy reports if the connection is busy.
func (ch *conn) IsBusy() bool {
	return ch.status == connStatusBusy
}

// lock locks the connection.
func (ch *conn) lock() error {
	switch ch.status {
	case connStatusBusy:
		return &connLockError{status: "conn busy"} // This only should be possible in case of an application bug.
	case connStatusClosed:
		return &connLockError{status: "conn closed"}
	case connStatusUninitialized:
		return &connLockError{status: "conn uninitialized"}
	}
	ch.status = connStatusBusy
	return nil
}

// unlock returns a busy connection to idle; unlocking a closed connection is a
// no-op, anything else is a programming error.
func (ch *conn) unlock() {
	switch ch.status {
	case connStatusBusy:
		ch.status = connStatusIdle
	case connStatusClosed:
	default:
		panic("BUG: cannot unlock unlocked connection") // This should only be possible if there is a bug in this package.
	}
}

// sendQueryWithOption buffers a clientQuery packet (query id, client info,
// settings, stage, compression flag, query text, parameters) and finishes the
// exchange by sending an empty data block. Field order follows the native
// protocol and must not be changed.
func (ch *conn) sendQueryWithOption(
	query,
	queryID string,
	settings Settings,
	parameters *Parameters,
) error {
	ch.writer.Uvarint(clientQuery)
	ch.writer.String(queryID)
	if ch.serverInfo.Revision >= helper.DbmsMinRevisionWithClientInfo {
		if ch.clientInfo == nil {
			ch.clientInfo = &ClientInfo{}
		}
		ch.clientInfo.fillOSUserHostNameAndVersionInfo()
		ch.clientInfo.ClientName = ch.config.Database + " " + ch.config.ClientName
		ch.clientInfo.write(ch)
	}

	// setting
	if settings != nil && ch.serverInfo.Revision >= helper.DbmsMinRevisionWithSettingsSerializedAsStrings {
		settings.write(ch.writer)
	}
	// Empty string terminates the settings list.
	ch.writer.String("")

	if ch.serverInfo.Revision >= helper.DbmsMinRevisionWithInterServerSecret {
		ch.writer.String("")
	}

	ch.writer.Uvarint(uint64(queryProcessingStageComplete))

	// compression
	if ch.compress {
		ch.writer.Uint8(1)
	} else {
		ch.writer.Uint8(0)
	}

	ch.writer.String(query)

	if ch.serverInfo.Revision >= helper.DbmsMinProtocolWithParameters {
		parameters.write(ch.writer)
		// Empty string terminates the parameters list.
		ch.writer.String("")
	} else if parameters.hasParam() {
		return errors.New("parameters are not supported by the server")
	}

	return ch.sendEmptyBlock()
}

// sendData buffers a clientData packet header for block. When compression is
// on, the packet type and (empty) table name must go out uncompressed first.
func (ch *conn) sendData(block *block, numRows int) error {
	ch.writer.Uvarint(clientData)
	// name
	ch.writer.String("")

	// if compress enable we must send this part with uncompressed data
	if ch.compress {
		_, err := ch.writer.WriteTo(ch.writerTo)
		if err != nil {
			return &writeError{"write block info", err}
		}
	}
	return block.writeHeader(ch, numRows)
}

// sendEmptyBlock sends a zero-row data block, which terminates query data.
func (ch *conn) sendEmptyBlock() error {
	ch.block.reset()
	return ch.sendData(ch.block, 0)
}

// Close the connection to database. Safe to call more than once.
func (ch *conn) Close() error {
	if ch.status == connStatusClosed {
		return nil
	}
	ch.contextWatcher.Unwatch()
	ch.status = connStatusClosed
	return ch.conn.Close()
}

// readTableColumn reads and discards the serverTableColumns payload (two strings).
func (ch *conn) readTableColumn() {
	// todo check errors
	ch.reader.String() //nolint:errcheck //no needed
	ch.reader.String() //nolint:errcheck //no needed
}

// receiveAndProcessData reads one server packet and dispatches on its type.
// Progress and table-columns packets are consumed and the read recurses for
// the next packet; a server exception closes the connection and is returned
// as the error.
func (ch *conn) receiveAndProcessData(onProgress func(*Progress)) (interface{}, error) {
	packet, err := ch.reader.Uvarint()
	if err != nil {
		return nil, &readError{"packet: read packet type", err}
	}
	switch packet {
	case serverData, serverTotals, serverExtremes:
		ch.block.reset()
		err = ch.block.read(ch)
		return ch.block, err
	case serverProfileInfo:
		profile := newProfile()
		err = profile.read(ch)
		return profile, err
	case serverProgress:
		progress := newProgress()
		err = progress.read(ch)
		if err == nil && onProgress != nil {
			onProgress(progress)
			// Keep reading: progress packets precede the "real" answer.
			return ch.receiveAndProcessData(onProgress)
		}
		return progress, err
	case serverHello:
		err = ch.serverInfo.read(ch.reader)
		return nil, err
	case serverPong:
		return &pong{}, err
	case serverException:
		err := &ChError{}
		defer ch.Close()
		if errRead := err.read(ch.reader); errRead != nil {
			return nil, errRead
		}
		return nil, err
	case serverEndOfStream:
		return nil, nil
	case serverTableColumns:
		ch.readTableColumn()
		return ch.receiveAndProcessData(onProgress)
	case serverProfileEvents:
		ch.block.reset()
		// Compression is temporarily disabled while reading this block and
		// restored afterwards.
		oldCompress := ch.compress
		defer func() {
			ch.compress = oldCompress
		}()
		ch.compress = false
		err = ch.block.read(ch)
		if err != nil {
			return nil, err
		}
		// := deliberately shadows err in this case's scope.
		err := ch.profileEvent.read(ch)
		if err != nil {
			return nil, err
		}
		return ch.profileEvent, nil
	}
	return nil, &notImplementedPacket{packet: packet}
}

// emptyOnProgress is the no-op progress callback used when the caller gave none.
var emptyOnProgress = func(*Progress) {
}

var emptyQueryOptions = &QueryOptions{
	OnProgress: emptyOnProgress,
}

// QueryOptions holds per-query options: the query id, server settings, the
// progress/profile callbacks and query parameters.
type QueryOptions struct {
	QueryID        string
	Settings       Settings
	OnProgress     func(*Progress)
	OnProfile      func(*Profile)
	OnProfileEvent func(*ProfileEvent)
	Parameters     *Parameters
	// UseGoTime: presumably controls time decoding behavior — TODO confirm
	// against the column readers.
	UseGoTime bool
}

// Exec executes a query without returning any rows.
// NOTE: don't use it for insert and select query
func (ch *conn) Exec(ctx context.Context, query
string) error { return ch.ExecWithOption(ctx, query, nil) } func (ch *conn) ExecWithOption( ctx context.Context, query string, queryOptions *QueryOptions, ) error { err := ch.lock() if err != nil { return err } defer func() { ch.unlock() if err != nil { ch.Close() } }() if ctx != context.Background() { select { case <-ctx.Done(): return newContextAlreadyDoneError(ctx) default: } ch.contextWatcher.Watch(ctx) defer ch.contextWatcher.Unwatch() } if queryOptions == nil { queryOptions = emptyQueryOptions } err = ch.sendQueryWithOption(query, queryOptions.QueryID, queryOptions.Settings, queryOptions.Parameters) if err != nil { return preferContextOverNetTimeoutError(ctx, err) } if queryOptions.OnProgress == nil { queryOptions.OnProgress = emptyOnProgress } _, err = ch.receiveAndProcessData(queryOptions.OnProgress) return preferContextOverNetTimeoutError(ctx, err) } ================================================ FILE: chconn_test.go ================================================ package chconn import ( "context" "crypto/tls" "errors" "io" "os" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestConnect(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") + " connect_timeout=10" conn, err := Connect(context.Background(), connString) require.NoError(t, err) require.NoError(t, conn.Ping(context.Background())) require.NotEmpty(t, conn.ServerInfo().String()) require.Nil(t, conn.Close()) require.True(t, conn.IsClosed()) // test protected two close require.Nil(t, conn.Close()) } func TestConnectError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) config.Password = "invalid password" config.User = "invalid username" conn, err := ConnectConfig(context.Background(), config) assert.Contains(t, err.Error(), "server error ( DB::Exception (516)") assert.Contains(t, errors.Unwrap(err).Error(), " DB::Exception 
(516):") assert.Nil(t, conn) conn, err = Connect(context.Background(), "host>0") assert.EqualError(t, err, "cannot parse `host>0`: failed to parse as DSN (invalid dsn)") assert.Nil(t, conn) ctx, cancel := context.WithCancel(context.Background()) cancel() conn, err = Connect(ctx, connString) assert.Error(t, errors.Unwrap(err), context.Canceled) assert.Nil(t, conn) conn, err = Connect(context.Background(), "host=invalid_host") assert.Contains(t, err.Error(), "hostname resolving error") assert.Nil(t, conn) config, err = ParseConfig(connString) require.NoError(t, err) config.Port = 63666 conn, err = ConnectConfig(context.Background(), config) assert.Contains(t, err.Error(), "connect: connection refused") assert.Nil(t, conn) config, err = ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.AfterConnect = func(ctx context.Context, c Conn) error { return errors.New("afterConnect err") } _, err = ConnectConfig(context.Background(), config) assert.EqualError(t, errors.Unwrap(err), "afterConnect err") config, err = ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.WriterFunc = func(w io.Writer) io.Writer { return &writerErrorHelper{ err: errors.New("timeout"), w: w, numberValid: 0, } } _, err = ConnectConfig(context.Background(), config) assert.EqualError(t, err, "write hello: timeout") } func TestEndOfStream(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") conn, err := Connect(context.Background(), connString) require.NoError(t, err) err = conn.Exec(context.Background(), `CREATE TABLE IF NOT EXISTS example ( country_code FixedString(2), os_id UInt8, browser_id UInt8, categories Array(Int16), action_day Date, action_time DateTime ) engine=Memory`) require.NoError(t, err) } func TestException(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") conn, err := Connect(context.Background(), connString) require.NoError(t, err) require.NoError(t, 
conn.Ping(context.Background())) err = conn.Exec(context.Background(), `invalid query`) var chError *ChError require.True(t, errors.As(err, &chError)) require.Equal(t, chError.Code, ChErrorSyntaxError) require.Equal(t, chError.Name, "DB::Exception") } func TestTlsPreferConnect(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_TLS_CONN_STRING") if connString == "" { t.Skip("please set CHX_TEST_TCP_TLS_CONN_STRING env") return } conn, err := Connect(context.Background(), connString) require.NoError(t, err) require.NoError(t, conn.Ping(context.Background())) if _, ok := conn.RawConn().(*tls.Conn); !ok { t.Error("not a TLS connection") } conn.RawConn().Close() } func TestConnectConfigRequiresConnConfigFromParseConfig(t *testing.T) { t.Parallel() config := &Config{} require.PanicsWithValue(t, "config must be created by ParseConfig", func() { ConnectConfig(context.Background(), config) }) } func TestLockError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") c, err := Connect(context.Background(), connString) require.NoError(t, err) c.(*conn).status = connStatusBusy require.EqualError(t, c.(*conn).lock(), "conn busy") c.(*conn).status = connStatusClosed require.EqualError(t, c.(*conn).lock(), "conn closed") c.(*conn).status = connStatusUninitialized require.EqualError(t, c.(*conn).lock(), "conn uninitialized") resSelect, err := c.Select(context.Background(), "SET enable_http_compression=1") require.EqualError(t, err, "conn uninitialized") require.Nil(t, resSelect) require.EqualError(t, c.(*conn).lock(), "conn uninitialized") } func TestUnlockError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") c, err := Connect(context.Background(), connString) require.NoError(t, err) c.(*conn).status = connStatusUninitialized require.PanicsWithValue(t, "BUG: cannot unlock unlocked connection", func() { c.(*conn).unlock() }) } func TestExecError(t *testing.T) { t.Parallel() connString := 
os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) c.(*conn).status = connStatusUninitialized err = c.Exec(context.Background(), "SET enable_http_compression=1") require.EqualError(t, err, "conn uninitialized") require.EqualError(t, c.(*conn).lock(), "conn uninitialized") c.Close() config.WriterFunc = func(w io.Writer) io.Writer { return &writerErrorHelper{ err: errors.New("timeout"), w: w, numberValid: 1, } } c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Exec(context.Background(), "SET enable_http_compression=1") require.EqualError(t, err, "write block info (timeout)") assert.True(t, c.IsClosed()) } func TestExecCtxError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) cancel() err = c.Exec(ctx, "select * from system.numbers limit 1") require.EqualError(t, err, "timeout: context already done: context canceled") assert.False(t, c.IsClosed()) config.WriterFunc = func(w io.Writer) io.Writer { return &writerSlowHelper{ w: w, sleep: time.Second, } } c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() err = c.Exec(ctx, "select * from system.numbers") require.EqualError(t, errors.Unwrap(err), "context deadline exceeded") assert.True(t, c.IsClosed()) } func TestReceivePackError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: 13, } } c, err := 
ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Exec(context.Background(), `SELECT * FROM system.numbers limit 1`) require.EqualError(t, err, "packet: read packet type (timeout)") assert.True(t, c.IsClosed()) } ================================================ FILE: chpool/common_test.go ================================================ package chpool import ( "context" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" ) // Conn.Release is an asynchronous process that returns immediately. There is no signal when the actual work is // completed. To test something that relies on the actual work for Conn.Release being completed we must simply wait. // This function wraps the sleep so there is more meaning for the callers. func waitForReleaseToComplete() { time.Sleep(500 * time.Millisecond) } type execer interface { Exec(ctx context.Context, sql string) error } func testExec(t *testing.T, db execer) { err := db.Exec(context.Background(), "SET enable_http_compression=1") require.NoError(t, err) } type selecter interface { Select(ctx context.Context, query string, columns ...column.ColumnBasic) (chconn.SelectStmt, error) } func testSelect(t *testing.T, db selecter) { var ( num []uint64 ) col := column.New[uint64]() stmt, err := db.Select(context.Background(), "SELECT * FROM system.numbers LIMIT 5;", col) require.NoError(t, err) for stmt.Next() { assert.NoError(t, err) num = col.Read(num) assert.NoError(t, err) } assert.NoError(t, stmt.Err()) assert.Equal(t, 5, len(num)) stmt.Close() assert.ElementsMatch(t, []uint64{0, 1, 2, 3, 4}, num) } func assertConfigsEqual(t *testing.T, expected, actual *Config, testName string) { if !assert.NotNil(t, expected) { return } if !assert.NotNil(t, actual) { return } assert.Equalf(t, expected.ConnString(), actual.ConnString(), "%s - ConnString", testName) // Can't test function equality, 
so just test that they are set or not. assert.Equalf(t, expected.AfterConnect == nil, actual.AfterConnect == nil, "%s - AfterConnect", testName) assert.Equalf(t, expected.BeforeAcquire == nil, actual.BeforeAcquire == nil, "%s - BeforeAcquire", testName) assert.Equalf(t, expected.AfterRelease == nil, actual.AfterRelease == nil, "%s - AfterRelease", testName) assert.Equalf(t, expected.MaxConnLifetime, actual.MaxConnLifetime, "%s - MaxConnLifetime", testName) assert.Equalf(t, expected.MaxConnIdleTime, actual.MaxConnIdleTime, "%s - MaxConnIdleTime", testName) assert.Equalf(t, expected.MaxConns, actual.MaxConns, "%s - MaxConns", testName) assert.Equalf(t, expected.MinConns, actual.MinConns, "%s - MinConns", testName) assert.Equalf(t, expected.HealthCheckPeriod, actual.HealthCheckPeriod, "%s - HealthCheckPeriod", testName) assertConnConfigsEqual(t, expected.ConnConfig, actual.ConnConfig, testName) } func assertConnConfigsEqual(t *testing.T, expected, actual *chconn.Config, testName string) { if !assert.NotNil(t, expected) { return } if !assert.NotNil(t, actual) { return } assert.Equalf(t, expected.ConnString(), actual.ConnString(), "%s - ConnString", testName) assert.Equalf(t, expected.Host, actual.Host, "%s - Host", testName) assert.Equalf(t, expected.Database, actual.Database, "%s - Database", testName) assert.Equalf(t, expected.Port, actual.Port, "%s - Port", testName) assert.Equalf(t, expected.User, actual.User, "%s - User", testName) assert.Equalf(t, expected.Password, actual.Password, "%s - Password", testName) assert.Equalf(t, expected.ConnectTimeout, actual.ConnectTimeout, "%s - ConnectTimeout", testName) assert.Equalf(t, expected.RuntimeParams, actual.RuntimeParams, "%s - RuntimeParams", testName) // Can't test function equality, so just test that they are set or not. 
assert.Equalf(t, expected.ValidateConnect == nil, actual.ValidateConnect == nil, "%s - ValidateConnect", testName) assert.Equalf(t, expected.AfterConnect == nil, actual.AfterConnect == nil, "%s - AfterConnect", testName) if assert.Equalf(t, expected.TLSConfig == nil, actual.TLSConfig == nil, "%s - TLSConfig", testName) { if expected.TLSConfig != nil { assert.Equalf(t, expected.TLSConfig.InsecureSkipVerify, actual.TLSConfig.InsecureSkipVerify, "%s - TLSConfig InsecureSkipVerify", testName) assert.Equalf(t, expected.TLSConfig.ServerName, actual.TLSConfig.ServerName, "%s - TLSConfig ServerName", testName) } } if assert.Equalf(t, len(expected.Fallbacks), len(actual.Fallbacks), "%s - Fallbacks", testName) { for i := range expected.Fallbacks { assert.Equalf(t, expected.Fallbacks[i].Host, actual.Fallbacks[i].Host, "%s - Fallback %d - Host", testName, i) assert.Equalf(t, expected.Fallbacks[i].Port, actual.Fallbacks[i].Port, "%s - Fallback %d - Port", testName, i) if assert.Equalf(t, expected.Fallbacks[i].TLSConfig == nil, actual.Fallbacks[i].TLSConfig == nil, "%s - Fallback %d - TLSConfig", testName, i) { if expected.Fallbacks[i].TLSConfig != nil { assert.Equalf(t, expected.Fallbacks[i].TLSConfig.InsecureSkipVerify, actual.Fallbacks[i].TLSConfig.InsecureSkipVerify, "%s - Fallback %d - TLSConfig InsecureSkipVerify", testName) assert.Equalf(t, expected.Fallbacks[i].TLSConfig.ServerName, actual.Fallbacks[i].TLSConfig.ServerName, "%s - Fallback %d - TLSConfig ServerName", testName) } } } } } ================================================ FILE: chpool/conn.go ================================================ package chpool import ( "context" "sync/atomic" puddle "github.com/jackc/puddle/v2" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" ) // Conn is an acquired *chconn.Conn from a Pool. type Conn interface { Release() // ExecWithOption executes a query without returning any rows with Query options. 
// NOTE: don't use it for insert and select query ExecWithOption( ctx context.Context, query string, queryOptions *chconn.QueryOptions, ) error // Select executes a query with the the query options and return select stmt. // NOTE: only use for select query SelectWithOption( ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic, ) (chconn.SelectStmt, error) // InsertWithSetting executes a query with the query options and commit all columns data. // NOTE: only use for insert query InsertWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic) error // InsertWithSetting executes a query with the query options and commit all columns data. // NOTE: only use for insert query InsertStreamWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions) (chconn.InsertStmt, error) // Conn get the underlying chconn.Conn Conn() chconn.Conn // Hijack assumes ownership of the connection from the pool. Caller is responsible for closing the connection. Hijack // will panic if called on an already released or hijacked connection. Hijack() chconn.Conn Ping(ctx context.Context) error } type conn struct { res *puddle.Resource[*connResource] p *pool } // Release returns c to the pool it was acquired from. Once Release has been called, other methods must not be called. // However, it is safe to call Release multiple times. Subsequent calls after the first will be ignored. 
func (c *conn) Release() {
	if c.res == nil {
		// Already released or hijacked.
		return
	}

	conn := c.Conn()
	res := c.res
	// Clear first so later calls become no-ops.
	c.res = nil

	if conn.IsClosed() || conn.IsBusy() {
		res.Destroy()
		// Signal to the health check to run since we just destroyed a connections
		// and we might be below minConns now
		c.p.triggerHealthCheck()
		return
	}

	// If the pool is consistently being used, we might never get to check the
	// lifetime of a connection since we only check idle connections in checkConnsHealth
	// so we also check the lifetime here and force a health check
	if c.p.isExpired(res) {
		atomic.AddInt64(&c.p.lifetimeDestroyCount, 1)
		res.Destroy()
		// Signal to the health check to run since we just destroyed a connections
		// and we might be below minConns now
		c.p.triggerHealthCheck()
		return
	}

	if c.p.afterRelease == nil {
		res.Release()
		return
	}

	// afterRelease decides asynchronously whether the connection returns to
	// the pool or gets destroyed.
	go func() {
		if c.p.afterRelease(conn) {
			res.Release()
		} else {
			res.Destroy()
			// Signal to the health check to run since we just destroyed a connections
			// and we might be below minConns now
			c.p.triggerHealthCheck()
		}
	}()
}

// Hijack assumes ownership of the connection from the pool. Caller is responsible for closing the connection. Hijack
// will panic if called on an already released or hijacked connection.
func (c *conn) Hijack() chconn.Conn {
	if c.res == nil {
		panic("cannot hijack already released or hijacked connection")
	}
	conn := c.Conn()
	res := c.res
	c.res = nil
	res.Hijack()
	return conn
}

// ExecWithOption delegates to the underlying chconn.Conn.
func (c *conn) ExecWithOption(
	ctx context.Context,
	query string,
	queryOptions *chconn.QueryOptions,
) error {
	return c.Conn().ExecWithOption(ctx, query, queryOptions)
}

// Ping delegates to the underlying chconn.Conn.
func (c *conn) Ping(ctx context.Context) error {
	return c.Conn().Ping(ctx)
}

// SelectWithOption runs a select on the underlying connection and wraps the
// returned statement so that closing it releases the pooled connection.
func (c *conn) SelectWithOption(
	ctx context.Context,
	query string,
	queryOptions *chconn.QueryOptions,
	columns ...column.ColumnBasic,
) (chconn.SelectStmt, error) {
	s, err := c.Conn().SelectWithOption(ctx, query, queryOptions, columns...)
	if err != nil {
		return nil, err
	}
	return &selectStmt{
		SelectStmt: s,
		conn:       c,
	}, nil
}

// InsertWithOption delegates the insert to the underlying chconn.Conn.
func (c *conn) InsertWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic) error {
	return c.Conn().InsertWithOption(ctx, query, queryOptions, columns...)
}

// InsertStreamWithOption opens an insert stream on the underlying connection
// and wraps the statement so that finishing it releases the pooled connection.
func (c *conn) InsertStreamWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions) (chconn.InsertStmt, error) {
	s, err := c.Conn().InsertStreamWithOption(ctx, query, queryOptions)
	if err != nil {
		return nil, err
	}
	return &insertStmt{
		InsertStmt: s,
		conn:       c,
	}, nil
}

// Conn get the underlying chconn.Conn
func (c *conn) Conn() chconn.Conn {
	return c.connResource().conn
}

func (c *conn) connResource() *connResource {
	return c.res.Value()
}

================================================ FILE: chpool/insert_stmt.go ================================================

package chpool

import (
	"context"

	"github.com/vahid-sohrabloo/chconn/v2"
)

// insertStmt wraps a chconn.InsertStmt acquired from a pooled connection so
// that finishing the statement releases the connection back to the pool.
type insertStmt struct {
	chconn.InsertStmt
	conn Conn
}

// Flush sends the buffered data and releases the pooled connection.
func (s *insertStmt) Flush(ctx context.Context) error {
	if s.conn == nil {
		return nil
	}
	defer s.conn.Release()
	return s.InsertStmt.Flush(ctx)
}

// Close closes the underlying statement and releases the pooled connection.
func (s *insertStmt) Close() {
	if s.conn == nil {
		return
	}
	s.InsertStmt.Close()
	s.conn.Release()
}

================================================ FILE: chpool/pool.go ================================================

package chpool

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	puddle "github.com/jackc/puddle/v2"

	"github.com/vahid-sohrabloo/chconn/v2"
	"github.com/vahid-sohrabloo/chconn/v2/column"
)

// Pool defaults, used when the corresponding Config field is unset.
var defaultMaxConns = int32(4)
var defaultMinConns = int32(0)
var defaultCreateIdleTimeout = time.Second * 10
var defaultMaxConnLifetime = time.Hour
var defaultMaxConnIdleTime = time.Minute * 30
var defaultHealthCheckPeriod = time.Minute

// connResource pairs a chconn.Conn with a pre-allocated free-list of conn
// wrapper structs, so acquiring from the pool does not allocate per acquire.
type connResource struct {
	conn  chconn.Conn
	conns []conn
}

// getConn hands out a conn wrapper backed by res, refilling the free-list in
// batches when it runs out.
func (cr *connResource) getConn(p *pool, res *puddle.Resource[*connResource]) Conn {
	if len(cr.conns) == 0 {
cr.conns = make([]conn, 128) } c := &cr.conns[len(cr.conns)-1] cr.conns = cr.conns[0 : len(cr.conns)-1] c.res = res c.p = p return c } // Pool is a connection pool for chconn type Pool interface { // Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned // to pool and closed. Close() Acquire(ctx context.Context) (Conn, error) // AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the // call of f. The return value is either an error acquiring the Conn or the return value of f. The Conn is // automatically released after the call of f. AcquireFunc(ctx context.Context, f func(Conn) error) error // AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and // keep-alive functionality. It does not update pool statistics. AcquireAllIdle(ctx context.Context) []Conn // Exec executes a query without returning any rows. // NOTE: don't use it for insert and select query Exec(ctx context.Context, query string) error // ExecWithOption executes a query without returning any rows with Query options. // NOTE: don't use it for insert and select query ExecWithOption( ctx context.Context, query string, queryOptions *chconn.QueryOptions, ) error // Insert executes a insert query and commit all columns data. // // If the query is successful, the columns buffer will be reset. // // NOTE: only use for insert query Insert(ctx context.Context, query string, columns ...column.ColumnBasic) error // InsertWithOption executes a insert query with the query options and commit all columns data. // // If the query is successful, the columns buffer will be reset. // // NOTE: only use for insert query InsertWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic) error // Insert executes a insert query and return a InsertStmt. 
	//
	// NOTE: only use for insert query
	InsertStream(ctx context.Context, query string) (chconn.InsertStmt, error)
	// InsertStreamWithOption executes an insert query with the query options and returns an InsertStmt.
	//
	// If the query is successful, the columns buffer will be reset.
	//
	// NOTE: only use for insert query
	InsertStreamWithOption(
		ctx context.Context,
		query string,
		queryOptions *chconn.QueryOptions) (chconn.InsertStmt, error)
	// Select executes a query and returns a select stmt.
	//
	// NOTE: only use for select query
	Select(ctx context.Context, query string, columns ...column.ColumnBasic) (chconn.SelectStmt, error)
	// SelectWithOption executes a query with the query options and returns a select stmt.
	//
	// NOTE: only use for select query
	SelectWithOption(
		ctx context.Context,
		query string,
		queryOptions *chconn.QueryOptions,
		columns ...column.ColumnBasic,
	) (chconn.SelectStmt, error)
	// Ping sends a ping to check that the connection to the server is alive.
	Ping(ctx context.Context) error
	// Stat returns a chpool.Stat struct with a snapshot of Pool statistics.
	Stat() *Stat
	// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would
	// disrupt all connections (such as a network interruption or a server state change).
	//
	// It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned
	// to the pool.
	Reset()
	// Config returns a copy of config that was used to initialize this pool.
	Config() *Config
}

// pool is the concrete Pool implementation backed by a puddle resource pool.
type pool struct {
	p      *puddle.Pool[*connResource]
	config *Config

	// Lifecycle hooks copied from Config at construction time.
	beforeConnect func(context.Context, *chconn.Config) error
	afterConnect  func(context.Context, chconn.Conn) error
	beforeAcquire func(context.Context, chconn.Conn) bool
	afterRelease  func(chconn.Conn) bool

	minConns              int32
	maxConns              int32
	maxConnLifetime       time.Duration
	maxConnLifetimeJitter time.Duration
	maxConnIdleTime       time.Duration
	healthCheckPeriod     time.Duration

	// healthCheckChan carries on-demand health-check triggers (capacity 1).
	healthCheckChan chan struct{}

	// Counters read/written with sync/atomic; surfaced through Stat().
	newConnsCount        int64
	lifetimeDestroyCount int64
	idleDestroyCount     int64

	closeOnce sync.Once
	closeChan chan struct{}
}

// Config is the configuration struct for creating a pool. It must be created by ParseConfig and then it can be
// modified. A manually initialized Config will cause ConnectConfig to panic.
type Config struct {
	ConnConfig *chconn.Config

	// BeforeConnect is called before a new connection is made. It is passed a copy of the underlying chconn.Config and
	// will not impact any existing open connections.
	BeforeConnect func(context.Context, *chconn.Config) error

	// AfterConnect is called after a connection is established, but before it is added to the pool.
	AfterConnect func(context.Context, chconn.Conn) error

	// BeforeAcquire is called before a connection is acquired from the pool. It must return true to allow the
	// acquisition or false to indicate that the connection should be destroyed and a different connection should be
	// acquired.
	BeforeAcquire func(context.Context, chconn.Conn) bool

	// AfterRelease is called after a connection is released, but before it is returned to the pool. It must return true to
	// return the connection to the pool or false to destroy the connection.
	AfterRelease func(chconn.Conn) bool

	// MaxConnLifetime is the duration since creation after which a connection will be automatically closed.
	MaxConnLifetime time.Duration

	// MaxConnLifetimeJitter is the duration after MaxConnLifetime to randomly decide to close a connection.
	// This helps prevent all connections from being closed at the exact same time, starving the pool.
	MaxConnLifetimeJitter time.Duration

	// MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check.
	MaxConnIdleTime time.Duration

	// MaxConns is the maximum size of the pool. The default is the greater of 4 or runtime.NumCPU().
	MaxConns int32

	// MinConns is the minimum size of the pool. After connection closes, the pool might dip below MinConns. A low
	// number of MinConns might mean the pool is empty after MaxConnLifetime until the health check has a chance
	// to create new connections.
	MinConns int32

	// HealthCheckPeriod is the duration between checks of the health of idle connections.
	HealthCheckPeriod time.Duration

	// CreateIdleTimeout is the timeout for creating an idle connection.
	CreateIdleTimeout time.Duration

	createdByParseConfig bool // Used to enforce created by ParseConfig rule.
}

// Copy returns a deep copy of the config that is safe to use and modify.
// The only exception is the tls.Config:
// according to the tls.Config docs it must not be modified after creation.
func (c *Config) Copy() *Config {
	newConfig := new(Config)
	*newConfig = *c
	newConfig.ConnConfig = c.ConnConfig.Copy()

	return newConfig
}

// ConnString returns the connection string as parsed by ParseConfig into a Config.
func (c *Config) ConnString() string {
	return c.ConnConfig.ConnString()
}

// New creates a new Pool. See ParseConfig for information on connString format.
func New(connString string) (Pool, error) {
	config, err := ParseConfig(connString)
	if err != nil {
		return nil, err
	}

	return NewWithConfig(config)
}

// NewWithConfig creates a new Pool. config must have been created by ParseConfig.
func NewWithConfig(config *Config) (Pool, error) {
	// Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
	// zero values.
	if !config.createdByParseConfig {
		panic("config must be created by ParseConfig")
	}

	p := &pool{
		config:                config,
		beforeConnect:         config.BeforeConnect,
		afterConnect:          config.AfterConnect,
		beforeAcquire:         config.BeforeAcquire,
		afterRelease:          config.AfterRelease,
		minConns:              config.MinConns,
		maxConns:              config.MaxConns,
		maxConnLifetime:       config.MaxConnLifetime,
		maxConnLifetimeJitter: config.MaxConnLifetimeJitter,
		maxConnIdleTime:       config.MaxConnIdleTime,
		healthCheckPeriod:     config.HealthCheckPeriod,
		healthCheckChan:       make(chan struct{}, 1),
		closeChan:             make(chan struct{}),
	}

	var err error
	p.p, err = puddle.NewPool(
		&puddle.Config[*connResource]{
			// Constructor dials a new ClickHouse connection for the pool.
			Constructor: func(ctx context.Context) (*connResource, error) {
				connConfig := p.config.ConnConfig.Copy()

				// Connection will continue in background even if Acquire is canceled. Ensure that a connect won't hang forever.
				if connConfig.ConnectTimeout <= 0 {
					connConfig.ConnectTimeout = 2 * time.Minute
				}

				if p.beforeConnect != nil {
					if err := p.beforeConnect(ctx, connConfig); err != nil {
						return nil, err
					}
				}

				c, err := chconn.ConnectConfig(ctx, connConfig)
				if err != nil {
					return nil, err
				}

				if p.afterConnect != nil {
					err := p.afterConnect(ctx, c)
					if err != nil {
						// Hook rejected the connection: close it before failing.
						c.Close()
						return nil, err
					}
				}

				cr := &connResource{
					conn:  c,
					conns: make([]conn, 64),
				}

				return cr, nil
			},
			Destructor: func(value *connResource) {
				value.conn.Close()
			},
			MaxSize: config.MaxConns,
		},
	)
	if err != nil {
		return nil, err
	}

	// Warm the pool up to MinConns, then keep it healthy in the background.
	go func() {
		//nolint:errcheck // todo find a way to handle this error
		p.createIdleResources(int(p.minConns))
		p.backgroundHealthCheck()
	}()

	return p, nil
}

// ParseConfig builds a Config from connString.
// It parses connString with the same behavior as chconn.ParseConfig with the
// addition of the following variables:
//
// pool_max_conns: integer greater than 0
// pool_min_conns: integer 0 or greater
// pool_max_conn_lifetime: duration string
// pool_max_conn_idle_time: duration string
// pool_health_check_period: duration string
// pool_max_conn_lifetime_jitter: duration string
// pool_create_idle_timeout: duration string
//
// See Config for definitions of these arguments.
//
// # Example DSN
// user=vahid password=secret host=clickhouse.example.com port=9000 dbname=mydb sslmode=verify-ca pool_max_conns=10
//
// # Example URL
// clickhouse://vahid:secret@ch.example.com:9000/mydb?sslmode=verify-ca&pool_max_conns=10
func ParseConfig(connString string) (*Config, error) {
	chConfig, err := chconn.ParseConfig(connString)
	if err != nil {
		return nil, err
	}

	config := &Config{
		ConnConfig:           chConfig,
		createdByParseConfig: true,
	}

	// Each pool_* runtime param is consumed (deleted) here so it is not sent
	// to the server as a ClickHouse setting.
	if s, ok := config.ConnConfig.RuntimeParams["pool_max_conns"]; ok {
		delete(config.ConnConfig.RuntimeParams, "pool_max_conns")
		n, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("cannot parse pool_max_conns: %w", err)
		}
		if n < 1 {
			//nolint:goerr113
			return nil, fmt.Errorf("pool_max_conns too small: %d", n)
		}
		config.MaxConns = int32(n)
	} else {
		// Default: the greater of defaultMaxConns and NumCPU.
		config.MaxConns = defaultMaxConns
		if numCPU := int32(runtime.NumCPU()); numCPU > config.MaxConns {
			config.MaxConns = numCPU
		}
	}

	if s, ok := config.ConnConfig.RuntimeParams["pool_min_conns"]; ok {
		delete(config.ConnConfig.RuntimeParams, "pool_min_conns")
		n, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("cannot parse pool_min_conns: %w", err)
		}
		config.MinConns = int32(n)
	} else {
		config.MinConns = defaultMinConns
	}

	if s, ok := config.ConnConfig.RuntimeParams["pool_max_conn_lifetime"]; ok {
		delete(config.ConnConfig.RuntimeParams, "pool_max_conn_lifetime")
		d, err := time.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("invalid pool_max_conn_lifetime: %w", err)
		}
		config.MaxConnLifetime = d
	} else {
		config.MaxConnLifetime = defaultMaxConnLifetime
	}

	if s, ok := config.ConnConfig.RuntimeParams["pool_max_conn_idle_time"]; ok {
		delete(config.ConnConfig.RuntimeParams, "pool_max_conn_idle_time")
		d, err := time.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("invalid pool_max_conn_idle_time: %w", err)
		}
		config.MaxConnIdleTime = d
	} else {
		config.MaxConnIdleTime = defaultMaxConnIdleTime
	}

	if s, ok := config.ConnConfig.RuntimeParams["pool_health_check_period"]; ok {
		delete(config.ConnConfig.RuntimeParams, "pool_health_check_period")
		d, err := time.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("invalid pool_health_check_period: %w", err)
		}
		config.HealthCheckPeriod = d
	} else {
		config.HealthCheckPeriod = defaultHealthCheckPeriod
	}

	// Jitter has no default: zero disables it.
	if s, ok := config.ConnConfig.RuntimeParams["pool_max_conn_lifetime_jitter"]; ok {
		delete(config.ConnConfig.RuntimeParams, "pool_max_conn_lifetime_jitter")
		d, err := time.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("invalid pool_max_conn_lifetime_jitter: %w", err)
		}
		config.MaxConnLifetimeJitter = d
	}

	if s, ok := config.ConnConfig.RuntimeParams["pool_create_idle_timeout"]; ok {
		delete(config.ConnConfig.RuntimeParams, "pool_create_idle_timeout")
		d, err := time.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("invalid pool_create_idle_timeout: %w", err)
		}
		config.CreateIdleTimeout = d
	} else {
		config.CreateIdleTimeout = defaultCreateIdleTimeout
	}

	return config, nil
}

// Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned
// to pool and closed.
func (p *pool) Close() {
	p.closeOnce.Do(func() {
		close(p.closeChan)
		p.p.Close()
	})
}

// isExpired reports whether res has outlived MaxConnLifetime (plus the
// optional random jitter).
func (p *pool) isExpired(res *puddle.Resource[*connResource]) bool {
	now := time.Now()
	// Small optimization to avoid rand. If it's over lifetime AND jitter, immediately
	// return true.
	if now.Sub(res.CreationTime()) > p.maxConnLifetime+p.maxConnLifetimeJitter {
		return true
	}
	if p.maxConnLifetimeJitter == 0 {
		return false
	}

	// Roll a fresh jitter per check so connections created together don't all
	// expire in the same health-check pass.
	//nolint:gosec // rand is not used for security purposes
	jitterSecs := rand.Float64() * p.maxConnLifetimeJitter.Seconds()

	return now.Sub(res.CreationTime()) > p.maxConnLifetime+(time.Duration(jitterSecs)*time.Second)
}

// triggerHealthCheck requests an asynchronous health-check pass.
func (p *pool) triggerHealthCheck() {
	go func() {
		// Destroy is asynchronous so we give it time to actually remove itself from
		// the pool otherwise we might try to check the pool size too soon
		time.Sleep(500 * time.Millisecond)

		// Non-blocking send: one pending trigger is enough.
		select {
		case p.healthCheckChan <- struct{}{}:
		default:
		}
	}()
}

// backgroundHealthCheck runs checkHealth on every tick or explicit trigger
// until the pool is closed.
func (p *pool) backgroundHealthCheck() {
	ticker := time.NewTicker(p.healthCheckPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-p.closeChan:
			return
		case <-p.healthCheckChan:
			p.checkHealth()
		case <-ticker.C:
			p.checkHealth()
		}
	}
}

// checkHealth tops the pool up to MinConns and destroys expired/idle
// connections, looping until nothing more needs to be destroyed.
func (p *pool) checkHealth() {
	for {
		// If checkMinConns failed we don't destroy any connections since we couldn't
		// even get to minConns
		if err := p.checkMinConns(); err != nil {
			// Should we log this error somewhere?
			break
		}

		// If pool is consistently being used, we should only destroy connections if they
		// have been idle or too old
		if !p.checkConnsHealth() {
			// Since we didn't destroy any connections we can stop looping
			break
		}

		// Technically Destroy is asynchronous but 500ms should be enough for it to
		// remove it from the underlying pool
		select {
		case <-p.closeChan:
			return
		case <-time.After(500 * time.Millisecond):
		}
	}
}

// checkConnsHealth will check all idle connections, destroy a connection if
// it's idle or too old, and returns true if any were destroyed
func (p *pool) checkConnsHealth() bool {
	var destroyed bool
	totalConns := p.Stat().TotalConns()
	resources := p.p.AcquireAllIdle()
	for _, res := range resources {
		// We're okay going under minConns if the lifetime is up
		if p.isExpired(res) && totalConns >= p.minConns {
			atomic.AddInt64(&p.lifetimeDestroyCount, 1)
			res.Destroy()
			destroyed = true
			// Since Destroy is async we manually decrement totalConns.
			totalConns--
		} else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
			atomic.AddInt64(&p.idleDestroyCount, 1)
			res.Destroy()
			destroyed = true
			// Since Destroy is async we manually decrement totalConns.
			totalConns--
		} else {
			res.ReleaseUnused()
		}
	}
	return destroyed
}

// checkMinConns creates enough idle connections to get back up to MinConns.
func (p *pool) checkMinConns() error {
	// TotalConns can include ones that are being destroyed but we should have
	// sleep(500ms) around all of the destroys to help prevent that from throwing
	// off this check
	toCreate := p.minConns - p.Stat().TotalConns()
	if toCreate > 0 {
		return p.createIdleResources(int(toCreate))
	}
	return nil
}

// createIdleResources concurrently creates targetResources connections,
// bounded by CreateIdleTimeout, and returns the first error encountered.
func (p *pool) createIdleResources(targetResources int) error {
	ctx, cancel := context.WithTimeout(context.Background(), p.config.CreateIdleTimeout)
	defer cancel()

	errs := make(chan error, targetResources)

	for i := 0; i < targetResources; i++ {
		go func() {
			atomic.AddInt64(&p.newConnsCount, 1)
			err := p.p.CreateResource(ctx)
			errs <- err
		}()
	}

	var firstError error
	for i := 0; i < targetResources; i++ {
		err := <-errs
		if err != nil && firstError == nil {
			// Cancel the remaining in-flight creations on first failure.
			cancel()
			firstError = err
		}
	}

	return firstError
}

// Acquire returns a connection (Conn) from the Pool
func (p *pool) Acquire(ctx context.Context) (Conn, error) {
	for {
		res, err := p.p.Acquire(ctx)
		if err != nil {
			return nil, fmt.Errorf("acquire: %w", err)
		}

		cr := res.Value()

		// A connection idle for over a second may have been dropped by the
		// server; verify it with a ping before handing it out.
		if res.IdleDuration() > time.Second {
			err := cr.conn.Ping(ctx)
			if err != nil {
				res.Destroy()
				continue
			}
		}

		if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
			return cr.getConn(p, res), nil
		}

		res.Destroy()
	}
}

// AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the
// call of f. The return value is either an error acquiring the *Conn or the return value of f. The *Conn is
// automatically released after the call of f.
func (p *pool) AcquireFunc(ctx context.Context, f func(Conn) error) error { conn, err := p.Acquire(ctx) if err != nil { return err } defer conn.Release() return f(conn) } // AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and // keep-alive functionality. It does not update pool statistics. func (p *pool) AcquireAllIdle(ctx context.Context) []Conn { resources := p.p.AcquireAllIdle() conns := make([]Conn, 0, len(resources)) for _, res := range resources { cr := res.Value() if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) { conns = append(conns, cr.getConn(p, res)) } else { res.Destroy() } } return conns } // Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would // disrupt all connections (such as a network interruption or a server state change). // // It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned // to the pool. func (p *pool) Reset() { p.p.Reset() } // Config returns a copy of config that was used to initialize this pool. func (p *pool) Config() *Config { return p.config.Copy() } // Stat returns a chpool.Stat struct with a snapshot of Pool statistics. 
func (p *pool) Stat() *Stat { return &Stat{ s: p.p.Stat(), newConnsCount: atomic.LoadInt64(&p.newConnsCount), lifetimeDestroyCount: atomic.LoadInt64(&p.lifetimeDestroyCount), idleDestroyCount: atomic.LoadInt64(&p.idleDestroyCount), } } func (p *pool) Exec(ctx context.Context, query string) error { return p.ExecWithOption(ctx, query, nil) } func (p *pool) ExecWithOption( ctx context.Context, query string, queryOptions *chconn.QueryOptions, ) error { for { c, err := p.Acquire(ctx) if err != nil { return err } err = c.ExecWithOption(ctx, query, queryOptions) c.Release() if errors.Is(err, syscall.EPIPE) { continue } return err } } func (p *pool) Select(ctx context.Context, query string, columns ...column.ColumnBasic) (chconn.SelectStmt, error) { return p.SelectWithOption(ctx, query, nil, columns...) } func (p *pool) SelectWithOption( ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic, ) (chconn.SelectStmt, error) { for { c, err := p.Acquire(ctx) if err != nil { return nil, err } s, err := c.SelectWithOption(ctx, query, queryOptions, columns...) if err != nil { c.Release() if errors.Is(err, syscall.EPIPE) { continue } return nil, err } return s, nil } } func (p *pool) Insert(ctx context.Context, query string, columns ...column.ColumnBasic) error { return p.InsertWithOption(ctx, query, nil, columns...) } func (p *pool) InsertWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions, columns ...column.ColumnBasic) error { for { c, err := p.Acquire(ctx) if err != nil { return err } err = c.InsertWithOption(ctx, query, queryOptions, columns...) 
c.Release() if err != nil && errors.Is(err, syscall.EPIPE) { continue } return err } } func (p *pool) InsertStream(ctx context.Context, query string) (chconn.InsertStmt, error) { return p.InsertStreamWithOption(ctx, query, nil) } func (p *pool) InsertStreamWithOption(ctx context.Context, query string, queryOptions *chconn.QueryOptions) (chconn.InsertStmt, error) { for { c, err := p.Acquire(ctx) if err != nil { return nil, err } s, err := c.InsertStreamWithOption(ctx, query, queryOptions) if err != nil { c.Release() if errors.Is(err, syscall.EPIPE) { continue } return nil, err } return s, nil } } // Ping acquires a connection from the Pool and send ping // If returns without error, the database Ping is considered successful, otherwise, the error is returned. func (p *pool) Ping(ctx context.Context) error { for { c, err := p.Acquire(ctx) if err != nil { return err } err = c.Ping(ctx) c.Release() if errors.Is(err, syscall.EPIPE) { continue } return err } } ================================================ FILE: chpool/pool_test.go ================================================ package chpool import ( "context" "errors" "fmt" "os" "runtime" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" ) func TestNew(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") pool, err := New(connString) require.NoError(t, err) assert.Equal(t, connString, pool.Config().ConnString()) pool.Close() } func TestNewWithConfig(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) pool, err := NewWithConfig(config) require.NoError(t, err) assertConfigsEqual(t, config, pool.Config(), "Pool.Config() returns original config") pool.Close() } func TestParseConfigExtractsPoolArguments(t *testing.T) { t.Parallel() config, err := 
ParseConfig(`pool_max_conns=42 pool_min_conns=1 pool_max_conn_lifetime=30s pool_max_conn_idle_time=31s pool_health_check_period=32s`) assert.NoError(t, err) assert.EqualValues(t, 42, config.MaxConns) assert.EqualValues(t, 42, config.MaxConns) assert.EqualValues(t, time.Second*30, config.MaxConnLifetime) assert.EqualValues(t, time.Second*31, config.MaxConnIdleTime) assert.EqualValues(t, time.Second*32, config.HealthCheckPeriod) assert.NotContains(t, config.ConnConfig.RuntimeParams, "pool_max_conns") assert.NotContains(t, config.ConnConfig.RuntimeParams, "pool_min_conns") assert.NotContains(t, config.ConnConfig.RuntimeParams, "pool_max_conn_lifetime") assert.NotContains(t, config.ConnConfig.RuntimeParams, "pool_max_conn_idle_time") assert.NotContains(t, config.ConnConfig.RuntimeParams, "pool_health_check_period") } func TestConnectConfigRequiresConnConfigFromParseConfig(t *testing.T) { t.Parallel() config := &Config{} require.PanicsWithValue(t, "config must be created by ParseConfig", func() { NewWithConfig(config) }) } func TestConfigCopyReturnsEqualConfig(t *testing.T) { connString := "clickhouse://vahid:secret@localhost:9000/mydb?client_name=chxtest&connect_timeout=5" original, err := ParseConfig(connString) require.NoError(t, err) copied := original.Copy() assertConfigsEqual(t, original, copied, t.Name()) } func TestConfigCopyCanBeUsedToNew(t *testing.T) { connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") original, err := ParseConfig(connString) require.NoError(t, err) copied := original.Copy() assert.NotPanics(t, func() { _, err = NewWithConfig(copied) }) assert.NoError(t, err) } func TestPoolAcquireAndConnRelease(t *testing.T) { t.Parallel() pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) defer pool.Close() c, err := pool.Acquire(context.Background()) require.NoError(t, err) c.Release() } func TestPoolAcquireAndConnHijack(t *testing.T) { t.Parallel() ctx := context.Background() pool, err := 
		New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()

	c, err := pool.Acquire(ctx)
	require.NoError(t, err)

	connsBeforeHijack := pool.Stat().TotalConns()
	conn := c.Hijack()
	defer conn.Close()

	// Hijack removes the connection from pool accounting.
	connsAfterHijack := pool.Stat().TotalConns()
	require.Equal(t, connsBeforeHijack-1, connsAfterHijack)

	col := column.New[uint64]()
	stmt, err := conn.Select(context.Background(), "SELECT * FROM system.numbers LIMIT 5;", col)
	require.NoError(t, err)
	for stmt.Next() {
	}
	require.NoError(t, stmt.Err())
}

// TestPoolAcquireFunc exercises AcquireFunc with a successful callback.
func TestPoolAcquireFunc(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()
	err = pool.AcquireFunc(context.Background(), func(c Conn) error {
		return c.Ping(context.Background())
	})
	require.NoError(t, err)
}

// TestPoolAcquireFuncReturnsFnError verifies the callback's error is returned.
func TestPoolAcquireFuncReturnsFnError(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()
	err = pool.AcquireFunc(context.Background(), func(c Conn) error {
		return fmt.Errorf("some error")
	})
	require.EqualError(t, err, "some error")
}

// TestPoolBeforeConnect checks the BeforeConnect hook can mutate the config.
func TestPoolBeforeConnect(t *testing.T) {
	t.Parallel()
	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	config.BeforeConnect = func(_ context.Context, cfg *chconn.Config) error {
		cfg.ClientName = "chx2"
		return nil
	}
	db, err := NewWithConfig(config)
	require.NoError(t, err)
	db.Close()
	// todo find a way to check it
}

// TestPoolAfterConnect checks the AfterConnect hook runs for new connections.
func TestPoolAfterConnect(t *testing.T) {
	t.Parallel()
	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	var trigger bool
	config.AfterConnect = func(_ context.Context, _ chconn.Conn) error {
		trigger = true
		return nil
	}
	db, err := NewWithConfig(config)
	require.NoError(t, err)
	defer db.Close()
	err = db.Ping(context.Background())
	require.NoError(t, err)
	assert.True(t, trigger)
}

// TestPoolBeforeAcquire checks a false BeforeAcquire causes the connection to
// be destroyed and another attempt made.
func TestPoolBeforeAcquire(t *testing.T) {
	t.Parallel()
	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)

	acquireAttempts := 0
	config.BeforeAcquire = func(ctx context.Context, c chconn.Conn) bool {
		acquireAttempts++
		// Reject every other attempt.
		return acquireAttempts%2 == 0
	}

	db, err := NewWithConfig(config)
	require.NoError(t, err)
	defer db.Close()

	conns := make([]Conn, 4)
	for i := range conns {
		conns[i], err = db.Acquire(context.Background())
		assert.NoError(t, err)
	}
	for _, c := range conns {
		c.Release()
	}
	waitForReleaseToComplete()
	assert.EqualValues(t, 8, acquireAttempts)

	conns = db.AcquireAllIdle(context.Background())
	assert.Len(t, conns, 2)
	for _, c := range conns {
		c.Release()
	}
	waitForReleaseToComplete()
	assert.EqualValues(t, 12, acquireAttempts)
}

// TestPoolAfterRelease checks a false AfterRelease destroys the connection.
func TestPoolAfterRelease(t *testing.T) {
	t.Parallel()
	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)

	afterReleaseCount := 0
	config.AfterRelease = func(c chconn.Conn) bool {
		afterReleaseCount++
		// Keep every other connection.
		return afterReleaseCount%2 == 1
	}

	db, err := NewWithConfig(config)
	require.NoError(t, err)
	defer db.Close()

	// Distinct local addresses identify distinct physical connections.
	conns := map[string]struct{}{}
	for i := 0; i < 10; i++ {
		conn, err := db.Acquire(context.Background())
		assert.NoError(t, err)
		conns[conn.Conn().RawConn().LocalAddr().String()] = struct{}{}
		conn.Release()
		waitForReleaseToComplete()
	}
	assert.EqualValues(t, 5, len(conns))
}

// TestPoolAcquireAllIdle verifies every released connection is acquirable as idle.
func TestPoolAcquireAllIdle(t *testing.T) {
	t.Parallel()
	db, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer db.Close()

	conns := make([]Conn, 3)
	for i := range conns {
		conns[i], err = db.Acquire(context.Background())
		assert.NoError(t, err)
	}
	for _, c := range conns {
		if c != nil {
			c.Release()
		}
	}
	waitForReleaseToComplete()

	conns = db.AcquireAllIdle(context.Background())
	assert.Len(t, conns, 3)
	for _, c := range conns {
		c.Release()
	}
}

// TestPoolReset verifies Reset drops all connections, including checked-out
// ones once they are returned.
func TestPoolReset(t *testing.T) {
	t.Parallel()
	db, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer db.Close()

	conns := make([]Conn, 3)
	for i := range conns {
		conns[i], err = db.Acquire(context.Background())
		assert.NoError(t, err)
	}
	db.Reset()
	for _, c := range conns {
		if c != nil {
			c.Release()
		}
	}
	waitForReleaseToComplete()
	require.EqualValues(t, 0, db.Stat().TotalConns())
}

// TestConnReleaseChecksMaxConnLifetime verifies Release destroys connections
// past their MaxConnLifetime.
func TestConnReleaseChecksMaxConnLifetime(t *testing.T) {
	t.Parallel()
	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	config.MaxConnLifetime = 250 * time.Millisecond

	db, err := NewWithConfig(config)
	require.NoError(t, err)
	defer db.Close()

	c, err := db.Acquire(context.Background())
	require.NoError(t, err)
	time.Sleep(config.MaxConnLifetime)
	c.Release()
	waitForReleaseToComplete()

	stats := db.Stat()
	assert.EqualValues(t, 0, stats.TotalConns())
}

// TestConnReleaseClosesBusyConn verifies releasing a connection with an
// unfinished statement destroys it rather than pooling it.
func TestConnReleaseClosesBusyConn(t *testing.T) {
	t.Parallel()
	db, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer db.Close()

	c, err := db.Acquire(context.Background())
	require.NoError(t, err)
	col := column.New[uint64]()
	_, err = c.Conn().Select(context.Background(), "SELECT * FROM system.numbers LIMIT 10;", col)
	require.NoError(t, err)
	c.Release()
	waitForReleaseToComplete()

	// wait for the connection to actually be destroyed
	for i := 0; i < 1000; i++ {
		if db.Stat().TotalConns() == 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}

	stats := db.Stat()
	assert.EqualValues(t, 0, stats.TotalConns())
}

// TestPoolBackgroundChecksMaxConnLifetime verifies the health check destroys
// connections past their lifetime.
func TestPoolBackgroundChecksMaxConnLifetime(t *testing.T) {
	t.Parallel()
	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	config.MaxConnLifetime = 100 * time.Millisecond
	config.HealthCheckPeriod = 100 * time.Millisecond

	db, err := NewWithConfig(config)
	require.NoError(t, err)
	defer db.Close()

	c, err := db.Acquire(context.Background())
	require.NoError(t, err)
	c.Release()
	time.Sleep(config.MaxConnLifetime + 100*time.Millisecond)

	stats := db.Stat()
	assert.EqualValues(t, 0, stats.TotalConns())
	assert.EqualValues(t, 0, stats.MaxIdleDestroyCount())
	assert.EqualValues(t, 1, stats.MaxLifetimeDestroyCount())
}

// TestPoolBackgroundChecksMaxConnIdleTime verifies the health check destroys
// connections idle longer than MaxConnIdleTime.
func TestPoolBackgroundChecksMaxConnIdleTime(t *testing.T) {
	t.Parallel()
	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	config.MaxConnLifetime = 1 * time.Minute
	config.MaxConnIdleTime = 100 * time.Millisecond
	config.HealthCheckPeriod = 150 * time.Millisecond

	db, err := NewWithConfig(config)
	require.NoError(t, err)
	defer db.Close()

	c, err := db.Acquire(context.Background())
	require.NoError(t, err)
	c.Release()
	time.Sleep(config.HealthCheckPeriod)

	for i := 0; i < 1000; i++ {
		if db.Stat().TotalConns() == 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}

	stats := db.Stat()
	assert.EqualValues(t, 0, stats.TotalConns())
	assert.EqualValues(t, 1, stats.MaxIdleDestroyCount())
	assert.EqualValues(t, 0, stats.MaxLifetimeDestroyCount())
}

// TestPoolBackgroundChecksMinConns verifies the health check keeps the pool
// topped up to MinConns, including after a connection dies.
func TestPoolBackgroundChecksMinConns(t *testing.T) {
	t.Parallel()
	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	config.HealthCheckPeriod = 100 * time.Millisecond
	config.MinConns = 2

	db, err := NewWithConfig(config)
	require.NoError(t, err)
	defer db.Close()

	time.Sleep(config.HealthCheckPeriod + 500*time.Millisecond)

	stats := db.Stat()
	assert.EqualValues(t, 2, stats.TotalConns())
	assert.EqualValues(t, 0, stats.MaxLifetimeDestroyCount())
	assert.EqualValues(t, 2, stats.NewConnsCount())

	// Kill one connection; the health check should replace it.
	c, err := db.Acquire(context.Background())
	require.NoError(t, err)
	err = c.Conn().Close()
	require.NoError(t, err)
	c.Release()
	time.Sleep(config.HealthCheckPeriod + 500*time.Millisecond)

	stats = db.Stat()
	assert.EqualValues(t, 2, stats.TotalConns())
	assert.EqualValues(t, 0, stats.MaxIdleDestroyCount())
	assert.EqualValues(t, 3, stats.NewConnsCount())
}

// TestPoolExec exercises Exec through the shared helper.
func TestPoolExec(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()
	testExec(t, pool)
}

// TestPoolExecError checks Exec on a closed pool fails with a clear error.
func TestPoolExecError(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	testExec(t, pool)
	pool.Close()
	err = pool.Exec(context.Background(), "SET enable_http_compression=1")
	if assert.Error(t, err) {
		assert.Equal(t, "acquire: closed pool", err.Error())
	}
}

// TestPoolSelect checks select behavior plus pool statistics bookkeeping.
func TestPoolSelect(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()

	// Test common usage
	testSelect(t, pool)
	waitForReleaseToComplete()

	// Test expected pool behavior
	col := column.New[uint64]()
	stmt, err := pool.Select(context.Background(), "SELECT * FROM system.numbers LIMIT 5;", col)
	require.NoError(t, err)

	stats := pool.Stat()
	assert.EqualValues(t, 1, stats.AcquiredConns())
	assert.EqualValues(t, 1, stats.TotalConns())
	for stmt.Next() {
	}
	require.NoError(t, stmt.Err())
	waitForReleaseToComplete()

	stats = pool.Stat()
	assert.EqualValues(t, 0, stats.AcquiredConns())
	assert.EqualValues(t, 1, stats.TotalConns())

	// more coverage
	assert.EqualValues(t, 2, stats.AcquireCount())
	assert.GreaterOrEqual(t, int64(time.Second), int64(stats.AcquireDuration()))
	assert.EqualValues(t, 0, stats.AcquiredConns())
	assert.EqualValues(t, 0, stats.CanceledAcquireCount())
	assert.EqualValues(t, 0, stats.ConstructingConns())
	assert.EqualValues(t, 1, stats.EmptyAcquireCount())
	assert.EqualValues(t, 1, stats.IdleConns())
	maxConns := defaultMaxConns
	if numCPU := int32(runtime.NumCPU()); numCPU > maxConns {
		maxConns = numCPU
	}
	assert.EqualValues(t, maxConns, stats.MaxConns())
}

// TestPoolSelectError checks error propagation for bad queries and closed pools.
func TestPoolSelectError(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()

	// Test common usage
	testSelect(t, pool)
	waitForReleaseToComplete()

	// Test expected pool behavior
	stmt, err := pool.Select(context.Background(), "SELECT * FROM not_fount_table LIMIT 10;")
	assert.Error(t, err)
	assert.Nil(t, stmt)

	pool.Close()
	stmt, err = pool.Select(context.Background(), "SELECT * FROM not_fount_table LIMIT 10;")
	if assert.Error(t, err) {
		assert.Equal(t, "acquire: closed pool", err.Error())
	}
	require.Nil(t, stmt)
}

// TestPoolAcquireSelectError checks a select on a broken raw connection fails.
func TestPoolAcquireSelectError(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()

	// Test common usage
	testSelect(t, pool)
	waitForReleaseToComplete()

	// Test expected pool behavior
	conn, err := pool.Acquire(context.Background())
	require.NoError(t, err)
	conn.Conn().RawConn().Close()
	_, err = conn.Conn().Select(context.Background(), "SELECT * FROM system.numbers LIMIT 5;")
	conn.Release()
	require.Error(t, err)
}

// TestPoolInsert checks the streaming insert flow and connection accounting.
func TestPoolInsert(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()
	require.NoError(t, pool.Ping(context.Background()))

	err = pool.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_pool`)
	require.NoError(t, err)
	err = pool.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_pool (
				int8  Int8
			) Engine=Memory`)
	require.NoError(t, err)

	col := column.New[int8]()
	for i := 1; i <= 10; i++ {
		col.Append(int8(-1 * i))
	}
	stmt, err := pool.InsertStream(context.Background(), `INSERT INTO clickhouse_test_insert_pool (
				int8
			) VALUES`)
	require.NoError(t, err)
	stats := pool.Stat()
	assert.EqualValues(t, 1, stats.AcquiredConns())
	assert.EqualValues(t, 1, stats.TotalConns())
	require.NoError(t, stmt.Write(context.Background(), col))
	require.NoError(t, stmt.Write(context.Background(), col))
	require.NoError(t, stmt.Flush(context.Background()))
	waitForReleaseToComplete()
	stats = pool.Stat()
	assert.EqualValues(t, 0, stats.AcquiredConns())
	assert.EqualValues(t, 1, stats.TotalConns())
}

// TestPoolInsertError checks insert errors for a missing table and a closed pool.
func TestPoolInsertError(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)

	err = pool.Insert(context.Background(), `INSERT INTO not_found_table (
			int8
		) VALUES`)
	if assert.Error(t, err) {
		assert.Equal(t, " DB::Exception (60): Table default.not_found_table doesn't exist", err.Error())
	}
	pool.Close()
	err = pool.Insert(context.Background(), `INSERT INTO not_found_table (
			int8
		) VALUES`)
	if assert.Error(t, err) {
		assert.Equal(t, "acquire: closed pool", err.Error())
	}
}

// TestPoolInsertStream inserts via Insert and reads the data back via Select.
func TestPoolInsertStream(t *testing.T) {
	t.Parallel()
	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()
	require.NoError(t, pool.Ping(context.Background()))

	err = pool.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_pool_stream`)
	require.NoError(t, err)
	err = pool.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_pool_stream (
				int8  Int8
			) Engine=Memory`)
	require.NoError(t, err)

	col := column.New[int8]()
	for i := 1; i <= 10; i++ {
		col.Append(int8(-1 * i))
	}
	err = pool.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_pool_stream (
				int8
			) VALUES`, col)
	require.NoError(t, err)

	colInt8 := column.New[int8]()
	selectStmt, err := pool.Select(context.Background(), `SELECT int8 FROM clickhouse_test_insert_pool_stream`, colInt8)
	require.NoError(t, err)
	stats := pool.Stat()
	assert.EqualValues(t, 1, stats.AcquiredConns())
	assert.EqualValues(t, 1, stats.TotalConns())
	for selectStmt.Next() {
	}
	require.NoError(t, selectStmt.Err())
	selectStmt.Close()
	waitForReleaseToComplete()
	stats = pool.Stat()
	assert.EqualValues(t, 0, stats.AcquiredConns())
	assert.EqualValues(t, 1, stats.TotalConns())
}

// TestConnReleaseClosesConnInFailedTransaction verifies a connection that hit
// a query error is not reused after release.
func TestConnReleaseClosesConnInFailedTransaction(t *testing.T) {
	t.Parallel()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	defer pool.Close()

	c, err := pool.Acquire(ctx)
	require.NoError(t, err)

	pid := c.Conn().RawConn().LocalAddr().String()
	stmt, err := c.Conn().Select(ctx, "SELECT * FROM system.numbers2 LIMIT 5;")
	assert.Error(t, err)
	assert.Nil(t, stmt)
	c.Release()
	waitForReleaseToComplete()

	c, err = pool.Acquire(ctx)
	require.NoError(t, err)
	// A different local address proves the failed connection was destroyed.
	assert.NotEqual(t, pid, c.Conn().RawConn().LocalAddr().String())
	c.Release()
}

func TestConnReleaseDestroysClosedConn(t
*testing.T) { t.Parallel() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) defer pool.Close() c, err := pool.Acquire(ctx) require.NoError(t, err) c.Conn().Close() err = c.Conn().Close() require.NoError(t, err) assert.EqualValues(t, 1, pool.Stat().TotalConns()) c.Release() waitForReleaseToComplete() // wait for the connection to actually be destroyed for i := 0; i < 1000; i++ { if pool.Stat().TotalConns() == 0 { break } time.Sleep(time.Millisecond) } assert.EqualValues(t, 0, pool.Stat().TotalConns()) } func TestConnPoolQueryConcurrentLoad(t *testing.T) { t.Parallel() pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) defer pool.Close() n := 100 done := make(chan bool) for i := 0; i < n; i++ { go func() { defer func() { done <- true }() testSelect(t, pool) }() } for i := 0; i < n; i++ { <-done } } func TestParseConfigError(t *testing.T) { t.Parallel() parseConfigErrorTests := []struct { name string connString string err string }{ { name: "invalid host", connString: "host>0", err: "cannot parse `host>0`: failed to parse as DSN (invalid dsn)", }, { name: "invalid pool_max_conns", connString: "pool_max_conns=invalid", err: "cannot parse pool_max_conns: strconv.ParseInt: parsing \"invalid\": invalid syntax", }, { name: "low pool_max_conns", connString: "pool_max_conns=0", err: "pool_max_conns too small: 0", }, { name: "invalid pool_min_conns", connString: "pool_min_conns=invalid", err: "cannot parse pool_min_conns: strconv.ParseInt: parsing \"invalid\": invalid syntax", }, { name: "invalid pool_max_conn_lifetime", connString: "pool_max_conn_lifetime=invalid", err: "invalid pool_max_conn_lifetime: time: invalid duration \"invalid\"", }, { name: "invalid pool_max_conn_idle_time", connString: "pool_max_conn_idle_time=invalid", err: "invalid pool_max_conn_idle_time: time: invalid duration \"invalid\"", }, { name: "invalid 
pool_health_check_period", connString: "pool_health_check_period=invalid", err: "invalid pool_health_check_period: time: invalid duration \"invalid\"", }, { name: "invalid pool_max_conn_lifetime_jitter", connString: "pool_max_conn_lifetime_jitter=invalid", err: "invalid pool_max_conn_lifetime_jitter: time: invalid duration \"invalid\"", }, { name: "invalid pool_create_idle_timeout", connString: "pool_create_idle_timeout=invalid", err: "invalid pool_create_idle_timeout: time: invalid duration \"invalid\"", }, } for i, tt := range parseConfigErrorTests { _, err := ParseConfig(tt.connString) if !assert.Errorf(t, err, "Test %d (%s)", i, tt.name) { continue } if !assert.Equalf(t, err.Error(), tt.err, "Test %d (%s)", i, tt.name) { continue } } } func TestNewParseError(t *testing.T) { t.Parallel() pool, err := New("host>0") assert.Nil(t, pool) assert.Equal(t, "cannot parse `host>0`: failed to parse as DSN (invalid dsn)", err.Error()) } func TestNewError(t *testing.T) { t.Parallel() pool, err := New("host=invalidhost") assert.NotNil(t, pool) assert.NoError(t, err) err = pool.Ping(context.Background()) assert.Error(t, err) config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.AfterConnect = func(ctx context.Context, c chconn.Conn) error { return errors.New("afterConnect err") } pool, err = NewWithConfig(config) require.NoError(t, err) err = pool.Ping(context.Background()) assert.Error(t, err) assert.EqualError(t, err, "acquire: afterConnect err") } func TestIdempotentPoolClose(t *testing.T) { pool, err := New(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) // Close the open pool. require.NotPanics(t, func() { pool.Close() }) // Close the already closed pool. 
require.NotPanics(t, func() { pool.Close() }) } func TestConnectEagerlyReachesMinPoolSize(t *testing.T) { t.Parallel() config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.MinConns = int32(12) config.MaxConns = int32(15) acquireAttempts := int64(0) connectAttempts := int64(0) config.BeforeAcquire = func(ctx context.Context, conn chconn.Conn) bool { atomic.AddInt64(&acquireAttempts, 1) return true } config.BeforeConnect = func(ctx context.Context, cfg *chconn.Config) error { atomic.AddInt64(&connectAttempts, 1) return nil } pool, err := NewWithConfig(config) require.NoError(t, err) defer pool.Close() for i := 0; i < 500; i++ { time.Sleep(10 * time.Millisecond) stat := pool.Stat() if stat.IdleConns() == 12 && stat.AcquireCount() == 0 && stat.TotalConns() == 12 && atomic.LoadInt64(&acquireAttempts) == 0 && atomic.LoadInt64(&connectAttempts) == 12 { return } } t.Fatal("did not reach min pool size") } ================================================ FILE: chpool/select_stmt.go ================================================ package chpool import ( "github.com/vahid-sohrabloo/chconn/v2" ) type selectStmt struct { chconn.SelectStmt conn Conn } func (s *selectStmt) Next() bool { if s.conn == nil { return false } next := s.SelectStmt.Next() if s.SelectStmt.Err() != nil && s.conn != nil { s.conn.Release() s.conn = nil } if !next && s.conn != nil { s.conn.Release() s.conn = nil } return next } func (s *selectStmt) Close() { if s.conn == nil { return } s.SelectStmt.Close() s.conn.Release() } ================================================ FILE: chpool/stat.go ================================================ package chpool import ( "time" "github.com/jackc/puddle/v2" ) // Stat is a snapshot of Pool statistics. type Stat struct { s *puddle.Stat newConnsCount int64 lifetimeDestroyCount int64 idleDestroyCount int64 } // AcquireCount returns the cumulative count of successful acquires from the pool. 
func (s *Stat) AcquireCount() int64 { return s.s.AcquireCount() } // AcquireDuration returns the total duration of all successful acquires from // the pool. func (s *Stat) AcquireDuration() time.Duration { return s.s.AcquireDuration() } // AcquiredConns returns the number of currently acquired connections in the pool. func (s *Stat) AcquiredConns() int32 { return s.s.AcquiredResources() } // CanceledAcquireCount returns the cumulative count of acquires from the pool // that were canceled by a context. func (s *Stat) CanceledAcquireCount() int64 { return s.s.CanceledAcquireCount() } // ConstructingConns returns the number of conns with construction in progress in // the pool. func (s *Stat) ConstructingConns() int32 { return s.s.ConstructingResources() } // EmptyAcquireCount returns the cumulative count of successful acquires from the pool // that waited for a resource to be released or constructed because the pool was // empty. func (s *Stat) EmptyAcquireCount() int64 { return s.s.EmptyAcquireCount() } // IdleConns returns the number of currently idle conns in the pool. func (s *Stat) IdleConns() int32 { return s.s.IdleResources() } // MaxConns returns the maximum size of the pool. func (s *Stat) MaxConns() int32 { return s.s.MaxResources() } // TotalConns returns the total number of resources currently in the pool. // The value is the sum of ConstructingConns, AcquiredConns, and // IdleConns. func (s *Stat) TotalConns() int32 { return s.s.TotalResources() } // NewConnsCount returns the cumulative count of new connections opened. func (s *Stat) NewConnsCount() int64 { return s.newConnsCount } // MaxLifetimeDestroyCount returns the cumulative count of connections destroyed // because they exceeded MaxConnLifetime. func (s *Stat) MaxLifetimeDestroyCount() int64 { return s.lifetimeDestroyCount } // MaxIdleDestroyCount returns the cumulative count of connections destroyed because // they exceeded MaxConnIdleTime. 
func (s *Stat) MaxIdleDestroyCount() int64 { return s.idleDestroyCount } ================================================ FILE: client_info.go ================================================ package chconn import ( "os/user" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" ) // ClientInfo Information about client for query. // Some fields are passed explicitly from client and some are calculated automatically. // Contains info about initial query source, for tracing distributed queries // where one query initiates many other queries. type ClientInfo struct { InitialUser string InitialQueryID string OSUser string ClientHostname string ClientName string ClientVersionMajor uint64 ClientVersionMinor uint64 ClientVersionPatch uint64 ClientRevision uint64 DistributedDepth uint64 QuotaKey string } // Write Only values that are not calculated automatically or passed separately are serialized. // Revisions are passed to use format that server will understand or client was used. func (c *ClientInfo) write(ch *conn) { // InitialQuery ch.writer.Uint8(1) ch.writer.String(c.InitialUser) ch.writer.String(c.InitialQueryID) ch.writer.String("[::ffff:127.0.0.1]:0") if ch.serverInfo.Revision >= helper.DbmsMinProtocolVersionWithInitialQueryStartTime { ch.writer.Uint64(0) } // iface type ch.writer.Uint8(1) // tcp ch.writer.String(c.OSUser) ch.writer.String(c.ClientHostname) ch.writer.String(c.ClientName) ch.writer.Uvarint(c.ClientVersionMajor) ch.writer.Uvarint(c.ClientVersionMinor) ch.writer.Uvarint(c.ClientRevision) if ch.serverInfo.Revision >= helper.DbmsMinRevisionWithQuotaKeyInClientInfo { ch.writer.String(c.QuotaKey) } if ch.serverInfo.Revision >= helper.DbmsMinProtocolVersionWithDistributedDepth { ch.writer.Uvarint(c.DistributedDepth) } if ch.serverInfo.Revision >= helper.DbmsMinRevisionWithVersionPatch { ch.writer.Uvarint(c.ClientVersionPatch) } if ch.serverInfo.Revision >= helper.DbmsMinRevisionWithOpenTelemetry { ch.writer.Uint8(0) } if ch.serverInfo.Revision >= 
helper.DbmsMinProtocolVersionWithParallelReplicas { ch.writer.Uvarint(0) // collaborate_with_initiator ch.writer.Uvarint(0) // count_participating_replicas ch.writer.Uvarint(0) // number_of_current_replica } } func (c *ClientInfo) fillOSUserHostNameAndVersionInfo() { u, err := user.Current() if err == nil { c.OSUser = u.Username } c.ClientVersionMajor = dbmsVersionMajor c.ClientVersionMinor = dbmsVersionMinor c.ClientVersionPatch = dbmsVersionPatch c.ClientRevision = dbmsVersionRevision } ================================================ FILE: column/array.go ================================================ package column // Array is a column of Array(T) ClickHouse data type type Array[T any] struct { ArrayBase columnData []T } // NewArray create a new array column of Array(T) ClickHouse data type func NewArray[T any](dataColumn Column[T]) *Array[T] { a := &Array[T]{ ArrayBase: ArrayBase{ dataColumn: dataColumn, offsetColumn: New[uint64](), }, } a.resetHook = func() { a.columnData = a.columnData[:0] } return a } // Data get all the data in current block as a slice. func (c *Array[T]) Data() [][]T { values := make([][]T, c.offsetColumn.numRow) offsets := c.Offsets() var lastOffset uint64 columnData := c.getColumnData() for i, offset := range offsets { val := make([]T, offset-lastOffset) copy(val, columnData[lastOffset:offset]) values[i] = val lastOffset = offset } return values } // Read reads all the data in current block and append to the input. func (c *Array[T]) Read(value [][]T) [][]T { offsets := c.Offsets() var lastOffset uint64 columnData := c.getColumnData() for _, offset := range offsets { val := make([]T, offset-lastOffset) copy(val, columnData[lastOffset:offset]) value = append(value, val) lastOffset = offset } return value } // Row return the value of given row. 
// NOTE: Row number start from zero func (c *Array[T]) Row(row int) []T { var lastOffset uint64 if row != 0 { lastOffset = c.offsetColumn.Row(row - 1) } var val []T val = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...) return val } // Append value for insert func (c *Array[T]) Append(v ...[]T) { for _, v := range v { c.AppendLen(len(v)) c.dataColumn.(Column[T]).Append(v...) } } // Append single item value for insert // // it should use with AppendLen // // Example: // // c.AppendLen(2) // insert 2 items // c.AppendItem(1, 2) func (c *Array[T]) AppendItem(v ...T) { c.dataColumn.(Column[T]).Append(v...) } // Array return a Array type for this column func (c *Array[T]) Array() *Array2[T] { return NewArray2(c) } func (c *Array[T]) getColumnData() []T { if len(c.columnData) == 0 { c.columnData = c.dataColumn.(Column[T]).Data() } return c.columnData } func (c *Array[T]) elem(arrayLevel int) ColumnBasic { if arrayLevel > 0 { return c.Array().elem(arrayLevel - 1) } return c } ================================================ FILE: column/array2.go ================================================ package column // Array2 is a column of Array(Array(T)) ClickHouse data type type Array2[T any] struct { ArrayBase } // NewArray create a new array column of Array(Array(T)) ClickHouse data type func NewArray2[T any](array *Array[T]) *Array2[T] { a := &Array2[T]{ ArrayBase: ArrayBase{ dataColumn: array, offsetColumn: New[uint64](), }, } return a } // Data get all the data in current block as a slice. func (c *Array2[T]) Data() [][][]T { values := make([][][]T, c.offsetColumn.numRow) for i := range values { values[i] = c.Row(i) } return values } // Read reads all the data in current block and append to the input. func (c *Array2[T]) Read(value [][][]T) [][][]T { if cap(value)-len(value) >= c.NumRow() { value = (value)[:len(value)+c.NumRow()] } else { value = append(value, make([][][]T, c.NumRow())...) 
} val := (value)[len(value)-c.NumRow():] for i := 0; i < c.NumRow(); i++ { val[i] = c.Row(i) } return value } // Row return the value of given row. // NOTE: Row number start from zero func (c *Array2[T]) Row(row int) [][]T { var lastOffset uint64 if row != 0 { lastOffset = c.offsetColumn.Row(row - 1) } var val [][]T lastRow := c.offsetColumn.Row(row) for ; lastOffset < lastRow; lastOffset++ { val = append(val, c.dataColumn.(*Array[T]).Row(int(lastOffset))) } return val } // Append value for insert func (c *Array2[T]) Append(v ...[][]T) { for _, v := range v { c.AppendLen(len(v)) c.dataColumn.(*Array[T]).Append(v...) } } func (c *Array2[T]) elem(arrayLevel int) ColumnBasic { if arrayLevel > 0 { return c.Array().elem(arrayLevel - 1) } return c } ================================================ FILE: column/array2_nullable.go ================================================ package column import "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" // Array is a column of Array(Array(Nullable(T))) ClickHouse data type type Array2Nullable[T comparable] struct { Array2[T] dataColumn *ArrayNullable[T] columnData [][]*T } // NewArrayNullable create a new array column of Array(Nullable(T)) ClickHouse data type func NewArray2Nullable[T comparable](dataColumn *ArrayNullable[T]) *Array2Nullable[T] { a := &Array2Nullable[T]{ dataColumn: dataColumn, Array2: Array2[T]{ ArrayBase: ArrayBase{ dataColumn: dataColumn, offsetColumn: New[uint64](), }, }, } a.resetHook = func() { a.columnData = a.columnData[:0] } return a } // Data get all the nullable data in current block as a slice of pointer. 
func (c *Array2Nullable[T]) DataP() [][][]*T { values := make([][][]*T, c.offsetColumn.numRow) var lastOffset uint64 columnData := c.getColumnData() for i := 0; i < c.offsetColumn.numRow; i++ { values[i] = columnData[lastOffset:c.offsetColumn.Row(i)] lastOffset = c.offsetColumn.Row(i) } return values } // Read reads all the nullable data in current block as a slice pointer and append to the input. func (c *Array2Nullable[T]) ReadP(value [][][]*T) [][][]*T { var lastOffset uint64 columnData := c.getColumnData() for i := 0; i < c.offsetColumn.numRow; i++ { value = append(value, columnData[lastOffset:c.offsetColumn.Row(i)]) lastOffset = c.offsetColumn.Row(i) } return value } // RowP return the nullable value of given row as a pointer // NOTE: Row number start from zero func (c *Array2Nullable[T]) RowP(row int) [][]*T { var lastOffset uint64 if row != 0 { lastOffset = c.offsetColumn.Row(row - 1) } var val [][]*T val = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...) return val } // AppendP a nullable value for insert func (c *Array2Nullable[T]) AppendP(v ...[][]*T) { for _, v := range v { c.AppendLen(len(v)) c.dataColumn.AppendP(v...) } } // ReadRaw read raw data from the reader. 
it runs automatically func (c *Array2Nullable[T]) ReadRaw(num int, r *readerwriter.Reader) error { err := c.Array2.ReadRaw(num, r) if err != nil { return err } c.columnData = c.dataColumn.DataP() return nil } // Array return a Array type for this column func (c *Array2Nullable[T]) Array() *Array3Nullable[T] { return NewArray3Nullable(c) } func (c *Array2Nullable[T]) getColumnData() [][]*T { if len(c.columnData) == 0 { c.columnData = c.dataColumn.DataP() } return c.columnData } func (c *Array2Nullable[T]) elem(arrayLevel int) ColumnBasic { if arrayLevel > 0 { return c.Array().elem(arrayLevel - 1) } return c } ================================================ FILE: column/array3.go ================================================ package column // Array3 is a column of Array(Array(Array(T))) ClickHouse data type type Array3[T any] struct { ArrayBase } // NewArray create a new array column of Array(Array(Array(T))) ClickHouse data type func NewArray3[T any](array *Array2[T]) *Array3[T] { a := &Array3[T]{ ArrayBase: ArrayBase{ dataColumn: array, offsetColumn: New[uint64](), }, } return a } // Data get all the data in current block as a slice. func (c *Array3[T]) Data() [][][][]T { values := make([][][][]T, c.offsetColumn.numRow) for i := range values { values[i] = c.Row(i) } return values } // Read reads all the data in current block and append to the input. func (c *Array3[T]) Read(value [][][][]T) [][][][]T { if cap(value)-len(value) >= c.NumRow() { value = (value)[:len(value)+c.NumRow()] } else { value = append(value, make([][][][]T, c.NumRow())...) } val := (value)[len(value)-c.NumRow():] for i := 0; i < c.NumRow(); i++ { val[i] = c.Row(i) } return value } // Row return the value of given row. 
// NOTE: Row number start from zero func (c *Array3[T]) Row(row int) [][][]T { var lastOffset uint64 if row != 0 { lastOffset = c.offsetColumn.Row(row - 1) } var val [][][]T lastRow := c.offsetColumn.Row(row) for ; lastOffset < lastRow; lastOffset++ { val = append(val, c.dataColumn.(*Array2[T]).Row(int(lastOffset))) } return val } // Append value for insert func (c *Array3[T]) Append(v ...[][][]T) { for _, v := range v { c.AppendLen(len(v)) c.dataColumn.(*Array2[T]).Append(v...) } } // Array return a Array type for this column func (c *Array2[T]) Array() *Array3[T] { return NewArray3(c) } func (c *Array3[T]) elem(arrayLevel int) ColumnBasic { if arrayLevel > 0 { panic("array level is too deep") } return c } ================================================ FILE: column/array3_nullable.go ================================================ package column import "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" // Array is a column of Array(Array(Nullable(T))) ClickHouse data type type Array3Nullable[T comparable] struct { Array3[T] dataColumn *Array2Nullable[T] columnData [][][]*T } // NewArrayNullable create a new array column of Array(Nullable(T)) ClickHouse data type func NewArray3Nullable[T comparable](dataColumn *Array2Nullable[T]) *Array3Nullable[T] { a := &Array3Nullable[T]{ dataColumn: dataColumn, Array3: Array3[T]{ ArrayBase: ArrayBase{ dataColumn: dataColumn, offsetColumn: New[uint64](), }, }, } a.resetHook = func() { a.columnData = a.columnData[:0] } return a } // Data get all the nullable data in current block as a slice of pointer. func (c *Array3Nullable[T]) DataP() [][][][]*T { values := make([][][][]*T, c.offsetColumn.numRow) var lastOffset uint64 columnData := c.getColumnData() for i := 0; i < c.offsetColumn.numRow; i++ { values[i] = columnData[lastOffset:c.offsetColumn.Row(i)] lastOffset = c.offsetColumn.Row(i) } return values } // Read reads all the nullable data in current block as a slice pointer and append to the input. 
func (c *Array3Nullable[T]) ReadP(value [][][][]*T) [][][][]*T { var lastOffset uint64 columnData := c.getColumnData() for i := 0; i < c.offsetColumn.numRow; i++ { value = append(value, columnData[lastOffset:c.offsetColumn.Row(i)]) lastOffset = c.offsetColumn.Row(i) } return value } // RowP return the nullable value of given row as a pointer // NOTE: Row number start from zero func (c *Array3Nullable[T]) RowP(row int) [][][]*T { var lastOffset uint64 if row != 0 { lastOffset = c.offsetColumn.Row(row - 1) } var val [][][]*T val = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...) return val } // AppendP a nullable value for insert func (c *Array3Nullable[T]) AppendP(v ...[][][]*T) { for _, v := range v { c.AppendLen(len(v)) c.dataColumn.AppendP(v...) } } // ReadRaw read raw data from the reader. it runs automatically func (c *Array3Nullable[T]) ReadRaw(num int, r *readerwriter.Reader) error { err := c.Array3.ReadRaw(num, r) if err != nil { return err } c.columnData = c.dataColumn.DataP() return nil } func (c *Array3Nullable[T]) getColumnData() [][][]*T { if len(c.columnData) == 0 { c.columnData = c.dataColumn.DataP() } return c.columnData } func (c *Array3Nullable[T]) elem(arrayLevel int) ColumnBasic { if arrayLevel > 0 { panic("array level is too deep") } return c } ================================================ FILE: column/array_base.go ================================================ package column import ( "encoding/binary" "fmt" "io" "strings" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) // ArrayBase is a column of Array(T) ClickHouse data type // // ArrayBase is a base class for other arrays or use for none generic use type ArrayBase struct { column offsetColumn *Base[uint64] dataColumn ColumnBasic offset uint64 resetHook func() } // NewArray create a new array column of Array(T) ClickHouse data type func NewArrayBase(dataColumn ColumnBasic) *ArrayBase { a := 
&ArrayBase{ dataColumn: dataColumn, offsetColumn: New[uint64](), } return a } // AppendLen Append len of array for insert func (c *ArrayBase) AppendLen(v int) { c.offset += uint64(v) c.offsetColumn.Append(c.offset) } // NumRow return number of row for this block func (c *ArrayBase) NumRow() int { return c.offsetColumn.NumRow() } // Array return a Array type for this column func (c *ArrayBase) Array() *ArrayBase { return NewArrayBase(c) } // Reset all statuses and buffered data // // After each reading, the reading data does not need to be reset. It will be automatically reset. // // When inserting, buffers are reset only after the operation is successful. // If an error occurs, you can safely call insert again. func (c *ArrayBase) Reset() { c.offsetColumn.Reset() c.dataColumn.Reset() c.offset = 0 } // Offsets return all the offsets in current block // Note: Only available in the current block func (c *ArrayBase) Offsets() []uint64 { return c.offsetColumn.Data() } // TotalRows return total rows on this block of array data func (c *ArrayBase) TotalRows() int { if c.offsetColumn.totalByte == 0 { return 0 } return int(binary.LittleEndian.Uint64(c.offsetColumn.b[c.offsetColumn.totalByte-8 : c.offsetColumn.totalByte])) } // SetWriteBufferSize set write buffer (number of rows) // this buffer only used for writing. // By setting this buffer, you will avoid allocating the memory several times. func (c *ArrayBase) SetWriteBufferSize(row int) { c.offsetColumn.SetWriteBufferSize(row) c.dataColumn.SetWriteBufferSize(row) } // ReadRaw read raw data from the reader. 
it runs automatically func (c *ArrayBase) ReadRaw(num int, r *readerwriter.Reader) error { c.offsetColumn.Reset() err := c.offsetColumn.ReadRaw(num, r) if err != nil { return fmt.Errorf("array: read offset column: %w", err) } err = c.dataColumn.ReadRaw(c.TotalRows(), r) if err != nil { return fmt.Errorf("array: read data column: %w", err) } if c.resetHook != nil { c.resetHook() } return nil } // HeaderReader reads header data from reader // it uses internally func (c *ArrayBase) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error { c.r = r err := c.readColumn(readColumn, revision) if err != nil { return err } // never return error //nolint:errcheck c.offsetColumn.HeaderReader(r, false, revision) return c.dataColumn.HeaderReader(r, false, revision) } // Column returns the sub column func (c *ArrayBase) Column() ColumnBasic { return c.dataColumn } func (c *ArrayBase) Validate() error { chType := helper.FilterSimpleAggregate(c.chType) switch { case helper.IsRing(chType): chType = helper.RingMainTypeStr case helper.IsPolygon(chType): chType = helper.PolygonMainTypeStr case helper.IsMultiPolygon(chType): chType = helper.MultiPolygonMainTypeStr } chType = helper.NestedToArrayType(chType) if !helper.IsArray(chType) { return ErrInvalidType{ column: c, } } c.dataColumn.SetType(chType[helper.LenArrayStr : len(chType)-1]) if c.dataColumn.Validate() != nil { return ErrInvalidType{ column: c, } } return nil } func (c *ArrayBase) ColumnType() string { return strings.ReplaceAll(helper.ArrayTypeStr, "", c.dataColumn.ColumnType()) } // WriteTo write data to ClickHouse. 
// it uses internally func (c *ArrayBase) WriteTo(w io.Writer) (int64, error) { nw, err := c.offsetColumn.WriteTo(w) if err != nil { return 0, fmt.Errorf("write len data: %w", err) } n, errDataColumn := c.dataColumn.WriteTo(w) return nw + n, errDataColumn } // HeaderWriter writes header data to writer // it uses internally func (c *ArrayBase) HeaderWriter(w *readerwriter.Writer) { c.dataColumn.HeaderWriter(w) } func (c *ArrayBase) elem(arrayLevel int) ColumnBasic { if arrayLevel > 0 { return c.Array().elem(arrayLevel - 1) } return c } ================================================ FILE: column/array_nullable.go ================================================ package column import "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" // Array is a column of Array(Nullable(T)) ClickHouse data type type ArrayNullable[T comparable] struct { Array[T] dataColumn NullableColumn[T] columnData []*T } // NewArrayNullable create a new array column of Array(Nullable(T)) ClickHouse data type func NewArrayNullable[T comparable](dataColumn NullableColumn[T]) *ArrayNullable[T] { a := &ArrayNullable[T]{ dataColumn: dataColumn, Array: Array[T]{ ArrayBase: ArrayBase{ dataColumn: dataColumn, offsetColumn: New[uint64](), }, }, } a.resetHook = func() { a.columnData = a.columnData[:0] } return a } // Data get all the nullable data in current block as a slice of pointer. func (c *ArrayNullable[T]) DataP() [][]*T { values := make([][]*T, c.offsetColumn.numRow) var lastOffset uint64 columnData := c.getColumnData() for i := 0; i < c.offsetColumn.numRow; i++ { values[i] = columnData[lastOffset:c.offsetColumn.Row(i)] lastOffset = c.offsetColumn.Row(i) } return values } // Read reads all the nullable data in current block as a slice pointer and append to the input. 
func (c *ArrayNullable[T]) ReadP(value [][]*T) [][]*T { var lastOffset uint64 columnData := c.getColumnData() for i := 0; i < c.offsetColumn.numRow; i++ { value = append(value, columnData[lastOffset:c.offsetColumn.Row(i)]) lastOffset = c.offsetColumn.Row(i) } return value } // RowP return the nullable value of given row as a pointer // NOTE: Row number start from zero func (c *ArrayNullable[T]) RowP(row int) []*T { var lastOffset uint64 if row != 0 { lastOffset = c.offsetColumn.Row(row - 1) } var val []*T val = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...) return val } // AppendP a nullable value for insert func (c *ArrayNullable[T]) AppendP(v ...[]*T) { for _, v := range v { c.AppendLen(len(v)) c.dataColumn.AppendP(v...) } } // AppendItemP Append nullable item value for insert // // it should use with AppendLen // // Example: // // c.AppendLen(2) // insert 2 items // c.AppendItemP(val1, val2) // insert item 1 func (c *ArrayNullable[T]) AppendItemP(v ...*T) { c.dataColumn.AppendP(v...) } // ArrayOf return a Array type for this column func (c *ArrayNullable[T]) ArrayOf() *Array2Nullable[T] { return NewArray2Nullable(c) } // ReadRaw read raw data from the reader. 
it runs automatically func (c *ArrayNullable[T]) ReadRaw(num int, r *readerwriter.Reader) error { err := c.Array.ReadRaw(num, r) if err != nil { return err } c.columnData = c.dataColumn.DataP() return nil } func (c *ArrayNullable[T]) getColumnData() []*T { if len(c.columnData) == 0 { c.columnData = c.dataColumn.DataP() } return c.columnData } func (c *ArrayNullable[T]) elem(arrayLevel int) ColumnBasic { if arrayLevel > 0 { return c.ArrayOf().elem(arrayLevel - 1) } return c } ================================================ FILE: column/base.go ================================================ package column import ( "fmt" "unsafe" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) // Column use for most (fixed size) ClickHouse Columns type type Base[T comparable] struct { column size int numRow int values []T params []interface{} } // New create a new column func New[T comparable]() *Base[T] { var tmpValue T size := int(unsafe.Sizeof(tmpValue)) return &Base[T]{ size: size, } } // Data get all the data in current block as a slice. // // NOTE: the return slice only valid in current block, if you want to use it after, you should copy it. or use Read func (c *Base[T]) Data() []T { value := *(*[]T)(unsafe.Pointer(&c.b)) return value[:c.numRow] } // Read reads all the data in current block and append to the input. func (c *Base[T]) Read(value []T) []T { return append(value, c.Data()...) } // Row return the value of given row. // NOTE: Row number start from zero func (c *Base[T]) Row(row int) T { i := row * c.size return *(*T)(unsafe.Pointer(&c.b[i])) } // Append value for insert func (c *Base[T]) Append(v ...T) { c.values = append(c.values, v...) 
c.numRow += len(v) } // NumRow return number of row for this block func (c *Base[T]) NumRow() int { return c.numRow } // Array return a Array type for this column func (c *Base[T]) Array() *Array[T] { return NewArray[T](c) } // Nullable return a nullable type for this column func (c *Base[T]) Nullable() *Nullable[T] { return NewNullable[T](c) } // LC return a low cardinality type for this column func (c *Base[T]) LC() *LowCardinality[T] { return NewLC[T](c) } // LowCardinality return a low cardinality type for this column func (c *Base[T]) LowCardinality() *LowCardinality[T] { return NewLowCardinality[T](c) } // appendEmpty append empty value for insert // this use internally for nullable and low cardinality nullable column func (c *Base[T]) appendEmpty() { var emptyValue T c.Append(emptyValue) } // Reset all statuses and buffered data // // After each reading, the reading data does not need to be reset. It will be automatically reset. // // When inserting, buffers are reset only after the operation is successful. // If an error occurs, you can safely call insert again. func (c *Base[T]) Reset() { c.numRow = 0 c.values = c.values[:0] } // SetWriteBufferSize set write buffer (number of rows) // this buffer only used for writing. // By setting this buffer, you will avoid allocating the memory several times. func (c *Base[T]) SetWriteBufferSize(row int) { if cap(c.values) < row { c.values = make([]T, 0, row) } } // ReadRaw read raw data from the reader. 
it runs automatically func (c *Base[T]) ReadRaw(num int, r *readerwriter.Reader) error { c.Reset() c.r = r c.numRow = num c.totalByte = num * c.size err := c.readBuffer() if err != nil { err = fmt.Errorf("read data: %w", err) } c.readyBufferHook() return err } func (c *Base[T]) readBuffer() error { if cap(c.b) < c.totalByte { c.b = make([]byte, c.totalByte) } else { c.b = c.b[:c.totalByte] } _, err := c.r.Read(c.b) return err } // HeaderReader reads header data from reader // it uses internally func (c *Base[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error { c.r = r return c.readColumn(readColumn, revision) } // HeaderWriter writes header data to writer // it uses internally func (c *Base[T]) HeaderWriter(w *readerwriter.Writer) { } func (c *Base[T]) Elem(arrayLevel int, nullable, lc bool) ColumnBasic { if nullable { return c.Nullable().elem(arrayLevel, lc) } if lc { return c.LowCardinality().elem(arrayLevel) } if arrayLevel > 0 { return c.Array().elem(arrayLevel - 1) } return c } ================================================ FILE: column/base_big_cpu.go ================================================ //go:build !(386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv || riscv64) // +build !386,!amd64,!amd64p32,!arm,!arm64,!mipsle,!mips64le,!mips64p32le,!ppc64le,!riscv,!riscv64 package column // ReadAll read all value in this block and append to the input slice func (c *Base[T]) readyBufferHook() { for i := 0; i < c.totalByte; i += c.size { reverseBuffer(c.b[i : i+c.size]) } } func reverseBuffer(s []byte) { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } } // slice is the runtime representation of a slice. // It cannot be used safely or portably and its representation may // change in a later release. 
// Moreover, the Data field is not sufficient to guarantee the data // it references will not be garbage collected, so programs must keep // a separate, correctly typed pointer to the underlying data. type slice struct { Data uintptr Len int Cap int } func (c *Base[T]) WriteTo(w io.Writer) (int64, error) { s := *(*slice)(unsafe.Pointer(&c.values)) s.Len *= c.size s.Cap *= c.size b := *(*[]byte)(unsafe.Pointer(&s)) for i := 0; i < len(b); i += c.size { reverseBuffer(b[i : i+c.size]) } var n int64 nw, err := w.Write(*(*[]byte)(unsafe.Pointer(&s))) return int64(nw) + n, err } ================================================ FILE: column/base_little_cpu.go ================================================ //go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv || riscv64 // +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv riscv64 package column import ( "io" "unsafe" ) func (c *Base[T]) readyBufferHook() { } // slice is the runtime representation of a slice. // It cannot be used safely or portably and its representation may // change in a later release. // Moreover, the Data field is not sufficient to guarantee the data // it references will not be garbage collected, so programs must keep // a separate, correctly typed pointer to the underlying data. 
type slice struct { Data uintptr Len int Cap int } func (c *Base[T]) WriteTo(w io.Writer) (int64, error) { s := *(*slice)(unsafe.Pointer(&c.values)) s.Len *= c.size s.Cap *= c.size var n int64 src := *(*[]byte)(unsafe.Pointer(&s)) nw, err := w.Write(src) return int64(nw) + n, err } ================================================ FILE: column/base_test.go ================================================ package column_test import ( "context" "fmt" "math" "math/big" "net/netip" "os" "testing" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" "github.com/vahid-sohrabloo/chconn/v2/types" ) func TestBool(t *testing.T) { testColumn(t, true, "Bool", "bool", func(i int) bool { return true }, func(i int) bool { return false }) } func TestBoolUint8(t *testing.T) { testColumn(t, true, "UInt8", "bool", func(i int) bool { return true }, func(i int) bool { return false }) } func TestUint8(t *testing.T) { testColumn(t, true, "UInt8", "uint8", func(i int) uint8 { return uint8(i) }, func(i int) uint8 { return uint8(i + 1) }) } func TestUint16(t *testing.T) { testColumn(t, true, "UInt16", "uint16", func(i int) uint16 { return uint16(i) }, func(i int) uint16 { return uint16(i + 1) }) } func TestUint32(t *testing.T) { testColumn(t, true, "UInt32", "uint32", func(i int) uint32 { return uint32(i) }, func(i int) uint32 { return uint32(i + 1) }) } func TestUint64(t *testing.T) { testColumn(t, true, "UInt64", "uint64", func(i int) uint64 { return uint64(i) }, func(i int) uint64 { return uint64(i + 1) }) } func TestUint128(t *testing.T) { testColumn(t, true, "UInt128", "uint128", func(i int) types.Uint128 { return types.Uint128FromBig(big.NewInt(int64(i))) }, func(i int) types.Uint128 { x := big.NewInt(int64(i)) x = x.Mul(x, big.NewInt(math.MaxInt64)) return types.Uint128FromBig(x) }) } func TestUint256(t *testing.T) { testColumn(t, true, "UInt256", 
"uint256", func(i int) types.Uint256 { return types.Uint256FromBig(big.NewInt(int64(i))) }, func(i int) types.Uint256 { x := big.NewInt(int64(i)) x = x.Mul(x, big.NewInt(math.MaxInt64)) x = x.Mul(x, big.NewInt(math.MaxInt64)) return types.Uint256FromBig(x) }) } func TestInt8(t *testing.T) { testColumn(t, true, "Int8", "int8", func(i int) int8 { return int8(i) }, func(i int) int8 { return int8(i + 1) }) } func TestInt16(t *testing.T) { testColumn(t, true, "Int16", "int16", func(i int) int16 { return int16(i) }, func(i int) int16 { return int16(i + 1) }) } func TestInt32(t *testing.T) { testColumn(t, true, "Int32", "int32", func(i int) int32 { return int32(i) }, func(i int) int32 { return int32(i + 1) }) } func TestInt64(t *testing.T) { testColumn(t, true, "Int64", "int64", func(i int) int64 { return int64(i) }, func(i int) int64 { return int64(i + 1) }) } func TestInt128(t *testing.T) { testColumn(t, true, "Int128", "int128", func(i int) types.Int128 { return types.Int128FromBig(big.NewInt(int64(i * -1))) }, func(i int) types.Int128 { x := big.NewInt(int64(i) * -1) x = x.Mul(x, big.NewInt(math.MaxInt64)) return types.Int128FromBig(x) }) } func TestInt256(t *testing.T) { testColumn(t, true, "Int256", "int256", func(i int) types.Int256 { return types.Int256FromBig(big.NewInt(int64(i))) }, func(i int) types.Int256 { x := big.NewInt(int64(i) * -1) x = x.Mul(x, big.NewInt(math.MaxInt64)) x = x.Mul(x, big.NewInt(math.MaxInt64)) return types.Int256FromBig(x) }) } func TestFixedString(t *testing.T) { testColumn(t, true, "FixedString(2)", "fixedString", func(i int) [2]byte { return [2]byte{byte(i), byte(i + 1)} }, func(i int) [2]byte { return [2]byte{byte(i + 1), byte(i + 2)} }) } func TestFloat32(t *testing.T) { testColumn(t, true, "Float32", "float32", func(i int) float32 { return float32(i) }, func(i int) float32 { return float32(i + 1) }) } func TestFloat64(t *testing.T) { testColumn(t, true, "Float64", "float64", func(i int) float64 { return float64(i) }, func(i int) 
float64 { return float64(i + 1) }) } func TestDecimal32(t *testing.T) { testColumn(t, false, "Decimal32(3)", "decimal32", func(i int) types.Decimal32 { return types.Decimal32(i) }, func(i int) types.Decimal32 { return types.Decimal32(i + 1) }) } func TestDecimal64(t *testing.T) { testColumn(t, false, "Decimal64(3)", "decimal64", func(i int) types.Decimal64 { return types.Decimal64(i) }, func(i int) types.Decimal64 { return types.Decimal64(i + 1) }) } func TestDecimal128(t *testing.T) { testColumn(t, false, "Decimal128(3)", "decimal128", func(i int) types.Decimal128 { return types.Decimal128(types.Int128FromBig(big.NewInt(int64(i)))) }, func(i int) types.Decimal128 { return types.Decimal128(types.Int128FromBig(big.NewInt(int64(i + 1)))) }) } func TestDecimal256(t *testing.T) { testColumn(t, false, "Decimal256(3)", "decimal256", func(i int) types.Decimal256 { return types.Decimal256(types.Int256FromBig(big.NewInt(int64(i)))) }, func(i int) types.Decimal256 { return types.Decimal256(types.Int256FromBig(big.NewInt(int64(i + 1)))) }) } func TestIPv4(t *testing.T) { testColumn(t, true, "IPv4", "ipv4", func(i int) types.IPv4 { // or directly return types.IPv4 return types.IPv4FromAddr(netip.AddrFrom4([4]byte{0, 0, 0, byte(i)})) }, func(i int) types.IPv4 { // or directly return types.IPv4 return types.IPv4FromAddr(netip.AddrFrom4([4]byte{0, 0, byte(i), 0})) }) } func TestIPv6(t *testing.T) { testColumn(t, true, "IPv6", "ipv6", func(i int) types.IPv6 { // or directly return types.IPv6 return types.IPv6FromAddr(netip.MustParseAddr("2001:0db8:85a3:0000:0000:8a2e:0370:7334")) }, func(i int) types.IPv6 { // or directly return types.IPv6 return types.IPv6FromAddr(netip.AddrFrom16([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, byte(i + 1)})) }) } func TestUUID(t *testing.T) { testColumn(t, true, "UUID", "uuid", func(i int) types.UUID { return types.UUIDFromBigEndian(uuid.New()) }, func(i int) types.UUID { return types.UUIDFromBigEndian(uuid.New()) }) } func testColumn[T 
comparable]( t *testing.T, isLC bool, chType, tableName string, firstVal func(i int) T, secondVal func(i int) T, ) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") conn, err := chconn.Connect(context.Background(), connString) require.NoError(t, err) err = conn.Exec(context.Background(), fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName), ) require.NoError(t, err) set := chconn.Settings{ { Name: "allow_suspicious_low_cardinality_types", Value: "true", Important: true, }, } var sqlCreate string if isLC { sqlCreate = fmt.Sprintf(`CREATE TABLE test_%[1]s ( block_id UInt8, %[1]s %[2]s, %[1]s_nullable Nullable(%[2]s), %[1]s_array Array(%[2]s), %[1]s_array_nullable Array(Nullable(%[2]s)), %[1]s_lc LowCardinality(%[2]s), %[1]s_nullable_lc LowCardinality(Nullable(%[2]s)), %[1]s_array_lc Array(LowCardinality(%[2]s)), %[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s))) ) Engine=Memory`, tableName, chType) } else { sqlCreate = fmt.Sprintf(`CREATE TABLE test_%[1]s ( block_id UInt8, %[1]s %[2]s, %[1]s_nullable Nullable(%[2]s), %[1]s_array Array(%[2]s), %[1]s_array_nullable Array(Nullable(%[2]s)) ) Engine=Memory`, tableName, chType) } err = conn.ExecWithOption(context.Background(), sqlCreate, &chconn.QueryOptions{ Settings: set, }) require.NoError(t, err) blockID := column.New[uint8]() col := column.New[T]() colNullable := column.New[T]().Nullable() colArray := column.New[T]().Array() colNullableArray := column.New[T]().Nullable().Array() colLC := column.New[T]().LC() colLCNullable := column.New[T]().Nullable().LC() colArrayLC := column.New[T]().LC().Array() colArrayLCNullable := column.New[T]().Nullable().LC().Array() var colInsert []T var colNullableInsert []*T var colArrayInsert [][]T var colArrayNullableInsert [][]*T var colLCInsert []T var colLCNullableInsert []*T var colLCArrayInsert [][]T var colLCNullableArrayInsert [][]*T // SetWriteBufferSize is not necessary. 
this just to show how to set write buffer col.SetWriteBufferSize(10) colNullable.SetWriteBufferSize(10) colArray.SetWriteBufferSize(10) colNullableArray.SetWriteBufferSize(10) colLC.SetWriteBufferSize(10) colLCNullable.SetWriteBufferSize(10) colArrayLC.SetWriteBufferSize(10) colArrayLCNullable.SetWriteBufferSize(10) for insertN := 0; insertN < 2; insertN++ { rows := 10 for i := 0; i < rows; i++ { blockID.Append(uint8(insertN)) val := firstVal(i * (insertN + 1)) val2 := secondVal(i * (insertN + 1)) valArray := []T{val, val2} valArrayNil := []*T{&val, nil} col.Append(val) colInsert = append(colInsert, val) // example add nullable if i%2 == 0 { colNullableInsert = append(colNullableInsert, &val) colNullable.Append(val) colLCNullableInsert = append(colLCNullableInsert, &val) colLCNullable.Append(val) } else { colNullableInsert = append(colNullableInsert, nil) colNullable.AppendNil() colLCNullableInsert = append(colLCNullableInsert, nil) colLCNullable.AppendNil() } colArray.Append(valArray) colArrayInsert = append(colArrayInsert, valArray) colNullableArray.AppendP(valArrayNil) colArrayNullableInsert = append(colArrayNullableInsert, valArrayNil) colLCInsert = append(colLCInsert, val) colLC.Append(val) colLCArrayInsert = append(colLCArrayInsert, valArray) colArrayLC.Append(valArray) colLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil) colArrayLCNullable.AppendP(valArrayNil) } if isLC { err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( block_id, %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable ) VALUES`, tableName), blockID, col, colNullable, colArray, colNullableArray, colLC, colLCNullable, colArrayLC, colArrayLCNullable, ) } else { err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( block_id, %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable ) VALUES`, tableName), blockID, col, colNullable, colArray, 
colNullableArray, ) } require.NoError(t, err) } // test read all colRead := column.New[T]() colNullableRead := column.New[T]().Nullable() colArrayRead := column.New[T]().Array() colNullableArrayRead := column.New[T]().Nullable().Array() colLCRead := column.New[T]().LC() colLCNullableRead := column.New[T]().Nullable().LC() colArrayLCRead := column.New[T]().LC().Array() colArrayLCNullableRead := column.New[T]().Nullable().LC().Array() var selectStmt chconn.SelectStmt if isLC { selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_%[1]s order by block_id`, tableName), colRead, colNullableRead, colArrayRead, colNullableArrayRead, colLCRead, colLCNullableRead, colArrayLCRead, colArrayLCNullableRead, ) } else { selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable FROM test_%[1]s order by block_id`, tableName), colRead, colNullableRead, colArrayRead, colNullableArrayRead, ) } require.NoError(t, err) require.True(t, conn.IsBusy()) var colData []T var colNullableData []*T var colArrayData [][]T var colArrayNullableData [][]*T var colLCData []T var colLCDataWithKeys []T var dictData []T var dictKey []int var colLCNullableData []*T var colLCArrayData [][]T var colLCNullableArrayData [][]*T for selectStmt.Next() { colData = colRead.Read(colData) colNullableData = colNullableRead.ReadP(colNullableData) colArrayData = colArrayRead.Read(colArrayData) colArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData) if isLC { colLCData = colLCRead.Read(colLCData) colLCNullableData = colLCNullableRead.ReadP(colLCNullableData) colLCArrayData = colArrayLCRead.Read(colLCArrayData) colLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData) dictData = colLCRead.Dicts() dictKey = colLCRead.Keys() // get data from dict and keys for _, 
val := range dictKey { colLCDataWithKeys = append(colLCDataWithKeys, dictData[val]) } } } require.NoError(t, selectStmt.Err()) assert.Equal(t, colInsert, colData) assert.Equal(t, colNullableInsert, colNullableData) assert.Equal(t, colArrayInsert, colArrayData) assert.Equal(t, colArrayNullableInsert, colArrayNullableData) if isLC { assert.Equal(t, colLCInsert, colLCData) assert.Equal(t, colLCInsert, colLCDataWithKeys) assert.Equal(t, colLCNullableInsert, colLCNullableData) assert.Equal(t, colLCArrayInsert, colLCArrayData) assert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData) } // test row colRead = column.New[T]() colNullableRead = column.New[T]().Nullable() colArrayRead = column.New[T]().Array() colNullableArrayRead = column.New[T]().Nullable().Array() colLCRead = column.New[T]().LowCardinality() colLCNullableRead = column.New[T]().Nullable().LowCardinality() colArrayLCRead = column.New[T]().LowCardinality().Array() colArrayLCNullableRead = column.New[T]().Nullable().LowCardinality().Array() if isLC { selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_%[1]s order by block_id`, tableName), colRead, colNullableRead, colArrayRead, colNullableArrayRead, colLCRead, colLCNullableRead, colArrayLCRead, colArrayLCNullableRead, ) } else { selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable FROM test_%[1]s order by block_id`, tableName), colRead, colNullableRead, colArrayRead, colNullableArrayRead, ) } require.NoError(t, err) require.True(t, conn.IsBusy()) colData = colData[:0] colNullableData = colNullableData[:0] colArrayData = colArrayData[:0] colArrayNullableData = colArrayNullableData[:0] colLCData = colLCData[:0] colLCNullableData = colLCNullableData[:0] colLCArrayData = colLCArrayData[:0] colLCNullableArrayData = 
colLCNullableArrayData[:0] for selectStmt.Next() { for i := 0; i < selectStmt.RowsInBlock(); i++ { colData = append(colData, colRead.Row(i)) colNullableData = append(colNullableData, colNullableRead.RowP(i)) colArrayData = append(colArrayData, colArrayRead.Row(i)) colArrayNullableData = append(colArrayNullableData, colNullableArrayRead.RowP(i)) if isLC { colLCData = append(colLCData, colLCRead.Row(i)) colLCNullableData = append(colLCNullableData, colLCNullableRead.RowP(i)) colLCArrayData = append(colLCArrayData, colArrayLCRead.Row(i)) colLCNullableArrayData = append(colLCNullableArrayData, colArrayLCNullableRead.RowP(i)) } } } require.NoError(t, selectStmt.Err()) assert.Equal(t, colInsert, colData) assert.Equal(t, colNullableInsert, colNullableData) assert.Equal(t, colArrayInsert, colArrayData) assert.Equal(t, colArrayNullableInsert, colArrayNullableData) if isLC { assert.Equal(t, colLCInsert, colLCData) assert.Equal(t, colLCNullableInsert, colLCNullableData) assert.Equal(t, colLCArrayInsert, colLCArrayData) assert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData) } // check dynamic column if isLC { selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_%[1]s order by block_id`, tableName), ) } else { selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable FROM test_%[1]s order by block_id`, tableName), ) } require.NoError(t, err) autoColumns := selectStmt.Columns() if isLC { assert.Len(t, autoColumns, 8) if tableName == "bool" { assert.Equal(t, column.New[uint8]().ColumnType(), autoColumns[0].ColumnType()) assert.Equal(t, column.New[uint8]().Nullable().ColumnType(), autoColumns[1].ColumnType()) assert.Equal(t, column.New[uint8]().Array().ColumnType(), autoColumns[2].ColumnType()) assert.Equal(t, 
column.New[uint8]().Nullable().Array().ColumnType(), autoColumns[3].ColumnType()) assert.Equal(t, column.New[uint8]().LowCardinality().ColumnType(), autoColumns[4].ColumnType()) assert.Equal(t, column.New[uint8]().Nullable().LowCardinality().ColumnType(), autoColumns[5].ColumnType()) assert.Equal(t, column.New[uint8]().LowCardinality().Array().ColumnType(), autoColumns[6].ColumnType()) assert.Equal(t, column.New[uint8]().Nullable().LowCardinality().Array().ColumnType(), autoColumns[7].ColumnType()) } else { assert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType()) assert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType()) assert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType()) assert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType()) assert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType()) assert.Equal(t, colLCNullableRead.ColumnType(), autoColumns[5].ColumnType()) assert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType()) assert.Equal(t, colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType()) } } else { assert.Len(t, autoColumns, 4) assert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType()) assert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType()) assert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType()) assert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType()) } for selectStmt.Next() { } require.NoError(t, selectStmt.Err()) selectStmt.Close() } func TestEmptyCollection(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") conn, err := chconn.Connect(context.Background(), connString) require.NoError(t, err) tableName := "empty_collection" err = conn.Exec(context.Background(), fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName), ) require.NoError(t, err) set := chconn.Settings{ { Name: "allow_suspicious_low_cardinality_types", Value: "true", }, } sqlCreate := 
fmt.Sprintf(`CREATE TABLE test_%[1]s ( %[1]s_array Array(%[2]s), %[1]s_array_nullable Array(Nullable(%[2]s)), %[1]s_array_lc Array(LowCardinality(%[2]s)), %[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s))) ) Engine=Memory`, tableName, "UInt16") err = conn.ExecWithOption(context.Background(), sqlCreate, &chconn.QueryOptions{ Settings: set, }) require.NoError(t, err) colArray := column.New[uint16]().Array() colNullableArray := column.New[uint16]().Nullable().Array() colArrayLC := column.New[uint16]().LC().Array() colArrayLCNullable := column.New[uint16]().Nullable().LC().Array() colArray.Append() colArray.Append([]uint16{}) colNullableArray.AppendP() colNullableArray.AppendP([]*uint16{}) colArrayLC.Append() colArrayLC.Append([]uint16{}) colArrayLCNullable.AppendP() colArrayLCNullable.AppendP([]*uint16{}) err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( %[1]s_array, %[1]s_array_nullable, %[1]s_array_lc, %[1]s_array_lc_nullable ) VALUES`, tableName), colArray, colNullableArray, colArrayLC, colArrayLCNullable, ) require.NoError(t, err) // test read all colArrayRead := column.New[uint16]().Array() colNullableArrayRead := column.New[uint16]().Nullable().Array() colArrayLCRead := column.New[uint16]().LC().Array() colArrayLCNullableRead := column.New[uint16]().Nullable().LC().Array() var selectStmt chconn.SelectStmt selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s_array, %[1]s_array_nullable, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_%[1]s `, tableName), colArrayRead, colNullableArrayRead, colArrayLCRead, colArrayLCNullableRead, ) require.NoError(t, err) require.True(t, conn.IsBusy()) var colArrayData [][]uint16 var colArrayNullableData [][]*uint16 var colLCArrayData [][]uint16 var colLCNullableArrayData [][]*uint16 for selectStmt.Next() { colArrayData = colArrayRead.Read(colArrayData) colArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData) colLCArrayData = 
colArrayLCRead.Read(colLCArrayData) colLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData) } require.NoError(t, selectStmt.Err()) assert.Equal(t, [][]uint16{{}}, colArrayData) assert.Equal(t, [][]*uint16{{}}, colArrayNullableData) assert.Equal(t, [][]uint16{{}}, colLCArrayData) assert.Equal(t, [][]*uint16{{}}, colLCNullableArrayData) } ================================================ FILE: column/base_validate.go ================================================ package column import ( "bytes" "fmt" "strconv" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" ) var chColumnByteSize = map[string]int{ "Bool": 1, "Int8": 1, "Int16": 2, "Int32": 4, "Int64": 8, "Int128": 16, "Int256": 32, "UInt8": 1, "UInt16": 2, "UInt32": 4, "UInt64": 8, "UInt128": 16, "UInt256": 32, "Float32": 4, "Float64": 8, "Date": 2, "Date32": 4, "DateTime": 4, "DateTime64": 8, "UUID": 16, "IPv4": 4, "IPv6": 16, } var byteChColumnType = map[int]string{ 1: "Int8|UInt8|Enum8", 2: "Int16|UInt16|Enum16|Date", 4: "Int32|UInt32|Float32|Decimal32|Date32|DateTime|IPv4", 8: "Int64|UInt64|Float64|Decimal64|DateTime64", 16: "Int128|UInt128|Decimal128|IPv6|UUID", 32: "Int256|UInt256|Decimal256", } func (c *Base[T]) Validate() error { chType := helper.FilterSimpleAggregate(c.chType) if byteSize, ok := chColumnByteSize[string(chType)]; ok { if byteSize != c.size { return &ErrInvalidType{ column: c, } } return nil } if ok, err := c.checkEnum8(chType); ok { return err } if ok, err := c.checkEnum16(chType); ok { return err } if ok, err := c.checkDateTime(chType); ok { return err } if ok, err := c.checkDateTime(chType); ok { return err } if ok, err := c.checkDateTime64(chType); ok { return err } if ok, err := c.checkFixedString(chType); ok { return err } if ok, err := c.checkDecimal(chType); ok { return err } return &ErrInvalidType{ column: c, } } func (c *Base[T]) checkEnum8(chType []byte) (bool, error) { if helper.IsEnum8(chType) { if c.size != Uint8Size { return true, &ErrInvalidType{ 
				column: c,
			}
		}
		return true, nil
	}
	return false, nil
}

// checkEnum16 reports whether chType is an Enum16; if so it also verifies
// that the element size is two bytes.
func (c *Base[T]) checkEnum16(chType []byte) (bool, error) {
	if helper.IsEnum16(chType) {
		if c.size != Uint16Size {
			return true, &ErrInvalidType{
				column: c,
			}
		}
		return true, nil
	}
	return false, nil
}

// checkDateTime handles DateTime with a parameter (e.g. DateTime('UTC')):
// it verifies the 4-byte element size and stores precision 0 plus the raw
// timezone bytes in c.params.
func (c *Base[T]) checkDateTime(chType []byte) (bool, error) {
	if helper.IsDateTimeWithParam(chType) {
		if c.size != 4 {
			return true, &ErrInvalidType{
				column: c,
			}
		}
		c.params = []interface{}{
			// precision
			0,
			// timezone
			chType[helper.DateTimeStrLen : len(chType)-1],
		}
		return true, nil
	}
	return false, nil
}

// checkDateTime64 handles DateTime64(precision[, 'tz']): it verifies the
// 8-byte element size and records the precision and optional timezone in
// c.params.
func (c *Base[T]) checkDateTime64(chType []byte) (bool, error) {
	if helper.IsDateTime64(chType) {
		if c.size != 8 {
			return true, &ErrInvalidType{
				column: c,
			}
		}
		// NOTE(review): the parameter list is sliced with DecimalStrLen —
		// confirm this constant matches len("DateTime64(") or that it is
		// intentionally shared with the Decimal prefix handling.
		parts := bytes.Split(chType[helper.DecimalStrLen:len(chType)-1], []byte(", "))
		c.params = []interface{}{
			parts[0],
			[]byte{},
		}
		if len(parts) > 1 {
			c.params[1] = parts[1]
		}
		return true, nil
	}
	return false, nil
}

// checkFixedString handles FixedString(N): it parses N and verifies the
// element size equals N.
func (c *Base[T]) checkFixedString(chType []byte) (bool, error) {
	if helper.IsFixedString(chType) {
		size, err := strconv.Atoi(string(chType[helper.FixedStringStrLen : len(chType)-1]))
		if err != nil {
			return true, fmt.Errorf("invalid size: %s", err)
		}
		if c.size != size {
			return true, &ErrInvalidType{
				column: c,
			}
		}
		return true, nil
	}
	return false, nil
}

// checkDecimal handles Decimal(P, S): it parses precision and scale,
// records them in c.params, and derives the expected byte width from the
// precision range.
func (c *Base[T]) checkDecimal(chType []byte) (bool, error) {
	if helper.IsDecimal(chType) {
		parts := bytes.Split(chType[helper.DecimalStrLen:len(chType)-1], []byte(", "))
		if len(parts) != 2 {
			return true, fmt.Errorf("invalid decimal type (should have precision and scale): %s", c.chType)
		}
		precision, err := strconv.Atoi(string(parts[0]))
		if err != nil {
			return true, fmt.Errorf("invalid precision: %s", err)
		}
		scale, err := strconv.Atoi(string(parts[1]))
		if err != nil {
			return true, fmt.Errorf("invalid scale: %s", err)
		}
		c.params = []interface{}{precision, scale}
		var size int
		switch {
		case precision >= 1 && precision <= 9:
			size = 4
		case precision >= 10 && precision <= 18:
			size = 8
		case precision >= 19 &&
precision <= 38: size = 16 case precision >= 39 && precision <= 76: size = 32 default: return true, fmt.Errorf("invalid precision: %d. it should be between 1 and 76", precision) } if c.size != size { return true, &ErrInvalidType{ column: c, } } return true, nil } return false, nil } func (c *Base[T]) ColumnType() string { if ok, _ := c.checkFixedString(c.chType); !ok { if str, ok := byteChColumnType[c.size]; ok { return str } } return fmt.Sprintf("T(%d bytes size)", c.size) } ================================================ FILE: column/bench_test.go ================================================ package column_test import ( "context" "testing" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" ) func BenchmarkTestChconnSelect100MUint64(b *testing.B) { // return ctx := context.Background() c, err := chconn.Connect(ctx, "password=salam") if err != nil { b.Fatal(err) } colRead := column.New[uint64]() for n := 0; n < b.N; n++ { s, err := c.Select(ctx, "SELECT number FROM system.numbers_mt LIMIT 100000000", colRead) if err != nil { b.Fatal(err) } for s.Next() { colRead.Data() } if err := s.Err(); err != nil { b.Fatal(err) } s.Close() } } func BenchmarkTestChconnSelect1MString(b *testing.B) { ctx := context.Background() c, err := chconn.Connect(ctx, "password=salam") if err != nil { b.Fatal(err) } colRead := column.NewString() var data [][]byte for n := 0; n < b.N; n++ { s, err := c.Select(ctx, "SELECT randomString(20) FROM system.numbers_mt LIMIT 1000000", colRead) if err != nil { b.Fatal(err) } for s.Next() { data = data[:0] colRead.DataBytes() } if err := s.Err(); err != nil { b.Fatal(err) } s.Close() } } func BenchmarkTestChconnInsert10M(b *testing.B) { // return ctx := context.Background() c, err := chconn.Connect(ctx, "password=salam") if err != nil { b.Fatal(err) } err = c.Exec(ctx, "DROP TABLE IF EXISTS test_insert_chconn") if err != nil { b.Fatal(err) } err = c.Exec(ctx, "CREATE TABLE test_insert_chconn (id UInt64) ENGINE = 
Null")
	if err != nil {
		b.Fatal(err)
	}
	const (
		rowsInBlock = 10_000_000
	)
	idColumns := column.New[uint64]()
	idColumns.SetWriteBufferSize(rowsInBlock)
	for n := 0; n < b.N; n++ {
		for y := 0; y < rowsInBlock; y++ {
			idColumns.Append(1)
		}
		err := c.Insert(ctx, "INSERT INTO test_insert_chconn VALUES", idColumns)
		if err != nil {
			b.Fatal(err)
		}
	}
}

================================================ FILE: column/column_helper.go ================================================
package column

import (
	"fmt"
	"io"

	"github.com/vahid-sohrabloo/chconn/v2/internal/helper"
	"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter"
)

// ColumnBasic is the type-erased interface implemented by every column:
// raw block read/write, header handling, and name/type metadata.
type ColumnBasic interface {
	ReadRaw(num int, r *readerwriter.Reader) error
	HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error
	HeaderWriter(*readerwriter.Writer)
	WriteTo(io.Writer) (int64, error)
	NumRow() int
	Reset()
	SetType(v []byte)
	Type() []byte
	SetName(v []byte)
	Name() []byte
	Validate() error
	ColumnType() string
	SetWriteBufferSize(int)
}

// Column is a typed column with element type T.
type Column[T any] interface {
	ColumnBasic
	Data() []T
	Read([]T) []T
	Row(int) T
	Append(...T)
}

// NullableColumn is a typed column whose rows may be NULL; the *T variants
// use nil to represent NULL.
type NullableColumn[T any] interface {
	Column[T]
	DataP() []*T
	ReadP([]*T) []*T
	RowP(int) *T
	AppendP(...*T)
}

// column holds the state shared by all column implementations: the current
// reader, the raw byte buffer, and the column's name/type metadata. A
// non-nil parent marks a nested column whose header is read by its wrapper.
type column struct {
	r         *readerwriter.Reader
	b         []byte
	totalByte int
	name      []byte
	chType    []byte
	parent    ColumnBasic
}

// readColumn reads the column name and ClickHouse type (each a uvarint
// length followed by that many bytes) from the stream header. It is
// skipped for nested (parented) columns and when readColumn is false.
// For protocol revisions that send a custom-serialization flag, the flag
// is read and rejected if set.
func (c *column) readColumn(readColumn bool, revision uint64) error {
	if c.parent != nil || !readColumn {
		return nil
	}
	strLen, err := c.r.Uvarint()
	if err != nil {
		return fmt.Errorf("read column name length: %w", err)
	}
	// Reuse the name buffer when its capacity suffices.
	if cap(c.name) < int(strLen) {
		c.name = make([]byte, strLen)
	} else {
		c.name = c.name[:strLen]
	}
	_, err = c.r.Read(c.name)
	if err != nil {
		return fmt.Errorf("read column name: %w", err)
	}
	strLen, err = c.r.Uvarint()
	if err != nil {
		return fmt.Errorf("read column type length: %w", err)
	}
	if cap(c.chType) < int(strLen) {
		c.chType = make([]byte, strLen)
	} else {
		c.chType = c.chType[:strLen]
	}
	_, err = c.r.Read(c.chType)
	if err != nil {
		return fmt.Errorf("read column type: %w", err)
	}
	if revision >= helper.DbmsMinProtocolWithCustomSerialization {
		hasCustomSerialization, err := c.r.ReadByte()
		if err != nil {
			return fmt.Errorf("read custom serialization: %w", err)
		}
		// todo check with json object
		if hasCustomSerialization == 1 {
			return fmt.Errorf("custom serialization not supported")
		}
	}
	return nil
}

// Name get name of the column
func (c *column) Name() []byte {
	return c.name
}

// Type get clickhouse type
func (c *column) Type() []byte {
	return c.chType
}

// SetName set name of the column
func (c *column) SetName(v []byte) {
	c.name = v
}

// SetType set clickhouse type
func (c *column) SetType(v []byte) {
	c.chType = v
}

================================================ FILE: column/date.go ================================================
package column

import (
	"strings"
	"time"
	"unsafe"
)

// DateType is an interface to handle convert between time.Time and T.
type DateType[T any] interface {
	comparable
	FromTime(val time.Time, precision int) T
	ToTime(val *time.Location, precision int) time.Time
}

// Date is a date column of ClickHouse date type (Date, Date32, DateTime, DateTime64).
// it is a wrapper of time.Time. but if you want to work with the raw data like unix timestamp
// you can directly use `Column` (`New[T]()`)
//
// `uint16` or `types.Date` or any 16 bits data types For `Date`.
//
// `uint32` or `types.Date32` or any 32 bits data types For `Date32`
//
// `uint32` or `types.DateTime` or any 32 bits data types For `DateTime`
//
// `uint64` or `types.DateTime64` or any 64 bits data types For `DateTime64`
type Date[T DateType[T]] struct {
	Base[T]
	loc       *time.Location
	precision int
}

// NewDate create a new date column of ClickHouse date type (Date, Date32, DateTime, DateTime64).
// it is a wrapper of time.Time. but if you want to work with the raw data like unix timestamp
// you can directly use `Column` (`New[T]()``)
//
// `uint16` or `types.Date` or any 16 bits data types For `Date`.
//
// `uint32` or `types.Date32` or any 32 bits data types For `Date32`
//
// `uint32` or `types.DateTime` or any 32 bits data types For `DateTime`
//
// `uint64` or `types.DateTime64` or any 64 bits data types For `DateTime64`
//
// ONLY ON SELECT, timezone set automatically for `DateTime` and `DateTime64` if not set and present in clickhouse datatype)
func NewDate[T DateType[T]]() *Date[T] {
	// The element size on the wire equals the in-memory size of T
	// (2, 4 or 8 bytes depending on the date type).
	var tmpValue T
	size := int(unsafe.Sizeof(tmpValue))
	return &Date[T]{
		Base: Base[T]{
			size: size,
		},
	}
}

// SetLocation set the location of the time.Time. Only use for `DateTime` and `DateTime64`
func (c *Date[T]) SetLocation(loc *time.Location) *Date[T] {
	c.loc = loc
	return c
}

// Location get location
//
// ONLY ON SELECT, set automatically for `DateTime` and `DateTime64` if not set and present in clickhouse datatype)
func (c *Date[T]) Location() *time.Location {
	// params[1] carries the timezone argument parsed from the ClickHouse
	// type string (single-quoted, e.g. 'America/New_York'); fall back to
	// time.Local when absent or unloadable. The result is cached in c.loc.
	if c.loc == nil && len(c.params) >= 2 && len(c.params[1].([]byte)) > 0 {
		loc, err := time.LoadLocation(strings.Trim(string(c.params[1].([]byte)), "'"))
		if err == nil {
			c.SetLocation(loc)
		} else {
			c.SetLocation(time.Local)
		}
	}
	if c.loc == nil {
		c.SetLocation(time.Local)
	}
	return c.loc
}

// SetPrecision set the precision of the time.Time. Only use for `DateTime64`
func (c *Date[T]) SetPrecision(precision int) *Date[T] {
	c.precision = precision
	return c
}

// Data get all the data in current block as a slice.
func (c *Date[T]) Data() []time.Time {
	values := make([]time.Time, c.numRow)
	for i := 0; i < c.numRow; i++ {
		values[i] = c.Row(i)
	}
	return values
}

// Read reads all the data in current block and append to the input.
func (c *Date[T]) Read(value []time.Time) []time.Time {
	// Grow in place when the caller's slice already has enough capacity;
	// otherwise extend it with a single append.
	if cap(value)-len(value) >= c.NumRow() {
		value = (value)[:len(value)+c.NumRow()]
	} else {
		value = append(value, make([]time.Time, c.NumRow())...)
	}
	val := (value)[len(value)-c.NumRow():]
	for i := 0; i < c.NumRow(); i++ {
		val[i] = c.Row(i)
	}
	return value
}

// Row return the value of given row
// NOTE: Row number start from zero
func (c *Date[T]) Row(row int) time.Time {
	// Reinterpret the raw bytes at the row's offset as T, then convert to
	// time.Time using the column's location and precision.
	i := row * c.size
	return (*(*T)(unsafe.Pointer(&c.b[i]))).ToTime(c.Location(), c.precision)
}

// Append value for insert
func (c *Date[T]) Append(v ...time.Time) {
	var val T
	for _, v := range v {
		c.values = append(c.values, val.FromTime(v, c.precision))
	}
	c.numRow += len(v)
}

// Array return a Array type for this column
func (c *Date[T]) Array() *Array[time.Time] {
	return NewArray[time.Time](c)
}

// Nullable return a nullable type for this column
func (c *Date[T]) Nullable() *Nullable[time.Time] {
	return NewNullable[time.Time](c)
}

// LC return a low cardinality type for this column
func (c *Date[T]) LC() *LowCardinality[time.Time] {
	return NewLC[time.Time](c)
}

// LowCardinality return a low cardinality type for this column
func (c *Date[T]) LowCardinality() *LowCardinality[time.Time] {
	return NewLC[time.Time](c)
}

// Elem returns the column wrapped in the combination of Nullable /
// LowCardinality / Array decorations described by the arguments. Used when
// building columns dynamically from a parsed ClickHouse type.
func (c *Date[T]) Elem(arrayLevel int, nullable, lc bool) ColumnBasic {
	if nullable {
		return c.Nullable().elem(arrayLevel, lc)
	}
	if lc {
		return c.LowCardinality().elem(arrayLevel)
	}
	if arrayLevel > 0 {
		return c.Array().elem(arrayLevel - 1)
	}
	return c
}


================================================
FILE: column/date_test.go
================================================
package column_test

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vahid-sohrabloo/chconn/v2"
	"github.com/vahid-sohrabloo/chconn/v2/column"
	"github.com/vahid-sohrabloo/chconn/v2/types"
)

// TestDate exercises the Date column type end-to-end (insert + select)
// through the shared generic helper.
func TestDate(t *testing.T) {
	testDateColumn(t, true, "Date", "date", func(i int) time.Time {
		return time.Date(2020, 1, i, 0, 0, 0, 0, time.UTC)
	}, func(i int) time.Time {
		return time.Date(2020, 1, i+1, 0, 0, 0, 0, time.UTC)
	}, func() *column.Date[types.Date] {
		return
		column.NewDate[types.Date]()
	})
}

// TestDate32 exercises the Date32 column type end-to-end.
func TestDate32(t *testing.T) {
	testDateColumn(t, true, "Date32", "date32", func(i int) time.Time {
		return time.Date(2020, 1, i, 0, 0, 0, 0, time.UTC)
	}, func(i int) time.Time {
		return time.Date(2020, 1, i+1, 0, 0, 0, 0, time.UTC)
	}, func() *column.Date[types.Date32] {
		return column.NewDate[types.Date32]()
	})
}

// TestDateTime exercises the DateTime column type end-to-end.
func TestDateTime(t *testing.T) {
	testDateColumn(t, true, "DateTime", "dateTime", func(i int) time.Time {
		return time.Date(2020, 1, i, 0, 0, i+1, 0, time.Local)
	}, func(i int) time.Time {
		return time.Date(2020, 1, i, 0, 0, i+2, 0, time.Local)
	}, func() *column.Date[types.DateTime] {
		return column.NewDate[types.DateTime]()
	})
}

// TestDateTimeTimezone checks that a DateTime column with an explicit
// timezone in its ClickHouse type round-trips values in that timezone.
func TestDateTimeTimezone(t *testing.T) {
	testDateColumn(t, true, "DateTime('America/New_York')", "dateTime_timezone", func(i int) time.Time {
		loc, err := time.LoadLocation("America/New_York")
		require.NoError(t, err)
		return time.Date(2020, 1, i, 0, 0, i+1, 0, loc)
	}, func(i int) time.Time {
		loc, err := time.LoadLocation("America/New_York")
		require.NoError(t, err)
		return time.Date(2020, 1, i, 0, 0, i+2, 0, loc)
	}, func() *column.Date[types.DateTime] {
		return column.NewDate[types.DateTime]()
	})
}

// TestDateTime64 exercises DateTime64 with nanosecond precision and a
// timezone. isLC is false: LowCardinality(DateTime64) is not supported.
func TestDateTime64(t *testing.T) {
	testDateColumn(t, false, "DateTime64(9, 'America/New_York')", "dateTime64", func(i int) time.Time {
		loc, err := time.LoadLocation("America/New_York")
		require.NoError(t, err)
		return time.Date(2020, 1, i, 0, 0, i+1, i+110, loc)
	}, func(i int) time.Time {
		loc, err := time.LoadLocation("America/New_York")
		require.NoError(t, err)
		return time.Date(2020, 1, i, 0, 0, i+1, i+1101, loc)
	}, func() *column.Date[types.DateTime64] {
		return column.NewDate[types.DateTime64]().SetPrecision(9)
	})
}

// testDateColumn is the shared end-to-end test for every date type. It
// creates a table with plain / Nullable / Array / LowCardinality variants
// (the LC variants only when isLC is true), inserts two blocks of generated
// values, then reads them back three ways: bulk Read, per-row Row/RowP, and
// dynamically created columns (SelectWithOption), asserting equality at each
// step. Requires a live ClickHouse at CHX_TEST_TCP_CONN_STRING.
func testDateColumn[T column.DateType[T]](
	t *testing.T,
	isLC bool,
	chType, tableName string,
	firstVal func(i int) time.Time,
	secondVal func(i int) time.Time,
	getBaseColumn func() *column.Date[T],
) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)

	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),
	)
	require.NoError(t, err)
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "true",
		},
	}
	var sqlCreate string
	if isLC {
		sqlCreate = fmt.Sprintf(`CREATE TABLE test_%[1]s (
				%[1]s %[2]s,
				%[1]s_nullable Nullable(%[2]s),
				%[1]s_array Array(%[2]s),
				%[1]s_array_nullable Array(Nullable(%[2]s)),
				%[1]s_lc LowCardinality(%[2]s),
				%[1]s_nullable_lc LowCardinality(Nullable(%[2]s)),
				%[1]s_array_lc Array(LowCardinality(%[2]s)),
				%[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s)))
			) Engine=Memory`, tableName, chType)
	} else {
		sqlCreate = fmt.Sprintf(`CREATE TABLE test_%[1]s (
				%[1]s %[2]s,
				%[1]s_nullable Nullable(%[2]s),
				%[1]s_array Array(%[2]s),
				%[1]s_array_nullable Array(Nullable(%[2]s))
			) Engine=Memory`, tableName, chType)
	}
	err = conn.ExecWithOption(context.Background(), sqlCreate, &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)

	col := getBaseColumn()
	colNullable := getBaseColumn().Nullable()
	colArray := getBaseColumn().Array()
	colNullableArray := getBaseColumn().Nullable().Array()
	colLC := getBaseColumn().LC()
	colLCNullable := getBaseColumn().Nullable().LC()
	colArrayLC := getBaseColumn().LC().Array()
	colArrayLCNullable := getBaseColumn().Nullable().LC().Array()
	var colInsert []time.Time
	var colNullableInsert []*time.Time
	var colArrayInsert [][]time.Time
	var colArrayNullableInsert [][]*time.Time
	var colLCInsert []time.Time
	var colLCNullableInsert []*time.Time
	var colLCArrayInsert [][]time.Time
	var colLCNullableArrayInsert [][]*time.Time

	// SetWriteBufferSize is not necessary. this just to show how to set write buffer
	col.SetWriteBufferSize(10)
	colNullable.SetWriteBufferSize(10)
	colArray.SetWriteBufferSize(10)
	colNullableArray.SetWriteBufferSize(10)
	colLC.SetWriteBufferSize(10)
	colLCNullable.SetWriteBufferSize(10)
	colArrayLC.SetWriteBufferSize(10)
	colArrayLCNullable.SetWriteBufferSize(10)
	// Two insert rounds verify that columns are correctly reset between
	// inserts.
	for insertN := 0; insertN < 2; insertN++ {
		rows := 10
		for i := 0; i < rows; i++ {
			val := firstVal(i)
			val2 := secondVal(i)
			valArray := []time.Time{val, val2}
			valArrayNil := []*time.Time{&val, nil}
			col.Append(val)
			colInsert = append(colInsert, val)
			// example add nullable
			if i%2 == 0 {
				colNullableInsert = append(colNullableInsert, &val)
				colNullable.Append(val)
				colLCNullableInsert = append(colLCNullableInsert, &val)
				colLCNullable.Append(val)
			} else {
				colNullableInsert = append(colNullableInsert, nil)
				colNullable.AppendNil()
				colLCNullableInsert = append(colLCNullableInsert, nil)
				colLCNullable.AppendNil()
			}
			colArray.Append(valArray)
			colArrayInsert = append(colArrayInsert, valArray)
			colNullableArray.AppendP(valArrayNil)
			colArrayNullableInsert = append(colArrayNullableInsert, valArrayNil)
			colLCInsert = append(colLCInsert, val)
			colLC.Append(val)
			colLCArrayInsert = append(colLCArrayInsert, valArray)
			colArrayLC.Append(valArray)
			colLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil)
			colArrayLCNullable.AppendP(valArrayNil)
		}
		if isLC {
			err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s (
				%[1]s,
				%[1]s_nullable,
				%[1]s_array,
				%[1]s_array_nullable,
				%[1]s_lc,
				%[1]s_nullable_lc,
				%[1]s_array_lc,
				%[1]s_array_lc_nullable
			) VALUES`, tableName),
				col,
				colNullable,
				colArray,
				colNullableArray,
				colLC,
				colLCNullable,
				colArrayLC,
				colArrayLCNullable,
			)
		} else {
			err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s (
				%[1]s,
				%[1]s_nullable,
				%[1]s_array,
				%[1]s_array_nullable
			) VALUES`, tableName),
				col,
				colNullable,
				colArray,
				colNullableArray,
			)
		}
		require.NoError(t, err)
	}

	// test read all
	colRead := getBaseColumn()
	colNullableRead := getBaseColumn().Nullable()
	colArrayRead := getBaseColumn().Array()
	colNullableArrayRead := getBaseColumn().Nullable().Array()
	colLCRead := getBaseColumn().LC()
	colLCNullableRead := getBaseColumn().Nullable().LC()
	colArrayLCRead := getBaseColumn().LC().Array()
	colArrayLCNullableRead := getBaseColumn().Nullable().LC().Array()
	var selectStmt chconn.SelectStmt
	if isLC {
		selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable,
		%[1]s_lc,
		%[1]s_nullable_lc,
		%[1]s_array_lc,
		%[1]s_array_lc_nullable
	FROM test_%[1]s`, tableName),
			colRead,
			colNullableRead,
			colArrayRead,
			colNullableArrayRead,
			colLCRead,
			colLCNullableRead,
			colArrayLCRead,
			colArrayLCNullableRead,
		)
	} else {
		selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable
	FROM test_%[1]s`, tableName),
			colRead,
			colNullableRead,
			colArrayRead,
			colNullableArrayRead,
		)
	}
	require.NoError(t, err)
	require.True(t, conn.IsBusy())

	var colData []time.Time
	var colNullableData []*time.Time
	var colArrayData [][]time.Time
	var colArrayNullableData [][]*time.Time
	var colLCData []time.Time
	var colLCNullableData []*time.Time
	var colLCArrayData [][]time.Time
	var colLCNullableArrayData [][]*time.Time

	for selectStmt.Next() {
		colData = colRead.Read(colData)
		colNullableData = colNullableRead.ReadP(colNullableData)
		colArrayData = colArrayRead.Read(colArrayData)
		colArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData)
		if isLC {
			colLCData = colLCRead.Read(colLCData)
			colLCNullableData = colLCNullableRead.ReadP(colLCNullableData)
			colLCArrayData = colArrayLCRead.Read(colLCArrayData)
			colLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData)
		}
	}

	require.NoError(t, selectStmt.Err())

	assert.Equal(t, colInsert, colData)
	assert.Equal(t, colNullableInsert, colNullableData)
	assert.Equal(t, colArrayInsert, colArrayData)
	assert.Equal(t, colArrayNullableInsert, colArrayNullableData)
	if isLC {
		assert.Equal(t, colLCInsert, colLCData)
		assert.Equal(t, colLCNullableInsert, colLCNullableData)
		assert.Equal(t, colLCArrayInsert, colLCArrayData)
		assert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)
	}

	// test row
	colRead = getBaseColumn()
	colNullableRead = getBaseColumn().Nullable()
	colArrayRead = getBaseColumn().Array()
	colNullableArrayRead = getBaseColumn().Nullable().Array()
	colLCRead = getBaseColumn().LowCardinality()
	colLCNullableRead = getBaseColumn().Nullable().LowCardinality()
	colArrayLCRead = getBaseColumn().LowCardinality().Array()
	colArrayLCNullableRead = getBaseColumn().Nullable().LowCardinality().Array()
	if isLC {
		selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable,
		%[1]s_lc,
		%[1]s_nullable_lc,
		%[1]s_array_lc,
		%[1]s_array_lc_nullable
	FROM test_%[1]s`, tableName),
			colRead,
			colNullableRead,
			colArrayRead,
			colNullableArrayRead,
			colLCRead,
			colLCNullableRead,
			colArrayLCRead,
			colArrayLCNullableRead,
		)
	} else {
		selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable
	FROM test_%[1]s`, tableName),
			colRead,
			colNullableRead,
			colArrayRead,
			colNullableArrayRead,
		)
	}

	require.NoError(t, err)
	require.True(t, conn.IsBusy())

	colData = colData[:0]
	colNullableData = colNullableData[:0]
	colArrayData = colArrayData[:0]
	colArrayNullableData = colArrayNullableData[:0]
	colLCData = colLCData[:0]
	colLCNullableData = colLCNullableData[:0]
	colLCArrayData = colLCArrayData[:0]
	colLCNullableArrayData = colLCNullableArrayData[:0]

	for selectStmt.Next() {
		for i := 0; i < selectStmt.RowsInBlock(); i++ {
			colData = append(colData, colRead.Row(i))
			colNullableData = append(colNullableData, colNullableRead.RowP(i))
			colArrayData = append(colArrayData, colArrayRead.Row(i))
			colArrayNullableData = append(colArrayNullableData, colNullableArrayRead.RowP(i))
			if isLC {
				colLCData = append(colLCData, colLCRead.Row(i))
				colLCNullableData = append(colLCNullableData, colLCNullableRead.RowP(i))
				colLCArrayData = append(colLCArrayData, colArrayLCRead.Row(i))
				colLCNullableArrayData = append(colLCNullableArrayData, colArrayLCNullableRead.RowP(i))
			}
		}
	}

	require.NoError(t, selectStmt.Err())

	assert.Equal(t, colInsert, colData)
	assert.Equal(t, colNullableInsert, colNullableData)
	assert.Equal(t, colArrayInsert, colArrayData)
	assert.Equal(t, colArrayNullableInsert, colArrayNullableData)
	if isLC {
		assert.Equal(t, colLCInsert, colLCData)
		assert.Equal(t, colLCNullableInsert, colLCNullableData)
		assert.Equal(t, colLCArrayInsert, colLCArrayData)
		assert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)
	}

	// check dynamic column
	if isLC {
		selectStmt, err = conn.SelectWithOption(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable,
		%[1]s_lc,
		%[1]s_nullable_lc,
		%[1]s_array_lc,
		%[1]s_array_lc_nullable
	FROM test_%[1]s`, tableName),
			&chconn.QueryOptions{
				UseGoTime: false,
			},
		)
	} else {
		selectStmt, err = conn.SelectWithOption(context.Background(),
			fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable
	FROM test_%[1]s`, tableName,
			),
			&chconn.QueryOptions{
				UseGoTime: false,
			},
		)
	}

	require.NoError(t, err)
	autoColumns := selectStmt.Columns()
	// With UseGoTime disabled, dynamic columns are raw numeric columns.
	if isLC {
		assert.Len(t, autoColumns, 8)
		assert.Equal(t, column.New[T]().ColumnType(), autoColumns[0].ColumnType())
		assert.Equal(t, column.New[T]().Nullable().ColumnType(), autoColumns[1].ColumnType())
		assert.Equal(t, column.New[T]().Array().ColumnType(), autoColumns[2].ColumnType())
		assert.Equal(t, column.New[T]().Nullable().Array().ColumnType(), autoColumns[3].ColumnType())
		assert.Equal(t, column.New[T]().LowCardinality().ColumnType(), autoColumns[4].ColumnType())
		assert.Equal(t, column.New[T]().Nullable().LowCardinality().ColumnType(), autoColumns[5].ColumnType())
		assert.Equal(t, column.New[T]().LowCardinality().Array().ColumnType(), autoColumns[6].ColumnType())
		assert.Equal(t,
		colDateRead.Row(0).In(time.UTC).Format(time.RFC3339), "1970-01-01T00:00:00Z")
	assert.Equal(t,
		colDate32Read.Row(0).In(time.UTC).Format(time.RFC3339), "1900-01-01T00:00:00Z")
	assert.Equal(t,
		colDateTimeRead.Row(0).In(time.UTC).Format(time.RFC3339), "1970-01-01T00:00:00Z")
	assert.Equal(t,
		colDateTime64Read.Row(0).In(time.UTC).Format(time.RFC3339), "1900-01-01T00:00:00Z")
	require.NoError(t, selectStmt.Err())
}


================================================
FILE: column/error_test.go
================================================
package column_test

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vahid-sohrabloo/chconn/v2"
	"github.com/vahid-sohrabloo/chconn/v2/column"
	"github.com/vahid-sohrabloo/chconn/v2/types"
)

// TestInsertColumnLowCardinalityError injects write failures at successive
// byte positions (numberValid writes succeed, then "timeout") to verify the
// error message produced at each stage of LowCardinality column
// serialization during insert.
func TestInsertColumnLowCardinalityError(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := chconn.ParseConfig(connString)
	require.NoError(t, err)
	c, err := chconn.ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_lc`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_lc (
		col  LowCardinality(String)
	) Engine=Memory`)
	require.NoError(t, err)
	// Number of writes that must succeed before the block data starts.
	startValidReader := 3

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "write header",
			wantErr:     "block: write header block data for column col (timeout)",
			numberValid: startValidReader,
		},
		{
			name:        "write stype",
			wantErr:     "block: write block data for column col (error writing stype: timeout)",
			numberValid: startValidReader + 1,
		},
		{
			name:        "write dictionarySize",
			wantErr:     "block: write block data for column col (error writing dictionarySize: timeout)",
			numberValid: startValidReader + 2,
		},
		{
			name:        "write dictionary",
			wantErr:     "block: write block data for column col (error writing dictionary: timeout)",
			numberValid: startValidReader + 3,
		},
		{
			name:        "write keys len",
			wantErr:     "block: write block data for column col (error writing keys len: timeout)",
			numberValid: startValidReader + 4,
		},
		{
			name:        "write indices",
			wantErr:     "block: write block data for column col (error writing indices: timeout)",
			numberValid: startValidReader + 5,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.WriterFunc = func(w io.Writer) io.Writer {
				return &writerErrorHelper{
					err:         errors.New("timeout"),
					w:           w,
					numberValid: tt.numberValid,
				}
			}
			c, err = chconn.ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.NewString().LowCardinality()
			col.Append("test")
			err = c.Insert(context.Background(),
				"insert into clickhouse_test_insert_column_error_lc (col) VALUES",
				col,
			)
			require.EqualError(t, errors.Unwrap(err), tt.wantErr)
			assert.True(t, c.IsClosed())
		})
	}
}

// TestSelectReadLCError injects read failures at successive positions to
// verify the error reported at each stage of reading a LowCardinality
// column (header fields, dictionary, indices).
func TestSelectReadLCError(t *testing.T) {
	startValidReader := 36

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "read column name length",
			wantErr:     "read column header: read column name length: timeout",
			numberValid: startValidReader,
		},
		{
			name:        "read column name",
			wantErr:     "read column header: read column name: timeout",
			numberValid: startValidReader + 1,
		},
		{
			name:        "read column type length",
			wantErr:     "read column header: read column type length: timeout",
			numberValid: startValidReader + 2,
		},
		{
			name:        "read column type error",
			wantErr:     "read column header: read column type: timeout",
			numberValid: startValidReader + 3,
		},
		{
			name:        "read custom serialization",
			wantErr:     "read column header: read custom serialization: timeout",
			numberValid: startValidReader + 4,
		},
		{
			name:        "error reading keys serialization version",
			wantErr:     "read column header: error reading keys serialization version: timeout",
			numberValid: startValidReader + 5,
		},
		{
			name:        "error reading serialization type",
			wantErr:     "read data \"toLowCardinality(toString(number))\": error reading serialization type: timeout",
			numberValid: startValidReader + 6,
		},
		{
			name:        "error reading dictionary size",
			wantErr:     "read data \"toLowCardinality(toString(number))\": error reading dictionary size: timeout",
			numberValid: startValidReader + 7,
		},
		{
			name:        "error reading dictionary",
			wantErr:     "read data \"toLowCardinality(toString(number))\": error reading dictionary: error read string len: timeout",
			numberValid: startValidReader + 8,
		},
		{
			name:        "error reading string len",
			wantErr:     "read data \"toLowCardinality(toString(number))\": error reading dictionary: error read string len: timeout",
			numberValid: startValidReader + 9,
		},
		{
			name:        "error reading string",
			wantErr:     "read data \"toLowCardinality(toString(number))\": error reading dictionary: error read string: timeout",
			numberValid: startValidReader + 10,
		},
		{
			name:        "error reading indices size",
			wantErr:     "read data \"toLowCardinality(toString(number))\": error reading indices size: timeout",
			numberValid: startValidReader + 11,
		},
		{
			name:        "error reading indices",
			wantErr:     "read data \"toLowCardinality(toString(number))\": error reading indices: read data: timeout",
			numberValid: startValidReader + 12,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config, err := chconn.ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
			require.NoError(t, err)
			config.ReaderFunc = func(r io.Reader) io.Reader {
				return &readErrorHelper{
					err:         errors.New("timeout"),
					r:           r,
					numberValid: tt.numberValid,
				}
			}
			c, err := chconn.ConnectConfig(context.Background(), config)
			assert.NoError(t, err)

			col := column.NewString().LC()
			stmt, err := c.Select(context.Background(), "SELECT toLowCardinality(toString(number)) FROM system.numbers LIMIT 1;", col)
			require.NoError(t, err)
			stmt.Next()
			assert.EqualError(t, stmt.Err(), tt.wantErr)
		})
	}
}

// TestInsertColumnArrayError verifies error reporting for failed writes
// while inserting an Array(UInt8) column.
func TestInsertColumnArrayError(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := chconn.ParseConfig(connString)
	require.NoError(t, err)
	c, err := chconn.ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_array`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_array (
		col  Array(UInt8)
	) Engine=Memory`)
	require.NoError(t, err)
	startValidReader := 3

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "write block data",
			wantErr:     "block: write header block data for column col (timeout)",
			numberValid: startValidReader,
		},
		{
			name:        "write len data",
			wantErr:     "block: write block data for column col (write len data: timeout)",
			numberValid: startValidReader + 1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.WriterFunc = func(w io.Writer) io.Writer {
				return &writerErrorHelper{
					err:         errors.New("timeout"),
					w:           w,
					numberValid: tt.numberValid,
				}
			}
			c, err = chconn.ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.New[uint8]().Array()
			col.Append([]uint8{1})
			err = c.Insert(context.Background(),
				"insert into clickhouse_test_insert_column_error_array (col) VALUES",
				col,
			)
			require.EqualError(t, errors.Unwrap(err), tt.wantErr)
			assert.True(t, c.IsClosed())
		})
	}
}

// TestSelectReadArrayError verifies error reporting for failed reads of an
// Array column (offsets, then data).
func TestSelectReadArrayError(t *testing.T) {
	startValidReader := 36

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "read column name length",
			wantErr:     "read column header: read column name length: timeout",
			numberValid: startValidReader,
		},
		{
			name:        "read column name",
			wantErr:     "read column header: read column name: timeout",
			numberValid: startValidReader + 1,
		},
		{
			name:        "read column type length",
			wantErr:     "read column header: read column type length: timeout",
			numberValid: startValidReader + 2,
		},
		{
			name:        "read column type error",
			wantErr:     "read column header: read column type: timeout",
			numberValid: startValidReader + 3,
		},
		{
			name:        "read custom serialization",
			wantErr:     "read column header: read custom serialization: timeout",
			numberValid: startValidReader + 4,
		},
		{
			name:        "read offset error",
			wantErr:     "read data \"array(number, number)\": array: read offset column: read data: timeout",
			numberValid: startValidReader + 5,
		},
		{
			name:        "read data column",
			wantErr:     "read data \"array(number, number)\": array: read data column: read data: timeout",
			numberValid: startValidReader + 6,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config, err := chconn.ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
			require.NoError(t, err)
			config.ReaderFunc = func(r io.Reader) io.Reader {
				return &readErrorHelper{
					err:         errors.New("timeout"),
					r:           r,
					numberValid: tt.numberValid,
				}
			}
			c, err := chconn.ConnectConfig(context.Background(), config)
			assert.NoError(t, err)

			col := column.New[uint64]().Array()
			stmt, err := c.Select(context.Background(), "SELECT array(number,number) FROM system.numbers LIMIT 1;", col)
			require.NoError(t, err)
			stmt.Next()
			assert.EqualError(t, stmt.Err(), tt.wantErr)
		})
	}
}

// TestInsertColumnArrayNullable verifies error reporting for failed writes
// while inserting an Array(Nullable(UInt8)) column.
func TestInsertColumnArrayNullable(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := chconn.ParseConfig(connString)
	require.NoError(t, err)
	c, err := chconn.ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_array_nullable`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_array_nullable (
		col  Array(Nullable(UInt8))
	) Engine=Memory`)
	require.NoError(t, err)
	startValidReader := 3

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "write block data",
			wantErr:     "block: write header block data for column col (timeout)",
			numberValid: startValidReader,
		},
		{
			name:        "write len data",
			wantErr:     "block: write block data for column col (write len data: timeout)",
			numberValid: startValidReader + 1,
		},
		{
			name:        "write nullable data",
			wantErr:     "block: write block data for column col (write nullable data: timeout)",
			numberValid: startValidReader + 2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.WriterFunc = func(w io.Writer) io.Writer {
				return &writerErrorHelper{
					err:         errors.New("timeout"),
					w:           w,
					numberValid: tt.numberValid,
				}
			}
			c, err = chconn.ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.New[uint8]().Nullable().Array()
			col.Append([]uint8{1})
			err = c.Insert(context.Background(),
				"insert into clickhouse_test_insert_column_error_array_nullable (col) VALUES",
				col,
			)
			require.EqualError(t, errors.Unwrap(err), tt.wantErr)
			assert.True(t, c.IsClosed())
		})
	}
}

// TestSelectReadArrayNullableError verifies error reporting for failed
// reads of an Array(Nullable(UInt64)) column.
func TestSelectReadArrayNullableError(t *testing.T) {
	startValidReader := 39

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "read column type error",
			wantErr:     "read column header: read column type: timeout",
			numberValid: startValidReader,
		},
		{
			name:        "read custom serialization",
			wantErr:     "read column header: read custom serialization: timeout",
			numberValid: startValidReader + 1,
		},
		{
			name:        "read offset error",
			wantErr:     "read data \"array(toNullable(number))\": array: read offset column: read data: timeout",
			numberValid: startValidReader + 2,
		},
		{
			name:        "read data column",
			wantErr:     "read data \"array(toNullable(number))\": array: read data column: read nullable data: read nullable data: timeout",
			numberValid: startValidReader + 3,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config, err := chconn.ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
			require.NoError(t, err)
			config.ReaderFunc = func(r io.Reader) io.Reader {
				return &readErrorHelper{
					err:         errors.New("timeout"),
					r:           r,
					numberValid: tt.numberValid,
				}
			}
			c, err := chconn.ConnectConfig(context.Background(), config)
			assert.NoError(t, err)

			col := column.New[uint64]().Nullable().Array()
			stmt, err := c.Select(context.Background(), "SELECT array(toNullable(number)) FROM system.numbers LIMIT 1;", col)
			require.NoError(t, err)
			stmt.Next()
			assert.EqualError(t, stmt.Err(), tt.wantErr)
		})
	}
}

// TestSelectReadNullableError verifies error reporting for failed reads of
// a Nullable(UInt64) column.
func TestSelectReadNullableError(t *testing.T) {
	startValidReader := 39

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "read column type error",
			wantErr:     "read column header: read column type: timeout",
			numberValid: startValidReader,
		},
		{
			name:        "read custom serialization",
			wantErr:     "read column header: read custom serialization: timeout",
			numberValid: startValidReader + 1,
		},
		{
			name:        "read nullable data",
			wantErr:     "read data \"toNullable(number)\": read nullable data: read nullable data: timeout",
			numberValid: startValidReader + 2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config, err := chconn.ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
			require.NoError(t, err)
			config.ReaderFunc = func(r io.Reader) io.Reader {
				return &readErrorHelper{
					err:         errors.New("timeout"),
					r:           r,
					numberValid: tt.numberValid,
				}
			}
			c, err := chconn.ConnectConfig(context.Background(), config)
			assert.NoError(t, err)

			col := column.New[uint64]().Nullable()
			stmt, err := c.Select(context.Background(), "SELECT toNullable(number) FROM system.numbers LIMIT 1;", col)
			require.NoError(t, err)
			stmt.Next()
			assert.EqualError(t, stmt.Err(), tt.wantErr)
		})
	}
}

// TestInsertColumnArray2Error verifies error reporting for failed writes
// while inserting a two-level Array(Array(UInt8)) column.
func TestInsertColumnArray2Error(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := chconn.ParseConfig(connString)
	require.NoError(t, err)
	c, err := chconn.ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_array2`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_array2 (
		col  Array(Array(UInt8))
	) Engine=Memory`)
	require.NoError(t, err)
	startValidReader := 3

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "write block data",
			wantErr:     "block: write header block data for column col (timeout)",
			numberValid: startValidReader,
		},
		{
			name:        "write len data",
			wantErr:     "block: write block data for column col (write len data: timeout)",
			numberValid: startValidReader + 1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.WriterFunc = func(w io.Writer) io.Writer {
				return &writerErrorHelper{
					err:         errors.New("timeout"),
					w:           w,
					numberValid: tt.numberValid,
				}
			}
			c, err = chconn.ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.New[uint8]().Array().Array()
			col.Append([][]uint8{{1}})
			err = c.Insert(context.Background(),
				"insert into clickhouse_test_insert_column_error_array2 (col) VALUES",
				col,
			)
			require.EqualError(t, errors.Unwrap(err), tt.wantErr)
			assert.True(t, c.IsClosed())
		})
	}
}

// TestSelectReadArray2Error verifies error reporting for failed reads of a
// two-level Array(Array(UInt64)) column.
func TestSelectReadArray2Error(t *testing.T) {
	startValidReader := 36

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "read column name length",
			wantErr:     "read column header: read column name length: timeout",
			numberValid: startValidReader,
		},
		{
			name:        "read column name",
			wantErr:     "read column header: read column name: timeout",
			numberValid: startValidReader + 1,
		},
		{
			name:        "read column type length",
			wantErr:     "read column header: read column type length: timeout",
			numberValid: startValidReader + 2,
		},
		{
			name:        "read column type error",
			wantErr:     "read column header: read column type: timeout",
			numberValid: startValidReader + 3,
		},
		{
			name:        "read custom serialization",
			wantErr:     "read column header: read custom serialization: timeout",
			numberValid: startValidReader + 4,
		},
		{
			name:        "read offset error",
			wantErr:     "read data \"array(array(number, number))\": array: read offset column: read data: timeout",
			numberValid: startValidReader + 5,
		},
		{
			name:        "read data column",
			wantErr:     "read data \"array(array(number, number))\": array: read data column: array: read offset column: read data: timeout",
			numberValid: startValidReader + 6,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config, err := chconn.ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
			require.NoError(t, err)
			config.ReaderFunc = func(r io.Reader) io.Reader {
				return &readErrorHelper{
					err:         errors.New("timeout"),
					r:           r,
					numberValid: tt.numberValid,
				}
			}
			c, err := chconn.ConnectConfig(context.Background(), config)
			assert.NoError(t, err)

			col := column.New[uint64]().Array().Array()
			stmt, err := c.Select(context.Background(), "SELECT array(array(number,number)) FROM system.numbers LIMIT 1;", col)
			require.NoError(t, err)
			stmt.Next()
			assert.EqualError(t, stmt.Err(), tt.wantErr)
		})
	}
}

// TestInsertColumnArray3Error verifies error reporting for failed writes
// while inserting a three-level Array(Array(Array(UInt8))) column.
// NOTE(review): this function continues past the end of this chunk.
func TestInsertColumnArray3Error(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := chconn.ParseConfig(connString)
	require.NoError(t, err)
	c, err := chconn.ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_array3`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_array3 (
		col  Array(Array(Array(UInt8)))
	) Engine=Memory`)
	require.NoError(t, err)
	startValidReader := 3

	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "write block data",
			wantErr:     "block: write header block data for column col (timeout)",
			numberValid: startValidReader,
		},
		{
			name:        "write len data",
			wantErr:     "block: write block data for column col (write len data: timeout)",
			numberValid: startValidReader + 1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.WriterFunc = func(w io.Writer) io.Writer {
				return &writerErrorHelper{
					err:         errors.New("timeout"),
					w:           w,
					numberValid: tt.numberValid,
				}
			}
			c, err = chconn.ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.New[uint8]().Array().Array().Array()
			col.Append([][][]uint8{{{1}}})
			err = c.Insert(context.Background(),
				"insert into clickhouse_test_insert_column_error_array3 (col) VALUES",
				col,
			)
require.EqualError(t, errors.Unwrap(err), tt.wantErr) assert.True(t, c.IsClosed()) }) } } func TestSelectReadArray3Error(t *testing.T) { startValidReader := 36 tests := []struct { name string wantErr string numberValid int }{ { name: "read column header: read column name length", wantErr: "read column header: read column name length: timeout", numberValid: startValidReader, }, { name: "read column header: read column name", wantErr: "read column header: read column name: timeout", numberValid: startValidReader + 1, }, { name: "read column header: read column type length", wantErr: "read column header: read column type length: timeout", numberValid: startValidReader + 2, }, { name: "read column header: read column type error", wantErr: "read column header: read column type: timeout", numberValid: startValidReader + 3, }, { name: "read custom serialization", wantErr: "read column header: read custom serialization: timeout", numberValid: startValidReader + 4, }, { name: "read offset error", wantErr: "read data \"array(array(array(number, number)))\": array: read offset column: read data: timeout", numberValid: startValidReader + 5, }, { name: "read data column", wantErr: "read data \"array(array(array(number, number)))\": array: read data column: array: read offset column: read data: timeout", numberValid: startValidReader + 6, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config, err := chconn.ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: tt.numberValid, } } c, err := chconn.ConnectConfig(context.Background(), config) assert.NoError(t, err) col := column.New[uint64]().Array().Array().Array() stmt, err := c.Select(context.Background(), "SELECT array(array(array(number,number))) FROM system.numbers LIMIT 1;", col) require.NoError(t, err) stmt.Next() assert.EqualError(t, stmt.Err(), tt.wantErr) }) } 
} func TestInsertColumnTupleError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := chconn.ParseConfig(connString) require.NoError(t, err) c, err := chconn.ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_tuple`) require.NoError(t, err) err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_tuple ( col Tuple(String) ) Engine=Memory`) require.NoError(t, err) startValidReader := 3 tests := []struct { name string wantErr string numberValid int }{ { name: "write header", wantErr: "block: write header block data for column col (timeout)", numberValid: startValidReader, }, { name: "write columns", wantErr: "block: write block data for column col (tuple: write column index 0: timeout)", numberValid: startValidReader + 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config.WriterFunc = func(w io.Writer) io.Writer { return &writerErrorHelper{ err: errors.New("timeout"), w: w, numberValid: tt.numberValid, } } c, err = chconn.ConnectConfig(context.Background(), config) require.NoError(t, err) col := column.NewString() colTuple := column.NewTuple(col) col.Append("test") err = c.Insert(context.Background(), "insert into clickhouse_test_insert_column_error_tuple (col) VALUES", colTuple, ) require.EqualError(t, errors.Unwrap(err), tt.wantErr) assert.True(t, c.IsClosed()) }) } } func TestSelectReadTupleError(t *testing.T) { startValidReader := 36 tests := []struct { name string wantErr string numberValid int lc bool }{ { name: "read column name length", wantErr: "read column header: read column name length: timeout", numberValid: startValidReader, }, { name: "read column name", wantErr: "read column header: read column name: timeout", numberValid: startValidReader + 1, }, { name: "read column type length", wantErr: "read column header: read column type length: timeout", 
numberValid: startValidReader + 2, }, { name: "read column type error", wantErr: "read column header: read column type: timeout", numberValid: startValidReader + 3, }, { name: "read custom serialization", wantErr: "read column header: read custom serialization: timeout", numberValid: startValidReader + 4, lc: true, }, { name: "read sub column header", wantErr: "read column header: tuple: read column header index 0: error reading keys serialization version: timeout", numberValid: startValidReader + 5, lc: true, }, { name: "read column index 2", wantErr: "read data \"tuple(1)\": tuple: read column index 0: read data: timeout", numberValid: startValidReader + 5, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config, err := chconn.ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: tt.numberValid, } } c, err := chconn.ConnectConfig(context.Background(), config) assert.NoError(t, err) // we can't use tupp[le(toLowCardinality('1')) so we use this tricky way // https://github.com/ClickHouse/ClickHouse/issues/39109 var col column.ColumnBasic if tt.lc { col = column.New[uint64]().LC() } else { col = column.New[uint8]() } colTuple := column.NewTuple(col) stmt, err := c.Select(context.Background(), "SELECT tuple(1);", colTuple) require.NoError(t, err) stmt.Next() assert.EqualError(t, stmt.Err(), tt.wantErr) }) } } func TestInsertColumnMapError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := chconn.ParseConfig(connString) require.NoError(t, err) c, err := chconn.ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_map`) require.NoError(t, err) err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_map ( col Map(UInt8,UInt8) ) 
Engine=Memory`) require.NoError(t, err) startValidReader := 3 tests := []struct { name string wantErr string numberValid int }{ { name: "write block data", wantErr: "block: write header block data for column col (timeout)", numberValid: startValidReader, }, { name: "write len data", wantErr: "block: write block data for column col (write len data: timeout)", numberValid: startValidReader + 1, }, { name: "write key data", wantErr: "block: write block data for column col (write key data: timeout)", numberValid: startValidReader + 2, }, { name: "write value data", wantErr: "block: write block data for column col (write value data: timeout)", numberValid: startValidReader + 3, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config.WriterFunc = func(w io.Writer) io.Writer { return &writerErrorHelper{ err: errors.New("timeout"), w: w, numberValid: tt.numberValid, } } c, err = chconn.ConnectConfig(context.Background(), config) require.NoError(t, err) colValue := column.New[uint8]() col := column.NewMap[uint8, uint8](column.New[uint8](), colValue) col.Append(map[uint8]uint8{1: 1}) err = c.Insert(context.Background(), "insert into clickhouse_test_insert_column_error_map (col) VALUES", col, ) require.EqualError(t, errors.Unwrap(err), tt.wantErr) assert.True(t, c.IsClosed()) }) } } func TestSelectReadMapError(t *testing.T) { startValidReader := 36 tests := []struct { name string wantErr string numberValid int lc bool }{ { name: "read column name length", wantErr: "read column header: read column name length: timeout", numberValid: startValidReader, }, { name: "read column name", wantErr: "read column header: read column name: timeout", numberValid: startValidReader + 1, }, { name: "read column type length", wantErr: "read column header: read column type length: timeout", numberValid: startValidReader + 2, }, { name: "read column type error", wantErr: "read column header: read column type: timeout", numberValid: startValidReader + 3, }, { name: "read custom 
serialization", wantErr: "read column header: read custom serialization: timeout", numberValid: startValidReader + 4, lc: true, }, { name: "read value header", wantErr: "read column header: map: read key header: error reading keys serialization version: timeout", numberValid: startValidReader + 5, lc: true, }, { name: "read value header", wantErr: "read column header: map: read value header: error reading keys serialization version: timeout", numberValid: startValidReader + 6, lc: true, }, { name: "read offset error", wantErr: "read data \"map(number, number)\": map: read offset column: read data: timeout", numberValid: startValidReader + 5, }, { name: "read key column", wantErr: "read data \"map(number, number)\": map: read key column: read data: timeout", numberValid: startValidReader + 6, }, { name: "read value column", wantErr: "read data \"map(number, number)\": map: read value column: read data: timeout", numberValid: startValidReader + 7, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config, err := chconn.ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: tt.numberValid, } } c, err := chconn.ConnectConfig(context.Background(), config) assert.NoError(t, err) var colKey column.Column[uint64] var colValue column.Column[uint64] if tt.lc { colKey = column.New[uint64]().LC() colValue = column.New[uint64]().LC() } else { colKey = column.New[uint64]() colValue = column.New[uint64]() } col := column.NewMap(colKey, colValue) stmt, err := c.Select(context.Background(), "SELECT map(number,number) FROM system.numbers LIMIT 1;", col) require.NoError(t, err) stmt.Next() assert.EqualError(t, stmt.Err(), tt.wantErr) }) } } func TestInvalidType(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := chconn.ParseConfig(connString) require.NoError(t, err) c, err := 
chconn.ConnectConfig(context.Background(), config) require.NoError(t, err) require.NoError(t, err) tests := []struct { name string columnSelector string wantErr string column column.ColumnBasic }{ { name: "1 byte invalid", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: Int8|UInt8|Enum8", column: column.New[int8](), }, { name: "2 bytes invalid", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: Int16|UInt16|Enum16|Date", column: column.New[int16](), }, { name: "4 bytes invalid", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: Int32|UInt32|Float32|Decimal32|Date32|DateTime|IPv4", column: column.New[int32](), }, { name: "8 bytes invalid", columnSelector: "toInt32(number)", wantErr: "mismatch column type: ClickHouse Type: Int32, column types: Int64|UInt64|Float64|Decimal64|DateTime64", column: column.New[int64](), }, { name: "16 bytes invalid", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: Int128|UInt128|Decimal128|IPv6|UUID", column: column.New[types.Int128](), }, { name: "32 bytes invalid", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: Int256|UInt256|Decimal256", column: column.New[types.Int256](), }, { name: "string invalid", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: String", column: column.NewString(), }, { name: "fixed string invalid", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: T(20 bytes size)", column: column.New[[20]byte](), }, { name: "fixed string invalid size", columnSelector: "toFixedString(toString(number),2)", wantErr: "mismatch column type: ClickHouse Type: FixedString(2), column types: T(20 bytes size)", column: column.New[[20]byte](), }, { name: "invalid nullable", columnSelector: "number", wantErr: 
"mismatch column type: ClickHouse Type: UInt64, column types: Nullable(Int64|UInt64|Float64|Decimal64|DateTime64)", column: column.New[int64]().Nullable(), }, { name: "invalid nullable inside", columnSelector: "toNullable(number)", wantErr: "mismatch column type: ClickHouse Type: Nullable(UInt64), column types: Nullable(Int8|UInt8|Enum8)", column: column.New[int8]().Nullable(), }, { name: "invalid LowCardinality", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: LowCardinality(Int64|UInt64|Float64|Decimal64|DateTime64)", column: column.New[int64]().LC(), }, { name: "invalid LowCardinality inside", columnSelector: "toLowCardinality(number)", wantErr: "mismatch column type: ClickHouse Type: LowCardinality(UInt64), column types: LowCardinality(Int8|UInt8|Enum8)", column: column.New[int8]().LC(), }, { name: "invalid nullable LowCardinality", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: " + "LowCardinality(Nullable(Int64|UInt64|Float64|Decimal64|DateTime64))", column: column.New[int64]().Nullable().LC(), }, { name: "invalid nullable LowCardinality inside", columnSelector: "toLowCardinality(toNullable(number))", wantErr: "mismatch column type: ClickHouse Type: LowCardinality(Nullable(UInt64)), column types: " + "LowCardinality(Int8|UInt8|Enum8)", column: column.New[int8]().LC(), }, { name: "invalid array", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: Array(Int64|UInt64|Float64|Decimal64|DateTime64)", column: column.New[int64]().Array(), }, { name: "invalid array inside", columnSelector: "array(number)", wantErr: "mismatch column type: ClickHouse Type: Array(UInt64), column types: Array(Int8|UInt8|Enum8)", column: column.New[int8]().Array(), }, { name: "invalid array nullable", columnSelector: "array(number)", wantErr: "mismatch column type: ClickHouse Type: Array(UInt64), column types: Array(Nullable(Int8|UInt8|Enum8))", 
column: column.New[int8]().Nullable().Array(), }, { name: "invalid map", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: Map(Int8|UInt8|Enum8, Int8|UInt8|Enum8)", column: column.NewMap[int8, int8](column.New[int8](), column.New[int8]()), }, { name: "invalid map key", columnSelector: "map(number,number)", wantErr: "mismatch column type: ClickHouse Type: Map(UInt64, UInt64), column types: Map(Int8|UInt8|Enum8, Int8|UInt8|Enum8)", column: column.NewMap[int8, int8](column.New[int8](), column.New[int8]()), }, { name: "invalid map value", columnSelector: "map(number,number)", wantErr: "mismatch column type: ClickHouse Type: Map(UInt64, UInt64), column types: " + "Map(Int64|UInt64|Float64|Decimal64|DateTime64, Int8|UInt8|Enum8)", column: column.NewMap[int64, int8](column.New[int64](), column.New[int8]()), }, { name: "invalid tuple", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: " + "Tuple(Int64|UInt64|Float64|Decimal64|DateTime64,Int8|UInt8|Enum8)", column: column.NewTuple(column.New[int64](), column.New[int8]()), }, { name: "invalid tuple inside", columnSelector: "tuple(number)", wantErr: "mismatch column type: ClickHouse Type: Tuple(UInt64), column types: Tuple(Int8|UInt8|Enum8)", column: column.NewTuple(column.New[int8]()), }, { name: "invalid tuple columns", columnSelector: "tuple(number)", wantErr: "columns number for tuple(number) (Tuple(UInt64)) is not equal to tuple columns number: 1 != 2", column: column.NewTuple(column.New[uint64](), column.New[uint64]()), }, { name: "date time with timezone", columnSelector: "toDateTime('2010-01-01', 'America/New_York') + number", wantErr: "mismatch column type: ClickHouse Type: DateTime('America/New_York'), column types: " + "Int64|UInt64|Float64|Decimal64|DateTime64", column: column.New[uint64](), }, { name: "date time 64 with timezone", columnSelector: "toDateTime64('2010-01-01', 3, 'America/New_York') + number", wantErr: 
"mismatch column type: ClickHouse Type: DateTime64(3, 'America/New_York'), column types: " + "Int32|UInt32|Float32|Decimal32|Date32|DateTime|IPv4", column: column.New[uint32](), }, { name: "Decimal", columnSelector: "toDecimal32(number,3)", wantErr: "mismatch column type: ClickHouse Type: Decimal(9, 3), column types: Int64|UInt64|Float64|Decimal64|DateTime64", column: column.New[uint64](), }, { name: "Array2", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: Array(Array(Int64|UInt64|Float64|Decimal64|DateTime64))", column: column.New[uint64]().Array().Array(), }, { name: "Array2 inside", columnSelector: "array(number,number)", wantErr: "mismatch column type: ClickHouse Type: Array(UInt64), column types:" + " Array(Array(Int64|UInt64|Float64|Decimal64|DateTime64))", column: column.New[uint64]().Array().Array(), }, { name: "Array3", columnSelector: "number", wantErr: "mismatch column type: ClickHouse Type: UInt64, column types: " + "Array(Array(Array(Int64|UInt64|Float64|Decimal64|DateTime64)))", column: column.New[uint64]().Array().Array().Array(), }, { name: "Array3 inside", columnSelector: "array(number,number)", wantErr: "mismatch column type: ClickHouse Type: Array(UInt64), column types: " + "Array(Array(Array(Int64|UInt64|Float64|Decimal64|DateTime64)))", column: column.New[uint64]().Array().Array().Array(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c, err = chconn.ConnectConfig(context.Background(), config) require.NoError(t, err) stmt, err := c.Select(context.Background(), fmt.Sprintf("SELECT %s FROM system.numbers limit 1", tt.columnSelector), tt.column, ) require.NoError(t, err) for stmt.Next() { } require.EqualError(t, errors.Unwrap(stmt.Err()), tt.wantErr) assert.True(t, c.IsClosed()) }) } } func TestMapInvalidColumnNumber(t *testing.T) { m := column.NewMap[uint8, uint8](column.New[uint8](), column.New[uint8]()) m.SetType([]byte("Map(UInt8,UInt8,UInt8)")) err := m.Validate() 
assert.Equal(t, err.Error(), "columns number is not equal to map columns number: 3 != 2") } func TestFixedStringInvalidType(t *testing.T) { m := column.New[[20]byte]() m.SetType([]byte("FixedString(a)")) err := m.Validate() assert.Equal(t, err.Error(), "invalid size: strconv.Atoi: parsing \"a\": invalid syntax") } func TestEnum8InvalidType(t *testing.T) { m := column.New[int16]() m.SetType([]byte("Enum8()")) err := m.Validate() assert.Equal(t, err.Error(), "mismatch column type: ClickHouse Type: Enum8(), column types: Int16|UInt16|Enum16|Date") } func TestEnum16InvalidType(t *testing.T) { m := column.New[int32]() m.SetType([]byte("Enum16()")) err := m.Validate() assert.Equal(t, err.Error(), "mismatch column type: ClickHouse Type: Enum16(), "+ "column types: Int32|UInt32|Float32|Decimal32|Date32|DateTime|IPv4") } func TestDecimalInvalidType(t *testing.T) { m := column.New[[20]byte]() m.SetType([]byte("Decimal()")) err := m.Validate() assert.Equal(t, err.Error(), "invalid decimal type (should have precision and scale): Decimal()") m.SetType([]byte("Decimal(a, a)")) err = m.Validate() assert.Equal(t, err.Error(), "invalid precision: strconv.Atoi: parsing \"a\": invalid syntax") m.SetType([]byte("Decimal(3, a)")) err = m.Validate() assert.Equal(t, err.Error(), "invalid scale: strconv.Atoi: parsing \"a\": invalid syntax") m.SetType([]byte("Decimal(200, 3)")) err = m.Validate() assert.Equal(t, err.Error(), "invalid precision: 200. 
it should be between 1 and 76") } func TestInvalidDate(t *testing.T) { m := column.NewDate[types.DateTime]() m.SetType([]byte("DateTime('InvalidTimeZone')")) err := m.Validate() assert.NoError(t, err) assert.Equal(t, m.Location(), time.Local) } func TestInvalidSimpleAggregateFunction(t *testing.T) { m := column.New[int]() m.SetType([]byte("SimpleAggregateFunction(sum))")) assert.Panics(t, func() { m.Validate() }) } ================================================ FILE: column/errors.go ================================================ package column import ( "fmt" ) type ErrInvalidType struct { column ColumnBasic ColumnType string } func (e ErrInvalidType) Error() string { return fmt.Sprintf("mismatch column type: ClickHouse Type: %s, column types: %s", string(e.column.Type()), e.column.ColumnType(), ) } ================================================ FILE: column/helper_test.go ================================================ package column_test import ( "io" ) type readErrorHelper struct { numberValid int err error r io.Reader count int } func (r *readErrorHelper) Read(p []byte) (int, error) { r.count++ if r.count > r.numberValid { return 0, r.err } return r.r.Read(p) } type writerErrorHelper struct { numberValid int err error w io.Writer count int } func (w *writerErrorHelper) Write(p []byte) (int, error) { w.count++ if w.count > w.numberValid { return 0, w.err } return w.w.Write(p) } ================================================ FILE: column/lc.go ================================================ package column import ( "fmt" "io" "math" "strings" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) const ( // Need to read additional keys. // Additional keys are stored before indexes as value N and N keys // after them. hasAdditionalKeysBit = 1 << 9 // Need to update dictionary. // It means that previous granule has different dictionary. 
	needUpdateDictionary = 1 << 10

	// serializationType is the flag word written before every dictionary on
	// insert: keys are sent inline (additional keys) and the dictionary is
	// resent for each granule.
	serializationType = hasAdditionalKeysBit | needUpdateDictionary
)

// LowCardinality use for LowCardinality ClickHouse DataTypes
type LowCardinality[T comparable] struct {
	column
	numRow         int            // number of rows in the current block
	dictColumn     Column[T]      // dictionary (unique values) column
	indices        indicesColumnI // key-indices column; integer width depends on dictionary size
	oldIndicesType int            // width code of the last indices column, kept for reuse
	scratch        [8]byte        // scratch buffer for little-endian uint64 writes
	readedKeys     []int          // keys decoded from the server for the current block
	readedDict     []T            // dictionary decoded from the server for the current block
	dict           map[T]int      // value -> key lookup used while appending for insert
	keys           []int          // keys buffered for insert
	nullable       bool           // true when this is LowCardinality(Nullable(T))
}

// NewLowCardinality return new LC for LowCardinality ClickHouse DataTypes
func NewLowCardinality[T comparable](dictColumn Column[T]) *LowCardinality[T] {
	return NewLC(dictColumn)
}

// NewLC return new LC for LowCardinality ClickHouse DataTypes
func NewLC[T comparable](dictColumn Column[T]) *LowCardinality[T] {
	l := &LowCardinality[T]{
		dict:       make(map[T]int),
		dictColumn: dictColumn,
	}
	return l
}

// Data get all the data in current block as a slice.
//
// NOTE: the return slice only valid in current block, if you want to use it after, you should copy it. or use Read
func (c *LowCardinality[T]) Data() []T {
	// materialize each row by looking its key up in the decoded dictionary.
	result := make([]T, c.NumRow())
	for i, k := range c.readedKeys {
		result[i] = c.readedDict[k]
	}
	return result
}

// Read reads all the data in current block and append to the input.
func (c *LowCardinality[T]) Read(value []T) []T {
	for _, k := range c.readedKeys {
		value = append(value, c.readedDict[k])
	}
	return value
}

// Row return the value of given row.
// NOTE: Row number start from zero
func (c *LowCardinality[T]) Row(row int) T {
	return c.readedDict[c.readedKeys[row]]
}

// Append value for insert
func (c *LowCardinality[T]) Append(v ...T) {
	for _, v := range v {
		key, ok := c.dict[v]
		if !ok {
			// first occurrence of this value: assign the next key and
			// store the value in the dictionary column.
			key = len(c.dict)
			c.dict[v] = key
			c.dictColumn.Append(v)
		}
		c.keys = append(c.keys, key)
	}
	c.numRow += len(v)
}

// Dicts get dictionary data
// each key is an index of the dictionary
func (c *LowCardinality[T]) Dicts() []T {
	return c.readedDict
}

// Keys get keys of data
// each key is an index of the dictionary
func (c *LowCardinality[T]) Keys() []int {
	return c.readedKeys
}

// NumRow return number of row for this block
func (c *LowCardinality[T]) NumRow() int {
	return c.numRow
}

// Array return a Array type for this column
func (c *LowCardinality[T]) Array() *Array[T] {
	return NewArray[T](c)
}

// Reset all statuses and buffered data
//
// After each reading, the reading data does not need to be reset. It will be automatically reset.
//
// When inserting, buffers are reset only after the operation is successful.
// If an error occurs, you can safely call insert again.
func (c *LowCardinality[T]) Reset() {
	c.dictColumn.Reset()
	c.dict = make(map[T]int)
	c.keys = c.keys[:0]
	c.readedDict = c.readedDict[:0]
	c.readedKeys = c.readedKeys[:0]
	c.numRow = 0
}

// SetWriteBufferSize set write buffer (number of rows)
// this buffer only used for writing.
// By setting this buffer, you will avoid allocating the memory several times.
func (c *LowCardinality[T]) SetWriteBufferSize(row int) {
	if cap(c.keys) < row {
		c.keys = make([]int, 0, row)
	}
}

// ReadRaw read raw data from the reader.
it runs automatically func (c *LowCardinality[T]) ReadRaw(num int, r *readerwriter.Reader) error { c.r = r c.numRow = num if c.numRow == 0 { c.indices = newIndicesColumn[uint8]() c.readedDict = c.readedDict[:0] c.readedKeys = c.readedKeys[:0] // to reset nullable dictionary return c.dictColumn.ReadRaw(0, r) } serializationType, err := c.r.Uint64() if err != nil { return fmt.Errorf("error reading serialization type: %w", err) } intType := int(serializationType & 0xf) dictionarySize, err := c.r.Uint64() if err != nil { return fmt.Errorf("error reading dictionary size: %w", err) } err = c.dictColumn.ReadRaw(int(dictionarySize), r) if err != nil { return fmt.Errorf("error reading dictionary: %w", err) } indicesSize, err := r.Uint64() c.numRow = int(indicesSize) if err != nil { return fmt.Errorf("error reading indices size: %w", err) } if c.indices == nil || c.oldIndicesType != intType { c.indices = getLCIndicate(intType) c.oldIndicesType = intType } err = c.indices.ReadRaw(c.numRow, c.r) if err != nil { return fmt.Errorf("error reading indices: %w", err) } c.readedDict = c.readedDict[:0] c.readedKeys = c.readedKeys[:0] c.readedDict = c.dictColumn.Read(c.readedDict) c.indices.readInt(&c.readedKeys) return nil } // HeaderReader writes header data to writer // it uses internally func (c *LowCardinality[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error { c.r = r err := c.readColumn(readColumn, revision) if err != nil { return err } // ready KeysSerializationVersion. 
_, err = r.Uint64() if err != nil { return fmt.Errorf("error reading keys serialization version: %w", err) } if !c.nullable { return c.dictColumn.HeaderReader(r, false, revision) } return c.dictColumn.HeaderReader(r, false, revision) } func (c *LowCardinality[T]) ColumnType() string { if !c.nullable { return strings.ReplaceAll(helper.LowCardinalityTypeStr, "", c.dictColumn.ColumnType()) } return strings.ReplaceAll(helper.LowCardinalityNullableTypeStr, "", c.dictColumn.ColumnType()) } func (c *LowCardinality[T]) Validate() error { chType := helper.FilterSimpleAggregate(c.chType) if !c.nullable { if !helper.IsLowCardinality(chType) { return &ErrInvalidType{ column: c, } } c.dictColumn.SetType(chType[helper.LenLowCardinalityStr : len(chType)-1]) } else { if !helper.IsNullableLowCardinality(chType) { return &ErrInvalidType{ column: c, } } c.dictColumn.SetType(chType[helper.LenLowCardinalityNullableStr : len(chType)-2]) } if err := c.dictColumn.Validate(); err != nil { return &ErrInvalidType{ column: c, } } return nil } // WriteTo write data to ClickHouse. // it uses internally func (c *LowCardinality[T]) WriteTo(w io.Writer) (int64, error) { dictionarySize := c.dictColumn.NumRow() // Do not write anything for empty column. // May happen while writing empty arrays. 
if dictionarySize == 0 || (c.nullable && dictionarySize == 1) { return 0, nil } var n int64 intType := int(math.Log2(float64(dictionarySize)) / 8) stype := serializationType | intType nw, err := c.writeUint64(w, uint64(stype)) n += int64(nw) if err != nil { return n, fmt.Errorf("error writing stype: %w", err) } nw, err = c.writeUint64(w, uint64(dictionarySize)) n += int64(nw) if err != nil { return n, fmt.Errorf("error writing dictionarySize: %w", err) } nwd, err := c.dictColumn.WriteTo(w) n += nwd if err != nil { return n, fmt.Errorf("error writing dictionary: %w", err) } nw, err = c.writeUint64(w, uint64(len(c.keys))) n += int64(nw) if err != nil { return n, fmt.Errorf("error writing keys len: %w", err) } if c.indices == nil || c.oldIndicesType != intType { c.indices = getLCIndicate(intType) c.oldIndicesType = intType } else { c.indices.Reset() } c.indices = getLCIndicate(intType) c.indices.appendInts(c.keys) nwt, err := c.indices.WriteTo(w) if err != nil { return n, fmt.Errorf("error writing indices: %w", err) } return n + nwt, err } // HeaderWriter reader header data // it uses internally func (c *LowCardinality[T]) HeaderWriter(w *readerwriter.Writer) { // write KeysSerializationVersion. 
for more information see clickhouse docs w.Int64(1) } func getLCIndicate(intType int) indicesColumnI { switch intType { case 0: return newIndicesColumn[uint8]() case 1: return newIndicesColumn[uint16]() case 2: return newIndicesColumn[uint32]() case 3: panic("cannot handle this amount of data for lc") } // this should never happen unless something wrong with the code panic("cannot not find indicate type") } func (c *LowCardinality[T]) writeUint64(w io.Writer, v uint64) (int, error) { c.scratch[0] = byte(v) c.scratch[1] = byte(v >> 8) c.scratch[2] = byte(v >> 16) c.scratch[3] = byte(v >> 24) c.scratch[4] = byte(v >> 32) c.scratch[5] = byte(v >> 40) c.scratch[6] = byte(v >> 48) c.scratch[7] = byte(v >> 56) return w.Write(c.scratch[:8]) } func (c *LowCardinality[T]) elem(arrayLevel int) ColumnBasic { if arrayLevel > 0 { return c.Array().elem(arrayLevel - 1) } return c } ================================================ FILE: column/lc_indices.go ================================================ package column import ( "io" "unsafe" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) type indicesColumnI interface { ReadRaw(num int, r *readerwriter.Reader) error WriteTo(io.Writer) (int64, error) appendInts([]int) readInt(value *[]int) Reset() } type indicatedTypes interface { uint8 | uint16 | uint32 | uint64 } type indicesColumn[T indicatedTypes] struct { Base[T] } func newIndicesColumn[T indicatedTypes]() *indicesColumn[T] { var tmpValue T size := int(unsafe.Sizeof(tmpValue)) return &indicesColumn[T]{ Base: Base[T]{ size: size, }, } } func (c *indicesColumn[T]) readInt(value *[]int) { for _, v := range c.Data() { *value = append(*value, int(v), ) } } func (c *indicesColumn[T]) appendInts(values []int) { for _, v := range values { c.values = append(c.values, T(v)) } } ================================================ FILE: column/lc_nullable.go ================================================ package column // LowCardinalityNullable for 
// LowCardinality(Nullable(T)) ClickHouse DataTypes
type LowCardinalityNullable[T comparable] struct {
	LowCardinality[T]
}

// NewLowCardinalityNullable return new LowCardinalityNullable for nullable LowCardinality ClickHouse DataTypes
func NewLowCardinalityNullable[T comparable](dictColumn Column[T]) *LowCardinalityNullable[T] {
	return NewLCNullable(dictColumn)
}

// NewLCNullable return new LowCardinalityNullable for nullable LowCardinality ClickHouse DataTypes
func NewLCNullable[T comparable](dictColumn Column[T]) *LowCardinalityNullable[T] {
	// key 0 is reserved for NULL: seed the dictionary with the zero value
	// so that real values start at key 1.
	var empty T
	dictColumn.Append(empty)
	l := &LowCardinalityNullable[T]{
		LowCardinality: LowCardinality[T]{
			nullable:   true,
			dict:       make(map[T]int),
			dictColumn: dictColumn,
		},
	}
	return l
}

// Data get all nullable data in current block as a slice.
//
// NOTE: the return slice only valid in current block, if you want to use it after, you should copy it. or use Read
func (c *LowCardinalityNullable[T]) DataP() []*T {
	result := make([]*T, c.NumRow())
	for i, k := range c.readedKeys {
		if k == 0 {
			// key 0 means NULL
			result[i] = nil
		} else {
			// copy the value so each row gets its own pointer.
			val := c.readedDict[k]
			result[i] = &val
		}
	}
	return result
}

// Read reads all nullable data in current block and append to the input.
func (c *LowCardinalityNullable[T]) ReadP(value []*T) []*T {
	for _, k := range c.readedKeys {
		if k == 0 {
			value = append(value, nil)
		} else {
			val := c.readedDict[k]
			value = append(value, &val)
		}
	}
	return value
}

// Row return nullable value of given row
// NOTE: Row number start from zero
func (c *LowCardinalityNullable[T]) RowP(row int) *T {
	if c.readedKeys[row] == 0 {
		return nil
	}
	val := c.readedDict[c.readedKeys[row]]
	return &val
}

// Append value for insert
func (c *LowCardinalityNullable[T]) Append(v ...T) {
	for _, v := range v {
		key, ok := c.dict[v]
		if !ok {
			key = len(c.dict)
			c.dict[v] = key
			c.dictColumn.Append(v)
		}
		// +1 because key 0 is reserved for NULL.
		c.keys = append(c.keys, key+1)
	}
	c.numRow += len(v)
}

// Append nil value for insert
func (c *LowCardinalityNullable[T]) AppendNil() {
	// key 0 encodes NULL.
	c.keys = append(c.keys, 0)
	c.numRow++
}

// Append nullable value for insert
//
// as an alternative (for better performance), you can use `Append` and `AppendNil` to insert a value
func (c *LowCardinalityNullable[T]) AppendP(v ...*T) {
	for _, v := range v {
		if v == nil {
			c.keys = append(c.keys, 0)
			continue
		}
		key, ok := c.dict[*v]
		if !ok {
			key = len(c.dict)
			c.dict[*v] = key
			c.dictColumn.Append(*v)
		}
		c.keys = append(c.keys, key+1)
	}
	c.numRow += len(v)
}

// Array return a Array type for this column
func (c *LowCardinalityNullable[T]) Array() *ArrayNullable[T] {
	return NewArrayNullable[T](c)
}

// Reset all statuses and buffered data
//
// After each reading, the reading data does not need to be reset. It will be automatically reset.
//
// When inserting, buffers are reset only after the operation is successful.
// If an error occurs, you can safely call insert again.
func (c *LowCardinalityNullable[T]) Reset() {
	c.LowCardinality.Reset()
	var empty T
	// re-reserve dictionary index 0 for NULL after the underlying reset
	c.dictColumn.Append(empty)
}

// elem unwraps arrayLevel levels of Array around this column.
func (c *LowCardinalityNullable[T]) elem(arrayLevel int) ColumnBasic {
	if arrayLevel > 0 {
		return c.Array().elem(arrayLevel - 1)
	}
	return c
}

================================================
FILE: column/lc_test.go
================================================
package column_test

import (
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vahid-sohrabloo/chconn/v2"
	"github.com/vahid-sohrabloo/chconn/v2/column"
)

// TestLcIndicator16 inserts more than 255 distinct LowCardinality values so
// the server must use 16-bit dictionary indices, then reads them back.
func TestLcIndicator16(t *testing.T) {
	tableName := "lc_indicator_16"
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)
	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),
	)
	require.NoError(t, err)
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "true",
		},
	}
	err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (
				%[1]s_lc LowCardinality(Int64)
			) Engine=Memory`, tableName), &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)
	col := column.New[int64]().LC()
	var colInsert []int64
	// 265 distinct values: exceeds the uint8 index range (255)
	rows := int(^uint8(0)) + 10
	for i := 0; i < rows; i++ {
		val := int64(i + 1)
		col.Append(val)
		colInsert = append(colInsert, val)
	}
	err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO
	test_%[1]s (
				%[1]s_lc
			)
	VALUES`, tableName),
		col,
	)
	require.NoError(t, err)
	// test read row
	colRead := column.New[int64]().LC()
	selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT
	%[1]s_lc
	FROM test_%[1]s`, tableName),
		colRead,
	)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	var colData []int64
	for selectStmt.Next() {
		colData = colRead.Read(colData)
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, colInsert, colData)
}

// TestLcIndicator32 is the 32-bit analogue of TestLcIndicator16.
func TestLcIndicator32(t *testing.T) {
	tableName := "lc_indicator_32"
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)
	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),
	)
	require.NoError(t, err)
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "true",
		},
	}
	err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s (
				%[1]s_lc LowCardinality(Int64)
			) Engine=Memory`, tableName), &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)
	col := column.New[int64]().LC()
	var colInsert []int64
	// 65545 distinct values: exceeds the uint16 index range (65535)
	rows := int(^uint16(0)) + 10
	for i := 0; i < rows; i++ {
		val := int64(i + 1)
		col.Append(val)
		colInsert = append(colInsert, val)
	}
	err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO
	test_%[1]s (
				%[1]s_lc
			)
	VALUES`, tableName),
		col,
	)
	require.NoError(t, err)
	// test read row
	colRead := column.New[int64]().LC()
	selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT
	%[1]s_lc
	FROM test_%[1]s`, tableName),
		colRead,
	)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	var colData []int64
	for selectStmt.Next() {
		colData = colRead.Read(colData)
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, colInsert, colData)
}

================================================
FILE: column/map.go
================================================
package column

// Map is a column of Map(K,V) ClickHouse data type
// Map in clickhouse actually is a array of pair(K,V)
type Map[K comparable, V any] struct {
	MapBase
	// keyColumnData/valueColumnData lazily cache the flat key/value slices
	// of the current block; the resetHook truncates them on each ReadRaw.
	keyColumnData   []K
	valueColumnData []V
}

// NewMap create a new map column of Map(K,V) ClickHouse data type
func NewMap[K comparable, V any](
	keyColumn Column[K],
	valueColumn Column[V],
) *Map[K, V] {
	a := &Map[K, V]{
		MapBase: MapBase{
			keyColumn:    keyColumn,
			valueColumn:  valueColumn,
			offsetColumn: New[uint64](),
		},
	}
	a.resetHook = func() {
		a.keyColumnData = a.keyColumnData[:0]
		a.valueColumnData = a.valueColumnData[:0]
	}
	return a
}

// Data get all the data in current block as a slice.
func (c *Map[K, V]) Data() []map[K]V {
	values := make([]map[K]V, c.offsetColumn.numRow)
	offsets := c.Offsets()
	if len(offsets) == 0 {
		return values
	}
	keyColumnData := c.getKeyColumnData()
	valueColumnData := c.getValueColumnData()
	var lastOffset uint64
	for i, offset := range offsets {
		val := make(map[K]V)
		for ki, key := range keyColumnData[lastOffset:offset] {
			val[key] = valueColumnData[lastOffset:offset][ki]
		}
		values[i] = val
		lastOffset = offset
	}
	return values
}

// Read reads all the data in current block and append to the input.
func (c *Map[K, V]) Read(value []map[K]V) []map[K]V {
	return append(value, c.Data()...)
}

// Row return the value of given row.
// NOTE: Row number start from zero
func (c *Map[K, V]) Row(row int) map[K]V {
	var lastOffset uint64
	if row != 0 {
		lastOffset = c.offsetColumn.Row(row - 1)
	}
	keyColumnData := c.getKeyColumnData()
	valueColumnData := c.getValueColumnData()
	val := make(map[K]V)
	offset := c.offsetColumn.Row(row)
	for ki, key := range keyColumnData[lastOffset:offset] {
		val[key] = valueColumnData[lastOffset:offset][ki]
	}
	return val
}

// Append value for insert
func (c *Map[K, V]) Append(v map[K]V) {
	c.AppendLen(len(v))
	for k, d := range v {
		c.keyColumn.(Column[K]).Append(k)
		c.valueColumn.(Column[V]).Append(d)
	}
}

// getKeyColumnData populates the key cache on first use after a reset.
func (c *Map[K, V]) getKeyColumnData() []K {
	if len(c.keyColumnData) == 0 {
		c.keyColumnData = c.keyColumn.(Column[K]).Data()
	}
	return c.keyColumnData
}

// getValueColumnData populates the value cache on first use after a reset.
func (c *Map[K, V]) getValueColumnData() []V {
	if len(c.valueColumnData) == 0 {
		c.valueColumnData = c.valueColumn.(Column[V]).Data()
	}
	return c.valueColumnData
}

// KeyColumn return the key column
func (c *Map[K, V]) KeyColumn() Column[K] {
	return c.keyColumn.(Column[K])
}

// ValueColumn return the value column
func (c *Map[K, V]) ValueColumn() Column[V] {
	return c.valueColumn.(Column[V])
}

================================================
FILE: column/map_base.go
================================================ package column import ( "encoding/binary" "fmt" "io" "strings" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) // Map is a column of Map(K,V) ClickHouse data type // Map in clickhouse actually is a array of pair(K,V) // // MapBase is a base class for map and also for non generic of map to use dynamic select column type MapBase struct { column offsetColumn *Base[uint64] keyColumn ColumnBasic valueColumn ColumnBasic offset uint64 resetHook func() } // NewMapBase create a new map column of Map(K,V) ClickHouse data type func NewMapBase( keyColumn, valueColumn ColumnBasic, ) *MapBase { a := &MapBase{ keyColumn: keyColumn, valueColumn: valueColumn, offsetColumn: New[uint64](), } return a } // Each run the given function for each row in the column with start and end offsets. // // in some cases like Map(K,Array(Nullable)) you can't read the data with generic for this situations. you can use this function. 
// // For example // colNullableArrayReadKey := colNullableArrayRead.KeyColumn().Data() // colNullableArrayReadValue := colNullableArrayRead.ValueColumn().(*column.ArrayNullable[V]).DataP() // // colNullableArrayRead.Each(func(start, end uint64) bool { // val := make(map[string][]*V) // for ki, key := range colNullableArrayReadKey[start:end] { // val[key] = colNullableArrayReadValue[start:end][ki] // } // colArrayNullableData = append(colArrayNullableData, val) // return true // }) func (c *MapBase) Each(f func(start, end uint64) bool) { offsets := c.Offsets() if len(offsets) == 0 { return } var lastOffset uint64 for _, offset := range offsets { if !f(lastOffset, offset) { return } lastOffset = offset } } // AppendLen Append len for insert func (c *MapBase) AppendLen(v int) { c.offset += uint64(v) c.offsetColumn.Append(c.offset) } // NumRow return number of row for this block func (c *MapBase) NumRow() int { return c.offsetColumn.NumRow() } // Reset all statuses and buffered data // // After each reading, the reading data does not need to be reset. It will be automatically reset. // // When inserting, buffers are reset only after the operation is successful. // If an error occurs, you can safely call insert again. func (c *MapBase) Reset() { c.offsetColumn.Reset() c.keyColumn.Reset() c.valueColumn.Reset() c.offset = 0 } // Offsets return all the offsets in current block func (c *MapBase) Offsets() []uint64 { return c.offsetColumn.Data() } // TotalRows return total rows on this block of array data func (c *MapBase) TotalRows() int { if c.offsetColumn.totalByte == 0 { return 0 } return int(binary.LittleEndian.Uint64(c.offsetColumn.b[c.offsetColumn.totalByte-8 : c.offsetColumn.totalByte])) } // SetWriteBufferSize set write buffer (number of rows) // this buffer only used for writing. // By setting this buffer, you will avoid allocating the memory several times. 
func (c *MapBase) SetWriteBufferSize(row int) { c.offsetColumn.SetWriteBufferSize(row) c.keyColumn.SetWriteBufferSize(row) c.valueColumn.SetWriteBufferSize(row) } // ReadRaw read raw data from the reader. it runs automatically func (c *MapBase) ReadRaw(num int, r *readerwriter.Reader) error { c.offsetColumn.Reset() err := c.offsetColumn.ReadRaw(num, r) if err != nil { return fmt.Errorf("map: read offset column: %w", err) } err = c.keyColumn.ReadRaw(c.TotalRows(), r) if err != nil { return fmt.Errorf("map: read key column: %w", err) } err = c.valueColumn.ReadRaw(c.TotalRows(), r) if err != nil { return fmt.Errorf("map: read value column: %w", err) } if c.resetHook != nil { c.resetHook() } return nil } // KeyColumn return the key column func (c *MapBase) KeyColumn() ColumnBasic { return c.keyColumn } // ValueColumn return the value column func (c *MapBase) ValueColumn() ColumnBasic { return c.valueColumn } // HeaderReader reads header data from reader // it uses internally func (c *MapBase) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error { err := c.offsetColumn.HeaderReader(r, readColumn, revision) if err != nil { return err } c.name = c.offsetColumn.name c.chType = c.offsetColumn.chType c.keyColumn.SetName(c.name) c.valueColumn.SetName(c.name) err = c.keyColumn.HeaderReader(r, false, revision) if err != nil { return fmt.Errorf("map: read key header: %w", err) } err = c.valueColumn.HeaderReader(r, false, revision) if err != nil { return fmt.Errorf("map: read value header: %w", err) } return nil } func (c *MapBase) Validate() error { chType := helper.FilterSimpleAggregate(c.chType) if !helper.IsMap(chType) { return ErrInvalidType{ column: c, } } columnsMap, err := helper.TypesInParentheses(chType[helper.LenMapStr : len(chType)-1]) if err != nil { return fmt.Errorf("map invalid types %w", err) } if len(columnsMap) != 2 { //nolint:goerr113 return fmt.Errorf("columns number is not equal to map columns number: %d != %d", len(columnsMap), 2) } 
c.keyColumn.SetType(columnsMap[0].ChType) c.keyColumn.SetName(columnsMap[0].Name) c.valueColumn.SetType(columnsMap[1].ChType) c.valueColumn.SetName(columnsMap[1].Name) if c.keyColumn.Validate() != nil { return ErrInvalidType{ column: c, } } if c.valueColumn.Validate() != nil { return ErrInvalidType{ column: c, } } return nil } func (c *MapBase) ColumnType() string { return strings.ReplaceAll( strings.ReplaceAll(helper.MapTypeStr, "", c.keyColumn.ColumnType()), "", c.valueColumn.ColumnType()) } // WriteTo write data to ClickHouse. // it uses internally func (c *MapBase) WriteTo(w io.Writer) (int64, error) { nw, err := c.offsetColumn.WriteTo(w) if err != nil { return nw, fmt.Errorf("write len data: %w", err) } n, errDataColumn := c.keyColumn.WriteTo(w) nw += n if errDataColumn != nil { return nw, fmt.Errorf("write key data: %w", errDataColumn) } n, errDataColumn = c.valueColumn.WriteTo(w) nw += n if errDataColumn != nil { return nw, fmt.Errorf("write value data: %w", errDataColumn) } return nw + n, errDataColumn } // HeaderWriter writes header data to writer // it uses internally func (c *MapBase) HeaderWriter(w *readerwriter.Writer) { c.keyColumn.HeaderWriter(w) c.valueColumn.HeaderWriter(w) } ================================================ FILE: column/map_nullable.go ================================================ package column import "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" // MapNullable is a column of Map(K,V) ClickHouse data type where V is nullable. 
// Map in clickhouse actually is a array of pair(K,V) type MapNullable[K comparable, V any] struct { Map[K, V] valueColumn NullableColumn[V] keyColumnData []K valueColumnData []*V } // NewMapNullable create a new map column of Map(K,V) ClickHouse data type func NewMapNullable[K comparable, V any]( keyColumn Column[K], valueColumn NullableColumn[V], ) *MapNullable[K, V] { a := &MapNullable[K, V]{ valueColumn: valueColumn, Map: Map[K, V]{ MapBase: MapBase{ keyColumn: keyColumn, valueColumn: valueColumn, offsetColumn: New[uint64](), }, }, } return a } // Data get all the data in current block as a slice. func (c *MapNullable[T, V]) DataP() []map[T]*V { values := make([]map[T]*V, c.offsetColumn.numRow) var lastOffset uint64 for i := 0; i < c.offsetColumn.numRow; i++ { val := make(map[T]*V) offset := c.offsetColumn.Row(i) for ki, key := range c.keyColumnData[lastOffset:offset] { v := c.valueColumnData[lastOffset:offset][ki] val[key] = v } values[i] = val lastOffset = c.offsetColumn.Row(i) } return values } // Read reads all the data in current block and append to column. func (c *MapNullable[T, V]) ReadP(value []map[T]*V) []map[T]*V { return append(value, c.DataP()...) } // Row return the value of given row. // NOTE: Row number start from zero func (c *MapNullable[T, V]) RowP(row int) map[T]*V { var lastOffset uint64 if row != 0 { lastOffset = c.offsetColumn.Row(row - 1) } val := make(map[T]*V) offset := c.offsetColumn.Row(row) for ki, key := range c.keyColumnData[lastOffset:offset] { v := c.valueColumnData[lastOffset:offset][ki] val[key] = v } return val } func (c *MapNullable[K, V]) AppendP(v map[K]*V) { c.AppendLen(len(v)) for k, d := range v { c.keyColumn.(Column[K]).Append(k) c.valueColumn.AppendP(d) } } // ReadRaw read raw data from the reader. 
// it runs automatically
func (c *MapNullable[K, V]) ReadRaw(num int, r *readerwriter.Reader) error {
	err := c.Map.ReadRaw(num, r)
	if err != nil {
		return err
	}
	// refresh the flat key/value views of the freshly read block
	c.keyColumnData = c.keyColumn.(Column[K]).Data()
	c.valueColumnData = c.valueColumn.DataP()
	return nil
}

// ValueColumn return the value column
func (c *MapNullable[K, V]) ValueColumn() NullableColumn[V] {
	return c.valueColumn
}

================================================
FILE: column/map_test.go
================================================
package column_test

import (
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vahid-sohrabloo/chconn/v2"
	"github.com/vahid-sohrabloo/chconn/v2/column"
)

// Each TestMapXxx below drives testMapColumn with two generators that
// produce two-element value slices for the map's "a" and "b" keys.
func TestMapUint8(t *testing.T) {
	testMapColumn(t, "UInt8", "uint8", func(i int) []uint8 {
		d := make([]uint8, 2)
		d[0] = uint8(i)
		d[1] = uint8(i + 1)
		return d
	}, func(i int) []uint8 {
		d := make([]uint8, 2)
		d[0] = uint8(i)
		d[1] = uint8(i + 1)
		return d
	})
}

func TestMapUint16(t *testing.T) {
	testMapColumn(t, "UInt16", "uint16", func(i int) []uint16 {
		d := make([]uint16, 2)
		d[0] = uint16(i)
		d[1] = uint16(i + 1)
		return d
	}, func(i int) []uint16 {
		d := make([]uint16, 2)
		d[0] = uint16(i)
		d[1] = uint16(i + 1)
		return d
	})
}

func TestMapUint32(t *testing.T) {
	testMapColumn(t, "UInt32", "uint32", func(i int) []uint32 {
		d := make([]uint32, 2)
		d[0] = uint32(i)
		d[1] = uint32(i + 1)
		return d
	}, func(i int) []uint32 {
		d := make([]uint32, 2)
		d[0] = uint32(i)
		d[1] = uint32(i + 1)
		return d
	})
}

func TestMapUint64(t *testing.T) {
	testMapColumn(t, "UInt64", "uint64", func(i int) []uint64 {
		d := make([]uint64, 2)
		d[0] = uint64(i)
		d[1] = uint64(i + 1)
		return d
	}, func(i int) []uint64 {
		d := make([]uint64, 2)
		d[0] = uint64(i)
		d[1] = uint64(i + 1)
		return d
	})
}

func TestMapInt8(t *testing.T) {
	testMapColumn(t, "Int8", "int8", func(i int) []int8 {
		d := make([]int8, 2)
		d[0] = int8(i)
		d[1] = int8(i + 1)
		return d
	}, func(i int) []int8 {
		d := make([]int8, 2)
		d[0] = int8(i)
		d[1] = int8(i + 1)
		return d
	})
}

func TestMapInt16(t *testing.T) {
	testMapColumn(t, "Int16", "int16", func(i int) []int16 {
		d := make([]int16, 2)
		d[0] = int16(i)
		d[1] = int16(i + 1)
		return d
	}, func(i int) []int16 {
		d := make([]int16, 2)
		d[0] = int16(i)
		d[1] = int16(i + 1)
		return d
	})
}

func TestMapInt32(t *testing.T) {
	testMapColumn(t, "Int32", "int32", func(i int) []int32 {
		d := make([]int32, 2)
		d[0] = int32(i)
		d[1] = int32(i + 1)
		return d
	}, func(i int) []int32 {
		d := make([]int32, 2)
		d[0] = int32(i)
		d[1] = int32(i + 1)
		return d
	})
}

func TestMapInt64(t *testing.T) {
	testMapColumn(t, "Int64", "int64", func(i int) []int64 {
		d := make([]int64, 2)
		d[0] = int64(i)
		d[1] = int64(i + 1)
		return d
	}, func(i int) []int64 {
		d := make([]int64, 2)
		d[0] = int64(i)
		d[1] = int64(i + 1)
		return d
	})
}

func TestMapFloat32(t *testing.T) {
	testMapColumn(t, "Float32", "float32", func(i int) []float32 {
		d := make([]float32, 2)
		d[0] = float32(i)
		d[1] = float32(i + 1)
		return d
	}, func(i int) []float32 {
		d := make([]float32, 2)
		d[0] = float32(i)
		d[1] = float32(i + 1)
		return d
	})
}

func TestMapFloat64(t *testing.T) {
	testMapColumn(t, "Float64", "float64", func(i int) []float64 {
		d := make([]float64, 2)
		d[0] = float64(i)
		d[1] = float64(i + 1)
		return d
	}, func(i int) []float64 {
		d := make([]float64, 2)
		d[0] = float64(i)
		d[1] = float64(i + 1)
		return d
	})
}

// testMapColumn round-trips every Map(String, ...) flavour of the given
// element type: plain, nullable, array, LowCardinality, and combinations.
func testMapColumn[V comparable](
	t *testing.T,
	chType, tableName string,
	firstVal func(i int) []V,
	secondVal func(i int) []V,
) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)
	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP TABLE IF EXISTS test_map_%s`, tableName),
	)
	require.NoError(t, err)
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "true",
		},
	}
	err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_map_%[1]s (
				%[1]s Map(String,%[2]s),
				%[1]s_nullable
Map(String,Nullable(%[2]s)),
				%[1]s_array Map(String,Array(%[2]s)),
				%[1]s_array_nullable Map(String,Array(Nullable(%[2]s))),
				%[1]s_lc Map(String,LowCardinality(%[2]s)),
				%[1]s_nullable_lc Map(String,LowCardinality(Nullable(%[2]s))),
				%[1]s_array_lc Map(String,Array(LowCardinality(%[2]s))),
				%[1]s_array_lc_nullable Map(String,Array(LowCardinality(Nullable(%[2]s))))
			) Engine=Memory`, tableName, chType), &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)
	// one writer column per table column, covering every map flavour
	col := column.NewMap[string, V](
		column.NewString(),
		column.New[V](),
	)
	colNullable := column.NewMapNullable[string, V](
		column.NewString(),
		column.New[V]().Nullable(),
	)
	colArray := column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().Array(),
	)
	colNullableArray := column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().Nullable().Array(),
	)
	colLC := column.NewMap[string, V](
		column.NewString(),
		column.New[V]().LC(),
	)
	colLCNullable := column.NewMapNullable[string, V](
		column.NewString(),
		column.New[V]().Nullable().LC(),
	)
	colArrayLC := column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().LC().Array(),
	)
	colArrayLCNullable := column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().Nullable().LC().Array(),
	)
	// expected values, collected while appending
	var colInsert []map[string]V
	var colNullableInsert []map[string]*V
	var colArrayInsert []map[string][]V
	var colArrayNullableInsert []map[string][]*V
	var colLCInsert []map[string]V
	var colLCNullableInsert []map[string]*V
	var colLCArrayInsert []map[string][]V
	var colLCNullableArrayInsert []map[string][]*V
	// SetWriteBufferSize is not necessary.
	// this just to show how to set write buffer
	col.SetWriteBufferSize(10)
	colNullable.SetWriteBufferSize(10)
	colArray.SetWriteBufferSize(10)
	colNullableArray.SetWriteBufferSize(10)
	colLC.SetWriteBufferSize(10)
	colLCNullable.SetWriteBufferSize(10)
	colArrayLC.SetWriteBufferSize(10)
	colArrayLCNullable.SetWriteBufferSize(10)
	// two insert batches to verify the columns reset correctly between inserts
	for insertN := 0; insertN < 2; insertN++ {
		rows := 10
		for i := 0; i < rows; i++ {
			valData := firstVal(i)
			val2Data := secondVal(i)
			val := map[string]V{
				"a": valData[0],
				"b": valData[1],
			}
			valNullable := map[string]*V{
				"a": &valData[0],
				"b": &valData[1],
			}
			valNullable2 := map[string]*V{
				"a": &valData[1],
				"b": nil,
			}
			valArray := map[string][]V{
				"a": valData,
				"b": val2Data,
			}
			valArrayNil := map[string][]*V{
				"a": {&valData[0], &valData[1]},
				"b": {&valData[1], nil},
			}
			col.Append(val)
			colInsert = append(colInsert, val)
			// example add nullable
			if i%2 == 0 {
				colNullableInsert = append(colNullableInsert, valNullable)
				colNullable.AppendP(valNullable)
				colLCNullableInsert = append(colLCNullableInsert, valNullable)
				colLCNullable.AppendP(valNullable)
			} else {
				colNullableInsert = append(colNullableInsert, valNullable2)
				colNullable.AppendP(valNullable2)
				colLCNullableInsert = append(colLCNullableInsert, valNullable2)
				colLCNullable.AppendP(valNullable2)
			}
			colArray.Append(valArray)
			colArrayInsert = append(colArrayInsert, valArray)
			// Array(Nullable) has no generic Append; append len and
			// key/value pairs manually
			colNullableArray.AppendLen(len(valArrayNil))
			for k, v := range valArrayNil {
				colNullableArray.KeyColumn().Append(k)
				colNullableArray.ValueColumn().(*column.ArrayNullable[V]).AppendP(v)
			}
			colArrayNullableInsert = append(colArrayNullableInsert, valArrayNil)
			colLCInsert = append(colLCInsert, val)
			colLC.Append(val)
			colLCArrayInsert = append(colLCArrayInsert, valArray)
			colArrayLC.Append(valArray)
			colLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil)
			colArrayLCNullable.AppendLen(len(valArrayNil))
			for k, v := range valArrayNil {
				colArrayLCNullable.KeyColumn().Append(k)
				colArrayLCNullable.ValueColumn().(*column.ArrayNullable[V]).AppendP(v)
			}
		}
		err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO
			test_map_%[1]s (
				%[1]s,
				%[1]s_nullable,
				%[1]s_array,
				%[1]s_array_nullable,
				%[1]s_lc,
				%[1]s_nullable_lc,
				%[1]s_array_lc,
				%[1]s_array_lc_nullable
			)
		VALUES`, tableName),
			col,
			colNullable,
			colArray,
			colNullableArray,
			colLC,
			colLCNullable,
			colArrayLC,
			colArrayLCNullable,
		)
		require.NoError(t, err)
	}

	// test read all
	colRead := column.NewMap[string, V](
		column.NewString(),
		column.New[V](),
	)
	colNullableRead := column.NewMapNullable[string, V](
		column.NewString(),
		column.New[V]().Nullable(),
	)
	colArrayRead := column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().Array(),
	)
	colNullableArrayRead := column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().Nullable().Array(),
	)
	colLCRead := column.NewMap[string, V](
		column.NewString(),
		column.New[V]().LC(),
	)
	colLCNullableRead := column.NewMapNullable[string, V](
		column.NewString(),
		column.New[V]().Nullable().LC(),
	)
	colArrayLCRead := column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().LC().Array(),
	)
	colArrayLCNullableRead := column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().Nullable().LC().Array(),
	)
	var colData []map[string]V
	var colNullableData []map[string]*V
	var colArrayData []map[string][]V
	var colArrayNullableData []map[string][]*V
	var colLCData []map[string]V
	var colLCNullableData []map[string]*V
	var colLCArrayData []map[string][]V
	var colLCNullableArrayData []map[string][]*V
	selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable,
		%[1]s_lc,
		%[1]s_nullable_lc,
		%[1]s_array_lc,
		%[1]s_array_lc_nullable
	FROM test_map_%[1]s`, tableName),
		colRead,
		colNullableRead,
		colArrayRead,
		colNullableArrayRead,
		colLCRead,
		colLCNullableRead,
		colArrayLCRead,
		colArrayLCNullableRead,
	)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	for selectStmt.Next() {
		colData =
colRead.Read(colData)
		colNullableData = colNullableRead.ReadP(colNullableData)
		colArrayData = colArrayRead.Read(colArrayData)
		// Map(String, Array(Nullable)) can't be read generically; walk the
		// flat key/value data with Each and rebuild each row's map
		colNullableArrayReadKey := colNullableArrayRead.KeyColumn().Data()
		colNullableArrayReadValue := colNullableArrayRead.ValueColumn().(*column.ArrayNullable[V]).DataP()
		colNullableArrayRead.Each(func(start, end uint64) bool {
			val := make(map[string][]*V)
			for ki, key := range colNullableArrayReadKey[start:end] {
				val[key] = colNullableArrayReadValue[start:end][ki]
			}
			colArrayNullableData = append(colArrayNullableData, val)
			return true
		})
		colLCData = colLCRead.Read(colLCData)
		colLCNullableData = colLCNullableRead.ReadP(colLCNullableData)
		colLCArrayData = colArrayLCRead.Read(colLCArrayData)
		colArrayLCNullableReadKey := colArrayLCNullableRead.KeyColumn().Data()
		colArrayLCNullableReadValue := colArrayLCNullableRead.ValueColumn().(*column.ArrayNullable[V]).DataP()
		colArrayLCNullableRead.Each(func(start, end uint64) bool {
			val := make(map[string][]*V)
			for ki, key := range colArrayLCNullableReadKey[start:end] {
				val[key] = colArrayLCNullableReadValue[start:end][ki]
			}
			colLCNullableArrayData = append(colLCNullableArrayData, val)
			return true
		})
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, colInsert, colData)
	assert.Equal(t, colNullableInsert, colNullableData)
	assert.Equal(t, colArrayInsert, colArrayData)
	assert.Equal(t, colArrayNullableInsert, colArrayNullableData)
	assert.Equal(t, colLCInsert, colLCData)
	assert.Equal(t, colLCNullableInsert, colLCNullableData)
	assert.Equal(t, colLCArrayInsert, colLCArrayData)
	assert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData)

	// test read Row
	colRead = column.NewMap[string, V](
		column.NewString(),
		column.New[V](),
	)
	colNullableRead = column.NewMapNullable[string, V](
		column.NewString(),
		column.New[V]().Nullable(),
	)
	colArrayRead = column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().Array(),
	)
	colNullableArrayRead = column.NewMap[string, []V](
		column.NewString(),
column.New[V]().Nullable().Array(),
	)
	colLCRead = column.NewMap[string, V](
		column.NewString(),
		column.New[V]().LC(),
	)
	colLCNullableRead = column.NewMapNullable[string, V](
		column.NewString(),
		column.New[V]().Nullable().LC(),
	)
	colArrayLCRead = column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().LC().Array(),
	)
	colArrayLCNullableRead = column.NewMap[string, []V](
		column.NewString(),
		column.New[V]().Nullable().LC().Array(),
	)
	colData = colData[:0]
	colNullableData = colNullableData[:0]
	colArrayData = colArrayData[:0]
	colArrayNullableData = colArrayNullableData[:0]
	colLCData = colLCData[:0]
	colLCNullableData = colLCNullableData[:0]
	colLCArrayData = colLCArrayData[:0]
	colLCNullableArrayData = colLCNullableArrayData[:0]
	selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable,
		%[1]s_lc,
		%[1]s_nullable_lc,
		%[1]s_array_lc,
		%[1]s_array_lc_nullable
	FROM test_map_%[1]s`, tableName),
		colRead,
		colNullableRead,
		colArrayRead,
		colNullableArrayRead,
		colLCRead,
		colLCNullableRead,
		colArrayLCRead,
		colArrayLCNullableRead,
	)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	// read the same data row by row via Row/RowP
	for selectStmt.Next() {
		for i := 0; i < selectStmt.RowsInBlock(); i++ {
			colData = append(colData, colRead.Row(i))
			colNullableData = append(colNullableData, colNullableRead.RowP(i))
			colArrayData = append(colArrayData, colArrayRead.Row(i))
			colLCData = append(colLCData, colLCRead.Row(i))
			colLCNullableData = append(colLCNullableData, colLCNullableRead.RowP(i))
			colLCArrayData = append(colLCArrayData, colArrayLCRead.Row(i))
		}
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, colInsert, colData)
	assert.Equal(t, colNullableInsert, colNullableData)
	assert.Equal(t, colArrayInsert, colArrayData)
	assert.Equal(t, colLCInsert, colLCData)
	assert.Equal(t, colLCNullableInsert, colLCNullableData)
	assert.Equal(t, colLCArrayInsert, colLCArrayData)

	// check dynamic column
	selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT
		%[1]s,
		%[1]s_nullable,
		%[1]s_array,
		%[1]s_array_nullable,
		%[1]s_lc,
		%[1]s_nullable_lc,
		%[1]s_array_lc,
		%[1]s_array_lc_nullable
	FROM test_map_%[1]s`, tableName),
	)
	require.NoError(t, err)
	autoColumns := selectStmt.Columns()
	assert.Len(t, autoColumns, 8)
	assert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())
	// NOTE(review): the assertion above is duplicated on the next line
	assert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())
	assert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())
	assert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())
	assert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())
	assert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType())
	assert.Equal(t, colLCNullableRead.ColumnType(), autoColumns[5].ColumnType())
	assert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType())
	assert.Equal(t, colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType())
	for selectStmt.Next() {
	}
	require.NoError(t, selectStmt.Err())
	selectStmt.Close()
}

// TestMapEmptyResult checks that a zero-row result leaves the column empty
// and that Each never invokes its callback.
func TestMapEmptyResult(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)
	// test read all
	colRead := column.NewMap[uint64, uint64](
		column.New[uint64](),
		column.New[uint64](),
	)
	selectStmt, err := conn.Select(context.Background(),
		`SELECT map(number,number) from system.numbers limit 0`,
		colRead,
	)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	for selectStmt.Next() {
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, colRead.Data(), []map[uint64]uint64{})
	assert.Equal(t, colRead.TotalRows(), 0)
	colRead.Each(func(start, end uint64) bool {
		assert.Fail(t, "should not be called")
		return true
	})
}

// TestMapEmpty inserts nil and empty maps into every map flavour and reads
// them back.
func TestMapEmpty(t *testing.T) {
	t.Parallel()
	tableName := "map_empty"
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)
	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP
TABLE IF EXISTS test_map_%s`, tableName),
	)
	require.NoError(t, err)
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "true",
		},
	}
	err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_map_%[1]s (
				%[1]s Map(String,%[2]s),
				%[1]s_nullable Map(String,Nullable(%[2]s)),
				%[1]s_array Map(String,Array(%[2]s)),
				%[1]s_array_nullable Map(String,Array(Nullable(%[2]s))),
				%[1]s_lc Map(String,LowCardinality(%[2]s)),
				%[1]s_nullable_lc Map(String,LowCardinality(Nullable(%[2]s))),
				%[1]s_array_lc Map(String,Array(LowCardinality(%[2]s))),
				%[1]s_array_lc_nullable Map(String,Array(LowCardinality(Nullable(%[2]s))))
			) Engine=Memory`, tableName, "UInt16"), &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)
	col := column.NewMap[string, uint16](
		column.NewString(),
		column.New[uint16](),
	)
	colNullable := column.NewMapNullable[string, uint16](
		column.NewString(),
		column.New[uint16]().Nullable(),
	)
	colArray := column.NewMap[string, []uint16](
		column.NewString(),
		column.New[uint16]().Array(),
	)
	colNullableArray := column.NewMap[string, []uint16](
		column.NewString(),
		column.New[uint16]().Nullable().Array(),
	)
	colLC := column.NewMap[string, uint16](
		column.NewString(),
		column.New[uint16]().LC(),
	)
	colLCNullable := column.NewMapNullable[string, uint16](
		column.NewString(),
		column.New[uint16]().Nullable().LC(),
	)
	colArrayLC := column.NewMap[string, []uint16](
		column.NewString(),
		column.New[uint16]().LC().Array(),
	)
	colArrayLCNullable := column.NewMap[string, []uint16](
		column.NewString(),
		column.New[uint16]().Nullable().LC().Array(),
	)
	// nil and empty maps should both round-trip as empty rows
	col.Append(nil)
	col.Append(map[string]uint16{})
	colNullable.Append(nil)
	colNullable.AppendP(map[string]*uint16{})
	colArray.Append(nil)
	colArray.Append(map[string][]uint16{})
	colNullableArray.Append(nil)
	colNullableArray.Append(map[string][]uint16{})
	colLC.Append(nil)
	colLC.Append(map[string]uint16{})
	colLCNullable.Append(nil)
	colLCNullable.AppendP(map[string]*uint16{})
	colArrayLC.Append(nil)
colArrayLC.Append(map[string][]uint16{}) colArrayLCNullable.Append(nil) colArrayLCNullable.Append(map[string][]uint16{}) err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_map_%[1]s ( %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable ) VALUES`, tableName), col, colNullable, colArray, colNullableArray, colLC, colLCNullable, colArrayLC, colArrayLCNullable, ) require.NoError(t, err) // test read all colRead := column.NewMap[string, uint16]( column.NewString(), column.New[uint16](), ) colNullableRead := column.NewMapNullable[string, uint16]( column.NewString(), column.New[uint16]().Nullable(), ) colArrayRead := column.NewMap[string, []uint16]( column.NewString(), column.New[uint16]().Array(), ) colNullableArrayRead := column.NewMap[string, []uint16]( column.NewString(), column.New[uint16]().Nullable().Array(), ) colLCRead := column.NewMap[string, uint16]( column.NewString(), column.New[uint16]().LC(), ) colLCNullableRead := column.NewMapNullable[string, uint16]( column.NewString(), column.New[uint16]().Nullable().LC(), ) colArrayLCRead := column.NewMap[string, []uint16]( column.NewString(), column.New[uint16]().LC().Array(), ) colArrayLCNullableRead := column.NewMap[string, []uint16]( column.NewString(), column.New[uint16]().Nullable().LC().Array(), ) var colData []map[string]uint16 var colNullableData []map[string]*uint16 var colArrayData []map[string][]uint16 var colArrayNullableData []map[string][]*uint16 var colLCData []map[string]uint16 var colLCNullableData []map[string]*uint16 var colLCArrayData []map[string][]uint16 var colLCNullableArrayData []map[string][]*uint16 selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_map_%[1]s`, tableName), colRead, colNullableRead, colArrayRead, colNullableArrayRead, colLCRead, 
colLCNullableRead, colArrayLCRead, colArrayLCNullableRead, ) require.NoError(t, err) require.True(t, conn.IsBusy()) for selectStmt.Next() { colData = colRead.Read(colData) colNullableData = colNullableRead.ReadP(colNullableData) colArrayData = colArrayRead.Read(colArrayData) colNullableArrayReadKey := colNullableArrayRead.KeyColumn().Data() colNullableArrayReadValue := colNullableArrayRead.ValueColumn().(*column.ArrayNullable[uint16]).DataP() colNullableArrayRead.Each(func(start, end uint64) bool { val := make(map[string][]*uint16) for ki, key := range colNullableArrayReadKey[start:end] { val[key] = colNullableArrayReadValue[start:end][ki] } colArrayNullableData = append(colArrayNullableData, val) return true }) colLCData = colLCRead.Read(colLCData) colLCNullableData = colLCNullableRead.ReadP(colLCNullableData) colLCArrayData = colArrayLCRead.Read(colLCArrayData) colArrayLCNullableReadKey := colArrayLCNullableRead.KeyColumn().Data() colArrayLCNullableReadValue := colArrayLCNullableRead.ValueColumn().(*column.ArrayNullable[uint16]).DataP() colArrayLCNullableRead.Each(func(start, end uint64) bool { val := make(map[string][]*uint16) for ki, key := range colArrayLCNullableReadKey[start:end] { val[key] = colArrayLCNullableReadValue[start:end][ki] } colLCNullableArrayData = append(colLCNullableArrayData, val) return true }) } require.NoError(t, selectStmt.Err()) assert.Equal(t, []map[string]uint16{{}, {}}, colData) assert.Equal(t, []map[string]uint16{{}, {}}, colRead.Data()) assert.Equal(t, []map[string]*uint16{{}, {}}, colNullableData) assert.Equal(t, []map[string]*uint16{{}, {}}, colNullableRead.DataP()) assert.Equal(t, []map[string][]uint16{{}, {}}, colArrayData) assert.Equal(t, []map[string][]uint16{{}, {}}, colArrayRead.Data()) assert.Equal(t, []map[string][]*uint16{{}, {}}, colArrayNullableData) assert.Equal(t, []map[string]uint16{{}, {}}, colLCData) assert.Equal(t, []map[string]uint16{{}, {}}, colLCRead.Data()) assert.Equal(t, []map[string]*uint16{{}, {}}, 
colLCNullableData) assert.Equal(t, []map[string]*uint16{{}, {}}, colLCNullableRead.DataP()) assert.Equal(t, []map[string][]uint16{{}, {}}, colLCArrayData) assert.Equal(t, []map[string][]uint16{{}, {}}, colArrayLCRead.Data()) assert.Equal(t, []map[string][]*uint16{{}, {}}, colLCNullableArrayData) } ================================================ FILE: column/nested.go ================================================ package column // NewNested create a new nested of Nested(T1,T2,.....,Tn) ClickHouse data type // // this is actually an alias for NewTuple(T1,T2,.....,Tn).Array() func NewNested(columns ...ColumnBasic) *ArrayBase { return NewTuple(columns...).Array() } ================================================ FILE: column/nested_test.go ================================================ package column_test import ( "context" "fmt" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" "github.com/vahid-sohrabloo/chconn/v2/types" ) func TestNestedNoFlattened(t *testing.T) { tableName := "nested_no_flattened" t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") conn, err := chconn.Connect(context.Background(), connString) require.NoError(t, err) err = conn.Exec(context.Background(), fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName), ) require.NoError(t, err) set := chconn.Settings{ { Name: "allow_suspicious_low_cardinality_types", Value: "true", }, { Name: "flatten_nested", Value: "false", }, } err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s ( col1 Nested(col1_n1 Int64, col2_n1 String), col2 Nested(col1_n2 Int64, col2_n2 Nested(col1_n2_n1 Int64, col2_n2_n2 String)) ) Engine=Memory`, tableName), &chconn.QueryOptions{ Settings: set, }) require.NoError(t, err) type Col1Type types.Tuple2[int64, string] col1 := column.NewNested2[Col1Type, int64, string](column.New[int64](), column.NewString()) type 
Col2Type types.Tuple2[int64, []Col1Type] col2N2 := column.NewNested2[Col1Type, int64, string](column.New[int64](), column.NewString()) col2 := column.NewNested2[Col2Type, int64, []Col1Type](column.New[int64](), col2N2) var col1Insert [][]Col1Type var col2Insert [][]Col2Type for insertN := 0; insertN < 2; insertN++ { rows := 10 for i := 0; i < rows; i++ { valString := fmt.Sprintf("string %d", i) valInt := int64(i) val2String := fmt.Sprintf("string %d", i+1) val2Int := int64(i + 1) col1.Append([]Col1Type{ { Col1: valInt, Col2: valString, }, }, ) col1Insert = append(col1Insert, []Col1Type{ { Col1: valInt, Col2: valString, }, }) col2.Append([]Col2Type{ { Col1: valInt, Col2: []Col1Type{ { Col1: val2Int, Col2: val2String, }, }, }, }) col2Insert = append(col2Insert, []Col2Type{ { Col1: valInt, Col2: []Col1Type{ { Col1: val2Int, Col2: val2String, }, }, }, }) } err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( col1, col2 ) VALUES`, tableName), col1, col2, ) require.NoError(t, err) } // example read all col1Read := column.NewTuple2[Col1Type, int64, string](column.New[int64](), column.NewString()).Array() col2N2Read := column.NewNested2[Col1Type, int64, string](column.New[int64](), column.NewString()) col2Read := column.NewNested2[Col2Type, int64, []Col1Type](column.New[int64](), col2N2Read) selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT col1,col2 FROM test_%[1]s`, tableName), col1Read, col2Read) require.NoError(t, err) require.True(t, conn.IsBusy()) var col1Data [][]Col1Type var col2Data [][]Col2Type for selectStmt.Next() { col1Data = col1Read.Read(col1Data) col2Data = col2Read.Read(col2Data) } require.NoError(t, selectStmt.Err()) assert.Equal(t, col1Insert, col1Data) assert.Equal(t, col2Insert, col2Data) // // check dynamic column selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT col1, col2 FROM test_%[1]s`, tableName)) require.NoError(t, err) autoColumns := selectStmt.Columns() assert.Len(t, 
autoColumns, 2) assert.Equal(t, column.NewTuple(column.New[int64](), column.NewString()).Array().ColumnType(), autoColumns[0].ColumnType()) assert.Equal(t, column.NewTuple(column.New[int64](), column.NewTuple(column.New[int64](), column.NewString()).Array()).Array(). ColumnType(), autoColumns[1].ColumnType()) for selectStmt.Next() { } require.NoError(t, selectStmt.Err()) selectStmt.Close() } ================================================ FILE: column/nullable.go ================================================ package column import ( "fmt" "io" "strings" "unsafe" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) type appendEmptyInterface interface { appendEmpty() } // Nullable is a column of Nullable(T) ClickHouse data type type Nullable[T comparable] struct { column numRow int dataColumn Column[T] writerData []byte b []byte } // NewNullable return new Nullable for Nullable(T) ClickHouse DataType func NewNullable[T comparable](dataColumn Column[T]) *Nullable[T] { return &Nullable[T]{ dataColumn: dataColumn, } } // Data get all the data in current block as a slice. // // NOTE: the return slice only valid in current block, if you want to use it after, you should copy it. or use Read func (c *Nullable[T]) Data() []T { return c.dataColumn.Data() } // Data get all the nullable data in current block as a slice of pointer. // // As an alternative (for better performance). // You can use `Data` and one of `RowIsNil` and `ReadNil` and `DataNil` to detect if value is null or not. func (c *Nullable[T]) DataP() []*T { val := make([]*T, c.numRow) for i, d := range c.dataColumn.Data() { if c.RowIsNil(i) { val[i] = nil } else { // make a copy of the value v := d val[i] = &v } } return val } // Read reads all the data in current block and append to the input. 
func (c *Nullable[T]) Read(value []T) []T {
	return c.dataColumn.Read(value)
}

// ReadP read all value in this block and append to the input slice (for nullable data)
//
// As an alternative (for better performance), You can use `Read` and one of `RowIsNil` and `ReadNil` and `DataNil`
// to detect if value is null or not.
func (c *Nullable[T]) ReadP(value []*T) []*T {
	for i := 0; i < c.numRow; i++ {
		value = append(value, c.RowP(i))
	}
	return value
}

// Row return the value of given row (the raw value stored in the wrapped data
// column; for a null row this is the placeholder the data column stored).
//
// NOTE: Row number start from zero
func (c *Nullable[T]) Row(i int) T {
	return c.dataColumn.Row(i)
}

// RowP return the value of given row for nullable data
// NOTE: Row number start from zero
//
// As an alternative (for better performance), you can use `Row()` to get a value and `RowIsNil()` to check if it is null.
func (c *Nullable[T]) RowP(row int) *T {
	// c.b holds one byte per row: 1 means NULL, 0 means a real value.
	if c.b[row] == 1 {
		return nil
	}
	val := c.dataColumn.Row(row)
	return &val
}

// ReadNil read all nils state in this block and append to the input
func (c *Nullable[T]) ReadNil(value []bool) []bool {
	// Reinterpret the 0/1 null-flag bytes as bools without copying.
	return append(value, *(*[]bool)(unsafe.Pointer(&c.b))...)
}

// DataNil get all nil state in this block
//
// NOTE: the returned slice aliases the column's internal buffer; copy it if
// you need it after the current block.
func (c *Nullable[T]) DataNil() []bool {
	return *(*[]bool)(unsafe.Pointer(&c.b))
}

// RowIsNil return true if the row is null
func (c *Nullable[T]) RowIsNil(row int) bool {
	return c.b[row] == 1
}

// Append value for insert
func (c *Nullable[T]) Append(v ...T) {
	// One zero byte per appended value marks every new row as non-null.
	c.writerData = append(c.writerData, make([]uint8, len(v))...)
	c.dataColumn.Append(v...)
}

// AppendP append nullable value for insert
//
// as an alternative (for better performance), you can use `Append` and `AppendNil` to insert a value
func (c *Nullable[T]) AppendP(v ...*T) {
	for _, v := range v {
		if v == nil {
			c.AppendNil()
			continue
		}
		c.Append(*v)
	}
}

// AppendNil append nil value for insert
func (c *Nullable[T]) AppendNil() {
	// Mark the row as NULL and push an empty placeholder into the data
	// column so both buffers keep the same number of rows.
	c.writerData = append(c.writerData, 1)
	c.dataColumn.(appendEmptyInterface).appendEmpty()
}

// NumRow return number of row for this block
func (c *Nullable[T]) NumRow() int {
	return c.dataColumn.NumRow()
}

// Array return a Array type for this column
func (c *Nullable[T]) Array() *ArrayNullable[T] {
	return NewArrayNullable[T](c)
}

// LC return a low cardinality type for this column
//
// NOTE: wraps the inner data column (not the Nullable wrapper itself).
func (c *Nullable[T]) LC() *LowCardinalityNullable[T] {
	return NewLowCardinalityNullable(c.dataColumn)
}

// LowCardinality return a low cardinality type for this column
func (c *Nullable[T]) LowCardinality() *LowCardinalityNullable[T] {
	return NewLowCardinalityNullable(c.dataColumn)
}

// Reset all statuses and buffered data
//
// After each reading, the reading data does not need to be reset. It will be automatically reset.
//
// When inserting, buffers are reset only after the operation is successful.
// If an error occurs, you can safely call insert again.
func (c *Nullable[T]) Reset() {
	c.b = c.b[:0]
	c.numRow = 0
	c.writerData = c.writerData[:0]
	c.dataColumn.Reset()
}

// SetWriteBufferSize set write buffer (number of rows)
// this buffer only used for writing.
// By setting this buffer, you will avoid allocating the memory several times.
func (c *Nullable[T]) SetWriteBufferSize(row int) {
	if cap(c.writerData) < row {
		c.writerData = make([]byte, 0, row)
	}
	c.dataColumn.SetWriteBufferSize(row)
}

// ReadRaw read raw data from the reader.
it runs automatically
func (c *Nullable[T]) ReadRaw(num int, r *readerwriter.Reader) error {
	c.Reset()
	c.r = r
	c.numRow = num
	err := c.readBuffer()
	if err != nil {
		return fmt.Errorf("read nullable data: %w", err)
	}
	return c.dataColumn.ReadRaw(num, r)
}

// readBuffer reads the per-row null flags (one byte per row) into c.b,
// reusing the existing buffer when it is large enough.
func (c *Nullable[T]) readBuffer() error {
	if cap(c.b) < c.numRow {
		c.b = make([]byte, c.numRow)
	} else {
		c.b = c.b[:c.numRow]
	}
	_, err := c.r.Read(c.b)
	if err != nil {
		return fmt.Errorf("read nullable data: %w", err)
	}
	return nil
}

// HeaderReader reads header data from reader
// it uses internally
func (c *Nullable[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {
	c.r = r
	err := c.readColumn(readColumn, revision)
	if err != nil {
		return err
	}
	return c.dataColumn.HeaderReader(r, false, revision)
}

// Validate checks that the server-sent ClickHouse type is Nullable(...) and
// that the inner type matches the wrapped data column.
func (c *Nullable[T]) Validate() error {
	chType := helper.FilterSimpleAggregate(c.chType)
	if !helper.IsNullable(chType) {
		return ErrInvalidType{
			column: c,
		}
	}
	// Strip the "Nullable(" prefix and the trailing ")" and validate the inner type.
	c.dataColumn.SetType(chType[helper.LenNullableStr : len(chType)-1])
	if c.dataColumn.Validate() != nil {
		return ErrInvalidType{
			column: c,
		}
	}
	return nil
}

// ColumnType returns the ClickHouse type name, e.g. "Nullable(Int64)".
func (c *Nullable[T]) ColumnType() string {
	// Substitute the inner column type into the "Nullable(<type>)" template.
	// BUG FIX: the old string to replace was "", which makes ReplaceAll
	// interleave the template between every character of the inner type name
	// instead of producing "Nullable(<inner>)". Replace the placeholder token.
	return strings.ReplaceAll(helper.NullableTypeStr, "<type>", c.dataColumn.ColumnType())
}

// WriteTo write data to ClickHouse.
// it uses internally func (c *Nullable[T]) WriteTo(w io.Writer) (int64, error) { n, err := w.Write(c.writerData) if err != nil { return int64(n), fmt.Errorf("write nullable data: %w", err) } nw, err := c.dataColumn.WriteTo(w) return nw + int64(n), err } // HeaderWriter writes header data to writer // it uses internally func (c *Nullable[T]) HeaderWriter(w *readerwriter.Writer) { } func (c *Nullable[T]) elem(arrayLevel int, lc bool) ColumnBasic { if lc { return c.LowCardinality().elem(arrayLevel) } if arrayLevel > 0 { return c.Array().elem(arrayLevel - 1) } return c } ================================================ FILE: column/nullable_test.go ================================================ package column_test import ( "context" "fmt" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" ) func TestNullableAsNormal(t *testing.T) { tableName := "nullable" t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") conn, err := chconn.Connect(context.Background(), connString) require.NoError(t, err) err = conn.Exec(context.Background(), fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName), ) require.NoError(t, err) set := chconn.Settings{ { Name: "allow_suspicious_low_cardinality_types", Value: "true", }, } err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s ( block_id UInt8, %[1]s_nullable Nullable(Int64), %[1]s_array_nullable Array(Nullable(Int64)), %[1]s_nullable_lc LowCardinality(Nullable(Int64)), %[1]s_array_lc_nullable Array(LowCardinality(Nullable(Int64))) ) Engine=Memory`, tableName), &chconn.QueryOptions{ Settings: set, }) require.NoError(t, err) blockID := column.New[uint8]() colNullable := column.New[int64]().Nullable() colNullableArray := column.New[int64]().Nullable().Array() colLCNullable := column.New[int64]().Nullable().LC() colArrayLCNullable := column.New[int64]().Nullable().LC().Array() 
var colInsert []int64 var colArrayInsert [][]int64 for insertN := 0; insertN < 2; insertN++ { rows := 10 for i := 0; i < rows; i++ { val := int64(i + 1) blockID.Append(uint8(insertN)) colNullable.Append(val) colNullableArray.Append([]int64{val, val + 1}) colLCNullable.Append(val) colArrayLCNullable.Append([]int64{val, val + 1}) colInsert = append(colInsert, val) colArrayInsert = append(colArrayInsert, []int64{val, val + 1}) } err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( block_id, %[1]s_nullable, %[1]s_array_nullable, %[1]s_nullable_lc, %[1]s_array_lc_nullable ) VALUES`, tableName), blockID, colNullable, colNullableArray, colLCNullable, colArrayLCNullable, ) require.NoError(t, err) } // test read row colNullableRead := column.New[int64]().Nullable() colNullableArrayRead := column.New[int64]().Nullable().Array() colLCNullableRead := column.New[int64]().Nullable().LC() colArrayLCNullableRead := column.New[int64]().Nullable().LC().Array() selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s_nullable, %[1]s_array_nullable, %[1]s_nullable_lc, %[1]s_array_lc_nullable FROM test_%[1]s order by block_id`, tableName), colNullableRead, colNullableArrayRead, colLCNullableRead, colArrayLCNullableRead, ) require.NoError(t, err) require.True(t, conn.IsBusy()) var colData []int64 var colArrayData [][]int64 var colLCData []int64 var colLCArrayData [][]int64 var colDataNilRead []bool var colDataNilData []bool for selectStmt.Next() { colData = colNullableRead.Read(colData) colDataNilRead = colNullableRead.ReadNil(colDataNilRead) colDataNilData = append(colDataNilData, colNullableRead.DataNil()...) 
colArrayData = colNullableArrayRead.Read(colArrayData) colLCData = colLCNullableRead.Read(colLCData) colLCArrayData = colArrayLCNullableRead.Read(colLCArrayData) } require.NoError(t, selectStmt.Err()) assert.Equal(t, colInsert, colData) assert.Equal(t, colArrayInsert, colArrayData) assert.Equal(t, colInsert, colLCData) assert.Equal(t, colArrayInsert, colLCArrayData) assert.Equal(t, colDataNilRead, colDataNilData) assert.Equal(t, make([]bool, len(colInsert)), colDataNilRead) // test read all colNullableRead = column.New[int64]().Nullable() colNullableArrayRead = column.New[int64]().Nullable().Array() colLCNullableRead = column.New[int64]().Nullable().LC() colArrayLCNullableRead = column.New[int64]().Nullable().LC().Array() selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s_nullable, %[1]s_array_nullable, %[1]s_nullable_lc, %[1]s_array_lc_nullable FROM test_%[1]s order by block_id`, tableName), colNullableRead, colNullableArrayRead, colLCNullableRead, colArrayLCNullableRead, ) require.NoError(t, err) require.True(t, conn.IsBusy()) colData = colData[:0] colArrayData = colArrayData[:0] colLCData = colLCData[:0] colLCArrayData = colLCArrayData[:0] for selectStmt.Next() { for i := 0; i < selectStmt.RowsInBlock(); i++ { colData = append(colData, colNullableRead.Row(i)) colArrayData = append(colArrayData, colNullableArrayRead.Row(i)) colLCData = append(colLCData, colLCNullableRead.Row(i)) colLCArrayData = append(colLCArrayData, colArrayLCNullableRead.Row(i)) } } require.NoError(t, selectStmt.Err()) assert.Equal(t, colInsert, colData) assert.Equal(t, colArrayInsert, colArrayData) assert.Equal(t, colInsert, colLCData) assert.Equal(t, colArrayInsert, colLCArrayData) } ================================================ FILE: column/point.go ================================================ package column import "github.com/vahid-sohrabloo/chconn/v2/types" func NewPoint() *Tuple2[types.Point, float64, float64] { return NewTuple2[types.Point, float64, 
float64](New[float64](), New[float64]()) } ================================================ FILE: column/size.go ================================================ package column const ( // Uint8Size data Size of Uint8 Column Uint8Size = 1 // Uint16Size data Size of Uint16 Column Uint16Size = 2 // Uint32Size data Size of Uint32 Column Uint32Size = 4 // Uint64Size data Size of Uint64 Column Uint64Size = 8 // Uint128Size data Size of Uint128 Column Uint128Size = 16 // Uint256Size data Size of Uint256 Column Uint256Size = 32 // Int8Size data Size of Int8 Column Int8Size = 1 // Int16Size data Size of Int16 Column Int16Size = 2 // Int32Size data Size of Int32 Column Int32Size = 4 // Int64Size data Size of Int64 Column Int64Size = 8 // Int128Size data Size of Int128 Column Int128Size = 16 // Int256Size data Size of Int256 Column Int256Size = 32 // Float32Size data Size of Float32 Column Float32Size = 4 // Float64Size data Size of Float64 Column Float64Size = 8 // DateSize data Size of Date Column DateSize = 2 // Date32Size data Size of Date32 Column Date32Size = 4 // DatetimeSize data Size of Datetime Column DatetimeSize = 4 // Datetime64Size data Size of Datetime64 Column Datetime64Size = 8 // IPv4Size data Size of IPv4 Column IPv4Size = 4 // IPv6Size data Size of IPv6 Column IPv6Size = 16 // Decimal32Size data Size of Decimal32 Column Decimal32Size = 4 // Decimal64Size data Size of Decimal64 Column Decimal64Size = 8 // Decimal128Size data Size of Decimal128 Column Decimal128Size = 16 // Decimal256Size data Size of Decimal256 Column Decimal256Size = 32 // ArraylenSize data Size of Arraylen Column ArraylenSize = 8 // MaplenSize data Size of Maplen Column MaplenSize = 8 // UUIDSize data Size of UUID Column UUIDSize = 16 ) ================================================ FILE: column/string.go ================================================ package column // String is a column of String ClickHouse data type type String struct { StringBase[string] } // NewString is a column 
of String ClickHouse data type
func NewString() *String {
	return &String{}
}

// Elem returns the column wrapped in the requested combination of
// Nullable / LowCardinality / Array (arrayLevel levels deep).
func (c *String) Elem(arrayLevel int, nullable, lc bool) ColumnBasic {
	if nullable {
		return c.Nullable().elem(arrayLevel, lc)
	}
	if lc {
		return c.LowCardinality().elem(arrayLevel)
	}
	if arrayLevel > 0 {
		return c.Array().elem(arrayLevel - 1)
	}
	return c
}

================================================ FILE: column/string_base.go ================================================

package column

import (
	"fmt"
	"io"

	"github.com/vahid-sohrabloo/chconn/v2/internal/helper"
	"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter"
)

// stringPos is the [start, end) byte range of one row's value inside
// StringBase.vals.
type stringPos struct {
	start int
	end   int
}

// StringBase is a column of String ClickHouse data type with generic type
type StringBase[T ~string] struct {
	column
	numRow     int
	writerData []byte      // buffered rows waiting to be written (length-prefixed)
	vals       []byte      // raw bytes of all rows read in the current block
	pos        []stringPos // per-row offsets into vals
}

// NewStringBase creates a new column of String ClickHouse data type with generic type
func NewStringBase[T ~string]() *StringBase[T] {
	return &StringBase[T]{}
}

// Data get all the data in current block as a slice.
func (c *StringBase[T]) Data() []T {
	val := make([]T, len(c.pos))
	for i, v := range c.pos {
		val[i] = T(c.vals[v.start:v.end])
	}
	return val
}

// DataBytes get all the data in current block as a slice of []byte.
func (c *StringBase[T]) DataBytes() [][]byte {
	return c.ReadBytes(nil)
}

// Read reads all the data in current block and append to the input.
func (c *StringBase[T]) Read(value []T) []T {
	if cap(value)-len(value) >= len(c.pos) {
		value = (value)[:len(value)+len(c.pos)]
	} else {
		value = append(value, make([]T, len(c.pos))...)
	}
	val := (value)[len(value)-len(c.pos):]
	for i, v := range c.pos {
		val[i] = T(c.vals[v.start:v.end])
	}
	return value
}

// ReadBytes reads all the data as `[]byte` in current block and append to the input.
//
// data is valid only in the current block.
func (c *StringBase[T]) ReadBytes(value [][]byte) [][]byte { if cap(value)-len(value) >= len(c.pos) { value = (value)[:len(value)+len(c.pos)] } else { value = append(value, make([][]byte, len(c.pos))...) } val := (value)[len(value)-len(c.pos):] for i, v := range c.pos { val[i] = c.vals[v.start:v.end] } return value } // Row return the value of given row. // // NOTE: Row number start from zero func (c *StringBase[T]) Row(row int) T { return T(c.RowBytes(row)) } // Row return the value of given row. // // Data is valid only in the current block. func (c *StringBase[T]) RowBytes(row int) []byte { pos := c.pos[row] return c.vals[pos.start:pos.end] } func (c *StringBase[T]) Each(f func(i int, b []byte) bool) { for i, p := range c.pos { if !f(i, c.vals[p.start:p.end]) { return } } } func (c *StringBase[T]) appendLen(x int) { i := 0 for x >= 0x80 { c.writerData = append(c.writerData, byte(x)|0x80) x >>= 7 i++ } c.writerData = append(c.writerData, byte(x)) } // Append value for insert func (c *StringBase[T]) Append(v ...T) { for _, v := range v { c.appendLen(len(v)) c.writerData = append(c.writerData, v...) } c.numRow += len(v) } // AppendBytes value of bytes for insert func (c *StringBase[T]) AppendBytes(v ...[]byte) { for _, v := range v { c.appendLen(len(v)) c.writerData = append(c.writerData, v...) 
	}
	c.numRow += len(v)
}

// NumRow return number of row for this block
func (c *StringBase[T]) NumRow() int {
	return c.numRow
}

// Array return a Array type for this column
func (c *StringBase[T]) Array() *Array[T] {
	return NewArray[T](c)
}

// Nullable return a nullable type for this column
func (c *StringBase[T]) Nullable() *Nullable[T] {
	return NewNullable[T](c)
}

// LC return a low cardinality type for this column
func (c *StringBase[T]) LC() *LowCardinality[T] {
	return NewLC[T](c)
}

// LowCardinality return a low cardinality type for this column
func (c *StringBase[T]) LowCardinality() *LowCardinality[T] {
	return NewLC[T](c)
}

// Reset all status and buffer data
//
// Reading data does not require a reset after each read. The reset will be triggered automatically.
//
// However, writing data requires a reset after each write.
func (c *StringBase[T]) Reset() {
	c.numRow = 0
	c.vals = c.vals[:0]
	c.pos = c.pos[:0]
	c.writerData = c.writerData[:0]
}

// SetWriteBufferSize set write buffer (number of bytes)
// this buffer only used for writing.
// By setting this buffer, you will avoid allocating the memory several times.
func (c *StringBase[T]) SetWriteBufferSize(b int) {
	if cap(c.writerData) < b {
		c.writerData = make([]byte, 0, b)
	}
}

// ReadRaw read raw data from the reader. it runs automatically when you call `ReadColumns()`
func (c *StringBase[T]) ReadRaw(num int, r *readerwriter.Reader) error {
	c.Reset()
	c.r = r
	c.numRow = num
	var p stringPos
	for i := 0; i < num; i++ {
		// Each row is a varint length followed by that many raw bytes.
		l, err := c.r.Uvarint()
		if err != nil {
			return fmt.Errorf("error read string len: %w", err)
		}
		p.start = p.end
		p.end += int(l)
		c.vals = append(c.vals, make([]byte, l)...)
if _, err := c.r.Read(c.vals[p.start:p.end]); err != nil { return fmt.Errorf("error read string: %w", err) } c.pos = append(c.pos, p) } return nil } // HeaderReader reads header data from read // it uses internally func (c *StringBase[T]) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error { c.r = r return c.readColumn(readColumn, revision) } func (c *StringBase[T]) Validate() error { chType := helper.FilterSimpleAggregate(c.chType) if !helper.IsString(chType) { return ErrInvalidType{ column: c, } } return nil } func (c *StringBase[T]) ColumnType() string { return helper.StringStr } // WriteTo write data to ClickHouse. // it uses internally func (c *StringBase[T]) WriteTo(w io.Writer) (int64, error) { nw, err := w.Write(c.writerData) return int64(nw), err } // HeaderWriter writes header data to writer // it uses internally func (c *StringBase[T]) HeaderWriter(w *readerwriter.Writer) { } func (c *StringBase[T]) appendEmpty() { var emptyValue T c.Append(emptyValue) } func (c *StringBase[T]) Elem(arrayLevel int, nullable, lc bool) ColumnBasic { if nullable { return c.Nullable().elem(arrayLevel, lc) } if lc { return c.LowCardinality().elem(arrayLevel) } if arrayLevel > 0 { return c.Array().elem(arrayLevel - 1) } return c } ================================================ FILE: column/string_test.go ================================================ package column_test import ( "context" "fmt" "os" "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" ) func TestString(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") conn, err := chconn.Connect(context.Background(), connString) require.NoError(t, err) tableName := "string" chType := "String" err = conn.Exec(context.Background(), fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName), ) require.NoError(t, err) set := chconn.Settings{ { Name: 
"allow_suspicious_low_cardinality_types", Value: "true", }, } err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s ( block_id UInt8, %[1]s %[2]s, %[1]s_nullable Nullable(%[2]s), %[1]s_array Array(%[2]s), %[1]s_array_nullable Array(Nullable(%[2]s)), %[1]s_lc LowCardinality(%[2]s), %[1]s_nullable_lc LowCardinality(Nullable(%[2]s)), %[1]s_array_lc Array(LowCardinality(%[2]s)), %[1]s_array_lc_nullable Array(LowCardinality(Nullable(%[2]s))) ) Engine=Memory`, tableName, chType), &chconn.QueryOptions{ Settings: set, }) require.NoError(t, err) blockID := column.New[uint8]() col := column.NewString() colNullable := column.NewString().Nullable() colArray := column.NewString().Array() colNullableArray := column.NewString().Nullable().Array() colLC := column.NewString().LC() colLCNullable := column.NewString().Nullable().LC() colArrayLC := column.NewString().LC().Array() colArrayLCNullable := column.NewString().Nullable().LC().Array() var colInsert []string var colInsertByte [][]byte var colNullableInsert []*string var colArrayInsert [][]string var colArrayNullableInsert [][]*string var colLCInsert []string var colLCNullableInsert []*string var colLCArrayInsert [][]string var colLCNullableArrayInsert [][]*string for insertN := 0; insertN < 2; insertN++ { rows := 10 for i := 0; i < rows; i++ { blockID.Append(uint8(insertN)) val := fmt.Sprintf("string %d", i) val2 := strings.Repeat(val, 50) valArray := []string{val, val2} valArrayNil := []*string{&val, nil} col.Append(val) colInsert = append(colInsert, val) colInsertByte = append(colInsertByte, []byte(val)) // example add nullable if i%2 == 0 { colNullableInsert = append(colNullableInsert, &val) colNullable.Append(val) colLCNullableInsert = append(colLCNullableInsert, &val) colLCNullable.Append(val) } else { colNullableInsert = append(colNullableInsert, nil) colNullable.AppendNil() colLCNullableInsert = append(colLCNullableInsert, nil) colLCNullable.AppendNil() } colArray.Append(valArray) 
colArrayInsert = append(colArrayInsert, valArray) colNullableArray.AppendP(valArrayNil) colArrayNullableInsert = append(colArrayNullableInsert, valArrayNil) colLCInsert = append(colLCInsert, val) colLC.Append(val) colLCArrayInsert = append(colLCArrayInsert, valArray) colArrayLC.Append(valArray) colLCNullableArrayInsert = append(colLCNullableArrayInsert, valArrayNil) colArrayLCNullable.AppendP(valArrayNil) } err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( block_id, %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable ) VALUES`, tableName), blockID, col, colNullable, colArray, colNullableArray, colLC, colLCNullable, colArrayLC, colArrayLCNullable, ) require.NoError(t, err) } // example read all colRead := column.NewString() colNullableRead := column.NewString().Nullable() colArrayRead := column.NewString().Array() colNullableArrayRead := column.NewString().Nullable().Array() colLCRead := column.NewString().LC() colLCNullableRead := column.NewString().Nullable().LC() colArrayLCRead := column.NewString().LC().Array() colArrayLCNullableRead := column.NewString().Nullable().LC().Array() selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_%[1]s order by block_id`, tableName), colRead, colNullableRead, colArrayRead, colNullableArrayRead, colLCRead, colLCNullableRead, colArrayLCRead, colArrayLCNullableRead) require.NoError(t, err) require.True(t, conn.IsBusy()) var colData []string var colDataByte [][]byte var colDataByteByData [][]byte var colDataByteByRow [][]byte var colNullableData []*string var colArrayData [][]string var colArrayNullableData [][]*string var colLCData []string var colLCNullableData []*string var colLCArrayData [][]string var colLCNullableArrayData [][]*string for selectStmt.Next() { 
require.NoError(t, err) colData = colRead.Read(colData) colDataByte = colRead.ReadBytes(colDataByte) colDataByteByData = append(colDataByteByData, colRead.DataBytes()...) for i := 0; i < selectStmt.RowsInBlock(); i++ { colDataByteByRow = append(colDataByteByRow, colRead.RowBytes(i)) } colNullableData = colNullableRead.ReadP(colNullableData) colArrayData = colArrayRead.Read(colArrayData) colArrayNullableData = colNullableArrayRead.ReadP(colArrayNullableData) colLCData = colLCRead.Read(colLCData) colLCNullableData = colLCNullableRead.ReadP(colLCNullableData) colLCArrayData = colArrayLCRead.Read(colLCArrayData) colLCNullableArrayData = colArrayLCNullableRead.ReadP(colLCNullableArrayData) } require.NoError(t, selectStmt.Err()) assert.Equal(t, colInsert, colData) assert.Equal(t, colInsertByte, colDataByte) assert.Equal(t, colInsertByte, colDataByteByData) assert.Equal(t, colInsertByte, colDataByteByRow) assert.Equal(t, colNullableInsert, colNullableData) assert.Equal(t, colArrayInsert, colArrayData) assert.Equal(t, colArrayNullableInsert, colArrayNullableData) assert.Equal(t, colLCInsert, colLCData) assert.Equal(t, colLCNullableInsert, colLCNullableData) assert.Equal(t, colLCArrayInsert, colLCArrayData) assert.Equal(t, colLCNullableArrayInsert, colLCNullableArrayData) // check dynamic column selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_%[1]s order by block_id`, tableName), ) require.NoError(t, err) autoColumns := selectStmt.Columns() assert.Len(t, autoColumns, 8) assert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType()) assert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType()) assert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType()) assert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType()) assert.Equal(t, colLCRead.ColumnType(), 
autoColumns[4].ColumnType())
	assert.Equal(t, colLCNullableRead.ColumnType(),
		autoColumns[5].ColumnType())
	assert.Equal(t, colArrayLCRead.ColumnType(),
		autoColumns[6].ColumnType())
	assert.Equal(t, colArrayLCNullableRead.ColumnType(),
		autoColumns[7].ColumnType())
	// Drain all remaining blocks so the connection is idle before Close.
	for selectStmt.Next() {
	}
	require.NoError(t, selectStmt.Err())
	selectStmt.Close()
}


================================================
FILE: column/tuple.go
================================================
package column

import (
	"fmt"
	"io"

	"github.com/vahid-sohrabloo/chconn/v2/internal/helper"
	"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter"
)

// Tuple is a column of Tuple(T1,T2,.....,Tn) ClickHouse data type
//
// this is actually a group of columns. it doesn't have any method for read or write data
//
// You MUST use this on Select and Insert methods and for append and read data use the sub columns
type Tuple struct {
	column
	columns []ColumnBasic
}

// NewTuple create a new tuple of Tuple(T1,T2,.....,Tn) ClickHouse data type
//
// this is actually a group of columns. it doesn't have any method for read or write data
//
// You MUST use this on Select and Insert methods and for append and read data use the sub columns
func NewTuple(columns ...ColumnBasic) *Tuple {
	if len(columns) < 1 {
		panic("tuple must have at least one column")
	}
	return &Tuple{
		columns: columns,
	}
}

// NumRow return number of row for this block
func (c *Tuple) NumRow() int {
	// All sub-columns hold the same number of rows, so the first one is authoritative.
	return c.columns[0].NumRow()
}

// Array return a Array type for this column
func (c *Tuple) Array() *ArrayBase {
	return NewArrayBase(c)
}

// Reset all statuses and buffered data
//
// After each reading, the reading data does not need to be reset. It will be automatically reset.
//
// When inserting, buffers are reset only after the operation is successful.
// If an error occurs, you can safely call insert again.
func (c *Tuple) Reset() {
	for _, col := range c.columns {
		col.Reset()
	}
}

// SetWriteBufferSize set write buffer (number of rows)
// this buffer only used for writing.
// By setting this buffer, you will avoid allocating the memory several times.
func (c *Tuple) SetWriteBufferSize(row int) {
	for _, col := range c.columns {
		col.SetWriteBufferSize(row)
	}
}

// ReadRaw read raw data from the reader. it runs automatically
//
// A tuple is stored column-wise on the wire: each sub-column's data
// directly follows the previous one.
func (c *Tuple) ReadRaw(num int, r *readerwriter.Reader) error {
	for i, col := range c.columns {
		err := col.ReadRaw(num, r)
		if err != nil {
			return fmt.Errorf("tuple: read column index %d: %w", i, err)
		}
	}
	return nil
}

// HeaderReader reads header data from reader.
// it uses internally
func (c *Tuple) HeaderReader(r *readerwriter.Reader, readColumn bool, revision uint64) error {
	c.r = r
	// Only the tuple itself carries the name/type column header; the
	// sub-columns read their headers with readColumn=false.
	err := c.readColumn(readColumn, revision)
	if err != nil {
		return err
	}
	for i, col := range c.columns {
		err = col.HeaderReader(r, false, revision)
		if err != nil {
			return fmt.Errorf("tuple: read column header index %d: %w", i, err)
		}
	}
	return nil
}

// Columns returns all the sub columns
func (c *Tuple) Columns() []ColumnBasic {
	return c.columns
}

// Validate checks the server-provided ClickHouse type against this column and
// recursively assigns and validates the types of the sub-columns.
func (c *Tuple) Validate() error {
	chType := helper.FilterSimpleAggregate(c.chType)
	// Point is represented as a Tuple(Float64, Float64) on the wire.
	if helper.IsPoint(chType) {
		chType = helper.PointMainTypeStr
	}
	if !helper.IsTuple(chType) {
		return ErrInvalidType{
			column: c,
		}
	}
	// Strip the leading "Tuple(" and trailing ")" and split the inner types.
	columnsTuple, err := helper.TypesInParentheses(chType[helper.LenTupleStr : len(chType)-1])
	if err != nil {
		return fmt.Errorf("tuple invalid types %w", err)
	}
	if len(columnsTuple) != len(c.columns) {
		//nolint:goerr113
		return fmt.Errorf("columns number for %s (%s) is not equal to tuple columns number: %d != %d",
			string(c.name),
			string(c.Type()),
			len(columnsTuple),
			len(c.columns),
		)
	}
	for i, col := range c.columns {
		col.SetType(columnsTuple[i].ChType)
		col.SetName(columnsTuple[i].Name)
		// NOTE(review): the sub-column's own validation error is discarded here
		// and replaced by an ErrInvalidType for the whole tuple — confirm this
		// loss of detail is intended.
		if col.Validate() != nil {
			return ErrInvalidType{
				column: c,
			}
		}
	}
	return nil
}

// ColumnType returns the ClickHouse type string of this column,
// e.g. "Tuple(String,Int64)".
func (c *Tuple) ColumnType() string {
	str := helper.TupleStr
	for _, col := range c.columns {
		str += col.ColumnType() + ","
	}
	// Replace the trailing comma with the closing parenthesis.
	return str[:len(str)-1] + ")"
}

// WriteTo write data to ClickHouse.
// it uses internally
func (c *Tuple) WriteTo(w io.Writer) (int64, error) {
	var n int64
	for i, col := range c.columns {
		nw, err := col.WriteTo(w)
		if err != nil {
			return n, fmt.Errorf("tuple: write column index %d: %w", i, err)
		}
		n += nw
	}
	return n, nil
}

// HeaderWriter writes header data to writer
// it uses internally
func (c *Tuple) HeaderWriter(w *readerwriter.Writer) {
	for _, col := range c.columns {
		col.HeaderWriter(w)
	}
}

// Elem returns this column, or its array wrapper for the requested
// array nesting level (used when resolving Array(Tuple(...)) types).
func (c *Tuple) Elem(arrayLevel int) ColumnBasic {
	if arrayLevel > 0 {
		return c.Array().elem(arrayLevel - 1)
	}
	return c
}


================================================
FILE: column/tuple1.go
================================================
package column

// Tuple1 is a column of Tuple(T1) ClickHouse data type
type Tuple1[T1 any] struct {
	Tuple
	col1 Column[T1]
}

// NewTuple1 create a new tuple of Tuple(T1) ClickHouse data type
func NewTuple1[T1 any](
	column1 Column[T1],
) *Tuple1[T1] {
	return &Tuple1[T1]{
		Tuple: Tuple{
			columns: []ColumnBasic{
				column1,
			},
		},
		col1: column1,
	}
}

// NewNested1 create a new nested of Nested(T1) ClickHouse data type
//
// this is actually an alias for NewTuple1(T1).Array()
func NewNested1[T any](
	column1 Column[T],
) *Array[T] {
	return NewTuple1(
		column1,
	).Array()
}

// Data get all the data in current block as a slice.
func (c *Tuple1[T]) Data() []T {
	// A one-element tuple simply delegates to its single sub-column.
	return c.col1.Data()
}

// Read reads all the data in current block and append to the input.
func (c *Tuple1[T]) Read(value []T) []T {
	return c.col1.Read(value)
}

// Row return the value of given row.
// NOTE: Row number start from zero
func (c *Tuple1[T]) Row(row int) T {
	return c.col1.Row(row)
}

// Append value for insert
func (c *Tuple1[T]) Append(v ...T) {
	c.col1.Append(v...)
}

// Array return a Array type for this column
func (c *Tuple1[T]) Array() *Array[T] {
	return NewArray[T](c)
}


================================================
FILE: column/tuple2_gen.go
================================================
package column

import (
	"unsafe"
)

// tuple2Value has the same underlying struct layout as any T used with
// Tuple2 (guaranteed by the ~struct constraint), so []T and []tuple2Value
// can be reinterpreted through unsafe.Pointer without copying.
type tuple2Value[T1, T2 any] struct {
	Col1 T1
	Col2 T2
}

// Tuple2 is a column of Tuple(T1, T2) ClickHouse data type
type Tuple2[T ~struct {
	Col1 T1
	Col2 T2
}, T1, T2 any] struct {
	Tuple
	col1 Column[T1]
	col2 Column[T2]
}

// NewTuple2 create a new tuple of Tuple(T1, T2) ClickHouse data type
func NewTuple2[T ~struct {
	Col1 T1
	Col2 T2
}, T1, T2 any](
	column1 Column[T1],
	column2 Column[T2],
) *Tuple2[T, T1, T2] {
	return &Tuple2[T, T1, T2]{
		Tuple: Tuple{
			columns: []ColumnBasic{
				column1,
				column2,
			},
		},
		col1: column1,
		col2: column2,
	}
}

// NewNested2 create a new nested of Nested(T1, T2) ClickHouse data type
//
// this is actually an alias for NewTuple2(T1, T2).Array()
func NewNested2[T ~struct {
	Col1 T1
	Col2 T2
}, T1, T2 any](
	column1 Column[T1],
	column2 Column[T2],
) *Array[T] {
	return NewTuple2[T](
		column1,
		column2,
	).Array()
}

// Data get all the data in current block as a slice.
func (c *Tuple2[T, T1, T2]) Data() []T {
	val := make([]T, c.NumRow())
	for i := 0; i < c.NumRow(); i++ {
		val[i] = T(tuple2Value[T1, T2]{
			Col1: c.col1.Row(i),
			Col2: c.col2.Row(i),
		})
	}
	return val
}

// Read reads all the data in current block and append to the input.
func (c *Tuple2[T, T1, T2]) Read(value []T) []T {
	// Reinterpret the caller's slice so fields can be filled in place
	// without a per-row conversion (layout match guaranteed by ~struct).
	valTuple := *(*[]tuple2Value[T1, T2])(unsafe.Pointer(&value))
	if cap(valTuple)-len(valTuple) >= c.NumRow() {
		// Enough spare capacity: extend in place without reallocating.
		valTuple = valTuple[:len(value)+c.NumRow()]
	} else {
		valTuple = append(valTuple, make([]tuple2Value[T1, T2], c.NumRow())...)
	}
	val := valTuple[len(valTuple)-c.NumRow():]
	for i := 0; i < c.NumRow(); i++ {
		val[i].Col1 = c.col1.Row(i)
		val[i].Col2 = c.col2.Row(i)
	}
	return *(*[]T)(unsafe.Pointer(&valTuple))
}

// Row return the value of given row.
// NOTE: Row number start from zero
func (c *Tuple2[T, T1, T2]) Row(row int) T {
	return T(tuple2Value[T1, T2]{
		Col1: c.col1.Row(row),
		Col2: c.col2.Row(row),
	})
}

// Append value for insert
func (c *Tuple2[T, T1, T2]) Append(v ...T) {
	// Fan each value's fields out to the corresponding sub-column.
	for _, v := range v {
		t := tuple2Value[T1, T2](v)
		c.col1.Append(t.Col1)
		c.col2.Append(t.Col2)
	}
}

// Array return a Array type for this column
func (c *Tuple2[T, T1, T2]) Array() *Array[T] {
	return NewArray[T](c)
}


================================================
FILE: column/tuple3_gen.go
================================================
package column

import (
	"unsafe"
)

// tuple3Value has the same underlying struct layout as any T used with
// Tuple3 (guaranteed by the ~struct constraint), so []T and []tuple3Value
// can be reinterpreted through unsafe.Pointer without copying.
type tuple3Value[T1, T2, T3 any] struct {
	Col1 T1
	Col2 T2
	Col3 T3
}

// Tuple3 is a column of Tuple(T1, T2, T3) ClickHouse data type
type Tuple3[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
}, T1, T2, T3 any] struct {
	Tuple
	col1 Column[T1]
	col2 Column[T2]
	col3 Column[T3]
}

// NewTuple3 create a new tuple of Tuple(T1, T2, T3) ClickHouse data type
func NewTuple3[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
}, T1, T2, T3 any](
	column1 Column[T1],
	column2 Column[T2],
	column3 Column[T3],
) *Tuple3[T, T1, T2, T3] {
	return &Tuple3[T, T1, T2, T3]{
		Tuple: Tuple{
			columns: []ColumnBasic{
				column1,
				column2,
				column3,
			},
		},
		col1: column1,
		col2: column2,
		col3: column3,
	}
}

// NewNested3 create a new nested of Nested(T1, T2, T3) ClickHouse data type
//
// this is actually an alias for NewTuple3(T1, T2, T3).Array()
func NewNested3[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
}, T1, T2, T3 any](
	column1 Column[T1],
	column2 Column[T2],
	column3 Column[T3],
) *Array[T] {
	return NewTuple3[T](
		column1,
		column2,
		column3,
	).Array()
}

// Data get all the data in current block as a slice.
func (c *Tuple3[T, T1, T2, T3]) Data() []T {
	val := make([]T, c.NumRow())
	for i := 0; i < c.NumRow(); i++ {
		val[i] = T(tuple3Value[T1, T2, T3]{
			Col1: c.col1.Row(i),
			Col2: c.col2.Row(i),
			Col3: c.col3.Row(i),
		})
	}
	return val
}

// Read reads all the data in current block and append to the input.
func (c *Tuple3[T, T1, T2, T3]) Read(value []T) []T {
	// Reinterpret the caller's slice so fields can be filled in place
	// without a per-row conversion (layout match guaranteed by ~struct).
	valTuple := *(*[]tuple3Value[T1, T2, T3])(unsafe.Pointer(&value))
	if cap(valTuple)-len(valTuple) >= c.NumRow() {
		valTuple = valTuple[:len(value)+c.NumRow()]
	} else {
		valTuple = append(valTuple, make([]tuple3Value[T1, T2, T3], c.NumRow())...)
	}
	val := valTuple[len(valTuple)-c.NumRow():]
	for i := 0; i < c.NumRow(); i++ {
		val[i].Col1 = c.col1.Row(i)
		val[i].Col2 = c.col2.Row(i)
		val[i].Col3 = c.col3.Row(i)
	}
	return *(*[]T)(unsafe.Pointer(&valTuple))
}

// Row return the value of given row.
// NOTE: Row number start from zero
func (c *Tuple3[T, T1, T2, T3]) Row(row int) T {
	return T(tuple3Value[T1, T2, T3]{
		Col1: c.col1.Row(row),
		Col2: c.col2.Row(row),
		Col3: c.col3.Row(row),
	})
}

// Append value for insert
func (c *Tuple3[T, T1, T2, T3]) Append(v ...T) {
	for _, v := range v {
		t := tuple3Value[T1, T2, T3](v)
		c.col1.Append(t.Col1)
		c.col2.Append(t.Col2)
		c.col3.Append(t.Col3)
	}
}

// Array return a Array type for this column
func (c *Tuple3[T, T1, T2, T3]) Array() *Array[T] {
	return NewArray[T](c)
}


================================================
FILE: column/tuple4_gen.go
================================================
package column

import (
	"unsafe"
)

// tuple4Value has the same underlying struct layout as any T used with
// Tuple4 (guaranteed by the ~struct constraint), so []T and []tuple4Value
// can be reinterpreted through unsafe.Pointer without copying.
type tuple4Value[T1, T2, T3, T4 any] struct {
	Col1 T1
	Col2 T2
	Col3 T3
	Col4 T4
}

// Tuple4 is a column of Tuple(T1, T2, T3, T4) ClickHouse data type
type Tuple4[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
	Col4 T4
}, T1, T2, T3, T4 any] struct {
	Tuple
	col1 Column[T1]
	col2 Column[T2]
	col3 Column[T3]
	col4 Column[T4]
}

// NewTuple4 create a new tuple of Tuple(T1, T2, T3, T4) ClickHouse data type
func NewTuple4[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
	Col4 T4
}, T1, T2, T3, T4 any](
	column1 Column[T1],
	column2 Column[T2],
	column3 Column[T3],
	column4 Column[T4],
) *Tuple4[T, T1, T2, T3, T4] {
	return &Tuple4[T, T1, T2, T3, T4]{
		Tuple: Tuple{
			columns: []ColumnBasic{
				column1,
				column2,
				column3,
				column4,
			},
		},
		col1: column1,
		col2: column2,
		col3: column3,
		col4: column4,
	}
}

// NewNested4 create a new nested of Nested(T1, T2, T3, T4) ClickHouse data type
//
// this is actually an alias for NewTuple4(T1, T2, T3, T4).Array()
func NewNested4[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
	Col4 T4
}, T1, T2, T3, T4 any](
	column1 Column[T1],
	column2 Column[T2],
	column3 Column[T3],
	column4 Column[T4],
) *Array[T] {
	return NewTuple4[T](
		column1,
		column2,
		column3,
		column4,
	).Array()
}

// Data get all the data in current block as a slice.
func (c *Tuple4[T, T1, T2, T3, T4]) Data() []T {
	val := make([]T, c.NumRow())
	for i := 0; i < c.NumRow(); i++ {
		val[i] = T(tuple4Value[T1, T2, T3, T4]{
			Col1: c.col1.Row(i),
			Col2: c.col2.Row(i),
			Col3: c.col3.Row(i),
			Col4: c.col4.Row(i),
		})
	}
	return val
}

// Read reads all the data in current block and append to the input.
func (c *Tuple4[T, T1, T2, T3, T4]) Read(value []T) []T {
	// Reinterpret the caller's slice so fields can be filled in place
	// without a per-row conversion (layout match guaranteed by ~struct).
	valTuple := *(*[]tuple4Value[T1, T2, T3, T4])(unsafe.Pointer(&value))
	if cap(valTuple)-len(valTuple) >= c.NumRow() {
		valTuple = valTuple[:len(value)+c.NumRow()]
	} else {
		valTuple = append(valTuple, make([]tuple4Value[T1, T2, T3, T4], c.NumRow())...)
	}
	val := valTuple[len(valTuple)-c.NumRow():]
	for i := 0; i < c.NumRow(); i++ {
		val[i].Col1 = c.col1.Row(i)
		val[i].Col2 = c.col2.Row(i)
		val[i].Col3 = c.col3.Row(i)
		val[i].Col4 = c.col4.Row(i)
	}
	return *(*[]T)(unsafe.Pointer(&valTuple))
}

// Row return the value of given row.
// NOTE: Row number start from zero
func (c *Tuple4[T, T1, T2, T3, T4]) Row(row int) T {
	return T(tuple4Value[T1, T2, T3, T4]{
		Col1: c.col1.Row(row),
		Col2: c.col2.Row(row),
		Col3: c.col3.Row(row),
		Col4: c.col4.Row(row),
	})
}

// Append value for insert
func (c *Tuple4[T, T1, T2, T3, T4]) Append(v ...T) {
	for _, v := range v {
		t := tuple4Value[T1, T2, T3, T4](v)
		c.col1.Append(t.Col1)
		c.col2.Append(t.Col2)
		c.col3.Append(t.Col3)
		c.col4.Append(t.Col4)
	}
}

// Array return a Array type for this column
func (c *Tuple4[T, T1, T2, T3, T4]) Array() *Array[T] {
	return NewArray[T](c)
}


================================================
FILE: column/tuple5_gen.go
================================================
package column

import (
	"unsafe"
)

// tuple5Value has the same underlying struct layout as any T used with
// Tuple5 (guaranteed by the ~struct constraint), so []T and []tuple5Value
// can be reinterpreted through unsafe.Pointer without copying.
type tuple5Value[T1, T2, T3, T4, T5 any] struct {
	Col1 T1
	Col2 T2
	Col3 T3
	Col4 T4
	Col5 T5
}

// Tuple5 is a column of Tuple(T1, T2, T3, T4, T5) ClickHouse data type
type Tuple5[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
	Col4 T4
	Col5 T5
}, T1, T2, T3, T4, T5 any] struct {
	Tuple
	col1 Column[T1]
	col2 Column[T2]
	col3 Column[T3]
	col4 Column[T4]
	col5 Column[T5]
}

// NewTuple5 create a new tuple of Tuple(T1, T2, T3, T4, T5) ClickHouse data type
func NewTuple5[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
	Col4 T4
	Col5 T5
}, T1, T2, T3, T4, T5 any](
	column1 Column[T1],
	column2 Column[T2],
	column3 Column[T3],
	column4 Column[T4],
	column5 Column[T5],
) *Tuple5[T, T1, T2, T3, T4, T5] {
	return &Tuple5[T, T1, T2, T3, T4, T5]{
		Tuple: Tuple{
			columns: []ColumnBasic{
				column1,
				column2,
				column3,
				column4,
				column5,
			},
		},
		col1: column1,
		col2: column2,
		col3: column3,
		col4: column4,
		col5: column5,
	}
}

// NewNested5 create a new nested of Nested(T1, T2, T3, T4, T5) ClickHouse data type
//
// this is actually an alias for NewTuple5(T1, T2, T3, T4, T5).Array()
func NewNested5[T ~struct {
	Col1 T1
	Col2 T2
	Col3 T3
	Col4 T4
	Col5 T5
}, T1, T2, T3, T4, T5 any](
	column1 Column[T1],
	column2 Column[T2],
	column3 Column[T3],
	column4 Column[T4],
	column5 Column[T5],
) *Array[T] {
	return NewTuple5[T](
		column1,
		column2,
		column3,
		column4,
		column5,
	).Array()
}

// Data get all the data in current block as a slice.
func (c *Tuple5[T, T1, T2, T3, T4, T5]) Data() []T {
	val := make([]T, c.NumRow())
	for i := 0; i < c.NumRow(); i++ {
		val[i] = T(tuple5Value[T1, T2, T3, T4, T5]{
			Col1: c.col1.Row(i),
			Col2: c.col2.Row(i),
			Col3: c.col3.Row(i),
			Col4: c.col4.Row(i),
			Col5: c.col5.Row(i),
		})
	}
	return val
}

// Read reads all the data in current block and append to the input.
func (c *Tuple5[T, T1, T2, T3, T4, T5]) Read(value []T) []T {
	// Reinterpret the caller's slice so fields can be filled in place
	// without a per-row conversion (layout match guaranteed by ~struct).
	valTuple := *(*[]tuple5Value[T1, T2, T3, T4, T5])(unsafe.Pointer(&value))
	if cap(valTuple)-len(valTuple) >= c.NumRow() {
		valTuple = valTuple[:len(value)+c.NumRow()]
	} else {
		valTuple = append(valTuple, make([]tuple5Value[T1, T2, T3, T4, T5], c.NumRow())...)
	}
	val := valTuple[len(valTuple)-c.NumRow():]
	for i := 0; i < c.NumRow(); i++ {
		val[i].Col1 = c.col1.Row(i)
		val[i].Col2 = c.col2.Row(i)
		val[i].Col3 = c.col3.Row(i)
		val[i].Col4 = c.col4.Row(i)
		val[i].Col5 = c.col5.Row(i)
	}
	return *(*[]T)(unsafe.Pointer(&valTuple))
}

// Row return the value of given row.
// NOTE: Row number start from zero
func (c *Tuple5[T, T1, T2, T3, T4, T5]) Row(row int) T {
	return T(tuple5Value[T1, T2, T3, T4, T5]{
		Col1: c.col1.Row(row),
		Col2: c.col2.Row(row),
		Col3: c.col3.Row(row),
		Col4: c.col4.Row(row),
		Col5: c.col5.Row(row),
	})
}

// Append value for insert
func (c *Tuple5[T, T1, T2, T3, T4, T5]) Append(v ...T) {
	for _, v := range v {
		t := tuple5Value[T1, T2, T3, T4, T5](v)
		c.col1.Append(t.Col1)
		c.col2.Append(t.Col2)
		c.col3.Append(t.Col3)
		c.col4.Append(t.Col4)
		c.col5.Append(t.Col5)
	}
}

// Array return a Array type for this column
func (c *Tuple5[T, T1, T2, T3, T4, T5]) Array() *Array[T] {
	return NewArray[T](c)
}


================================================
FILE: column/tuple_test.go
================================================
package column_test

import (
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vahid-sohrabloo/chconn/v2"
	"github.com/vahid-sohrabloo/chconn/v2/column"
	"github.com/vahid-sohrabloo/chconn/v2/types"
)

// TestTuple round-trips every Tuple variant (plain, nullable, array,
// low-cardinality and their combinations) through a live ClickHouse server
// and finally verifies the auto-detected column types of a dynamic select.
func TestTuple(t *testing.T) {
	tableName := "tuple"
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)
	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),
	)
	require.NoError(t, err)
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "true",
		},
	}
	err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s ( %[1]s Tuple(String, Int64), %[1]s_nullable Tuple(Nullable(String), Nullable(Int64)), %[1]s_array Tuple(Array(String),Array(Int64)), %[1]s_array_nullable Tuple(Array(Nullable(String)),Array(Nullable(Int64))), %[1]s_lc Tuple(LowCardinality(String),LowCardinality(Int64)), %[1]s_nullable_lc Tuple(LowCardinality(Nullable(String)),LowCardinality(Nullable(Int64))), %[1]s_array_lc Tuple(Array(LowCardinality(String)),Array(LowCardinality(Int64))), %[1]s_array_lc_nullable Tuple(Array(LowCardinality(Nullable(String))),Array(LowCardinality(Nullable(Int64)))), %[1]s_array_array_tuple Array(Array(Tuple(String, Int64))) ) Engine=Memory`, tableName), &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)
	// Build a write column (and its Go-side expectation slices) for each table column.
	colString := column.NewString()
	colInt := column.New[int64]()
	col := column.NewTuple(colString, colInt)
	colNullableString := column.NewString().Nullable()
	colNullableInt := column.New[int64]().Nullable()
	colNullable := column.NewTuple(colNullableString, colNullableInt)
	colArrayString := column.NewString().Array()
	colArrayInt := column.New[int64]().Array()
	colArray := column.NewTuple(colArrayString, colArrayInt)
	colNullableArrayString := column.NewString().Nullable().Array()
	colNullableArrayInt := column.New[int64]().Nullable().Array()
	colNullableArray := column.NewTuple(colNullableArrayString, colNullableArrayInt)
	colLCString := column.NewString().LowCardinality()
	colLCInt := column.New[int64]().LowCardinality()
	colLC := column.NewTuple(colLCString, colLCInt)
	colLCNullableString := column.NewString().Nullable().LowCardinality()
	colLCNullableInt := column.New[int64]().Nullable().LowCardinality()
	colLCNullable := column.NewTuple(colLCNullableString, colLCNullableInt)
	colArrayLCString := column.NewString().LowCardinality().Array()
	colArrayLCInt := column.New[int64]().LowCardinality().Array()
	colArrayLC := column.NewTuple(colArrayLCString, colArrayLCInt)
	colArrayLCNullableString := column.NewString().Nullable().LowCardinality().Array()
	colArrayLCNullableInt := column.New[int64]().Nullable().LowCardinality().Array()
	colArrayLCNullable := column.NewTuple(colArrayLCNullableString, colArrayLCNullableInt)
	colArrayArrayTupleString := column.NewString()
	colArrayArrayTupleInt := column.New[int64]()
	colArrayArrayTuple := column.NewTuple(colArrayArrayTupleString, colArrayArrayTupleInt).Array().Array()
	var colStringInsert []string
	var colIntInsert []int64
	var colNullableStringInsert []*string
	var colNullableIntInsert []*int64
	var colArrayStringInsert [][]string
	var colArrayIntInsert [][]int64
	var colArrayNullableStringInsert [][]*string
	var colArrayNullableIntInsert [][]*int64
	var colLCStringInsert []string
	var colLCIntInsert []int64
	var colLCNullableStringInsert []*string
	var colLCNullableIntInsert []*int64
	var colLCArrayStringInsert [][]string
	var colLCArrayIntInsert [][]int64
	var colLCNullableArrayStringInsert [][]*string
	var colLCNullableArrayIntInsert [][]*int64
	// Two insert rounds verify that column buffers are reusable after a successful insert.
	for insertN := 0; insertN < 2; insertN++ {
		rows := 10
		for i := 0; i < rows; i++ {
			valString := fmt.Sprintf("string %d", i)
			valInt := int64(i)
			val2String := fmt.Sprintf("string %d", i+1)
			val2Int := int64(i + 1)
			valArrayString := []string{valString, val2String}
			valArrayInt := []int64{valInt, val2Int}
			valArrayNilString := []*string{&valString, nil}
			valArrayNilInt := []*int64{&valInt, nil}
			colStringInsert = append(colStringInsert, valString)
			colIntInsert = append(colIntInsert, valInt)
			colString.Append(valString)
			colInt.Append(valInt)
			// example add nullable
			if i%2 == 0 {
				colNullableStringInsert = append(colNullableStringInsert, &valString)
				colNullableIntInsert = append(colNullableIntInsert, &valInt)
				colNullableString.Append(valString)
				colNullableInt.Append(valInt)
				colLCNullableStringInsert = append(colLCNullableStringInsert, &valString)
				colLCNullableIntInsert = append(colLCNullableIntInsert, &valInt)
				colLCNullableString.Append(valString)
				colLCNullableInt.Append(valInt)
			} else {
				colNullableStringInsert = append(colNullableStringInsert, nil)
				colNullableIntInsert = append(colNullableIntInsert, nil)
				colNullableString.AppendNil()
				colNullableInt.AppendNil()
				colLCNullableStringInsert = append(colLCNullableStringInsert, nil)
				colLCNullableIntInsert = append(colLCNullableIntInsert, nil)
				colLCNullableString.AppendNil()
				colLCNullableInt.AppendNil()
			}
			colArrayString.Append(valArrayString)
			colArrayInt.Append(valArrayInt)
			colArrayStringInsert = append(colArrayStringInsert, valArrayString)
			colArrayIntInsert = append(colArrayIntInsert, valArrayInt)
			colNullableArrayString.AppendP(valArrayNilString)
			colNullableArrayInt.AppendP(valArrayNilInt)
			colArrayNullableStringInsert = append(colArrayNullableStringInsert, valArrayNilString)
			colArrayNullableIntInsert = append(colArrayNullableIntInsert, valArrayNilInt)
			colLCStringInsert = append(colLCStringInsert, valString)
			colLCIntInsert = append(colLCIntInsert, valInt)
			colLCString.Append(valString)
			colLCInt.Append(valInt)
			colLCArrayStringInsert = append(colLCArrayStringInsert, valArrayString)
			colLCArrayIntInsert = append(colLCArrayIntInsert, valArrayInt)
			colArrayLCString.Append(valArrayString)
			colArrayLCInt.Append(valArrayInt)
			colLCNullableArrayStringInsert = append(colLCNullableArrayStringInsert, valArrayNilString)
			colLCNullableArrayIntInsert = append(colLCNullableArrayIntInsert, valArrayNilInt)
			colArrayLCNullableString.AppendP(valArrayNilString)
			colArrayLCNullableInt.AppendP(valArrayNilInt)
			// For Array(Array(Tuple(...))): one outer element containing one inner
			// array of two tuples, whose fields go into the leaf columns.
			colArrayArrayTuple.AppendLen(1)
			colArrayArrayTuple.Column().(*column.ArrayBase).AppendLen(2)
			colArrayArrayTupleString.Append(valString, val2String)
			colArrayArrayTupleInt.Append(valInt, val2Int)
		}
		err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable, %[1]s_array_array_tuple ) VALUES`, tableName),
			col,
			colNullable,
			colArray,
			colNullableArray,
			colLC,
			colLCNullable,
			colArrayLC,
			colArrayLCNullable,
			colArrayArrayTuple,
		)
		require.NoError(t, err)
	}
	// example read all
	// NOTE(review): %[1]s_array_array_tuple is inserted above but never selected
	// back and verified — consider covering it here as well.
	colStringRead := column.NewString()
	colIntRead := column.New[int64]()
	colRead := column.NewTuple(colStringRead, colIntRead)
	colNullableStringRead := column.NewString().Nullable()
	colNullableIntRead := column.New[int64]().Nullable()
	colNullableRead := column.NewTuple(colNullableStringRead, colNullableIntRead)
	colArrayStringRead := column.NewString().Array()
	colArrayIntRead := column.New[int64]().Array()
	colArrayRead := column.NewTuple(colArrayStringRead, colArrayIntRead)
	colNullableArrayStringRead := column.NewString().Nullable().Array()
	colNullableArrayIntRead := column.New[int64]().Nullable().Array()
	colNullableArrayRead := column.NewTuple(colNullableArrayStringRead, colNullableArrayIntRead)
	colLCStringRead := column.NewString().LowCardinality()
	colLCIntRead := column.New[int64]().LowCardinality()
	colLCRead := column.NewTuple(colLCStringRead, colLCIntRead)
	colLCNullableStringRead := column.NewString().Nullable().LowCardinality()
	colLCNullableIntRead := column.New[int64]().Nullable().LowCardinality()
	colLCNullableRead := column.NewTuple(colLCNullableStringRead, colLCNullableIntRead)
	colArrayLCStringRead := column.NewString().LowCardinality().Array()
	colArrayLCIntRead := column.New[int64]().LowCardinality().Array()
	colArrayLCRead := column.NewTuple(colArrayLCStringRead, colArrayLCIntRead)
	colArrayLCNullableStringRead := column.NewString().Nullable().LowCardinality().Array()
	colArrayLCNullableIntRead := column.New[int64]().Nullable().LowCardinality().Array()
	colArrayLCNullableRead := column.NewTuple(colArrayLCNullableStringRead, colArrayLCNullableIntRead)
	selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_%[1]s`, tableName),
		colRead,
		colNullableRead,
		colArrayRead,
		colNullableArrayRead,
		colLCRead,
		colLCNullableRead,
		colArrayLCRead,
		colArrayLCNullableRead)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	var colStringData []string
	var colIntData []int64
	var colNullableStringData []*string
	var colNullableIntData []*int64
	var colArrayStringData [][]string
	var colArrayIntData [][]int64
	var colArrayNullableStringData [][]*string
	var colArrayNullableIntData [][]*int64
	var colLCStringData []string
	var colLCIntData []int64
	var colLCNullableStringData []*string
	var colLCNullableIntData []*int64
	var colLCArrayStringData [][]string
	var colLCArrayIntData [][]int64
	var colLCNullableArrayStringData [][]*string
	var colLCNullableArrayIntData [][]*int64
	// Tuple columns are read through their sub-columns, block by block.
	for selectStmt.Next() {
		colStringData = colStringRead.Read(colStringData)
		colNullableStringData = colNullableStringRead.ReadP(colNullableStringData)
		colArrayStringData = colArrayStringRead.Read(colArrayStringData)
		colArrayNullableStringData = colNullableArrayStringRead.ReadP(colArrayNullableStringData)
		colLCStringData = colLCStringRead.Read(colLCStringData)
		colLCNullableStringData = colLCNullableStringRead.ReadP(colLCNullableStringData)
		colLCArrayStringData = colArrayLCStringRead.Read(colLCArrayStringData)
		colLCNullableArrayStringData = colArrayLCNullableStringRead.ReadP(colLCNullableArrayStringData)
		colIntData = colIntRead.Read(colIntData)
		colNullableIntData = colNullableIntRead.ReadP(colNullableIntData)
		colArrayIntData = colArrayIntRead.Read(colArrayIntData)
		colArrayNullableIntData = colNullableArrayIntRead.ReadP(colArrayNullableIntData)
		colLCIntData = colLCIntRead.Read(colLCIntData)
		colLCNullableIntData = colLCNullableIntRead.ReadP(colLCNullableIntData)
		colLCArrayIntData = colArrayLCIntRead.Read(colLCArrayIntData)
		colLCNullableArrayIntData = colArrayLCNullableIntRead.ReadP(colLCNullableArrayIntData)
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, colStringInsert, colStringData)
	assert.Equal(t, colIntInsert, colIntData)
	assert.Equal(t, colNullableStringInsert, colNullableStringData)
	assert.Equal(t, colNullableIntInsert, colNullableIntData)
	assert.Equal(t, colArrayStringInsert, colArrayStringData)
	assert.Equal(t, colArrayIntInsert, colArrayIntData)
	assert.Equal(t, colArrayNullableStringInsert, colArrayNullableStringData)
	assert.Equal(t, colArrayNullableIntInsert, colArrayNullableIntData)
	assert.Equal(t, colLCStringInsert, colLCStringData)
	assert.Equal(t, colLCIntInsert, colLCIntData)
	assert.Equal(t, colLCNullableStringInsert, colLCNullableStringData)
	assert.Equal(t, colLCNullableIntInsert, colLCNullableIntData)
	assert.Equal(t, colLCArrayStringInsert, colLCArrayStringData)
assert.Equal(t, colLCArrayIntInsert, colLCArrayIntData)
	assert.Equal(t, colLCNullableArrayStringInsert, colLCNullableArrayStringData)
	assert.Equal(t, colLCNullableArrayIntInsert, colLCNullableArrayIntData)

	// check dynamic column
	selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s, %[1]s_nullable, %[1]s_array, %[1]s_array_nullable, %[1]s_lc, %[1]s_nullable_lc, %[1]s_array_lc, %[1]s_array_lc_nullable FROM test_%[1]s`, tableName),
	)
	require.NoError(t, err)
	autoColumns := selectStmt.Columns()
	assert.Len(t, autoColumns, 8)
	// The auto-detected columns must report the same ClickHouse types as the
	// explicitly constructed read columns above.
	assert.Equal(t, colRead.ColumnType(), autoColumns[0].ColumnType())
	assert.Equal(t, colNullableRead.ColumnType(), autoColumns[1].ColumnType())
	assert.Equal(t, colArrayRead.ColumnType(), autoColumns[2].ColumnType())
	assert.Equal(t, colNullableArrayRead.ColumnType(), autoColumns[3].ColumnType())
	assert.Equal(t, colLCRead.ColumnType(), autoColumns[4].ColumnType())
	assert.Equal(t, colLCNullableRead.ColumnType(), autoColumns[5].ColumnType())
	assert.Equal(t, colArrayLCRead.ColumnType(), autoColumns[6].ColumnType())
	assert.Equal(t, colArrayLCNullableRead.ColumnType(), autoColumns[7].ColumnType())
	// Drain all remaining blocks so the connection is idle before Close.
	for selectStmt.Next() {
	}
	require.NoError(t, selectStmt.Err())
	selectStmt.Close()
}

// TestTupleNoColumn verifies that NewTuple panics when called with no sub-columns.
func TestTupleNoColumn(t *testing.T) {
	assert.Panics(t, func() {
		column.NewTuple()
	})
}

// TestGeo round-trips the geo types (Point, Ring, Polygon, MultiPolygon),
// which are Tuple/Array compositions of Point, through a live ClickHouse server.
func TestGeo(t *testing.T) {
	tableName := "geo"
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := chconn.Connect(context.Background(), connString)
	require.NoError(t, err)
	err = conn.Exec(context.Background(),
		fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName),
	)
	require.NoError(t, err)
	set := chconn.Settings{
		{
			Name:  "allow_suspicious_low_cardinality_types",
			Value: "1",
		}, {
			Name:  "allow_experimental_geo_types",
			Value: "1",
		},
	}
	err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s ( point Point , ring Ring , polygon Polygon , multiPolygon MultiPolygon ) Engine=Memory`, tableName), &chconn.QueryOptions{
		Settings: set,
	})
	require.NoError(t, err)
	// Ring = Array(Point), Polygon = Array(Ring), MultiPolygon = Array(Polygon).
	colPoint := column.NewPoint()
	colRing := column.NewPoint().Array()
	colPolygon := column.NewPoint().Array().Array()
	colMultiPolygon := column.NewPoint().Array().Array().Array()
	colPoint.SetWriteBufferSize(20)
	colRing.SetWriteBufferSize(20)
	colPolygon.SetWriteBufferSize(20)
	colMultiPolygon.SetWriteBufferSize(20)
	var pointInsert []types.Point
	var ringInsert [][]types.Point
	var polygonInsert [][][]types.Point
	var multiPolygonInsert [][][][]types.Point
	// Two insert rounds verify that column buffers are reusable after a successful insert.
	for insertN := 0; insertN < 2; insertN++ {
		rows := 10
		for i := 0; i < rows; i++ {
			pointValue := types.Point{
				Col1: float64(i),
				Col2: float64(i + 1),
			}
			ringValue := []types.Point{
				{
					Col1: float64(i),
					Col2: float64(i + 1),
				},
				{
					Col1: float64(i + 2),
					Col2: float64(i + 3),
				},
			}
			polygonValue := [][]types.Point{
				{
					{
						Col1: float64(i),
						Col2: float64(i + 1),
					},
					{
						Col1: float64(i + 2),
						Col2: float64(i + 3),
					},
				},
				{
					{
						Col1: float64(i),
						Col2: float64(i + 1),
					},
					{
						Col1: float64(i + 2),
						Col2: float64(i + 3),
					},
				},
			}
			multiPolygonValue := [][][]types.Point{
				{
					{
						{
							Col1: float64(i),
							Col2: float64(i + 1),
						},
						{
							Col1: float64(i + 2),
							Col2: float64(i + 3),
						},
					},
					{
						{
							Col1: float64(i),
							Col2: float64(i + 1),
						},
						{
							Col1: float64(i + 2),
							Col2: float64(i + 3),
						},
					},
				},
			}
			colPoint.Append(pointValue)
			pointInsert = append(pointInsert, pointValue)
			colRing.Append(ringValue)
			ringInsert = append(ringInsert, ringValue)
			colPolygon.Append(polygonValue)
			polygonInsert = append(polygonInsert, polygonValue)
			colMultiPolygon.Append(multiPolygonValue)
			multiPolygonInsert = append(multiPolygonInsert, multiPolygonValue)
		}
		err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( point, ring, polygon, multiPolygon ) VALUES`, tableName),
			colPoint,
			colRing,
			colPolygon,
			colMultiPolygon,
		)
		require.NoError(t, err)
	}
	// example read all
	colPointRead := column.NewPoint()
	colRingRead := column.NewPoint().Array()
	colPolygonRead := column.NewPoint().Array().Array()
	colMultiPolygonRead := column.NewPoint().Array().Array().Array()
	selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT point, ring, polygon, multiPolygon FROM test_%[1]s`, tableName),
		colPointRead,
		colRingRead,
		colPolygonRead,
		colMultiPolygonRead,
	)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	var pointData []types.Point
	var ringData [][]types.Point
	var polygonData [][][]types.Point
	var multiPolygonData [][][][]types.Point
	for selectStmt.Next() {
		pointData = colPointRead.Read(pointData)
		ringData = colRingRead.Read(ringData)
		polygonData = colPolygonRead.Read(polygonData)
		multiPolygonData = colMultiPolygonRead.Read(multiPolygonData)
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, pointInsert, pointData)
	assert.Equal(t, ringInsert, ringData)
	assert.Equal(t, polygonInsert, polygonData)
	assert.Equal(t, multiPolygonInsert, multiPolygonData)
}


================================================
FILE: column/tuples_template/tuple.go.tmpl
================================================
package column

import (
	"unsafe"
)

type tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }} any] struct {
{{- range $val := iterate .Numbrer "1" }}
	Col{{ $val }} T{{ $val }}{{end }}
}

// Tuple{{.Numbrer}} is a column of Tuple(T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}) ClickHouse data type
type Tuple{{.Numbrer}}[T ~struct {
{{- range $val := iterate .Numbrer "1" }}
	Col{{ $val }} T{{ $val }}{{end }}
}{{- range $val := iterate .Numbrer "1" }}, T{{ $val }}{{end }} any] struct {
	Tuple
{{- range $val := iterate .Numbrer "1" }}
	col{{ $val }} Column[T{{ $val }}]{{end }}
}

// NewTuple{{.Numbrer}} create a new tuple of Tuple(T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}) ClickHouse data type
func NewTuple{{.Numbrer}}[T ~struct {
{{- range $val := iterate .Numbrer "1" }}
	Col{{ $val }} T{{ $val }}{{end }}
}{{- range $val := iterate .Numbrer "1" }}, T{{ $val }}{{end }} any](
{{- range $val := iterate .Numbrer "1" }}
column{{ $val }} Column[T{{ $val }}],{{end }} ) *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer "1" }} ,T{{$val}}{{end}}] { return &Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer "1" }} ,T{{$val}}{{end}}]{ Tuple: Tuple{ columns: []ColumnBasic{ {{- range $val := iterate .Numbrer "1" }} column{{ $val }},{{end }} }, }, {{- range $val := iterate .Numbrer "1" }} col{{ $val }}: column{{ $val }},{{end }} } } // NewNested{{.Numbrer}} create a new nested of Nested(T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}) ClickHouse data type // // this is actually an alias for NewTuple{{.Numbrer}}(T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}).Array() func NewNested{{.Numbrer}}[T ~struct { {{- range $val := iterate .Numbrer "1" }} Col{{ $val }} T{{ $val }}{{end }} }{{- range $val := iterate .Numbrer "1" }}, T{{ $val }}{{end }} any]( {{- range $val := iterate .Numbrer "1" }} column{{ $val }} Column[T{{ $val }}],{{end }} ) *Array[T] { return NewTuple{{.Numbrer}}[T]( {{- range $val := iterate .Numbrer "1" }} column{{ $val }},{{end}} ).Array() } // Data get all the data in current block as a slice. func (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer "1" }} ,T{{$val}}{{end}}]) Data() []T { val := make([]T, c.NumRow()) for i := 0; i < c.NumRow(); i++ { val[i] = T(tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}]{ {{- range $val := iterate .Numbrer "1" }} Col{{ $val }}: c.col{{ $val }}.Row(i),{{end }} }) } return val } // Read reads all the data in current block and append to the input. 
func (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer "1" }} ,T{{$val}}{{end}}]) Read(value []T) []T { valTuple := *(*[]tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}])(unsafe.Pointer(&value)) if cap(valTuple)-len(valTuple) >= c.NumRow() { valTuple = valTuple[:len(value)+c.NumRow()] } else { valTuple = append(valTuple, make([]tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}], c.NumRow())...) } val := valTuple[len(valTuple)-c.NumRow():] for i := 0; i < c.NumRow(); i++ { {{- range $val := iterate .Numbrer "1" }} val[i].Col{{ $val }} = c.col{{ $val }}.Row(i){{end }} } return *(*[]T)(unsafe.Pointer(&valTuple)) } // Row return the value of given row. // NOTE: Row number start from zero func (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer "1" }} ,T{{$val}}{{end}}]) Row(row int) T { return T(tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}]{ {{- range $val := iterate .Numbrer "1" }} Col{{ $val }}: c.col{{ $val }}.Row(row),{{end }} }) } // Append value for insert func (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer "1" }} ,T{{$val}}{{end}}]) Append(v ...T) { for _, v := range v { t := tuple{{.Numbrer}}Value[T1{{- range $val := iterate .Numbrer "2" }}, T{{ $val }}{{end }}](v) {{- range $val := iterate .Numbrer "1" }} c.col{{ $val }}.Append(t.Col{{ $val }}){{end }} } } // Array return a Array type for this column func (c *Tuple{{.Numbrer}}[T{{- range $val := iterate .Numbrer "1" }} ,T{{$val}}{{end}}]) Array() *Array[T] { return NewArray[T](c) } ================================================ FILE: column/tuples_template/tuple2.json ================================================ { "Numbrer": "2" } ================================================ FILE: column/tuples_template/tuple3.json ================================================ { "Numbrer": "3" } ================================================ FILE: 
column/tuples_template/tuple4.json ================================================ { "Numbrer": "4" } ================================================ FILE: column/tuples_template/tuple5.json ================================================ { "Numbrer": "5" } ================================================ FILE: column/tuples_test.go ================================================ package column_test import ( "context" "fmt" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2" "github.com/vahid-sohrabloo/chconn/v2/column" "github.com/vahid-sohrabloo/chconn/v2/types" ) func TestTuples(t *testing.T) { tableName := "tuples" t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") conn, err := chconn.Connect(context.Background(), connString) require.NoError(t, err) err = conn.Exec(context.Background(), fmt.Sprintf(`DROP TABLE IF EXISTS test_%s`, tableName), ) require.NoError(t, err) set := chconn.Settings{ { Name: "allow_suspicious_low_cardinality_types", Value: "true", }, } err = conn.ExecWithOption(context.Background(), fmt.Sprintf(`CREATE TABLE test_%[1]s ( %[1]s1 Tuple(Int64), %[1]s1_array Array(Tuple(Int64)), %[1]s2 Tuple(Int64, Int64), %[1]s2_array Array(Tuple(Int64, Int64)), %[1]s3 Tuple(Int64, Int64, Int64), %[1]s3_array Array(Tuple(Int64, Int64, Int64)), %[1]s4 Tuple(Int64, Int64, Int64, Int64), %[1]s4_array Array(Tuple(Int64, Int64, Int64, Int64)), %[1]s5 Tuple(Int64, Int64, Int64, Int64, Int64), %[1]s5_array Array(Tuple(Int64, Int64, Int64, Int64, Int64)) ) Engine=Memory`, tableName), &chconn.QueryOptions{ Settings: set, }) require.NoError(t, err) col1 := column.NewTuple1[int64](column.New[int64]()) col1Array := column.NewTuple1[int64](column.New[int64]()).Array() type Tuple2 types.Tuple2[int64, int64] col2 := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]()) col2Array := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), 
column.New[int64]()).Array() type Tuple3 types.Tuple3[int64, int64, int64] col3 := column.NewTuple3[Tuple3, int64, int64, int64](column.New[int64](), column.New[int64](), column.New[int64]()) col3Array := column.NewTuple3[Tuple3, int64, int64, int64](column.New[int64](), column.New[int64](), column.New[int64]()).Array() type Tuple4 types.Tuple4[int64, int64, int64, int64] col4 := column.NewTuple4[ Tuple4, int64, int64, int64, int64]( column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), ) col4Array := column.NewTuple4[ Tuple4, int64, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), ).Array() type Tuple5 types.Tuple5[ int64, int64, int64, int64, int64, ] col5 := column.NewTuple5[ Tuple5, int64, int64, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), ) col5Array := column.NewTuple5[ Tuple5, int64, int64, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), ).Array() var col1Insert []int64 var col1ArrayInsert [][]int64 var col2Insert []Tuple2 var col2ArrayInsert [][]Tuple2 var col3Insert []Tuple3 var col3ArrayInsert [][]Tuple3 var col4Insert []Tuple4 var col4ArrayInsert [][]Tuple4 var col5Insert []Tuple5 var col5ArrayInsert [][]Tuple5 for insertN := 0; insertN < 2; insertN++ { rows := 10 for i := 0; i < rows; i++ { col1.Append(int64(i)) col1Insert = append(col1Insert, int64(i)) col1Array.Append([]int64{int64(i), int64(i + 1)}) col1ArrayInsert = append(col1ArrayInsert, []int64{int64(i), int64(i + 1)}) col2.Append(Tuple2{int64(i), int64(i + 1)}) col2Insert = append(col2Insert, Tuple2{int64(i), int64(i + 1)}) col2Array.Append([]Tuple2{{int64(i), int64(i + 1)}, {int64(i + 2), int64(i + 3)}}) col2ArrayInsert = append(col2ArrayInsert, []Tuple2{{int64(i), int64(i + 1)}, {int64(i + 2), int64(i + 3)}}) col3.Append(Tuple3{int64(i), 
int64(i + 1), int64(i + 2)}) col3Insert = append(col3Insert, Tuple3{int64(i), int64(i + 1), int64(i + 2)}) col3Array.Append([]Tuple3{ {int64(i), int64(i + 1), int64(i + 2)}, {int64(i + 3), int64(i + 4), int64(i + 5)}, }) col3ArrayInsert = append(col3ArrayInsert, []Tuple3{ {int64(i), int64(i + 1), int64(i + 2)}, {int64(i + 3), int64(i + 4), int64(i + 5)}, }) col4.Append(Tuple4{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)}) col4Insert = append(col4Insert, Tuple4{int64(i), int64(i + 1), int64(i + 2), int64(i + 3)}) col4Array.Append([]Tuple4{ {int64(i), int64(i + 1), int64(i + 2), int64(i + 3)}, {int64(i + 4), int64(i + 5), int64(i + 6), int64(i + 7)}, }) col4ArrayInsert = append(col4ArrayInsert, []Tuple4{ {int64(i), int64(i + 1), int64(i + 2), int64(i + 3)}, {int64(i + 4), int64(i + 5), int64(i + 6), int64(i + 7)}, }) col5.Append(Tuple5{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)}) col5Insert = append(col5Insert, Tuple5{int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)}) col5Array.Append([]Tuple5{ {int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)}, {int64(i + 5), int64(i + 6), int64(i + 7), int64(i + 8), int64(i + 9)}, }) col5ArrayInsert = append(col5ArrayInsert, []Tuple5{ {int64(i), int64(i + 1), int64(i + 2), int64(i + 3), int64(i + 4)}, {int64(i + 5), int64(i + 6), int64(i + 7), int64(i + 8), int64(i + 9)}, }) } err = conn.Insert(context.Background(), fmt.Sprintf(`INSERT INTO test_%[1]s ( %[1]s1, %[1]s1_array, %[1]s2, %[1]s2_array, %[1]s3, %[1]s3_array, %[1]s4, %[1]s4_array, %[1]s5, %[1]s5_array ) VALUES`, tableName), col1, col1Array, col2, col2Array, col3, col3Array, col4, col4Array, col5, col5Array, ) require.NoError(t, err) } // example read all col1Read := column.NewTuple1[int64](column.New[int64]()) col1ArrayRead := column.NewTuple1[int64](column.New[int64]()).Array() col2Read := column.NewTuple2[Tuple2, int64, int64](column.New[int64](), column.New[int64]()) col2ArrayRead := column.NewTuple2[Tuple2, 
int64, int64](column.New[int64](), column.New[int64]()).Array() col3Read := column.NewTuple3[ Tuple3, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), ) col3ArrayRead := column.NewTuple3[ Tuple3, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), ).Array() col4Read := column.NewTuple4[ Tuple4, int64, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), ) col4ArrayRead := column.NewTuple4[ Tuple4, int64, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), ).Array() col5Read := column.NewTuple5[ Tuple5, int64, int64, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), ) col5ArrayRead := column.NewTuple5[ Tuple5, int64, int64, int64, int64, int64, ]( column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), column.New[int64](), ).Array() selectStmt, err := conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s1, %[1]s1_array, %[1]s2, %[1]s2_array, %[1]s3, %[1]s3_array, %[1]s4, %[1]s4_array, %[1]s5, %[1]s5_array FROM test_%[1]s`, tableName), col1Read, col1ArrayRead, col2Read, col2ArrayRead, col3Read, col3ArrayRead, col4Read, col4ArrayRead, col5Read, col5ArrayRead) require.NoError(t, err) require.True(t, conn.IsBusy()) var col1ReadData []int64 var col1ArrayReadData [][]int64 var col2ReadData []Tuple2 var col2ArrayReadData [][]Tuple2 var col3ReadData []Tuple3 var col3ArrayReadData [][]Tuple3 var col4ReadData []Tuple4 var col4ArrayReadData [][]Tuple4 var col5ReadData []Tuple5 var col5ArrayReadData [][]Tuple5 for selectStmt.Next() { col1ReadData = col1Read.Read(col1ReadData) col1ArrayReadData = col1ArrayRead.Read(col1ArrayReadData) col2ReadData = col2Read.Read(col2ReadData) col2ArrayReadData = col2ArrayRead.Read(col2ArrayReadData) col3ReadData = col3Read.Read(col3ReadData) 
col3ArrayReadData = col3ArrayRead.Read(col3ArrayReadData) col4ReadData = col4Read.Read(col4ReadData) col4ArrayReadData = col4ArrayRead.Read(col4ArrayReadData) col5ReadData = col5Read.Read(col5ReadData) col5ArrayReadData = col5ArrayRead.Read(col5ArrayReadData) } require.NoError(t, selectStmt.Err()) selectStmt.Close() assert.Equal(t, col1Insert, col1ReadData) assert.Equal(t, col1ArrayInsert, col1ArrayReadData) assert.Equal(t, col2Insert, col2ReadData) assert.Equal(t, col2ArrayInsert, col2ArrayReadData) assert.Equal(t, col3Insert, col3ReadData) assert.Equal(t, col3ArrayInsert, col3ArrayReadData) assert.Equal(t, col4Insert, col4ReadData) assert.Equal(t, col4ArrayInsert, col4ArrayReadData) assert.Equal(t, col5Insert, col5ReadData) assert.Equal(t, col5ArrayInsert, col5ArrayReadData) // example read row selectStmt, err = conn.Select(context.Background(), fmt.Sprintf(`SELECT %[1]s1, %[1]s1_array, %[1]s2, %[1]s2_array, %[1]s3, %[1]s3_array, %[1]s4, %[1]s4_array, %[1]s5, %[1]s5_array FROM test_%[1]s`, tableName), col1Read, col1ArrayRead, col2Read, col2ArrayRead, col3Read, col3ArrayRead, col4Read, col4ArrayRead, col5Read, col5ArrayRead) require.NoError(t, err) require.True(t, conn.IsBusy()) col1ReadData = col1ReadData[:0] col1ArrayReadData = col1ArrayReadData[:0] col2ReadData = col2ReadData[:0] col2ArrayReadData = col2ArrayReadData[:0] col3ReadData = col3ReadData[:0] col3ArrayReadData = col3ArrayReadData[:0] col4ReadData = col4ReadData[:0] col4ArrayReadData = col4ArrayReadData[:0] col5ReadData = col5ReadData[:0] col5ArrayReadData = col5ArrayReadData[:0] for selectStmt.Next() { for i := 0; i < selectStmt.RowsInBlock(); i++ { col1ReadData = append(col1ReadData, col1Read.Row(i)) col1ArrayReadData = append(col1ArrayReadData, col1ArrayRead.Row(i)) col2ReadData = append(col2ReadData, col2Read.Row(i)) col2ArrayReadData = append(col2ArrayReadData, col2ArrayRead.Row(i)) col3ReadData = append(col3ReadData, col3Read.Row(i)) col3ArrayReadData = append(col3ArrayReadData, 
col3ArrayRead.Row(i)) col4ReadData = append(col4ReadData, col4Read.Row(i)) col4ArrayReadData = append(col4ArrayReadData, col4ArrayRead.Row(i)) col5ReadData = append(col5ReadData, col5Read.Row(i)) col5ArrayReadData = append(col5ArrayReadData, col5ArrayRead.Row(i)) } } require.NoError(t, selectStmt.Err()) selectStmt.Close() assert.Equal(t, col1Insert, col1ReadData) assert.Equal(t, col1ArrayInsert, col1ArrayReadData) assert.Equal(t, col2Insert, col2ReadData) assert.Equal(t, col2ArrayInsert, col2ArrayReadData) assert.Equal(t, col3Insert, col3ReadData) assert.Equal(t, col3ArrayInsert, col3ArrayReadData) assert.Equal(t, col4Insert, col4ReadData) assert.Equal(t, col4ArrayInsert, col4ArrayReadData) assert.Equal(t, col5Insert, col5ReadData) assert.Equal(t, col5ArrayInsert, col5ArrayReadData) } ================================================ FILE: config.go ================================================ package chconn import ( "context" "crypto/tls" "crypto/x509" "fmt" "math" "net" "net/url" "os" "strconv" "strings" "time" ) const defaultUsername = "default" const defaultDatabase = "default" const defaultDBPort = "9000" const defaultClientName = "chx" // Method is compression codec. type CompressMethod byte // Possible compression methods. const ( CompressNone CompressMethod = 0x00 CompressChecksum CompressMethod = 0x02 CompressLZ4 CompressMethod = 0x82 CompressZSTD CompressMethod = 0x90 ) // AfterConnectFunc is called after ValidateConnect. It can be used to set up the connection (e.g. Set session variables // or prepare statements). If this returns an error the connection attempt fails. type AfterConnectFunc func(ctx context.Context, conn Conn) error // ValidateConnectFunc is called during a connection attempt after a successful authentication with the ClickHouse server. // It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next // fallback config is tried. 
This allows implementing high availability behavior. type ValidateConnectFunc func(ctx context.Context, conn Conn) error // Config is the settings used to establish a connection to a ClickHouse server. It must be created by ParseConfig and // then it can be modified. A manually initialized Config will cause ConnectConfig to panic. type Config struct { Host string // host (e.g. localhost) Port uint16 Database string User string Password string ClientName string TLSConfig *tls.Config // nil disables TLS ConnectTimeout time.Duration DialFunc DialFunc // e.g. net.Dialer.DialContext LookupFunc LookupFunc // e.g. net.Resolver.LookupHost ReaderFunc ReaderFunc // e.g. bufio.Reader Compress CompressMethod QuotaKey string WriterFunc WriterFunc MinReadBufferSize int // Run-time parameters to set on connection as session default values RuntimeParams map[string]string Fallbacks []*FallbackConfig // ValidateConnect is called during a connection attempt after a successful authentication with the ClickHouse server. // It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next // fallback config is tried. This allows implementing high availability behavior. ValidateConnect ValidateConnectFunc // AfterConnect is called after ValidateConnect. It can be used to set up the connection (e.g. Set session variables // or prepare statements). If this returns an error the connection attempt fails. AfterConnect AfterConnectFunc createdByParseConfig bool // Used to enforce created by ParseConfig rule. // Original connection string that was parsed into config. connString string } // Copy returns a deep copy of the config that is safe to use and modify. // The only exception is the TLSConfig field: // according to the tls.Config docs it must not be modified after creation. 
func (c *Config) Copy() *Config {
	newConf := new(Config)
	*newConf = *c
	// tls.Config must not be modified after first use, so deep-copy it with
	// Clone instead of sharing the pointer.
	if newConf.TLSConfig != nil {
		newConf.TLSConfig = c.TLSConfig.Clone()
	}
	// Maps are reference types; rebuild RuntimeParams so the copy is isolated.
	if newConf.RuntimeParams != nil {
		newConf.RuntimeParams = make(map[string]string, len(c.RuntimeParams))
		for k, v := range c.RuntimeParams {
			newConf.RuntimeParams[k] = v
		}
	}
	// Fallbacks hold pointers; clone each element (and its TLS config) so
	// mutating the copy never leaks into the original.
	if newConf.Fallbacks != nil {
		newConf.Fallbacks = make([]*FallbackConfig, len(c.Fallbacks))
		for i, fallback := range c.Fallbacks {
			newFallback := new(FallbackConfig)
			*newFallback = *fallback
			if newFallback.TLSConfig != nil {
				newFallback.TLSConfig = fallback.TLSConfig.Clone()
			}
			newConf.Fallbacks[i] = newFallback
		}
	}
	return newConf
}

// ConnString returns the original connection string used to connect to the ClickHouse server.
func (c *Config) ConnString() string { return c.connString }

// FallbackConfig is additional settings to attempt a connection with when the primary Config fails to establish a
// network connection. It is used for TLS fallback such as sslmode=prefer and high availability (HA) connections.
type FallbackConfig struct {
	Host      string // host (e.g. localhost)
	Port      uint16
	TLSConfig *tls.Config // nil disables TLS
}

// NetworkAddress converts a ClickHouse host and port into a network and address suitable for use with
// net.Dial. The network is always "tcp"; net.JoinHostPort brackets IPv6 literals correctly.
func NetworkAddress(host string, port uint16) (network, address string) {
	network = "tcp"
	address = net.JoinHostPort(host, strconv.Itoa(int(port)))
	return
}

// ParseConfig builds a *Config from built-in defaults, CH* environment variables, and the given connection string
// (later sources override earlier ones).
//
// # Example DSN
// user=vahid password=secret host=ch.example.com port=5432 dbname=mydb sslmode=verify-ca
//
// # Example URL
// clickhouse://vahid:secret@ch.example.com:9440/mydb?sslmode=verify-ca
//
// ParseConfig supports specifying multiple hosts in a similar manner to libpq. Host and port may include comma separated
// values that will be tried in order. This can be used as part of a high availability system.
//
// # Example URL
// clickhouse://vahid:secret@foo.example.com:9000,bar.example.com:9000/mydb
//
// ParseConfig currently recognizes the following environment variables and their parameter key word equivalents passed
// via database URL or DSN:
//
// CHHOST
// CHPORT
// CHDATABASE
// CHUSER
// CHPASSWORD
// CHCLIENTNAME
// CHCONNECT_TIMEOUT
// CHSSLMODE
// CHSSLKEY
// CHSSLCERT
// CHSSLROOTCERT
//
// Important Security Notes:
//
// ParseConfig tries to match libpq behavior with regard to CHSSLMODE values. Note that, unlike libpq, when sslmode is
// not set it behaves as "disable": ClickHouse accepts plain (non-TLS) connections by default and serves TLS on a
// separate port, so no TLS is attempted unless an sslmode is given.
//
// See http://www.postgresql.org/docs/12/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION for details on what level of
// security each sslmode provides.
//
// The sslmode "prefer", sslmode "allow", and multiple hosts are implemented via the Fallbacks field of
// the Config struct. If TLSConfig is manually changed it will not affect the fallbacks. For example, in the case of
// sslmode "prefer" this means it will first try the main Config settings which use TLS, then it will try the fallback
// which does not use TLS. This can lead to an unexpected unencrypted connection if the main TLS config is manually
// changed later but the unencrypted fallback is present. Ensure there are no stale fallbacks when manually setting
// TLSConfig.
//
// If a host name resolves into multiple addresses chconn will only try the first.
//
// In addition, ParseConfig accepts the following options:
//
// min_read_buffer_size
// The minimum size of the internal read buffer. Default 8192.
// compress
// compression method. empty string or "checksum" or "lz4" or "zstd".
// in the "checksum" chconn checks the checksum and not use any compress method.
// quota_key
// the quota key.
func ParseConfig(connString string) (*Config, error) {
	defaultSettings := defaultSettings()
	envSettings := parseEnvSettings()
	connStringSettings := make(map[string]string)
	if connString != "" {
		var err error
		// connString may be a database URL or a DSN
		if strings.HasPrefix(connString, "clickhouse://") {
			connStringSettings, err = parseURLSettings(connString)
			if err != nil {
				return nil, &parseConfigError{connString: connString, msg: "failed to parse as URL", err: err}
			}
		} else {
			connStringSettings, err = parseDSNSettings(connString)
			if err != nil {
				return nil, &parseConfigError{connString: connString, msg: "failed to parse as DSN", err: err}
			}
		}
	}
	// Precedence (last wins): built-in defaults < CH* environment < connection string.
	settings := mergeSettings(defaultSettings, envSettings, connStringSettings)
	minReadBufferSize, err := strconv.Atoi(settings["min_read_buffer_size"])
	if err != nil {
		return nil, &parseConfigError{connString: connString, msg: "cannot parse min_read_buffer_size", err: err}
	}
	config := &Config{
		createdByParseConfig: true,
		Database:             settings["database"],
		User:                 settings["user"],
		Password:             settings["password"],
		RuntimeParams:        make(map[string]string),
		ClientName:           settings["client_name"],
		MinReadBufferSize:    minReadBufferSize,
		connString:           connString,
	}
	// An unrecognized (or empty) "compress" value leaves Compress at its zero
	// value, CompressNone.
	switch settings["compress"] {
	case "checksum":
		config.Compress = CompressChecksum
	case "lz4":
		config.Compress = CompressLZ4
	case "zstd":
		config.Compress = CompressZSTD
	}
	config.QuotaKey = settings["quota_key"]
	if connectTimeoutSetting, present := settings["connect_timeout"]; present {
		connectTimeout, err := parseConnectTimeoutSetting(connectTimeoutSetting)
		if err != nil {
			return nil, &parseConfigError{connString: connString, msg: "invalid connect_timeout", err: err}
		}
		config.ConnectTimeout = connectTimeout
		config.DialFunc = makeConnectTimeoutDialFunc(connectTimeout)
	} else {
		defaultDialer := makeDefaultDialer()
		config.DialFunc = defaultDialer.DialContext
	}
	config.LookupFunc = makeDefaultResolver().LookupHost
	// Keys consumed by chconn itself. Every other setting is forwarded to the
	// server through RuntimeParams.
	notRuntimeParams := map[string]struct{}{
		"host":                 {},
		"port":                 {},
		"database":             {},
		"user":                 {},
		"password":             {},
		"connect_timeout":      {},
		"sslmode":              {},
		"client_name":          {},
		"min_read_buffer_size": {},
		"sslkey":               {},
		"sslcert":              {},
		"sslrootcert":          {},
		"compress":             {},
		"quota_key":            {},
	}
	for k, v := range settings {
		if _, present := notRuntimeParams[k]; present {
			continue
		}
		config.RuntimeParams[k] = v
	}
	// Expand every host (and its per-sslmode TLS variants) into an ordered
	// fallback list; the first entry becomes the primary config.
	fallbacks := []*FallbackConfig{}
	hosts := strings.Split(settings["host"], ",")
	ports := strings.Split(settings["port"], ",")
	for i, host := range hosts {
		var portStr string
		if i < len(ports) {
			portStr = ports[i]
		} else {
			// Fewer ports than hosts: reuse the first port for the remainder.
			portStr = ports[0]
		}
		port, err := parsePort(portStr)
		if err != nil {
			return nil, &parseConfigError{connString: connString, msg: "invalid port", err: err}
		}
		var tlsConfigs []*tls.Config
		tlsConfigs, err = configTLS(settings, host)
		if err != nil {
			return nil, &parseConfigError{connString: connString, msg: "failed to configure TLS", err: err}
		}
		// configTLS may return more than one config (e.g. sslmode allow/prefer
		// yield a TLS and a non-TLS attempt); each becomes its own fallback.
		for _, tlsConfig := range tlsConfigs {
			fallbacks = append(fallbacks, &FallbackConfig{
				Host:      host,
				Port:      port,
				TLSConfig: tlsConfig,
			})
		}
	}
	// fallbacks is never empty here: strings.Split always yields at least one
	// host and configTLS always yields at least one TLS config.
	config.Host = fallbacks[0].Host
	config.Port = fallbacks[0].Port
	config.TLSConfig = fallbacks[0].TLSConfig
	config.Fallbacks = fallbacks[1:]
	return config, nil
}

// defaultSettings returns the built-in defaults — the lowest-precedence
// settings layer merged by ParseConfig.
func defaultSettings() map[string]string {
	settings := make(map[string]string)
	settings["host"] = "localhost"
	settings["port"] = defaultDBPort
	settings["user"] = defaultUsername
	settings["database"] = defaultDatabase
	settings["client_name"] = defaultClientName
	settings["min_read_buffer_size"] = "8192"
	return settings
}

// mergeSettings flattens the given settings maps into one; keys from later
// maps override the same keys from earlier maps.
func mergeSettings(settingSets ...map[string]string) map[string]string {
	settings := make(map[string]string)
	for _, s2 := range settingSets {
		for k, v := range s2 {
			settings[k] = v
		}
	}
	return settings
}

// parseEnvSettings reads the supported CH* environment variables into their
// equivalent setting keys. Unset or empty variables are skipped.
func parseEnvSettings() map[string]string {
	settings := make(map[string]string)
	nameMap := map[string]string{
		"CHHOST":            "host",
		"CHPORT":            "port",
		"CHDATABASE":        "database",
		"CHUSER":            "user",
		"CHPASSWORD":        "password",
		"CHCLIENTNAME":      "client_name",
		"CHCONNECT_TIMEOUT": "connect_timeout",
"CHSSLMODE": "sslmode", "CHSSLKEY": "sslkey", "CHSSLCERT": "sslcert", "CHSSLROOTCERT": "sslrootcert", } for envname, realname := range nameMap { value := os.Getenv(envname) if value != "" { settings[realname] = value } } return settings } func parseURLSettings(connString string) (map[string]string, error) { settings := make(map[string]string) urlConn, err := url.Parse(connString) if err != nil { return nil, err } if urlConn.User != nil { settings["user"] = urlConn.User.Username() if password, present := urlConn.User.Password(); present { settings["password"] = password } } // Handle multiple host:port's in url.Host by splitting them into host,host,host and port,port,port. var hosts []string var ports []string for _, host := range strings.Split(urlConn.Host, ",") { if host == "" { continue } if isIPOnly(host) { hosts = append(hosts, strings.Trim(host, "[]")) continue } h, p, err := net.SplitHostPort(host) if err != nil { return nil, fmt.Errorf("failed to split host:port in '%s', err: %w", host, err) } if h != "" { hosts = append(hosts, h) } if p != "" { ports = append(ports, p) } } if len(hosts) > 0 { settings["host"] = strings.Join(hosts, ",") } if len(ports) > 0 { settings["port"] = strings.Join(ports, ",") } database := strings.TrimLeft(urlConn.Path, "/") if database != "" { settings["database"] = database } nameMap := map[string]string{ "dbname": "database", } for k, v := range urlConn.Query() { if k2, present := nameMap[k]; present { k = k2 } settings[k] = v[0] } return settings, nil } func isIPOnly(host string) bool { return net.ParseIP(strings.Trim(host, "[]")) != nil || !strings.Contains(host, ":") } var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1} func parseDSNSettings(s string) (map[string]string, error) { settings := make(map[string]string) nameMap := map[string]string{ "dbname": "database", } for len(s) > 0 { var key, val string eqIdx := strings.IndexRune(s, '=') if eqIdx < 0 { return nil, ErrInvalidDSN } key = 
			strings.Trim(s[:eqIdx], " \t\n\r\v\f")
		s = strings.TrimLeft(s[eqIdx+1:], " \t\n\r\v\f")
		if s == "" {
			// "key=" at end of input: val stays "".
		} else if s[0] != '\'' {
			// Unquoted value: runs until the next ASCII whitespace byte.
			end := 0
			for ; end < len(s); end++ {
				if asciiSpace[s[end]] == 1 {
					break
				}
				if s[end] == '\\' {
					// Skip the escaped byte; a trailing lone backslash is invalid.
					end++
					if end == len(s) {
						return nil, ErrInvalidBackSlash
					}
				}
			}
			val = strings.ReplaceAll(strings.ReplaceAll(s[:end], "\\\\", "\\"), "\\'", "'")
			if end == len(s) {
				s = ""
			} else {
				s = s[end+1:]
			}
		} else {
			// quoted string
			s = s[1:]
			end := 0
			for ; end < len(s); end++ {
				if s[end] == '\'' {
					break
				}
				if s[end] == '\\' {
					end++
				}
			}
			// Loop fell off the end without a closing quote.
			if end == len(s) {
				return nil, ErrInvalidquoted
			}
			val = strings.ReplaceAll(strings.ReplaceAll(s[:end], "\\\\", "\\"), "\\'", "'")
			// NOTE(review): end < len(s) is guaranteed by the check above, so
			// this always takes the else branch.
			if end == len(s) {
				s = ""
			} else {
				s = s[end+1:]
			}
		}
		if k, ok := nameMap[key]; ok {
			key = k
		}
		if key == "" {
			return nil, ErrInvalidDSN
		}
		settings[key] = val
	}
	return settings, nil
}

// configTLS uses libpq's TLS parameters to construct []*tls.Config. It is
// necessary to allow returning multiple TLS configs as sslmode "allow" and
// "prefer" allow fallback. A nil entry in the result means "connect without
// TLS" for that attempt.
//
//nolint:funlen,gocyclo
func configTLS(settings map[string]string, thisHost string) ([]*tls.Config, error) {
	host := thisHost
	sslmode := settings["sslmode"]
	sslrootcert := settings["sslrootcert"]
	sslcert := settings["sslcert"]
	sslkey := settings["sslkey"]
	// ClickHouse accepts plain (non-TLS) connections by default and serves TLS
	// on a separate port, so an unset sslmode means no TLS at all.
	if sslmode == "" || sslmode == "disable" {
		return []*tls.Config{nil}, nil
	}
	//nolint:gosec // the zero config is tightened per sslmode below
	tlsConfig := &tls.Config{}
	switch sslmode {
	case "disable":
		return []*tls.Config{nil}, nil
	case "allow", "prefer":
		tlsConfig.InsecureSkipVerify = true
	case "require":
		// Following libpq: "require" with a root CA behaves like "verify-ca";
		// without one it only encrypts, skipping verification.
		if sslrootcert != "" {
			goto nextCase
		}
		tlsConfig.InsecureSkipVerify = true
		break
	nextCase:
		fallthrough
	case "verify-ca":
		// Don't perform the default certificate verification because it
		// will verify the hostname. Instead, verify the server's
		// certificate chain ourselves in VerifyPeerCertificate and
		// ignore the server name. This emulates libpq's verify-ca
		// behavior.
		//
		// See https://github.com/golang/go/issues/21971#issuecomment-332693931
		// and https://pkg.go.dev/crypto/tls?tab=doc#example-Config-VerifyPeerCertificate
		// for more info.
		tlsConfig.InsecureSkipVerify = true
		tlsConfig.VerifyPeerCertificate = func(certificates [][]byte, _ [][]*x509.Certificate) error {
			certs := make([]*x509.Certificate, len(certificates))
			for i, asn1Data := range certificates {
				cert, err := x509.ParseCertificate(asn1Data)
				if err != nil {
					return fmt.Errorf("failed to parse certificate from server: %w", err)
				}
				certs[i] = cert
			}
			// Leave DNSName empty to skip hostname verification.
			opts := x509.VerifyOptions{
				Roots:         tlsConfig.RootCAs,
				Intermediates: x509.NewCertPool(),
			}
			// Skip the first cert because it's the leaf. All others
			// are intermediates.
			for _, cert := range certs[1:] {
				opts.Intermediates.AddCert(cert)
			}
			_, err := certs[0].Verify(opts)
			return err
		}
	case "verify-full":
		tlsConfig.ServerName = host
	default:
		return nil, ErrSSLModeInvalid
	}
	// Optional custom root CA bundle.
	if sslrootcert != "" {
		caCertPool := x509.NewCertPool()
		caPath := sslrootcert
		caCert, err := os.ReadFile(caPath)
		if err != nil {
			return nil, fmt.Errorf("unable to read CA file: %w", err)
		}
		if !caCertPool.AppendCertsFromPEM(caCert) {
			return nil, ErrAddCA
		}
		tlsConfig.RootCAs = caCertPool
		tlsConfig.ClientCAs = caCertPool
	}
	// Client certificate and key must be given together or not at all.
	if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") {
		return nil, ErrMissCertRequirement
	}
	if sslcert != "" && sslkey != "" {
		cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
		if err != nil {
			return nil, fmt.Errorf("unable to read cert: %w", err)
		}
		tlsConfig.Certificates = []tls.Certificate{cert}
	}
	// Order encodes preference: "allow" tries plain first, "prefer" tries TLS
	// first, the verify modes are TLS-only.
	switch sslmode {
	case "allow":
		return []*tls.Config{nil, tlsConfig}, nil
	case "prefer":
		return []*tls.Config{tlsConfig, nil}, nil
	case "require", "verify-ca", "verify-full":
		return []*tls.Config{tlsConfig}, nil
	default:
panic("BUG: bad sslmode should already have been caught") } } func parsePort(s string) (uint16, error) { port, err := strconv.ParseUint(s, 10, 16) if err != nil { return 0, err } if port < 1 || port > math.MaxUint16 { return 0, ErrPortInvalid } return uint16(port), nil } func makeDefaultDialer() *net.Dialer { return &net.Dialer{KeepAlive: 5 * time.Minute} } func makeDefaultResolver() *net.Resolver { return net.DefaultResolver } func parseConnectTimeoutSetting(s string) (time.Duration, error) { timeout, err := strconv.ParseInt(s, 10, 64) if err != nil { return 0, err } if timeout < 0 { return 0, ErrNegativeTimeout } return time.Duration(timeout) * time.Second, nil } func makeConnectTimeoutDialFunc(timeout time.Duration) DialFunc { d := makeDefaultDialer() d.Timeout = timeout return d.DialContext } ================================================ FILE: config_test.go ================================================ package chconn import ( "context" "crypto/tls" "errors" "fmt" "os" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var parseConfigTests = []struct { name string connString string config *Config }{ // Test all sslmodes { name: "sslmode not set (disable)", connString: "clickhouse://vahid:secret@localhost:9000/mydb", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, Database: "mydb", ClientName: defaultClientName, TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "sslmode disable", connString: "clickhouse://vahid:secret@localhost:9000/mydb?sslmode=disable", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", ClientName: defaultClientName, Port: 9000, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "sslmode allow", connString: "clickhouse://vahid:secret@localhost:9000/mydb?sslmode=allow", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: 
"mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, Fallbacks: []*FallbackConfig{ { Host: "localhost", Port: 9000, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }, }, }, }, }, { name: "sslmode prefer", connString: "clickhouse://vahid:secret@localhost:9000/mydb?sslmode=prefer", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, Database: "mydb", ClientName: defaultClientName, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }, RuntimeParams: map[string]string{}, Fallbacks: []*FallbackConfig{ { Host: "localhost", Port: 9000, TLSConfig: nil, }, }, }, }, { name: "sslmode require", connString: "clickhouse://vahid:secret@localhost:9000/mydb?sslmode=require", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, Database: "mydb", ClientName: defaultClientName, RuntimeParams: map[string]string{}, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }, }, }, { name: "sslmode verify-ca", connString: "clickhouse://vahid:secret@localhost:9000/mydb?sslmode=verify-ca", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: &tls.Config{ InsecureSkipVerify: true, }, RuntimeParams: map[string]string{}, }, }, { name: "sslmode verify-full", connString: "clickhouse://vahid:secret@localhost:9000/mydb?sslmode=verify-full", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: &tls.Config{ServerName: "localhost"}, RuntimeParams: map[string]string{}, }, }, { name: "database url everything", connString: "clickhouse://vahid:secret@localhost:9000/mydb?sslmode=disable&client_name=chxtest&extradata=test&connect_timeout=5", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, Database: "mydb", TLSConfig: nil, ConnectTimeout: 5 * time.Second, ClientName: "chxtest", RuntimeParams: map[string]string{ "extradata": "test", }, 
}, }, { name: "database url missing password", connString: "clickhouse://vahid@localhost:9000/mydb?sslmode=disable", config: &Config{ User: "vahid", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "database url missing user and password", connString: "clickhouse://localhost:9000/mydb?sslmode=disable", config: &Config{ User: defaultUsername, Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "database url missing port", connString: "clickhouse://vahid:secret@localhost:9000/mydb?sslmode=disable", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "database url clickhouse protocol", connString: "clickhouse://vahid@localhost:9000/mydb?sslmode=disable", config: &Config{ User: "vahid", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "database url IPv4 with port", connString: "clickhouse://vahid@127.0.0.1:5433/mydb?sslmode=disable", config: &Config{ User: "vahid", Host: "127.0.0.1", ClientName: defaultClientName, Port: 5433, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "database url IPv6 with port", connString: "clickhouse://vahid@[2001:db8::1]:5433/mydb?sslmode=disable", config: &Config{ User: "vahid", Host: "2001:db8::1", Port: 5433, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "database url IPv6 no port", connString: "clickhouse://vahid@[2001:db8::1]/mydb?sslmode=disable", config: &Config{ User: "vahid", Host: "2001:db8::1", Port: 9000, Database: "mydb", ClientName: defaultClientName, TLSConfig: nil, RuntimeParams: 
map[string]string{}, }, }, { name: "DSN everything", connString: "user=vahid password=secret host=localhost port=9000 dbname=mydb sslmode=disable client_name=chxtest connect_timeout=5", config: &Config{ User: "vahid", Password: "secret", Host: "localhost", Port: 9000, Database: "mydb", TLSConfig: nil, ClientName: "chxtest", ConnectTimeout: 5 * time.Second, RuntimeParams: map[string]string{}, }, }, { name: "DSN with escaped single quote", connString: "user=vahid\\'s password=secret host=localhost port=9000 dbname=mydb sslmode=disable", config: &Config{ User: "vahid's", Password: "secret", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "DSN with escaped backslash", connString: "user=vahid password=sooper\\\\secret host=localhost port=9000 dbname=mydb sslmode=disable", config: &Config{ User: "vahid", Password: "sooper\\secret", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "DSN with single quoted values", connString: "user='vahid' host='localhost' dbname='mydb' sslmode='disable'", config: &Config{ User: "vahid", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "DSN with single quoted value with escaped single quote", connString: "user='vahid\\'s' host='localhost' dbname='mydb' sslmode='disable'", config: &Config{ User: "vahid's", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "DSN with empty single quoted value", connString: "user='vahid' password='' host='localhost' dbname='mydb' sslmode='disable'", config: &Config{ User: "vahid", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "DSN 
with space between key and value", connString: "user = 'vahid' password = '' host = 'localhost' dbname = 'mydb' sslmode='disable'", config: &Config{ User: "vahid", Host: "localhost", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "URL multiple hosts", connString: "clickhouse://vahid:secret@foo,bar,baz/mydb?sslmode=disable", config: &Config{ User: "vahid", Password: "secret", Host: "foo", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, Fallbacks: []*FallbackConfig{ { Host: "bar", Port: 9000, TLSConfig: nil, }, { Host: "baz", Port: 9000, TLSConfig: nil, }, }, }, }, { name: "URL multiple hosts and ports", connString: "clickhouse://vahid:secret@foo:1,bar:2,baz:3/mydb?sslmode=disable", config: &Config{ User: "vahid", Password: "secret", Host: "foo", Port: 1, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, Fallbacks: []*FallbackConfig{ { Host: "bar", Port: 2, TLSConfig: nil, }, { Host: "baz", Port: 3, TLSConfig: nil, }, }, }, }, { name: "DSN multiple hosts one port", connString: "user=vahid password=secret host=foo,bar,baz port=9000 dbname=mydb sslmode=disable", config: &Config{ User: "vahid", Password: "secret", Host: "foo", Port: 9000, ClientName: defaultClientName, Database: "mydb", TLSConfig: nil, RuntimeParams: map[string]string{}, Fallbacks: []*FallbackConfig{ { Host: "bar", Port: 9000, TLSConfig: nil, }, { Host: "baz", Port: 9000, TLSConfig: nil, }, }, }, }, { name: "DSN multiple hosts multiple ports", connString: "user=vahid password=secret host=foo,bar,baz port=1,2,3 dbname=mydb sslmode=disable", config: &Config{ User: "vahid", Password: "secret", Host: "foo", Port: 1, Database: "mydb", TLSConfig: nil, ClientName: defaultClientName, RuntimeParams: map[string]string{}, Fallbacks: []*FallbackConfig{ { Host: "bar", Port: 2, TLSConfig: nil, }, { Host: "baz", Port: 3, 
TLSConfig: nil, }, }, }, }, { name: "multiple hosts and fallback tsl", connString: "user=vahid password=secret host=foo,bar,baz dbname=mydb sslmode=prefer", config: &Config{ User: "vahid", Password: "secret", Host: "foo", Port: 9000, Database: "mydb", ClientName: defaultClientName, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }, RuntimeParams: map[string]string{}, Fallbacks: []*FallbackConfig{ { Host: "foo", Port: 9000, TLSConfig: nil, }, { Host: "bar", Port: 9000, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }}, { Host: "bar", Port: 9000, TLSConfig: nil, }, { Host: "baz", Port: 9000, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }}, { Host: "baz", Port: 9000, TLSConfig: nil, }, }, }, }, { name: "enable compress", connString: "user=vahid password=secret host=foo,bar,baz dbname=mydb sslmode=prefer compress=checksum", config: &Config{ User: "vahid", Password: "secret", Host: "foo", Port: 9000, Database: "mydb", Compress: CompressChecksum, ClientName: defaultClientName, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }, RuntimeParams: map[string]string{}, Fallbacks: []*FallbackConfig{ { Host: "foo", Port: 9000, TLSConfig: nil, }, { Host: "bar", Port: 9000, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }}, { Host: "bar", Port: 9000, TLSConfig: nil, }, { Host: "baz", Port: 9000, TLSConfig: &tls.Config{ InsecureSkipVerify: true, }}, { Host: "baz", Port: 9000, TLSConfig: nil, }, }, }, }, } func TestParseConfig(t *testing.T) { t.Parallel() for i, tt := range parseConfigTests { config, err := ParseConfig(tt.connString) if !assert.Nilf(t, err, "Test %d (%s)", i, tt.name) { continue } assertConfigsEqual(t, tt.config, config, fmt.Sprintf("Test %d (%s)", i, tt.name)) } } func TestParseConfigDSNWithTrailingEmptyEqualDoesNotPanic(t *testing.T) { _, err := ParseConfig("host= user= password= port= database=") require.NoError(t, err) } func TestParseConfigDSNLeadingEqual(t *testing.T) { _, err := ParseConfig("= user=vahid") require.Error(t, err) } func 
TestParseConfigDSNTrailingBackslash(t *testing.T) { _, err := ParseConfig(`x=x\`) require.Error(t, err) assert.Contains(t, err.Error(), "invalid backslash") } func TestConfigCopyReturnsEqualConfig(t *testing.T) { connString := "clickhouse://vahid:secret@localhost:9000/mydb?client_name=chxtest&connect_timeout=5" original, err := ParseConfig(connString) require.NoError(t, err) copied := original.Copy() assertConfigsEqual(t, original, copied, "Test Config.Copy() returns equal config") } func TestConfigCopyOriginalConfigDidNotChange(t *testing.T) { connString := "host=localhost,localhost2 port=9000,9000 database=mydb client_name=chxtest connect_timeout=5" original, err := ParseConfig(connString) require.NoError(t, err) copied := original.Copy() assertConfigsEqual(t, original, copied, "Test Config.Copy() returns equal config") copied.Port = uint16(5433) copied.RuntimeParams["foo"] = "bar" assert.Equal(t, uint16(9000), original.Port) assert.Equal(t, "", original.RuntimeParams["foo"]) } func TestConfigCopyCanBeUsedToConnect(t *testing.T) { connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") original, err := ParseConfig(connString) require.NoError(t, err) copied := original.Copy() assert.NotPanics(t, func() { _, err = ConnectConfig(context.Background(), copied) }) assert.NoError(t, err) } func assertConfigsEqual(t *testing.T, expected, actual *Config, testName string) { if !assert.NotNil(t, expected) { return } if !assert.NotNil(t, actual) { return } assert.Equalf(t, expected.Host, actual.Host, "%s - Host", testName) assert.Equalf(t, expected.Database, actual.Database, "%s - Database", testName) assert.Equalf(t, expected.Port, actual.Port, "%s - Port", testName) assert.Equalf(t, expected.User, actual.User, "%s - User", testName) assert.Equalf(t, expected.Password, actual.Password, "%s - Password", testName) assert.Equalf(t, expected.ConnectTimeout, actual.ConnectTimeout, "%s - ConnectTimeout", testName) assert.Equalf(t, expected.ClientName, actual.ClientName, "%s - Client 
Name", testName) assert.Equalf(t, expected.RuntimeParams, actual.RuntimeParams, "%s - RuntimeParams", testName) // Can't test function equality, so just test that they are set or not. assert.Equalf(t, expected.ValidateConnect == nil, actual.ValidateConnect == nil, "%s - ValidateConnect", testName) assert.Equalf(t, expected.AfterConnect == nil, actual.AfterConnect == nil, "%s - AfterConnect", testName) if assert.Equalf(t, expected.TLSConfig == nil, actual.TLSConfig == nil, "%s - TLSConfig", testName) { if expected.TLSConfig != nil { assert.Equalf(t, expected.TLSConfig.InsecureSkipVerify, actual.TLSConfig.InsecureSkipVerify, "%s - TLSConfig InsecureSkipVerify", testName, ) assert.Equalf(t, expected.TLSConfig.ServerName, actual.TLSConfig.ServerName, "%s - TLSConfig ServerName", testName, ) } } if assert.Equalf(t, len(expected.Fallbacks), len(actual.Fallbacks), "%s - Fallbacks", testName) { for i := range expected.Fallbacks { assert.Equalf(t, expected.Fallbacks[i].Host, actual.Fallbacks[i].Host, "%s - Fallback %d - Host", testName, i) assert.Equalf(t, expected.Fallbacks[i].Port, actual.Fallbacks[i].Port, "%s - Fallback %d - Port", testName, i) if assert.Equalf(t, expected.Fallbacks[i].TLSConfig == nil, actual.Fallbacks[i].TLSConfig == nil, "%s - Fallback %d - TLSConfig", testName, i, ) { if expected.Fallbacks[i].TLSConfig != nil { assert.Equalf(t, expected.Fallbacks[i].TLSConfig.InsecureSkipVerify, actual.Fallbacks[i].TLSConfig.InsecureSkipVerify, "%s - Fallback %d - TLSConfig InsecureSkipVerify", testName, ) assert.Equalf(t, expected.Fallbacks[i].TLSConfig.ServerName, actual.Fallbacks[i].TLSConfig.ServerName, "%s - Fallback %d - TLSConfig ServerName", testName, ) } } } } } func TestParseConfigEnv(t *testing.T) { tests := []struct { name string envvars map[string]string config *Config }{ { // not testing no environment at all as that would use default host and that can vary. 
name: "CHHOST only", envvars: map[string]string{"CHHOST": "123.123.123.123"}, config: &Config{ User: defaultUsername, Host: "123.123.123.123", Port: 9000, ClientName: defaultClientName, Database: defaultDatabase, TLSConfig: nil, RuntimeParams: map[string]string{}, }, }, { name: "All non-TLS environment", envvars: map[string]string{ "CHHOST": "123.123.123.123", "CHPORT": "7777", "CHDATABASE": "foo", "CHUSER": "bar", "CHPASSWORD": "baz", "CHCONNECT_TIMEOUT": "10", "CHSSLMODE": "disable", "CHCLIENTNAME": "chxtest", }, config: &Config{ Host: "123.123.123.123", Port: 7777, Database: "foo", User: "bar", Password: "baz", ConnectTimeout: 10 * time.Second, TLSConfig: nil, ClientName: "chxtest", RuntimeParams: map[string]string{}, }, }, } chEnvvars := []string{"CHHOST", "CHPORT", "CHDATABASE", "CHUSER", "CHPASSWORD", "CHCLIENTNAME", "CHSSLMODE", "CHCONNECT_TIMEOUT"} savedEnv := make(map[string]string) for _, n := range chEnvvars { savedEnv[n] = os.Getenv(n) } defer func() { for k, v := range savedEnv { err := os.Setenv(k, v) if err != nil { t.Fatalf("Unable to restore environment: %v", err) } } }() for i, tt := range tests { for _, n := range chEnvvars { err := os.Unsetenv(n) require.NoError(t, err) } for k, v := range tt.envvars { err := os.Setenv(k, v) require.NoError(t, err) } config, err := ParseConfig("") if !assert.Nilf(t, err, "Test %d (%s)", i, tt.name) { continue } assertConfigsEqual(t, tt.config, config, fmt.Sprintf("Test %d (%s)", i, tt.name)) } } func TestParseConfigError(t *testing.T) { t.Parallel() content := []byte("invalid tls") tmpInvalidTLS, err := os.CreateTemp("", "invalidtls") if err != nil { t.Fatal(err) } defer os.Remove(tmpInvalidTLS.Name()) // clean up if _, err := tmpInvalidTLS.Write(content); err != nil { t.Fatal(err) } if err := tmpInvalidTLS.Close(); err != nil { t.Fatal(err) } parseConfigErrorTests := []struct { name string connString string err string errUnwarp string }{ { name: "invalid url", connString: "clickhouse://invalid\t", err: "cannot 
parse `clickhouse://invalid\t`: failed to parse as URL (parse \"clickhouse://invalid\\t\": net/url: invalid control character in URL)", //nolint:lll //can't change line lengh }, { name: "invalid port", connString: "port=invalid", errUnwarp: "strconv.ParseUint: parsing \"invalid\": invalid syntax", }, { name: "invalid port range", connString: "port=0", err: "cannot parse `port=0`: invalid port (outside range)", }, { name: "invalid connect_timeout", connString: "connect_timeout=200g", err: "cannot parse `connect_timeout=200g`: invalid connect_timeout (strconv.ParseInt: parsing \"200g\": invalid syntax)", }, { name: "negative connect_timeout", connString: "connect_timeout=-100", err: "cannot parse `connect_timeout=-100`: invalid connect_timeout (negative timeout)", }, { name: "negative sslmode", connString: "sslmode=invalid", err: "cannot parse `sslmode=invalid`: failed to configure TLS (sslmode is invalid)", }, { name: "fail load sslrootcert", connString: "sslrootcert=invalid_address sslmode=prefer", err: "cannot parse `sslrootcert=invalid_address sslmode=prefer`: failed to configure TLS (unable to read CA file: open invalid_address: no such file or directory)", //nolint:lll //can't change line lengh }, { name: "invalid sslrootcert", connString: "sslrootcert=" + tmpInvalidTLS.Name() + " sslmode=prefer", err: "cannot parse `sslrootcert=" + tmpInvalidTLS.Name() + " sslmode=prefer`: failed to configure TLS (unable to add CA to cert pool)", //nolint:lll //can't change line lengh }, { name: "not provide both sslcert and sskkey", connString: "sslcert=invalid_address sslmode=prefer", err: "cannot parse `sslcert=invalid_address sslmode=prefer`: failed to configure TLS (both \"sslcert\" and \"sslkey\" are required)", //nolint:lll //can't change line lengh }, { name: "invalid sslcert", connString: "sslcert=invalid_address sslkey=invalid_address sslmode=prefer", err: "cannot parse `sslcert=invalid_address sslkey=invalid_address sslmode=prefer`: failed to configure TLS (unable 
to read cert: open invalid_address: no such file or directory)", //nolint:lll //can't change line lengh }, } for i, tt := range parseConfigErrorTests { _, err := ParseConfig(tt.connString) if !assert.Errorf(t, err, "Test %d (%s)", i, tt.name) { continue } if tt.err != "" { if !assert.EqualError(t, err, tt.err, "Test %d (%s)", i, tt.name) { continue } } else { if !assert.EqualErrorf(t, errors.Unwrap(err), tt.errUnwarp, "Test %d (%s)", i, tt.name) { continue } } } } ================================================ FILE: doc.go ================================================ // Package chconn is a low-level Clickhouse database driver. /* chconn is a pure Go driver for [ClickHouse] that use Native protocol chconn aims to be low-level, fast, and performant. If you have any suggestion or comment, please feel free to open an issue on this tutorial's GitHub page! */ package chconn ================================================ FILE: doc_test.go ================================================ package chconn_test import ( "context" "fmt" "os" "time" "github.com/vahid-sohrabloo/chconn/v2/chpool" "github.com/vahid-sohrabloo/chconn/v2/column" ) func Example() { conn, err := chpool.New(os.Getenv("DATABASE_URL")) if err != nil { panic(err) } defer conn.Close() // to check if the connection is alive err = conn.Ping(context.Background()) if err != nil { panic(err) } err = conn.Exec(context.Background(), `DROP TABLE IF EXISTS example_table`) if err != nil { panic(err) } err = conn.Exec(context.Background(), `CREATE TABLE example_table ( uint64 UInt64, uint64_nullable Nullable(UInt64) ) Engine=Memory`) if err != nil { panic(err) } col1 := column.New[uint64]() col2 := column.New[uint64]().Nullable() rows := 1_000_0000 // One hundred million rows- insert in 10 times numInsert := 10 col1.SetWriteBufferSize(rows) col2.SetWriteBufferSize(rows) startInsert := time.Now() for i := 0; i < numInsert; i++ { col1.Reset() col2.Reset() for y := 0; y < rows; y++ { col1.Append(uint64(i)) if i%2 
== 0 {
	// even batch index: real value; odd: NULL — exercises the nullable column
	col2.Append(uint64(i))
} else {
	col2.AppendNil()
}
}
ctxInsert, cancelInsert := context.WithTimeout(context.Background(), time.Second*30)
// insert data
err = conn.Insert(ctxInsert, "INSERT INTO example_table (uint64,uint64_nullable) VALUES", col1, col2)
if err != nil {
	cancelInsert()
	panic(err)
}
cancelInsert()
}
fmt.Println("inserted 10M rows in ", time.Since(startInsert))

// select data
col1Read := column.New[uint64]()
col2Read := column.New[uint64]().Nullable()
ctxSelect, cancelSelect := context.WithTimeout(context.Background(), time.Second*30)
defer cancelSelect()
startSelect := time.Now()
selectStmt, err := conn.Select(ctxSelect, "SELECT uint64,uint64_nullable FROM example_table", col1Read, col2Read)
if err != nil {
	panic(err)
}
// make sure the stmt close after select. but it's not necessary
defer selectStmt.Close()

var col1Data []uint64
var col2DataNil []bool
var col2Data []uint64
// read data block by block
// for more information about block, see: https://clickhouse.com/docs/en/development/architecture/#block
for selectStmt.Next() {
	col1Data = col1Data[:0]
	col1Data = col1Read.Read(col1Data)

	col2DataNil = col2DataNil[:0]
	col2DataNil = col2Read.ReadNil(col2DataNil)

	col2Data = col2Data[:0]
	col2Data = col2Read.Read(col2Data)
}
// check errors
if selectStmt.Err() != nil {
	panic(selectStmt.Err())
}
fmt.Println("selected 10M rows in ", time.Since(startSelect))
}

================================================ FILE: errors.go ================================================

package chconn

import (
	"context"
	"errors"
	"fmt"
	"net"
	"net/url"
	"regexp"
	"strings"

	"github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter"
)

// ErrNegativeTimeout when a negative timeout is provided
var ErrNegativeTimeout = errors.New("negative timeout")

// ErrPortInvalid when an out-of-range port is provided
var ErrPortInvalid = errors.New("outside range")

// ErrSSLModeInvalid when an invalid sslmode is provided
var ErrSSLModeInvalid = errors.New("sslmode is invalid")

// ErrAddCA when the CA certificate can't be added to the cert pool
var ErrAddCA = errors.New("unable to add CA to cert pool")

// ErrMissCertRequirement when only one of sslcert/sslkey is provided
var ErrMissCertRequirement = errors.New(`both "sslcert" and "sslkey" are required`)

// ErrInvalidDSN for an invalid dsn
var ErrInvalidDSN = errors.New("invalid dsn")

// ErrInvalidBackSlash for an invalid backslash in a dsn
var ErrInvalidBackSlash = errors.New("invalid backslash")

// ErrInvalidquoted for an unterminated quoted value in a dsn
var ErrInvalidquoted = errors.New("unterminated quoted string in connection info string")

// ErrIPNotFound when no ip address is found while connecting
var ErrIPNotFound = errors.New("ip addr wasn't found")

// ChError represents an error reported by the Clickhouse server
type ChError struct {
	Code       ChErrorType // numeric server error code
	Name       string      // server-side exception name
	Message    string      // human-readable message (name prefix stripped)
	StackTrace string      // server-side stack trace
	err        error       // nested server error, if any
}

// read decodes a server exception packet from the wire: code, name, message,
// stack trace, then an optional nested exception (recursively).
func (e *ChError) read(r *readerwriter.Reader) error {
	var (
		err       error
		hasNested uint8
		errCode   int32
	)
	if errCode, err = r.Int32(); err != nil {
		return &readError{"ChError: read code", err}
	}
	e.Code = ChErrorType(errCode)
	if e.Name, err = r.String(); err != nil {
		return &readError{"ChError: read name", err}
	}
	if e.Message, err = r.String(); err != nil {
		return &readError{"ChError: read message", err}
	}
	// The server prefixes the message with "<Name>:"; strip it for readability.
	e.Message = strings.TrimSpace(strings.TrimPrefix(e.Message, e.Name+":"))
	if e.StackTrace, err = r.String(); err != nil {
		return &readError{"ChError: read StackTrace", err}
	}
	if hasNested, err = r.ReadByte(); err != nil {
		return &readError{"ChError: read hasNested", err}
	}
	if hasNested == 1 {
		nestedErr := &ChError{}
		if err := nestedErr.read(r); err != nil {
			return err
		}
		e.err = nestedErr
	}
	return nil
}

// Unwrap returns the underlying (nested) error
func (e *ChError) Unwrap() error {
	return e.err
}

// Error return string error
func (e *ChError) Error() string {
	if e.err == nil {
		return fmt.Sprintf(" %s (%d): %s", e.Name, e.Code, e.Message)
	}
	return fmt.Sprintf(" %s (%d): %s (%s)", e.Name, e.Code, e.Message, e.err)
}

// preferContextOverNetTimeoutError returns ctx.Err() if ctx.Err() is present and err
is a net.Error with Timeout() == // true. Otherwise returns err. func preferContextOverNetTimeoutError(ctx context.Context, err error) error { if err == nil { return nil } var timeoutError net.Error errors.As(err, &timeoutError) if timeoutError != nil && timeoutError.Timeout() && ctx.Err() != nil { return &errTimeout{ mainError: err, err: ctx.Err(), } } return err } // errTimeout occurs when an error was caused by a timeout. Specifically, it wraps an error which is // context.Canceled, context.DeadlineExceeded, or an implementer of net.Error where Timeout() is true. type errTimeout struct { err error mainError error } func (e *errTimeout) Error() string { if e.mainError == nil { return fmt.Sprintf("timeout: %s", e.err.Error()) } return fmt.Sprintf("timeout: %s - %s", e.err.Error(), e.mainError.Error()) } func (e *errTimeout) Unwrap() error { return e.err } type contextAlreadyDoneError struct { err error } func (e *contextAlreadyDoneError) Error() string { return fmt.Sprintf("context already done: %s", e.err.Error()) } func (e *contextAlreadyDoneError) Unwrap() error { return e.err } // newContextAlreadyDoneError double-wraps a context error in `contextAlreadyDoneError` and `errTimeout`. 
func newContextAlreadyDoneError(ctx context.Context) (err error) { return &errTimeout{ err: &contextAlreadyDoneError{err: ctx.Err()}, } } type unexpectedPacket struct { expected string actual interface{} } func (e *unexpectedPacket) Error() string { return fmt.Sprintf("Unexpected packet from server (expected %s got %#v)", e.expected, e.actual) } type notImplementedPacket struct { packet uint64 } func (e *notImplementedPacket) Error() string { return fmt.Sprintf("packet not implemented: %d", e.packet) } type connectError struct { config *Config msg string err error } func (e *connectError) Error() string { sb := &strings.Builder{} fmt.Fprintf(sb, "failed to connect to `host=%s user=%s database=%s`: %s", e.config.Host, e.config.User, e.config.Database, e.msg) if e.err != nil { fmt.Fprintf(sb, " (%s)", e.err.Error()) } return sb.String() } func (e *connectError) Unwrap() error { return e.err } type connLockError struct { status string } func (e *connLockError) Error() string { return e.status } type parseConfigError struct { connString string msg string err error } func (e *parseConfigError) Error() string { connString := redactPW(e.connString) if e.err == nil { return fmt.Sprintf("cannot parse `%s`: %s", connString, e.msg) } return fmt.Sprintf("cannot parse `%s`: %s (%s)", connString, e.msg, e.err.Error()) } func (e *parseConfigError) Unwrap() error { return e.err } type readError struct { msg string err error } func (e *readError) Error() string { return fmt.Sprintf("%s (%s)", e.msg, e.err.Error()) } func (e *readError) Unwrap() error { return e.err } type writeError struct { msg string err error } func (e *writeError) Error() string { return fmt.Sprintf("%s (%s)", e.msg, e.err.Error()) } func (e *writeError) Unwrap() error { return e.err } func redactPW(connString string) string { if strings.HasPrefix(connString, "clickhouse://") { if u, err := url.Parse(connString); err == nil { return redactURL(u) } } quotedDSN := regexp.MustCompile(`password='[^']*'`) connString 
= quotedDSN.ReplaceAllLiteralString(connString, "password=xxxxx") plainDSN := regexp.MustCompile(`password=[^ ]*`) connString = plainDSN.ReplaceAllLiteralString(connString, "password=xxxxx") brokenURL := regexp.MustCompile(`:[^:@]+?@`) connString = brokenURL.ReplaceAllLiteralString(connString, ":xxxxxx@") return connString } func redactURL(u *url.URL) string { if u == nil { return "" } if _, pwSet := u.User.Password(); pwSet { u.User = url.UserPassword(u.User.Username(), "xxxxx") } return u.String() } // InsertError represents an error when insert error type InsertError struct { err error remoteAddr net.Addr } // Error return string error func (e *InsertError) Error() string { return fmt.Sprintf("failed to insert data: remoteAddr: %s - %s", e.remoteAddr.String(), e.err.Error()) } // Unwrap returns the underlying error func (e *InsertError) Unwrap() error { return e.err } // ColumnNumberReadError represents an error when read more or less column type ColumnNumberReadError struct { Read int Available uint64 } func (e *ColumnNumberReadError) Error() string { return fmt.Sprintf("read %d column(s), but available %d column(s)", e.Read, e.Available) } // ColumnNumberWriteError represents an error when number of write column is not equal to number of query column type ColumnNumberWriteError struct { WriteColumn int NeedColumn uint64 } func (e *ColumnNumberWriteError) Error() string { return fmt.Sprintf("write %d column(s) but insert query needs %d column(s)", e.WriteColumn, e.NeedColumn) } // NumberWriteError represents an error when number rows of columns is not equal type NumberWriteError struct { FirstNumRow int NumRow int Column string FirstColumn string } func (e *NumberWriteError) Error() string { return fmt.Sprintf("%q has %d rows but %q column has %d rows", e.FirstColumn, e.FirstNumRow, e.Column, e.NumRow) } // ColumnNotFoundError represents an error when column not found (when try to reorder columns) type ColumnNotFoundError struct { Column string } func (e 
*ColumnNotFoundError) Error() string { return fmt.Sprintf("the input columns do not contain column %q. The column name must be set using the `SetName` method", e.Column) } ================================================ FILE: errors_ch_code.go ================================================ package chconn type ChErrorType int32 const ( ChErrorOk ChErrorType = 0 // OK ChErrorUnsupportedMethod ChErrorType = 1 // UNSUPPORTED_METHOD ChErrorUnsupportedParameter ChErrorType = 2 // UNSUPPORTED_PARAMETER ChErrorUnexpectedEndOfFile ChErrorType = 3 // UNEXPECTED_END_OF_FILE ChErrorExpectedEndOfFile ChErrorType = 4 // EXPECTED_END_OF_FILE ChErrorCannotParseText ChErrorType = 6 // CANNOT_PARSE_TEXT ChErrorIncorrectNumberOfColumns ChErrorType = 7 // INCORRECT_NUMBER_OF_COLUMNS ChErrorThereIsNoColumn ChErrorType = 8 // THERE_IS_NO_COLUMN ChErrorSizesOfColumnsDoesntMatch ChErrorType = 9 // SIZES_OF_COLUMNS_DOESNT_MATCH ChErrorNotFoundColumnInBlock ChErrorType = 10 // NOT_FOUND_COLUMN_IN_BLOCK ChErrorPositionOutOfBound ChErrorType = 11 // POSITION_OUT_OF_BOUND ChErrorParameterOutOfBound ChErrorType = 12 // PARAMETER_OUT_OF_BOUND ChErrorSizesOfColumnsInTupleDoesntMatch ChErrorType = 13 // SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH ChErrorDuplicateColumn ChErrorType = 15 // DUPLICATE_COLUMN ChErrorNoSuchColumnInTable ChErrorType = 16 // NO_SUCH_COLUMN_IN_TABLE ChErrorDelimiterInStringLiteralDoesntMatch ChErrorType = 17 // DELIMITER_IN_STRING_LITERAL_DOESNT_MATCH ChErrorCannotInsertElementIntoConstantColumn ChErrorType = 18 // CANNOT_INSERT_ELEMENT_INTO_CONSTANT_COLUMN ChErrorSizeOfFixedStringDoesntMatch ChErrorType = 19 // SIZE_OF_FIXED_STRING_DOESNT_MATCH ChErrorNumberOfColumnsDoesntMatch ChErrorType = 20 // NUMBER_OF_COLUMNS_DOESNT_MATCH ChErrorCannotReadAllDataFromTabSeparatedInput ChErrorType = 21 // CANNOT_READ_ALL_DATA_FROM_TAB_SEPARATED_INPUT ChErrorCannotParseAllValueFromTabSeparatedInput ChErrorType = 22 // CANNOT_PARSE_ALL_VALUE_FROM_TAB_SEPARATED_INPUT 
ChErrorCannotReadFromIstream ChErrorType = 23 // CANNOT_READ_FROM_ISTREAM ChErrorCannotWriteToOstream ChErrorType = 24 // CANNOT_WRITE_TO_OSTREAM ChErrorCannotParseEscapeSequence ChErrorType = 25 // CANNOT_PARSE_ESCAPE_SEQUENCE ChErrorCannotParseQuotedString ChErrorType = 26 // CANNOT_PARSE_QUOTED_STRING ChErrorCannotParseInputAssertionFailed ChErrorType = 27 // CANNOT_PARSE_INPUT_ASSERTION_FAILED ChErrorCannotPrintFloatOrDoubleNumber ChErrorType = 28 // CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER ChErrorCannotPrintInteger ChErrorType = 29 // CANNOT_PRINT_INTEGER ChErrorCannotReadSizeOfCompressedChunk ChErrorType = 30 // CANNOT_READ_SIZE_OF_COMPRESSED_CHUNK ChErrorCannotReadCompressedChunk ChErrorType = 31 // CANNOT_READ_COMPRESSED_CHUNK ChErrorAttemptToReadAfterEOF ChErrorType = 32 // ATTEMPT_TO_READ_AFTER_EOF ChErrorCannotReadAllData ChErrorType = 33 // CANNOT_READ_ALL_DATA ChErrorTooManyArgumentsForFunction ChErrorType = 34 // TOO_MANY_ARGUMENTS_FOR_FUNCTION ChErrorTooFewArgumentsForFunction ChErrorType = 35 // TOO_FEW_ARGUMENTS_FOR_FUNCTION ChErrorBadArguments ChErrorType = 36 // BAD_ARGUMENTS ChErrorUnknownElementInAst ChErrorType = 37 // UNKNOWN_ELEMENT_IN_AST ChErrorCannotParseDate ChErrorType = 38 // CANNOT_PARSE_DATE ChErrorTooLargeSizeCompressed ChErrorType = 39 // TOO_LARGE_SIZE_COMPRESSED ChErrorChecksumDoesntMatch ChErrorType = 40 // CHECKSUM_DOESNT_MATCH ChErrorCannotParseDatetime ChErrorType = 41 // CANNOT_PARSE_DATETIME ChErrorNumberOfArgumentsDoesntMatch ChErrorType = 42 // NUMBER_OF_ARGUMENTS_DOESNT_MATCH ChErrorIllegalTypeOfArgument ChErrorType = 43 // ILLEGAL_TYPE_OF_ARGUMENT ChErrorIllegalColumn ChErrorType = 44 // ILLEGAL_COLUMN ChErrorIllegalNumberOfResultColumns ChErrorType = 45 // ILLEGAL_NUMBER_OF_RESULT_COLUMNS ChErrorUnknownFunction ChErrorType = 46 // UNKNOWN_FUNCTION ChErrorUnknownIdentifier ChErrorType = 47 // UNKNOWN_IDENTIFIER ChErrorNotImplemented ChErrorType = 48 // NOT_IMPLEMENTED ChErrorLogicalError ChErrorType = 49 // LOGICAL_ERROR 
ChErrorUnknownType ChErrorType = 50 // UNKNOWN_TYPE ChErrorEmptyListOfColumnsQueried ChErrorType = 51 // EMPTY_LIST_OF_COLUMNS_QUERIED ChErrorColumnQueriedMoreThanOnce ChErrorType = 52 // COLUMN_QUERIED_MORE_THAN_ONCE ChErrorTypeMismatch ChErrorType = 53 // TYPE_MISMATCH ChErrorStorageDoesntAllowParameters ChErrorType = 54 // STORAGE_DOESNT_ALLOW_PARAMETERS ChErrorStorageRequiresParameter ChErrorType = 55 // STORAGE_REQUIRES_PARAMETER ChErrorUnknownStorage ChErrorType = 56 // UNKNOWN_STORAGE ChErrorTableAlreadyExists ChErrorType = 57 // TABLE_ALREADY_EXISTS ChErrorTableMetadataAlreadyExists ChErrorType = 58 // TABLE_METADATA_ALREADY_EXISTS ChErrorIllegalTypeOfColumnForFilter ChErrorType = 59 // ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER ChErrorUnknownTable ChErrorType = 60 // UNKNOWN_TABLE ChErrorOnlyFilterColumnInBlock ChErrorType = 61 // ONLY_FILTER_COLUMN_IN_BLOCK ChErrorSyntaxError ChErrorType = 62 // SYNTAX_ERROR ChErrorUnknownAggregateFunction ChErrorType = 63 // UNKNOWN_AGGREGATE_FUNCTION ChErrorCannotReadAggregateFunctionFromText ChErrorType = 64 // CANNOT_READ_AGGREGATE_FUNCTION_FROM_TEXT ChErrorCannotWriteAggregateFunctionAsText ChErrorType = 65 // CANNOT_WRITE_AGGREGATE_FUNCTION_AS_TEXT ChErrorNotAColumn ChErrorType = 66 // NOT_A_COLUMN ChErrorIllegalKeyOfAggregation ChErrorType = 67 // ILLEGAL_KEY_OF_AGGREGATION ChErrorCannotGetSizeOfField ChErrorType = 68 // CANNOT_GET_SIZE_OF_FIELD ChErrorArgumentOutOfBound ChErrorType = 69 // ARGUMENT_OUT_OF_BOUND ChErrorCannotConvertType ChErrorType = 70 // CANNOT_CONVERT_TYPE ChErrorCannotWriteAfterEndOfBuffer ChErrorType = 71 // CANNOT_WRITE_AFTER_END_OF_BUFFER ChErrorCannotParseNumber ChErrorType = 72 // CANNOT_PARSE_NUMBER ChErrorUnknownFormat ChErrorType = 73 // UNKNOWN_FORMAT ChErrorCannotReadFromFileDescriptor ChErrorType = 74 // CANNOT_READ_FROM_FILE_DESCRIPTOR ChErrorCannotWriteToFileDescriptor ChErrorType = 75 // CANNOT_WRITE_TO_FILE_DESCRIPTOR ChErrorCannotOpenFile ChErrorType = 76 // CANNOT_OPEN_FILE 
ChErrorCannotCloseFile ChErrorType = 77 // CANNOT_CLOSE_FILE ChErrorUnknownTypeOfQuery ChErrorType = 78 // UNKNOWN_TYPE_OF_QUERY ChErrorIncorrectFileName ChErrorType = 79 // INCORRECT_FILE_NAME ChErrorIncorrectQuery ChErrorType = 80 // INCORRECT_QUERY ChErrorUnknownDatabase ChErrorType = 81 // UNKNOWN_DATABASE ChErrorDatabaseAlreadyExists ChErrorType = 82 // DATABASE_ALREADY_EXISTS ChErrorDirectoryDoesntExist ChErrorType = 83 // DIRECTORY_DOESNT_EXIST ChErrorDirectoryAlreadyExists ChErrorType = 84 // DIRECTORY_ALREADY_EXISTS ChErrorFormatIsNotSuitableForInput ChErrorType = 85 // FORMAT_IS_NOT_SUITABLE_FOR_INPUT ChErrorReceivedErrorFromRemoteIoServer ChErrorType = 86 // RECEIVED_ERROR_FROM_REMOTE_IO_SERVER ChErrorCannotSeekThroughFile ChErrorType = 87 // CANNOT_SEEK_THROUGH_FILE ChErrorCannotTruncateFile ChErrorType = 88 // CANNOT_TRUNCATE_FILE ChErrorUnknownCompressionMethod ChErrorType = 89 // UNKNOWN_COMPRESSION_METHOD ChErrorEmptyListOfColumnsPassed ChErrorType = 90 // EMPTY_LIST_OF_COLUMNS_PASSED ChErrorSizesOfMarksFilesAreInconsistent ChErrorType = 91 // SIZES_OF_MARKS_FILES_ARE_INCONSISTENT ChErrorEmptyDataPassed ChErrorType = 92 // EMPTY_DATA_PASSED ChErrorUnknownAggregatedDataVariant ChErrorType = 93 // UNKNOWN_AGGREGATED_DATA_VARIANT ChErrorCannotMergeDifferentAggregatedDataVariants ChErrorType = 94 // CANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTS ChErrorCannotReadFromSocket ChErrorType = 95 // CANNOT_READ_FROM_SOCKET ChErrorCannotWriteToSocket ChErrorType = 96 // CANNOT_WRITE_TO_SOCKET ChErrorCannotReadAllDataFromChunkedInput ChErrorType = 97 // CANNOT_READ_ALL_DATA_FROM_CHUNKED_INPUT ChErrorCannotWriteToEmptyBlockOutputStream ChErrorType = 98 // CANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAM ChErrorUnknownPacketFromClient ChErrorType = 99 // UNKNOWN_PACKET_FROM_CLIENT ChErrorUnknownPacketFromServer ChErrorType = 100 // UNKNOWN_PACKET_FROM_SERVER ChErrorUnexpectedPacketFromClient ChErrorType = 101 // UNEXPECTED_PACKET_FROM_CLIENT 
ChErrorUnexpectedPacketFromServer ChErrorType = 102 // UNEXPECTED_PACKET_FROM_SERVER ChErrorReceivedDataForWrongQueryID ChErrorType = 103 // RECEIVED_DATA_FOR_WRONG_QUERY_ID ChErrorTooSmallBufferSize ChErrorType = 104 // TOO_SMALL_BUFFER_SIZE ChErrorCannotReadHistory ChErrorType = 105 // CANNOT_READ_HISTORY ChErrorCannotAppendHistory ChErrorType = 106 // CANNOT_APPEND_HISTORY ChErrorFileDoesntExist ChErrorType = 107 // FILE_DOESNT_EXIST ChErrorNoDataToInsert ChErrorType = 108 // NO_DATA_TO_INSERT ChErrorCannotBlockSignal ChErrorType = 109 // CANNOT_BLOCK_SIGNAL ChErrorCannotUnblockSignal ChErrorType = 110 // CANNOT_UNBLOCK_SIGNAL ChErrorCannotManipulateSigset ChErrorType = 111 // CANNOT_MANIPULATE_SIGSET ChErrorCannotWaitForSignal ChErrorType = 112 // CANNOT_WAIT_FOR_SIGNAL ChErrorThereIsNoSession ChErrorType = 113 // THERE_IS_NO_SESSION ChErrorCannotClockGettime ChErrorType = 114 // CANNOT_CLOCK_GETTIME ChErrorUnknownSetting ChErrorType = 115 // UNKNOWN_SETTING ChErrorThereIsNoDefaultValue ChErrorType = 116 // THERE_IS_NO_DEFAULT_VALUE ChErrorIncorrectData ChErrorType = 117 // INCORRECT_DATA ChErrorEngineRequired ChErrorType = 119 // ENGINE_REQUIRED ChErrorCannotInsertValueOfDifferentSizeIntoTuple ChErrorType = 120 // CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE ChErrorUnsupportedJoinKeys ChErrorType = 121 // UNSUPPORTED_JOIN_KEYS ChErrorIncompatibleColumns ChErrorType = 122 // INCOMPATIBLE_COLUMNS ChErrorUnknownTypeOfAstNode ChErrorType = 123 // UNKNOWN_TYPE_OF_AST_NODE ChErrorIncorrectElementOfSet ChErrorType = 124 // INCORRECT_ELEMENT_OF_SET ChErrorIncorrectResultOfScalarSubquery ChErrorType = 125 // INCORRECT_RESULT_OF_SCALAR_SUBQUERY ChErrorCannotGetReturnType ChErrorType = 126 // CANNOT_GET_RETURN_TYPE ChErrorIllegalIndex ChErrorType = 127 // ILLEGAL_INDEX ChErrorTooLargeArraySize ChErrorType = 128 // TOO_LARGE_ARRAY_SIZE ChErrorFunctionIsSpecial ChErrorType = 129 // FUNCTION_IS_SPECIAL ChErrorCannotReadArrayFromText ChErrorType = 130 // 
CANNOT_READ_ARRAY_FROM_TEXT ChErrorTooLargeStringSize ChErrorType = 131 // TOO_LARGE_STRING_SIZE ChErrorAggregateFunctionDoesntAllowParameters ChErrorType = 133 // AGGREGATE_FUNCTION_DOESNT_ALLOW_PARAMETERS ChErrorParametersToAggregateFunctionsMustBeLiterals ChErrorType = 134 // PARAMETERS_TO_AGGREGATE_FUNCTIONS_MUST_BE_LITERALS ChErrorZeroArrayOrTupleIndex ChErrorType = 135 // ZERO_ARRAY_OR_TUPLE_INDEX ChErrorUnknownElementInConfig ChErrorType = 137 // UNKNOWN_ELEMENT_IN_CONFIG ChErrorExcessiveElementInConfig ChErrorType = 138 // EXCESSIVE_ELEMENT_IN_CONFIG ChErrorNoElementsInConfig ChErrorType = 139 // NO_ELEMENTS_IN_CONFIG ChErrorAllRequestedColumnsAreMissing ChErrorType = 140 // ALL_REQUESTED_COLUMNS_ARE_MISSING ChErrorSamplingNotSupported ChErrorType = 141 // SAMPLING_NOT_SUPPORTED ChErrorNotFoundNode ChErrorType = 142 // NOT_FOUND_NODE ChErrorFoundMoreThanOneNode ChErrorType = 143 // FOUND_MORE_THAN_ONE_NODE ChErrorFirstDateIsBiggerThanLastDate ChErrorType = 144 // FIRST_DATE_IS_BIGGER_THAN_LAST_DATE ChErrorUnknownOverflowMode ChErrorType = 145 // UNKNOWN_OVERFLOW_MODE ChErrorQuerySectionDoesntMakeSense ChErrorType = 146 // QUERY_SECTION_DOESNT_MAKE_SENSE ChErrorNotFoundFunctionElementForAggregate ChErrorType = 147 // NOT_FOUND_FUNCTION_ELEMENT_FOR_AGGREGATE ChErrorNotFoundRelationElementForCondition ChErrorType = 148 // NOT_FOUND_RELATION_ELEMENT_FOR_CONDITION ChErrorNotFoundRhsElementForCondition ChErrorType = 149 // NOT_FOUND_RHS_ELEMENT_FOR_CONDITION ChErrorEmptyListOfAttributesPassed ChErrorType = 150 // EMPTY_LIST_OF_ATTRIBUTES_PASSED ChErrorIndexOfColumnInSortClauseIsOutOfRange ChErrorType = 151 // INDEX_OF_COLUMN_IN_SORT_CLAUSE_IS_OUT_OF_RANGE ChErrorUnknownDirectionOfSorting ChErrorType = 152 // UNKNOWN_DIRECTION_OF_SORTING ChErrorIllegalDivision ChErrorType = 153 // ILLEGAL_DIVISION ChErrorAggregateFunctionNotApplicable ChErrorType = 154 // AGGREGATE_FUNCTION_NOT_APPLICABLE ChErrorUnknownRelation ChErrorType = 155 // UNKNOWN_RELATION 
ChErrorDictionariesWasNotLoaded ChErrorType = 156 // DICTIONARIES_WAS_NOT_LOADED ChErrorIllegalOverflowMode ChErrorType = 157 // ILLEGAL_OVERFLOW_MODE ChErrorTooManyRows ChErrorType = 158 // TOO_MANY_ROWS ChErrorTimeoutExceeded ChErrorType = 159 // TIMEOUT_EXCEEDED ChErrorTooSlow ChErrorType = 160 // TOO_SLOW ChErrorTooManyColumns ChErrorType = 161 // TOO_MANY_COLUMNS ChErrorTooDeepSubqueries ChErrorType = 162 // TOO_DEEP_SUBQUERIES ChErrorTooDeepPipeline ChErrorType = 163 // TOO_DEEP_PIPELINE ChErrorReadonly ChErrorType = 164 // READONLY ChErrorTooManyTemporaryColumns ChErrorType = 165 // TOO_MANY_TEMPORARY_COLUMNS ChErrorTooManyTemporaryNonConstColumns ChErrorType = 166 // TOO_MANY_TEMPORARY_NON_CONST_COLUMNS ChErrorTooDeepAst ChErrorType = 167 // TOO_DEEP_AST ChErrorTooBigAst ChErrorType = 168 // TOO_BIG_AST ChErrorBadTypeOfField ChErrorType = 169 // BAD_TYPE_OF_FIELD ChErrorBadGet ChErrorType = 170 // BAD_GET ChErrorCannotCreateDirectory ChErrorType = 172 // CANNOT_CREATE_DIRECTORY ChErrorCannotAllocateMemory ChErrorType = 173 // CANNOT_ALLOCATE_MEMORY ChErrorCyclicAliases ChErrorType = 174 // CYCLIC_ALIASES ChErrorChunkNotFound ChErrorType = 176 // CHUNK_NOT_FOUND ChErrorDuplicateChunkName ChErrorType = 177 // DUPLICATE_CHUNK_NAME ChErrorMultipleAliasesForExpression ChErrorType = 178 // MULTIPLE_ALIASES_FOR_EXPRESSION ChErrorMultipleExpressionsForAlias ChErrorType = 179 // MULTIPLE_EXPRESSIONS_FOR_ALIAS ChErrorThereIsNoProfile ChErrorType = 180 // THERE_IS_NO_PROFILE ChErrorIllegalFinal ChErrorType = 181 // ILLEGAL_FINAL ChErrorIllegalPrewhere ChErrorType = 182 // ILLEGAL_PREWHERE ChErrorUnexpectedExpression ChErrorType = 183 // UNEXPECTED_EXPRESSION ChErrorIllegalAggregation ChErrorType = 184 // ILLEGAL_AGGREGATION ChErrorUnsupportedMyisamBlockType ChErrorType = 185 // UNSUPPORTED_MYISAM_BLOCK_TYPE ChErrorUnsupportedCollationLocale ChErrorType = 186 // UNSUPPORTED_COLLATION_LOCALE ChErrorCollationComparisonFailed ChErrorType = 187 // 
COLLATION_COMPARISON_FAILED ChErrorUnknownAction ChErrorType = 188 // UNKNOWN_ACTION ChErrorTableMustNotBeCreatedManually ChErrorType = 189 // TABLE_MUST_NOT_BE_CREATED_MANUALLY ChErrorSizesOfArraysDoesntMatch ChErrorType = 190 // SIZES_OF_ARRAYS_DOESNT_MATCH ChErrorSetSizeLimitExceeded ChErrorType = 191 // SET_SIZE_LIMIT_EXCEEDED ChErrorUnknownUser ChErrorType = 192 // UNKNOWN_USER ChErrorWrongPassword ChErrorType = 193 // WRONG_PASSWORD ChErrorRequiredPassword ChErrorType = 194 // REQUIRED_PASSWORD ChErrorIPAddressNotAllowed ChErrorType = 195 // IP_ADDRESS_NOT_ALLOWED ChErrorUnknownAddressPatternType ChErrorType = 196 // UNKNOWN_ADDRESS_PATTERN_TYPE ChErrorServerRevisionIsTooOld ChErrorType = 197 // SERVER_REVISION_IS_TOO_OLD ChErrorDNSError ChErrorType = 198 // DNS_ERROR ChErrorUnknownQuota ChErrorType = 199 // UNKNOWN_QUOTA ChErrorQuotaDoesntAllowKeys ChErrorType = 200 // QUOTA_DOESNT_ALLOW_KEYS ChErrorQuotaExpired ChErrorType = 201 // QUOTA_EXPIRED ChErrorTooManySimultaneousQueries ChErrorType = 202 // TOO_MANY_SIMULTANEOUS_QUERIES ChErrorNoFreeConnection ChErrorType = 203 // NO_FREE_CONNECTION ChErrorCannotFsync ChErrorType = 204 // CANNOT_FSYNC ChErrorNestedTypeTooDeep ChErrorType = 205 // NESTED_TYPE_TOO_DEEP ChErrorAliasRequired ChErrorType = 206 // ALIAS_REQUIRED ChErrorAmbiguousIdentifier ChErrorType = 207 // AMBIGUOUS_IDENTIFIER ChErrorEmptyNestedTable ChErrorType = 208 // EMPTY_NESTED_TABLE ChErrorSocketTimeout ChErrorType = 209 // SOCKET_TIMEOUT ChErrorNetworkError ChErrorType = 210 // NETWORK_ERROR ChErrorEmptyQuery ChErrorType = 211 // EMPTY_QUERY ChErrorUnknownLoadBalancing ChErrorType = 212 // UNKNOWN_LOAD_BALANCING ChErrorUnknownTotalsMode ChErrorType = 213 // UNKNOWN_TOTALS_MODE ChErrorCannotStatvfs ChErrorType = 214 // CANNOT_STATVFS ChErrorNotAnAggregate ChErrorType = 215 // NOT_AN_AGGREGATE ChErrorQueryWithSameIDIsAlreadyRunning ChErrorType = 216 // QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING ChErrorClientHasConnectedToWrongPort ChErrorType = 217 
// CLIENT_HAS_CONNECTED_TO_WRONG_PORT ChErrorTableIsDropped ChErrorType = 218 // TABLE_IS_DROPPED ChErrorDatabaseNotEmpty ChErrorType = 219 // DATABASE_NOT_EMPTY ChErrorDuplicateInterserverIoEndpoint ChErrorType = 220 // DUPLICATE_INTERSERVER_IO_ENDPOINT ChErrorNoSuchInterserverIoEndpoint ChErrorType = 221 // NO_SUCH_INTERSERVER_IO_ENDPOINT ChErrorAddingReplicaToNonEmptyTable ChErrorType = 222 // ADDING_REPLICA_TO_NON_EMPTY_TABLE ChErrorUnexpectedAstStructure ChErrorType = 223 // UNEXPECTED_AST_STRUCTURE ChErrorReplicaIsAlreadyActive ChErrorType = 224 // REPLICA_IS_ALREADY_ACTIVE ChErrorNoZookeeper ChErrorType = 225 // NO_ZOOKEEPER ChErrorNoFileInDataPart ChErrorType = 226 // NO_FILE_IN_DATA_PART ChErrorUnexpectedFileInDataPart ChErrorType = 227 // UNEXPECTED_FILE_IN_DATA_PART ChErrorBadSizeOfFileInDataPart ChErrorType = 228 // BAD_SIZE_OF_FILE_IN_DATA_PART ChErrorQueryIsTooLarge ChErrorType = 229 // QUERY_IS_TOO_LARGE ChErrorNotFoundExpectedDataPart ChErrorType = 230 // NOT_FOUND_EXPECTED_DATA_PART ChErrorTooManyUnexpectedDataParts ChErrorType = 231 // TOO_MANY_UNEXPECTED_DATA_PARTS ChErrorNoSuchDataPart ChErrorType = 232 // NO_SUCH_DATA_PART ChErrorBadDataPartName ChErrorType = 233 // BAD_DATA_PART_NAME ChErrorNoReplicaHasPart ChErrorType = 234 // NO_REPLICA_HAS_PART ChErrorDuplicateDataPart ChErrorType = 235 // DUPLICATE_DATA_PART ChErrorAborted ChErrorType = 236 // ABORTED ChErrorNoReplicaNameGiven ChErrorType = 237 // NO_REPLICA_NAME_GIVEN ChErrorFormatVersionTooOld ChErrorType = 238 // FORMAT_VERSION_TOO_OLD ChErrorCannotMunmap ChErrorType = 239 // CANNOT_MUNMAP ChErrorCannotMremap ChErrorType = 240 // CANNOT_MREMAP ChErrorMemoryLimitExceeded ChErrorType = 241 // MEMORY_LIMIT_EXCEEDED ChErrorTableIsReadOnly ChErrorType = 242 // TABLE_IS_READ_ONLY ChErrorNotEnoughSpace ChErrorType = 243 // NOT_ENOUGH_SPACE ChErrorUnexpectedZookeeperError ChErrorType = 244 // UNEXPECTED_ZOOKEEPER_ERROR ChErrorCorruptedData ChErrorType = 246 // CORRUPTED_DATA 
ChErrorIncorrectMark ChErrorType = 247 // INCORRECT_MARK ChErrorInvalidPartitionValue ChErrorType = 248 // INVALID_PARTITION_VALUE ChErrorNotEnoughBlockNumbers ChErrorType = 250 // NOT_ENOUGH_BLOCK_NUMBERS ChErrorNoSuchReplica ChErrorType = 251 // NO_SUCH_REPLICA ChErrorTooManyParts ChErrorType = 252 // TOO_MANY_PARTS ChErrorReplicaIsAlreadyExist ChErrorType = 253 // REPLICA_IS_ALREADY_EXIST ChErrorNoActiveReplicas ChErrorType = 254 // NO_ACTIVE_REPLICAS ChErrorTooManyRetriesToFetchParts ChErrorType = 255 // TOO_MANY_RETRIES_TO_FETCH_PARTS ChErrorPartitionAlreadyExists ChErrorType = 256 // PARTITION_ALREADY_EXISTS ChErrorPartitionDoesntExist ChErrorType = 257 // PARTITION_DOESNT_EXIST ChErrorUnionAllResultStructuresMismatch ChErrorType = 258 // UNION_ALL_RESULT_STRUCTURES_MISMATCH ChErrorClientOutputFormatSpecified ChErrorType = 260 // CLIENT_OUTPUT_FORMAT_SPECIFIED ChErrorUnknownBlockInfoField ChErrorType = 261 // UNKNOWN_BLOCK_INFO_FIELD ChErrorBadCollation ChErrorType = 262 // BAD_COLLATION ChErrorCannotCompileCode ChErrorType = 263 // CANNOT_COMPILE_CODE ChErrorIncompatibleTypeOfJoin ChErrorType = 264 // INCOMPATIBLE_TYPE_OF_JOIN ChErrorNoAvailableReplica ChErrorType = 265 // NO_AVAILABLE_REPLICA ChErrorMismatchReplicasDataSources ChErrorType = 266 // MISMATCH_REPLICAS_DATA_SOURCES ChErrorStorageDoesntSupportParallelReplicas ChErrorType = 267 // STORAGE_DOESNT_SUPPORT_PARALLEL_REPLICAS ChErrorCpuidError ChErrorType = 268 // CPUID_ERROR ChErrorInfiniteLoop ChErrorType = 269 // INFINITE_LOOP ChErrorCannotCompress ChErrorType = 270 // CANNOT_COMPRESS ChErrorCannotDecompress ChErrorType = 271 // CANNOT_DECOMPRESS ChErrorCannotIoSubmit ChErrorType = 272 // CANNOT_IO_SUBMIT ChErrorCannotIoGetevents ChErrorType = 273 // CANNOT_IO_GETEVENTS ChErrorAioReadError ChErrorType = 274 // AIO_READ_ERROR ChErrorAioWriteError ChErrorType = 275 // AIO_WRITE_ERROR ChErrorIndexNotUsed ChErrorType = 277 // INDEX_NOT_USED ChErrorAllConnectionTriesFailed ChErrorType = 279 // 
ALL_CONNECTION_TRIES_FAILED ChErrorNoAvailableData ChErrorType = 280 // NO_AVAILABLE_DATA ChErrorDictionaryIsEmpty ChErrorType = 281 // DICTIONARY_IS_EMPTY ChErrorIncorrectIndex ChErrorType = 282 // INCORRECT_INDEX ChErrorUnknownDistributedProductMode ChErrorType = 283 // UNKNOWN_DISTRIBUTED_PRODUCT_MODE ChErrorWrongGlobalSubquery ChErrorType = 284 // WRONG_GLOBAL_SUBQUERY ChErrorTooFewLiveReplicas ChErrorType = 285 // TOO_FEW_LIVE_REPLICAS ChErrorUnsatisfiedQuorumForPreviousWrite ChErrorType = 286 // UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE ChErrorUnknownFormatVersion ChErrorType = 287 // UNKNOWN_FORMAT_VERSION ChErrorDistributedInJoinSubqueryDenied ChErrorType = 288 // DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED ChErrorReplicaIsNotInQuorum ChErrorType = 289 // REPLICA_IS_NOT_IN_QUORUM ChErrorLimitExceeded ChErrorType = 290 // LIMIT_EXCEEDED ChErrorDatabaseAccessDenied ChErrorType = 291 // DATABASE_ACCESS_DENIED ChErrorMongodbCannotAuthenticate ChErrorType = 293 // MONGODB_CANNOT_AUTHENTICATE ChErrorInvalidBlockExtraInfo ChErrorType = 294 // INVALID_BLOCK_EXTRA_INFO ChErrorReceivedEmptyData ChErrorType = 295 // RECEIVED_EMPTY_DATA ChErrorNoRemoteShardFound ChErrorType = 296 // NO_REMOTE_SHARD_FOUND ChErrorShardHasNoConnections ChErrorType = 297 // SHARD_HAS_NO_CONNECTIONS ChErrorCannotPipe ChErrorType = 298 // CANNOT_PIPE ChErrorCannotFork ChErrorType = 299 // CANNOT_FORK ChErrorCannotDlsym ChErrorType = 300 // CANNOT_DLSYM ChErrorCannotCreateChildProcess ChErrorType = 301 // CANNOT_CREATE_CHILD_PROCESS ChErrorChildWasNotExitedNormally ChErrorType = 302 // CHILD_WAS_NOT_EXITED_NORMALLY ChErrorCannotSelect ChErrorType = 303 // CANNOT_SELECT ChErrorCannotWaitpid ChErrorType = 304 // CANNOT_WAITPID ChErrorTableWasNotDropped ChErrorType = 305 // TABLE_WAS_NOT_DROPPED ChErrorTooDeepRecursion ChErrorType = 306 // TOO_DEEP_RECURSION ChErrorTooManyBytes ChErrorType = 307 // TOO_MANY_BYTES ChErrorUnexpectedNodeInZookeeper ChErrorType = 308 // UNEXPECTED_NODE_IN_ZOOKEEPER 
ChErrorFunctionCannotHaveParameters ChErrorType = 309 // FUNCTION_CANNOT_HAVE_PARAMETERS ChErrorInvalidShardWeight ChErrorType = 317 // INVALID_SHARD_WEIGHT ChErrorInvalidConfigParameter ChErrorType = 318 // INVALID_CONFIG_PARAMETER ChErrorUnknownStatusOfInsert ChErrorType = 319 // UNKNOWN_STATUS_OF_INSERT ChErrorValueIsOutOfRangeOfDataType ChErrorType = 321 // VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE ChErrorBarrierTimeout ChErrorType = 335 // BARRIER_TIMEOUT ChErrorUnknownDatabaseEngine ChErrorType = 336 // UNKNOWN_DATABASE_ENGINE ChErrorDdlGuardIsActive ChErrorType = 337 // DDL_GUARD_IS_ACTIVE ChErrorUnfinished ChErrorType = 341 // UNFINISHED ChErrorMetadataMismatch ChErrorType = 342 // METADATA_MISMATCH ChErrorSupportIsDisabled ChErrorType = 344 // SUPPORT_IS_DISABLED ChErrorTableDiffersTooMuch ChErrorType = 345 // TABLE_DIFFERS_TOO_MUCH ChErrorCannotConvertCharset ChErrorType = 346 // CANNOT_CONVERT_CHARSET ChErrorCannotLoadConfig ChErrorType = 347 // CANNOT_LOAD_CONFIG ChErrorCannotInsertNullInOrdinaryColumn ChErrorType = 349 // CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN ChErrorIncompatibleSourceTables ChErrorType = 350 // INCOMPATIBLE_SOURCE_TABLES ChErrorAmbiguousTableName ChErrorType = 351 // AMBIGUOUS_TABLE_NAME ChErrorAmbiguousColumnName ChErrorType = 352 // AMBIGUOUS_COLUMN_NAME ChErrorIndexOfPositionalArgumentIsOutOfRange ChErrorType = 353 // INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE ChErrorZlibInflateFailed ChErrorType = 354 // ZLIB_INFLATE_FAILED ChErrorZlibDeflateFailed ChErrorType = 355 // ZLIB_DEFLATE_FAILED ChErrorBadLambda ChErrorType = 356 // BAD_LAMBDA ChErrorReservedIdentifierName ChErrorType = 357 // RESERVED_IDENTIFIER_NAME ChErrorIntoOutfileNotAllowed ChErrorType = 358 // INTO_OUTFILE_NOT_ALLOWED ChErrorTableSizeExceedsMaxDropSizeLimit ChErrorType = 359 // TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT ChErrorCannotCreateCharsetConverter ChErrorType = 360 // CANNOT_CREATE_CHARSET_CONVERTER ChErrorSeekPositionOutOfBound ChErrorType = 361 // 
SEEK_POSITION_OUT_OF_BOUND ChErrorCurrentWriteBufferIsExhausted ChErrorType = 362 // CURRENT_WRITE_BUFFER_IS_EXHAUSTED ChErrorCannotCreateIoBuffer ChErrorType = 363 // CANNOT_CREATE_IO_BUFFER ChErrorReceivedErrorTooManyRequests ChErrorType = 364 // RECEIVED_ERROR_TOO_MANY_REQUESTS ChErrorSizesOfNestedColumnsAreInconsistent ChErrorType = 366 // SIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENT ChErrorTooManyFetches ChErrorType = 367 // TOO_MANY_FETCHES ChErrorAllReplicasAreStale ChErrorType = 369 // ALL_REPLICAS_ARE_STALE ChErrorDataTypeCannotBeUsedInTables ChErrorType = 370 // DATA_TYPE_CANNOT_BE_USED_IN_TABLES ChErrorInconsistentClusterDefinition ChErrorType = 371 // INCONSISTENT_CLUSTER_DEFINITION ChErrorSessionNotFound ChErrorType = 372 // SESSION_NOT_FOUND ChErrorSessionIsLocked ChErrorType = 373 // SESSION_IS_LOCKED ChErrorInvalidSessionTimeout ChErrorType = 374 // INVALID_SESSION_TIMEOUT ChErrorCannotDlopen ChErrorType = 375 // CANNOT_DLOPEN ChErrorCannotParseUUID ChErrorType = 376 // CANNOT_PARSE_UUID ChErrorIllegalSyntaxForDataType ChErrorType = 377 // ILLEGAL_SYNTAX_FOR_DATA_TYPE ChErrorDataTypeCannotHaveArguments ChErrorType = 378 // DATA_TYPE_CANNOT_HAVE_ARGUMENTS ChErrorUnknownStatusOfDistributedDdlTask ChErrorType = 379 // UNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASK ChErrorCannotKill ChErrorType = 380 // CANNOT_KILL ChErrorHTTPLengthRequired ChErrorType = 381 // HTTP_LENGTH_REQUIRED ChErrorCannotLoadCatboostModel ChErrorType = 382 // CANNOT_LOAD_CATBOOST_MODEL ChErrorCannotApplyCatboostModel ChErrorType = 383 // CANNOT_APPLY_CATBOOST_MODEL ChErrorPartIsTemporarilyLocked ChErrorType = 384 // PART_IS_TEMPORARILY_LOCKED ChErrorMultipleStreamsRequired ChErrorType = 385 // MULTIPLE_STREAMS_REQUIRED ChErrorNoCommonType ChErrorType = 386 // NO_COMMON_TYPE ChErrorDictionaryAlreadyExists ChErrorType = 387 // DICTIONARY_ALREADY_EXISTS ChErrorCannotAssignOptimize ChErrorType = 388 // CANNOT_ASSIGN_OPTIMIZE ChErrorInsertWasDeduplicated ChErrorType = 389 // 
INSERT_WAS_DEDUPLICATED ChErrorCannotGetCreateTableQuery ChErrorType = 390 // CANNOT_GET_CREATE_TABLE_QUERY ChErrorExternalLibraryError ChErrorType = 391 // EXTERNAL_LIBRARY_ERROR ChErrorQueryIsProhibited ChErrorType = 392 // QUERY_IS_PROHIBITED ChErrorThereIsNoQuery ChErrorType = 393 // THERE_IS_NO_QUERY ChErrorQueryWasCancelled ChErrorType = 394 // QUERY_WAS_CANCELED ChErrorFunctionThrowIfValueIsNonZero ChErrorType = 395 // FUNCTION_THROW_IF_VALUE_IS_NON_ZERO ChErrorTooManyRowsOrBytes ChErrorType = 396 // TOO_MANY_ROWS_OR_BYTES ChErrorQueryIsNotSupportedInMaterializedView ChErrorType = 397 // QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW ChErrorUnknownMutationCommand ChErrorType = 398 // UNKNOWN_MUTATION_COMMAND ChErrorFormatIsNotSuitableForOutput ChErrorType = 399 // FORMAT_IS_NOT_SUITABLE_FOR_OUTPUT ChErrorCannotStat ChErrorType = 400 // CANNOT_STAT ChErrorFeatureIsNotEnabledAtBuildTime ChErrorType = 401 // FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME ChErrorCannotIosetup ChErrorType = 402 // CANNOT_IOSETUP ChErrorInvalidJoinOnExpression ChErrorType = 403 // INVALID_JOIN_ON_EXPRESSION ChErrorBadOdbcConnectionString ChErrorType = 404 // BAD_ODBC_CONNECTION_STRING ChErrorPartitionSizeExceedsMaxDropSizeLimit ChErrorType = 405 // PARTITION_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT ChErrorTopAndLimitTogether ChErrorType = 406 // TOP_AND_LIMIT_TOGETHER ChErrorDecimalOverflow ChErrorType = 407 // DECIMAL_OVERFLOW ChErrorBadRequestParameter ChErrorType = 408 // BAD_REQUEST_PARAMETER ChErrorExternalExecutableNotFound ChErrorType = 409 // EXTERNAL_EXECUTABLE_NOT_FOUND ChErrorExternalServerIsNotResponding ChErrorType = 410 // EXTERNAL_SERVER_IS_NOT_RESPONDING ChErrorPthreadError ChErrorType = 411 // PTHREAD_ERROR ChErrorNetlinkError ChErrorType = 412 // NETLINK_ERROR ChErrorCannotSetSignalHandler ChErrorType = 413 // CANNOT_SET_SIGNAL_HANDLER ChErrorAllReplicasLost ChErrorType = 415 // ALL_REPLICAS_LOST ChErrorReplicaStatusChanged ChErrorType = 416 // REPLICA_STATUS_CHANGED 
ChErrorExpectedAllOrAny ChErrorType = 417 // EXPECTED_ALL_OR_ANY ChErrorUnknownJoin ChErrorType = 418 // UNKNOWN_JOIN ChErrorMultipleAssignmentsToColumn ChErrorType = 419 // MULTIPLE_ASSIGNMENTS_TO_COLUMN ChErrorCannotUpdateColumn ChErrorType = 420 // CANNOT_UPDATE_COLUMN ChErrorCannotAddDifferentAggregateStates ChErrorType = 421 // CANNOT_ADD_DIFFERENT_AGGREGATE_STATES ChErrorUnsupportedURIScheme ChErrorType = 422 // UNSUPPORTED_URI_SCHEME ChErrorCannotGettimeofday ChErrorType = 423 // CANNOT_GETTIMEOFDAY ChErrorCannotLink ChErrorType = 424 // CANNOT_LINK ChErrorSystemError ChErrorType = 425 // SYSTEM_ERROR ChErrorCannotCompileRegexp ChErrorType = 427 // CANNOT_COMPILE_REGEXP ChErrorUnknownLogLevel ChErrorType = 428 // UNKNOWN_LOG_LEVEL ChErrorFailedToGetpwuid ChErrorType = 429 // FAILED_TO_GETPWUID ChErrorMismatchingUsersForProcessAndData ChErrorType = 430 // MISMATCHING_USERS_FOR_PROCESS_AND_DATA ChErrorIllegalSyntaxForCodecType ChErrorType = 431 // ILLEGAL_SYNTAX_FOR_CODEC_TYPE ChErrorUnknownCodec ChErrorType = 432 // UNKNOWN_CODEC ChErrorIllegalCodecParameter ChErrorType = 433 // ILLEGAL_CODEC_PARAMETER ChErrorCannotParseProtobufSchema ChErrorType = 434 // CANNOT_PARSE_PROTOBUF_SCHEMA ChErrorNoColumnSerializedToRequiredProtobufField ChErrorType = 435 // NO_COLUMN_SERIALIZED_TO_REQUIRED_PROTOBUF_FIELD ChErrorProtobufBadCast ChErrorType = 436 // PROTOBUF_BAD_CAST ChErrorProtobufFieldNotRepeated ChErrorType = 437 // PROTOBUF_FIELD_NOT_REPEATED ChErrorDataTypeCannotBePromoted ChErrorType = 438 // DATA_TYPE_CANNOT_BE_PROMOTED ChErrorCannotScheduleTask ChErrorType = 439 // CANNOT_SCHEDULE_TASK ChErrorInvalidLimitExpression ChErrorType = 440 // INVALID_LIMIT_EXPRESSION ChErrorCannotParseDomainValueFromString ChErrorType = 441 // CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING ChErrorBadDatabaseForTemporaryTable ChErrorType = 442 // BAD_DATABASE_FOR_TEMPORARY_TABLE ChErrorNoColumnsSerializedToProtobufFields ChErrorType = 443 // NO_COLUMNS_SERIALIZED_TO_PROTOBUF_FIELDS 
ChErrorUnknownProtobufFormat ChErrorType = 444 // UNKNOWN_PROTOBUF_FORMAT ChErrorCannotMprotect ChErrorType = 445 // CANNOT_MPROTECT ChErrorFunctionNotAllowed ChErrorType = 446 // FUNCTION_NOT_ALLOWED ChErrorHyperscanCannotScanText ChErrorType = 447 // HYPERSCAN_CANNOT_SCAN_TEXT ChErrorBrotliReadFailed ChErrorType = 448 // BROTLI_READ_FAILED ChErrorBrotliWriteFailed ChErrorType = 449 // BROTLI_WRITE_FAILED ChErrorBadTTLExpression ChErrorType = 450 // BAD_TTL_EXPRESSION ChErrorBadTTLFile ChErrorType = 451 // BAD_TTL_FILE ChErrorSettingConstraintViolation ChErrorType = 452 // SETTING_CONSTRAINT_VIOLATION ChErrorMysqlClientInsufficientCapabilities ChErrorType = 453 // MYSQL_CLIENT_INSUFFICIENT_CAPABILITIES ChErrorOpensslError ChErrorType = 454 // OPENSSL_ERROR ChErrorSuspiciousTypeForLowCardinality ChErrorType = 455 // SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY ChErrorUnknownQueryParameter ChErrorType = 456 // UNKNOWN_QUERY_PARAMETER ChErrorBadQueryParameter ChErrorType = 457 // BAD_QUERY_PARAMETER ChErrorCannotUnlink ChErrorType = 458 // CANNOT_UNLINK ChErrorCannotSetThreadPriority ChErrorType = 459 // CANNOT_SET_THREAD_PRIORITY ChErrorCannotCreateTimer ChErrorType = 460 // CANNOT_CREATE_TIMER ChErrorCannotSetTimerPeriod ChErrorType = 461 // CANNOT_SET_TIMER_PERIOD ChErrorCannotDeleteTimer ChErrorType = 462 // CANNOT_DELETE_TIMER ChErrorCannotFcntl ChErrorType = 463 // CANNOT_FCNTL ChErrorCannotParseElf ChErrorType = 464 // CANNOT_PARSE_ELF ChErrorCannotParseDwarf ChErrorType = 465 // CANNOT_PARSE_DWARF ChErrorInsecurePath ChErrorType = 466 // INSECURE_PATH ChErrorCannotParseBool ChErrorType = 467 // CANNOT_PARSE_BOOL ChErrorCannotPthreadAttr ChErrorType = 468 // CANNOT_PTHREAD_ATTR ChErrorViolatedConstraint ChErrorType = 469 // VIOLATED_CONSTRAINT ChErrorQueryIsNotSupportedInLiveView ChErrorType = 470 // QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW ChErrorInvalidSettingValue ChErrorType = 471 // INVALID_SETTING_VALUE ChErrorReadonlySetting ChErrorType = 472 // READONLY_SETTING 
ChErrorDeadlockAvoided ChErrorType = 473 // DEADLOCK_AVOIDED ChErrorInvalidTemplateFormat ChErrorType = 474 // INVALID_TEMPLATE_FORMAT ChErrorInvalidWithFillExpression ChErrorType = 475 // INVALID_WITH_FILL_EXPRESSION ChErrorWithTiesWithoutOrderBy ChErrorType = 476 // WITH_TIES_WITHOUT_ORDER_BY ChErrorInvalidUsageOfInput ChErrorType = 477 // INVALID_USAGE_OF_INPUT ChErrorUnknownPolicy ChErrorType = 478 // UNKNOWN_POLICY ChErrorUnknownDisk ChErrorType = 479 // UNKNOWN_DISK ChErrorUnknownProtocol ChErrorType = 480 // UNKNOWN_PROTOCOL ChErrorPathAccessDenied ChErrorType = 481 // PATH_ACCESS_DENIED ChErrorDictionaryAccessDenied ChErrorType = 482 // DICTIONARY_ACCESS_DENIED ChErrorTooManyRedirects ChErrorType = 483 // TOO_MANY_REDIRECTS ChErrorInternalRedisError ChErrorType = 484 // INTERNAL_REDIS_ERROR ChErrorScalarAlreadyExists ChErrorType = 485 // SCALAR_ALREADY_EXISTS ChErrorCannotGetCreateDictionaryQuery ChErrorType = 487 // CANNOT_GET_CREATE_DICTIONARY_QUERY ChErrorUnknownDictionary ChErrorType = 488 // UNKNOWN_DICTIONARY ChErrorIncorrectDictionaryDefinition ChErrorType = 489 // INCORRECT_DICTIONARY_DEFINITION ChErrorCannotFormatDatetime ChErrorType = 490 // CANNOT_FORMAT_DATETIME ChErrorUnacceptableURL ChErrorType = 491 // UNACCEPTABLE_URL ChErrorAccessEntityNotFound ChErrorType = 492 // ACCESS_ENTITY_NOT_FOUND ChErrorAccessEntityAlreadyExists ChErrorType = 493 // ACCESS_ENTITY_ALREADY_EXISTS ChErrorAccessEntityFoundDuplicates ChErrorType = 494 // ACCESS_ENTITY_FOUND_DUPLICATES ChErrorAccessStorageReadonly ChErrorType = 495 // ACCESS_STORAGE_READONLY ChErrorQuotaRequiresClientKey ChErrorType = 496 // QUOTA_REQUIRES_CLIENT_KEY ChErrorAccessDenied ChErrorType = 497 // ACCESS_DENIED ChErrorLimitByWithTiesIsNotSupported ChErrorType = 498 // LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED ChErrorS3Error ChErrorType = 499 // S3_ERROR ChErrorAzureBlobStorageError ChErrorType = 500 // AZURE_BLOB_STORAGE_ERROR ChErrorCannotCreateDatabase ChErrorType = 501 // CANNOT_CREATE_DATABASE 
ChErrorCannotSigqueue ChErrorType = 502 // CANNOT_SIGQUEUE ChErrorAggregateFunctionThrow ChErrorType = 503 // AGGREGATE_FUNCTION_THROW ChErrorFileAlreadyExists ChErrorType = 504 // FILE_ALREADY_EXISTS ChErrorCannotDeleteDirectory ChErrorType = 505 // CANNOT_DELETE_DIRECTORY ChErrorUnexpectedErrorCode ChErrorType = 506 // UNEXPECTED_ERROR_CODE ChErrorUnableToSkipUnusedShards ChErrorType = 507 // UNABLE_TO_SKIP_UNUSED_SHARDS ChErrorUnknownAccessType ChErrorType = 508 // UNKNOWN_ACCESS_TYPE ChErrorInvalidGrant ChErrorType = 509 // INVALID_GRANT ChErrorCacheDictionaryUpdateFail ChErrorType = 510 // CACHE_DICTIONARY_UPDATE_FAIL ChErrorUnknownRole ChErrorType = 511 // UNKNOWN_ROLE ChErrorSetNonGrantedRole ChErrorType = 512 // SET_NON_GRANTED_ROLE ChErrorUnknownPartType ChErrorType = 513 // UNKNOWN_PART_TYPE ChErrorAccessStorageForInsertionNotFound ChErrorType = 514 // ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND ChErrorIncorrectAccessEntityDefinition ChErrorType = 515 // INCORRECT_ACCESS_ENTITY_DEFINITION ChErrorAuthenticationFailed ChErrorType = 516 // AUTHENTICATION_FAILED ChErrorCannotAssignAlter ChErrorType = 517 // CANNOT_ASSIGN_ALTER ChErrorCannotCommitOffset ChErrorType = 518 // CANNOT_COMMIT_OFFSET ChErrorNoRemoteShardAvailable ChErrorType = 519 // NO_REMOTE_SHARD_AVAILABLE ChErrorCannotDetachDictionaryAsTable ChErrorType = 520 // CANNOT_DETACH_DICTIONARY_AS_TABLE ChErrorAtomicRenameFail ChErrorType = 521 // ATOMIC_RENAME_FAIL ChErrorUnknownRowPolicy ChErrorType = 523 // UNKNOWN_ROW_POLICY ChErrorAlterOfColumnIsForbidden ChErrorType = 524 // ALTER_OF_COLUMN_IS_FORBIDDEN ChErrorIncorrectDiskIndex ChErrorType = 525 // INCORRECT_DISK_INDEX ChErrorNoSuitableFunctionImplementation ChErrorType = 527 // NO_SUITABLE_FUNCTION_IMPLEMENTATION ChErrorCassandraInternalError ChErrorType = 528 // CASSANDRA_INTERNAL_ERROR ChErrorNotALeader ChErrorType = 529 // NOT_A_LEADER ChErrorCannotConnectRabbitmq ChErrorType = 530 // CANNOT_CONNECT_RABBITMQ ChErrorCannotFstat ChErrorType = 531 // 
CANNOT_FSTAT ChErrorLdapError ChErrorType = 532 // LDAP_ERROR ChErrorInconsistentReservations ChErrorType = 533 // INCONSISTENT_RESERVATIONS ChErrorNoReservationsProvided ChErrorType = 534 // NO_RESERVATIONS_PROVIDED ChErrorUnknownRaidType ChErrorType = 535 // UNKNOWN_RAID_TYPE ChErrorCannotRestoreFromFieldDump ChErrorType = 536 // CANNOT_RESTORE_FROM_FIELD_DUMP ChErrorIllegalMysqlVariable ChErrorType = 537 // ILLEGAL_MYSQL_VARIABLE ChErrorMysqlSyntaxError ChErrorType = 538 // MYSQL_SYNTAX_ERROR ChErrorCannotBindRabbitmqExchange ChErrorType = 539 // CANNOT_BIND_RABBITMQ_EXCHANGE ChErrorCannotDeclareRabbitmqExchange ChErrorType = 540 // CANNOT_DECLARE_RABBITMQ_EXCHANGE ChErrorCannotCreateRabbitmqQueueBinding ChErrorType = 541 // CANNOT_CREATE_RABBITMQ_QUEUE_BINDING ChErrorCannotRemoveRabbitmqExchange ChErrorType = 542 // CANNOT_REMOVE_RABBITMQ_EXCHANGE ChErrorUnknownMysqlDatatypesSupportLevel ChErrorType = 543 // UNKNOWN_MYSQL_DATATYPES_SUPPORT_LEVEL ChErrorRowAndRowsTogether ChErrorType = 544 // ROW_AND_ROWS_TOGETHER ChErrorFirstAndNextTogether ChErrorType = 545 // FIRST_AND_NEXT_TOGETHER ChErrorNoRowDelimiter ChErrorType = 546 // NO_ROW_DELIMITER ChErrorInvalidRaidType ChErrorType = 547 // INVALID_RAID_TYPE ChErrorUnknownVolume ChErrorType = 548 // UNKNOWN_VOLUME ChErrorDataTypeCannotBeUsedInKey ChErrorType = 549 // DATA_TYPE_CANNOT_BE_USED_IN_KEY ChErrorConditionalTreeParentNotFound ChErrorType = 550 // CONDITIONAL_TREE_PARENT_NOT_FOUND ChErrorIllegalProjectionManipulator ChErrorType = 551 // ILLEGAL_PROJECTION_MANIPULATOR ChErrorUnrecognizedArguments ChErrorType = 552 // UNRECOGNIZED_ARGUMENTS ChErrorLzmaStreamEncoderFailed ChErrorType = 553 // LZMA_STREAM_ENCODER_FAILED ChErrorLzmaStreamDecoderFailed ChErrorType = 554 // LZMA_STREAM_DECODER_FAILED ChErrorRocksdbError ChErrorType = 555 // ROCKSDB_ERROR ChErrorSyncMysqlUserAccessErro ChErrorType = 556 // SYNC_MYSQL_USER_ACCESS_ERRO ChErrorUnknownUnion ChErrorType = 557 // UNKNOWN_UNION 
ChErrorExpectedAllOrDistinct ChErrorType = 558 // EXPECTED_ALL_OR_DISTINCT ChErrorInvalidGrpcQueryInfo ChErrorType = 559 // INVALID_GRPC_QUERY_INFO ChErrorZstdEncoderFailed ChErrorType = 560 // ZSTD_ENCODER_FAILED ChErrorZstdDecoderFailed ChErrorType = 561 // ZSTD_DECODER_FAILED ChErrorTldListNotFound ChErrorType = 562 // TLD_LIST_NOT_FOUND ChErrorCannotReadMapFromText ChErrorType = 563 // CANNOT_READ_MAP_FROM_TEXT ChErrorInterserverSchemeDoesntMatch ChErrorType = 564 // INTERSERVER_SCHEME_DOESNT_MATCH ChErrorTooManyPartitions ChErrorType = 565 // TOO_MANY_PARTITIONS ChErrorCannotRmdir ChErrorType = 566 // CANNOT_RMDIR ChErrorDuplicatedPartUuids ChErrorType = 567 // DUPLICATED_PART_UUIDS ChErrorRaftError ChErrorType = 568 // RAFT_ERROR ChErrorMultipleColumnsSerializedToSameProtobufField ChErrorType = 569 // MULTIPLE_COLUMNS_SERIALIZED_TO_SAME_PROTOBUF_FIELD ChErrorDataTypeIncompatibleWithProtobufField ChErrorType = 570 // DATA_TYPE_INCOMPATIBLE_WITH_PROTOBUF_FIELD ChErrorDatabaseReplicationFailed ChErrorType = 571 // DATABASE_REPLICATION_FAILED ChErrorTooManyQueryPlanOptimizations ChErrorType = 572 // TOO_MANY_QUERY_PLAN_OPTIMIZATIONS ChErrorEpollError ChErrorType = 573 // EPOLL_ERROR ChErrorDistributedTooManyPendingBytes ChErrorType = 574 // DISTRIBUTED_TOO_MANY_PENDING_BYTES ChErrorUnknownSnapshot ChErrorType = 575 // UNKNOWN_SNAPSHOT ChErrorKerberosError ChErrorType = 576 // KERBEROS_ERROR ChErrorInvalidShardID ChErrorType = 577 // INVALID_SHARD_ID ChErrorInvalidFormatInsertQueryWithData ChErrorType = 578 // INVALID_FORMAT_INSERT_QUERY_WITH_DATA ChErrorIncorrectPartType ChErrorType = 579 // INCORRECT_PART_TYPE ChErrorCannotSetRoundingMode ChErrorType = 580 // CANNOT_SET_ROUNDING_MODE ChErrorTooLargeDistributedDepth ChErrorType = 581 // TOO_LARGE_DISTRIBUTED_DEPTH ChErrorNoSuchProjectionInTable ChErrorType = 582 // NO_SUCH_PROJECTION_IN_TABLE ChErrorIllegalProjection ChErrorType = 583 // ILLEGAL_PROJECTION ChErrorProjectionNotUsed ChErrorType = 584 // 
PROJECTION_NOT_USED ChErrorCannotParseYaml ChErrorType = 585 // CANNOT_PARSE_YAML ChErrorCannotCreateFile ChErrorType = 586 // CANNOT_CREATE_FILE ChErrorConcurrentAccessNotSupported ChErrorType = 587 // CONCURRENT_ACCESS_NOT_SUPPORTED ChErrorDistributedBrokenBatchInfo ChErrorType = 588 // DISTRIBUTED_BROKEN_BATCH_INFO ChErrorDistributedBrokenBatchFiles ChErrorType = 589 // DISTRIBUTED_BROKEN_BATCH_FILES ChErrorCannotSysconf ChErrorType = 590 // CANNOT_SYSCONF ChErrorSqliteEngineError ChErrorType = 591 // SQLITE_ENGINE_ERROR ChErrorDataEncryptionError ChErrorType = 592 // DATA_ENCRYPTION_ERROR ChErrorZeroCopyReplicationError ChErrorType = 593 // ZERO_COPY_REPLICATION_ERROR ChErrorBzip2StreamDecoderFailed ChErrorType = 594 // BZIP2_STREAM_DECODER_FAILED ChErrorBzip2StreamEncoderFailed ChErrorType = 595 // BZIP2_STREAM_ENCODER_FAILED ChErrorIntersectOrExceptResultStructuresMismatch ChErrorType = 596 // INTERSECT_OR_EXCEPT_RESULT_STRUCTURES_MISMATCH ChErrorNoSuchErrorCode ChErrorType = 597 // NO_SUCH_ERROR_CODE ChErrorBackupAlreadyExists ChErrorType = 598 // BACKUP_ALREADY_EXISTS ChErrorBackupNotFound ChErrorType = 599 // BACKUP_NOT_FOUND ChErrorBackupVersionNotSupported ChErrorType = 600 // BACKUP_VERSION_NOT_SUPPORTED ChErrorBackupDamaged ChErrorType = 601 // BACKUP_DAMAGED ChErrorNoBaseBackup ChErrorType = 602 // NO_BASE_BACKUP ChErrorWrongBaseBackup ChErrorType = 603 // WRONG_BASE_BACKUP ChErrorBackupEntryAlreadyExists ChErrorType = 604 // BACKUP_ENTRY_ALREADY_EXISTS ChErrorBackupEntryNotFound ChErrorType = 605 // BACKUP_ENTRY_NOT_FOUND ChErrorBackupIsEmpty ChErrorType = 606 // BACKUP_IS_EMPTY ChErrorBackupElementDuplicate ChErrorType = 607 // BACKUP_ELEMENT_DUPLICATE ChErrorCannotRestoreTable ChErrorType = 608 // CANNOT_RESTORE_TABLE ChErrorFunctionAlreadyExists ChErrorType = 609 // FUNCTION_ALREADY_EXISTS ChErrorCannotDropFunction ChErrorType = 610 // CANNOT_DROP_FUNCTION ChErrorCannotCreateRecursiveFunction ChErrorType = 611 // CANNOT_CREATE_RECURSIVE_FUNCTION 
ChErrorObjectAlreadyStoredOnDisk ChErrorType = 612 // OBJECT_ALREADY_STORED_ON_DISK ChErrorObjectWasNotStoredOnDisk ChErrorType = 613 // OBJECT_WAS_NOT_STORED_ON_DISK ChErrorPostgresqlConnectionFailure ChErrorType = 614 // POSTGRESQL_CONNECTION_FAILURE ChErrorCannotAdvise ChErrorType = 615 // CANNOT_ADVISE ChErrorUnknownReadMethod ChErrorType = 616 // UNKNOWN_READ_METHOD ChErrorLz4EncoderFailed ChErrorType = 617 // LZ4_ENCODER_FAILED ChErrorLz4DecoderFailed ChErrorType = 618 // LZ4_DECODER_FAILED ChErrorPostgresqlReplicationInternalError ChErrorType = 619 // POSTGRESQL_REPLICATION_INTERNAL_ERROR ChErrorQueryNotAllowed ChErrorType = 620 // QUERY_NOT_ALLOWED ChErrorCannotNormalizeString ChErrorType = 621 // CANNOT_NORMALIZE_STRING ChErrorCannotParseCapnProtoSchema ChErrorType = 622 // CANNOT_PARSE_CAPN_PROTO_SCHEMA ChErrorCapnProtoBadCast ChErrorType = 623 // CAPN_PROTO_BAD_CAST ChErrorBadFileType ChErrorType = 624 // BAD_FILE_TYPE ChErrorIoSetupError ChErrorType = 625 // IO_SETUP_ERROR ChErrorCannotSkipUnknownField ChErrorType = 626 // CANNOT_SKIP_UNKNOWN_FIELD ChErrorBackupEngineNotFound ChErrorType = 627 // BACKUP_ENGINE_NOT_FOUND ChErrorOffsetFetchWithoutOrderBy ChErrorType = 628 // OFFSET_FETCH_WITHOUT_ORDER_BY ChErrorHTTPRangeNotSatisfiable ChErrorType = 629 // HTTP_RANGE_NOT_SATISFIABLE ChErrorHaveDependentObjects ChErrorType = 630 // HAVE_DEPENDENT_OBJECTS ChErrorUnknownFileSize ChErrorType = 631 // UNKNOWN_FILE_SIZE ChErrorUnexpectedDataAfterParsedValue ChErrorType = 632 // UNEXPECTED_DATA_AFTER_PARSED_VALUE ChErrorQueryIsNotSupportedInWindowView ChErrorType = 633 // QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW ChErrorMongodbError ChErrorType = 634 // MONGODB_ERROR ChErrorCannotPoll ChErrorType = 635 // CANNOT_POLL ChErrorCannotExtractTableStructure ChErrorType = 636 // CANNOT_EXTRACT_TABLE_STRUCTURE ChErrorInvalidTableOverride ChErrorType = 637 // INVALID_TABLE_OVERRIDE ChErrorSnappyUncompressFailed ChErrorType = 638 // SNAPPY_UNCOMPRESS_FAILED 
ChErrorSnappyCompressFailed ChErrorType = 639 // SNAPPY_COMPRESS_FAILED ChErrorNoHivemetastore ChErrorType = 640 // NO_HIVEMETASTORE ChErrorCannotAppendToFile ChErrorType = 641 // CANNOT_APPEND_TO_FILE ChErrorCannotPackArchive ChErrorType = 642 // CANNOT_PACK_ARCHIVE ChErrorCannotUnpackArchive ChErrorType = 643 // CANNOT_UNPACK_ARCHIVE ChErrorKeeperException ChErrorType = 999 // KEEPER_EXCEPTION ChErrorPocoException ChErrorType = 1000 // POCO_EXCEPTION ChErrorStdException ChErrorType = 1001 // STD_EXCEPTION ChErrorUnknownException ChErrorType = 1002 // UNKNOWN_EXCEPTION ) ================================================ FILE: errors_test.go ================================================ package chconn import ( "context" "errors" "io" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestChErrorReadError(t *testing.T) { startValidReader := 14 tests := []struct { name string wantErr string numberValid int }{ { name: "ChError: read code", wantErr: "ChError: read code", numberValid: startValidReader, }, { name: "ChError: read name", wantErr: "ChError: read name", numberValid: startValidReader + 1, }, { name: "ChError: read message", wantErr: "ChError: read message", numberValid: startValidReader + 3, }, { name: "ChError: read StackTrace", wantErr: "ChError: read StackTrace", numberValid: startValidReader + 5, }, { name: "ChError: read hasNested", wantErr: "ChError: read hasNested", numberValid: startValidReader + 8, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: tt.numberValid, } } c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Exec(context.Background(), "SELECT * FROM invalid_table LIMIT 5;") require.Error(t, err) readErr, ok := err.(*readError) 
require.True(t, ok) require.Equal(t, readErr.msg, tt.wantErr) require.EqualError(t, readErr.Unwrap(), "timeout") assert.True(t, c.IsClosed()) }) } } func NewParseConfigError(conn, msg string, err error) error { return &parseConfigError{ connString: conn, msg: msg, err: err, } } func TestConfigError(t *testing.T) { tests := []struct { name string err error expectedMsg string }{ { name: "url with password", err: NewParseConfigError("clickhouse://foo:password@host", "msg", nil), expectedMsg: "cannot parse `clickhouse://foo:xxxxx@host`: msg", }, { name: "dsn with password unquoted", err: NewParseConfigError("host=host password=password user=user", "msg", nil), expectedMsg: "cannot parse `host=host password=xxxxx user=user`: msg", }, { name: "dsn with password quoted", err: NewParseConfigError("host=host password='pass word' user=user", "msg", nil), expectedMsg: "cannot parse `host=host password=xxxxx user=user`: msg", }, { name: "weird url", err: NewParseConfigError("clickhouse://foo::pasword@host:1:", "msg", nil), expectedMsg: "cannot parse `clickhouse://foo:xxxxx@host:1:`: msg", }, { name: "weird url with slash in password", err: NewParseConfigError("clickhouse://user:pass/word@host:5432/db_name", "msg", nil), expectedMsg: "cannot parse `clickhouse://user:xxxxxx@host:5432/db_name`: msg", }, { name: "url without password", err: NewParseConfigError("clickhouse://other@host/db", "msg", nil), expectedMsg: "cannot parse `clickhouse://other@host/db`: msg", }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() assert.EqualError(t, tt.err, tt.expectedMsg) }) } } ================================================ FILE: go.mod ================================================ module github.com/vahid-sohrabloo/chconn/v2 go 1.18 require ( github.com/go-faster/city v1.0.1 github.com/google/uuid v1.3.0 github.com/jackc/puddle/v2 v2.1.2 github.com/klauspost/compress v1.15.15 github.com/pierrec/lz4/v4 v4.1.17 github.com/stretchr/testify v1.8.1 ) 
require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) ================================================ FILE: go.sum ================================================ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/jackc/puddle/v2 v2.1.2 h1:0f7vaaXINONKTsxYDn4otOAiJanX/BMeAtY//BXqzlg= github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels= github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 h1:ZrnxWX62AgTKOSagEqxvb3ffipvEDX2pl7E1TdqLqIc= golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ================================================ FILE: helper_test.go ================================================ package chconn import ( "io" "time" ) type readErrorHelper struct { numberValid int err error r io.Reader count int } func (r *readErrorHelper) Read(p []byte) (int, error) { r.count++ if r.count > r.numberValid { return 0, r.err } return r.r.Read(p) } type writerErrorHelper struct { numberValid int err error w io.Writer count int } func (w *writerErrorHelper) Write(p []byte) (int, error) { w.count++ if w.count > w.numberValid { return 0, w.err } return w.w.Write(p) } type writerSlowHelper struct { w io.Writer sleep time.Duration } func (w *writerSlowHelper) Write(p []byte) (int, error) { time.Sleep(w.sleep) return w.w.Write(p) } ================================================ FILE: insert.go 
================================================ package chconn import ( "context" "github.com/vahid-sohrabloo/chconn/v2/column" ) // InsertStmt is a interface for insert stream statement type InsertStmt interface { // Write write a columns (a block of data) to the clickhouse server // after each write you need to reset the columns. it will not reset automatically Write(ctx context.Context, columns ...column.ColumnBasic) error // Flush flush the data to the clickhouse server and close the statement Flush(ctx context.Context) error // Close close the statement and release the connection // close will be called automatically after Flush Close() } type insertStmt struct { block *block conn *conn query string queryOptions *QueryOptions clientInfo *ClientInfo hasError bool closed bool finishInsert bool } func (s *insertStmt) Flush(ctx context.Context) error { defer s.Close() s.finishInsert = true if ctx != context.Background() { select { case <-ctx.Done(): return newContextAlreadyDoneError(ctx) default: } s.conn.contextWatcher.Watch(ctx) defer s.conn.contextWatcher.Unwatch() } err := s.conn.sendEmptyBlock() if err != nil { s.hasError = true return &InsertError{ err: err, remoteAddr: s.conn.RawConn().RemoteAddr(), } } var res interface{} for { res, err = s.conn.receiveAndProcessData(emptyOnProgress) if err != nil { s.hasError = true return err } if res == nil { return nil } if profile, ok := res.(*Profile); ok { if s.queryOptions.OnProfile != nil { s.queryOptions.OnProfile(profile) } continue } if progress, ok := res.(*Progress); ok { if s.queryOptions.OnProgress != nil { s.queryOptions.OnProgress(progress) } continue } if profileEvent, ok := res.(*ProfileEvent); ok { if s.queryOptions.OnProfileEvent != nil { s.queryOptions.OnProfileEvent(profileEvent) } continue } s.hasError = true return &unexpectedPacket{expected: "serverData", actual: res} } } // Close close the statement and release the connection // If Next is called and returns false and there are no further 
blocks, // the Rows are closed automatically and it will suffice to check the result of Err. // Close is idempotent and does not affect the result of Err. func (s *insertStmt) Close() { s.conn.reader.SetCompress(false) if !s.closed { s.closed = true s.conn.contextWatcher.Unwatch() s.conn.unlock() if s.hasError || !s.finishInsert { s.conn.Close() } } } func (s *insertStmt) Write(ctx context.Context, columns ...column.ColumnBasic) error { if int(s.block.NumColumns) != len(columns) { return &InsertError{ err: &ColumnNumberWriteError{ WriteColumn: len(columns), NeedColumn: s.block.NumColumns, }, remoteAddr: s.conn.RawConn().RemoteAddr(), } } var err error if len(columns[0].Name()) != 0 { columns, err = s.block.reorderColumns(columns) if err != nil { s.hasError = true return &InsertError{ err: err, remoteAddr: s.conn.RawConn().RemoteAddr(), } } } for i, col := range columns { col.SetType(s.block.Columns[i].ChType) if errValidate := col.Validate(); errValidate != nil { s.hasError = true return errValidate } } if ctx != context.Background() { select { case <-ctx.Done(): return newContextAlreadyDoneError(ctx) default: } s.conn.contextWatcher.Watch(ctx) defer s.conn.contextWatcher.Unwatch() } err = s.conn.sendData(s.block, columns[0].NumRow()) if err != nil { s.hasError = true return &InsertError{ err: err, remoteAddr: s.conn.RawConn().RemoteAddr(), } } err = s.block.writeColumnsBuffer(s.conn, columns...) if err != nil { s.hasError = true return &InsertError{ err: err, remoteAddr: s.conn.RawConn().RemoteAddr(), } } for _, col := range columns { col.Reset() } return nil } // Insert send query for insert and commit columns func (ch *conn) Insert(ctx context.Context, query string, columns ...column.ColumnBasic) error { return ch.InsertWithOption(ctx, query, nil, columns...) 
} // Insert send query for insert and prepare insert stmt with setting option func (ch *conn) InsertWithOption( ctx context.Context, query string, queryOptions *QueryOptions, columns ...column.ColumnBasic) error { stmt, err := ch.InsertStreamWithOption(ctx, query, queryOptions) if err != nil { return err } if stmt == nil { ch.reader.SetCompress(false) ch.contextWatcher.Unwatch() ch.unlock() return nil } defer stmt.Close() err = stmt.Write(ctx, columns...) if err != nil { return err } err = stmt.Flush(ctx) if err != nil { return err } for _, col := range columns { col.Reset() } return nil } func (ch *conn) InsertStream(ctx context.Context, query string) (InsertStmt, error) { return ch.InsertStreamWithOption(ctx, query, nil) } // Insert send query for insert and prepare insert stmt with setting option func (ch *conn) InsertStreamWithOption( ctx context.Context, query string, queryOptions *QueryOptions) (InsertStmt, error) { err := ch.lock() if err != nil { return nil, err } var hasError bool defer func() { if hasError { ch.Close() } }() if ctx != context.Background() { select { case <-ctx.Done(): return nil, newContextAlreadyDoneError(ctx) default: } ch.contextWatcher.Watch(ctx) defer ch.contextWatcher.Unwatch() } if queryOptions == nil { queryOptions = emptyQueryOptions } err = ch.sendQueryWithOption(query, queryOptions.QueryID, queryOptions.Settings, queryOptions.Parameters) if err != nil { hasError = true return nil, preferContextOverNetTimeoutError(ctx, err) } var blockData *block for { var res interface{} res, err = ch.receiveAndProcessData(emptyOnProgress) if err != nil { hasError = true return nil, preferContextOverNetTimeoutError(ctx, err) } if b, ok := res.(*block); ok { blockData = b break } if profile, ok := res.(*Profile); ok { if queryOptions.OnProfile != nil { queryOptions.OnProfile(profile) } continue } if progress, ok := res.(*Progress); ok { if queryOptions.OnProgress != nil { queryOptions.OnProgress(progress) } continue } if profileEvent, ok := 
res.(*ProfileEvent); ok { if queryOptions.OnProfileEvent != nil { queryOptions.OnProfileEvent(profileEvent) } continue } if res == nil { return nil, nil } hasError = true return nil, &unexpectedPacket{expected: "serverData", actual: res} } err = blockData.readColumns(ch) if err != nil { hasError = true return nil, preferContextOverNetTimeoutError(ctx, err) } s := &insertStmt{ conn: ch, query: query, block: blockData, queryOptions: queryOptions, clientInfo: nil, } return s, nil } ================================================ FILE: insert_test.go ================================================ package chconn import ( "context" "errors" "io" "os" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2/column" ) func TestInsertError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) // test lock error c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) c.(*conn).status = connStatusUninitialized err = c.Insert(context.Background(), "insert into system.numbers VALUES") require.EqualError(t, err, "conn uninitialized") require.EqualError(t, c.(*conn).lock(), "conn uninitialized") c.Close() // test write block info error config.WriterFunc = func(w io.Writer) io.Writer { return &writerErrorHelper{ err: errors.New("timeout"), w: w, numberValid: 1, } } c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Insert(context.Background(), "insert into system.numbers VALUES") require.EqualError(t, err, "write block info (timeout)") assert.True(t, c.IsClosed()) // test insert server error config.WriterFunc = nil c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Insert(context.Background(), "insert into system.numbers VALUES") require.EqualError(t, err, " DB::Exception (48): Method write is not supported by storage 
SystemNumbers") assert.True(t, c.IsClosed()) // test read column error c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_error`) require.NoError(t, err) err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_error ( int8 Int8 ) Engine=Memory`) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: 27, } } c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_error ( int8 ) VALUES`) require.EqualError(t, err, "block: read column name (timeout)") assert.True(t, c.IsClosed()) config, err = ParseConfig(connString) require.NoError(t, err) c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_error ( int8 ) VALUES`) require.EqualError(t, errors.Unwrap(err), "write 0 column(s) but insert query needs 1 column(s)") assert.True(t, c.IsClosed()) } func TestInsertCtxError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) cancel() err = c.Insert(ctx, `INSERT INTO clickhouse_test_insert_error ( int8 ) VALUES`) require.EqualError(t, err, "timeout: context already done: context canceled") assert.False(t, c.IsClosed()) config, err = ParseConfig(connString) require.NoError(t, err) config.WriterFunc = func(w io.Writer) io.Writer { return &writerSlowHelper{ w: w, sleep: time.Second, } } c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() err = 
c.Insert(ctx, `INSERT INTO clickhouse_test_insert_error ( int8 ) VALUES`) require.EqualError(t, errors.Unwrap(err), "context deadline exceeded") assert.True(t, c.IsClosed()) } func TestInsertMoreColumnsError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_error_column`) require.NoError(t, err) err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_error_column ( int8 Int8 ) Engine=Memory`) require.NoError(t, err) err = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_error_column ( int8 ) VALUES`, column.New[int8](), column.New[int8]()) remoteAddr := c.RawConn().RemoteAddr().String() require.EqualError(t, err, "failed to insert data: remoteAddr: "+remoteAddr+" - write 2 column(s) but insert query needs 1 column(s)") assert.True(t, c.IsClosed()) } func TestInsertMoreRowsError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_error_rows`) require.NoError(t, err) err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_error_rows ( int8 Int8, int16 Int16 ) Engine=Memory`) require.NoError(t, err) col1 := column.New[int8]() col2 := column.New[int16]() col1.Append(1) col1.Append(2) col2.Append(2) err = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_error_rows ( int8, int16 ) VALUES`, col1, col2) remoteAddr := c.RawConn().RemoteAddr().String() require.EqualError(t, err, "failed to insert data: remoteAddr: "+remoteAddr+" - \"int8\" has 2 rows but \"int16\" column has 1 rows") assert.True(t, c.IsClosed()) } 
// TestInsert does a round trip: insert three integer columns (deliberately
// passed in the wrong order to exercise sorting by column name) and read
// them back, comparing against the appended values.
func TestInsert(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := Connect(context.Background(), connString)
	require.NoError(t, err)
	err = conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_insert`)
	require.NoError(t, err)
	err = conn.Exec(context.Background(), `CREATE TABLE test_insert ( int8 Int8, int16 Int16, int32 Int32 ) Engine=Memory`)
	require.NoError(t, err)
	col8 := column.New[int8]()
	col8.SetName([]byte("int8"))
	col16 := column.New[int16]()
	col16.SetName([]byte("int16"))
	col32 := column.New[int32]()
	col32.SetName([]byte("int32"))
	var col8Insert []int8
	var col16Insert []int16
	var col32Insert []int32
	rows := 10
	for i := 0; i < rows; i++ {
		col8.Append(int8(i))
		col16.Append(int16(i))
		col32.Append(int32(i))
		col8Insert = append(col8Insert, int8(i))
		col16Insert = append(col16Insert, int16(i))
		col32Insert = append(col32Insert, int32(i))
	}
	// send in invalid order to test sorted columns by name
	err = conn.InsertWithOption(context.Background(), `INSERT INTO test_insert (int8,int16,int32) VALUES`,
		&QueryOptions{
			OnProgress:     func(progress *Progress) {},
			OnProfileEvent: func(pe *ProfileEvent) {},
			OnProfile:      func(p *Profile) {},
		},
		col32, col16, col8)
	require.NoError(t, err)
	// example read al
	col8Read := column.New[int8]()
	col16Read := column.New[int16]()
	col32Read := column.New[int32]()
	selectStmt, err := conn.Select(context.Background(), `SELECT int8,int16,int32 FROM test_insert`,
		col8Read, col16Read, col32Read)
	require.NoError(t, err)
	require.True(t, conn.IsBusy())
	var col8Data []int8
	var col16Data []int16
	var col32Data []int32
	for selectStmt.Next() {
		col8Data = col8Read.Read(col8Data)
		col16Data = col16Read.Read(col16Data)
		col32Data = col32Read.Read(col32Data)
	}
	require.NoError(t, selectStmt.Err())
	assert.Equal(t, col8Insert, col8Data)
	assert.Equal(t, col16Insert, col16Data)
	assert.Equal(t, col32Insert, col32Data)
	selectStmt.Close()
	conn.RawConn().Close()
}

// TestInsertNotFoundColumn verifies that a column whose SetName does not
// match any column of the insert query is rejected with a helpful message.
func TestInsertNotFoundColumn(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := Connect(context.Background(), connString)
	require.NoError(t, err)
	err = conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_insert_not_found_column`)
	require.NoError(t, err)
	err = conn.Exec(context.Background(), `CREATE TABLE test_insert_not_found_column ( int8 Int8 ) Engine=Memory`)
	require.NoError(t, err)
	col8 := column.New[int8]()
	col8.SetName([]byte("not_found"))
	rows := 10
	for i := 0; i < rows; i++ {
		col8.Append(int8(i))
	}
	// send in invalid order to test sorted columns by name
	err = conn.Insert(context.Background(), `INSERT INTO test_insert_not_found_column (int8) VALUES`,
		col8)
	require.Equal(t,
		errors.Unwrap(err).Error(),
		"the input columns do not contain column \"int8\". The column name must be set using the `SetName` method")
	conn.RawConn().Close()
}

// TestCompressInsert round-trips 1000 rows through each supported
// compression method (none, lz4, zstd). Note int8(i) wraps for i > 127;
// the wrapped values are compared as-is on read-back.
func TestCompressInsert(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name         string
		compressType CompressMethod
	}{
		{
			name:         "none",
			compressType: CompressNone,
		},
		{
			name:         "lz4",
			compressType: CompressLZ4,
		},
		{
			name:         "zstd",
			compressType: CompressZSTD,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
			parseConfig, err := ParseConfig(connString)
			require.NoError(t, err)
			parseConfig.Compress = tt.compressType
			conn, err := ConnectConfig(context.Background(), parseConfig)
			require.NoError(t, err)
			err = conn.Exec(context.Background(), `DROP TABLE IF EXISTS test_insert_compress`)
			require.NoError(t, err)
			err = conn.Exec(context.Background(), `CREATE TABLE test_insert_compress ( int8 Int8 ) Engine=Memory`)
			require.NoError(t, err)
			col := column.New[int8]()
			var colInsert []int8
			rows := 1000
			for i := 0; i < rows; i++ {
				val := int8(i)
				col.Append(val)
				colInsert = append(colInsert, val)
			}
			err = conn.Insert(context.Background(), `INSERT INTO test_insert_compress (int8) VALUES`, col)
			require.NoError(t, err)
			// example read all
			colRead := column.New[int8]()
			selectStmt, err := conn.Select(context.Background(), `SELECT int8 FROM test_insert_compress`, colRead)
			require.NoError(t, err)
			require.True(t, conn.IsBusy())
			var colData []int8
			for selectStmt.Next() {
				colData = colRead.Read(colData)
			}
			assert.Equal(t, colInsert, colData)
			require.NoError(t, selectStmt.Err())
			selectStmt.Close()
			conn.RawConn().Close()
		})
	}
}

// TestInsertColumnError injects a write failure at successive write offsets
// (via writerErrorHelper) to hit each error path of the uncompressed
// block-write code, asserting the exact wrapped message.
func TestInsertColumnError(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := ParseConfig(connString)
	require.NoError(t, err)
	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error ( int8 Int8 ) Engine=Memory`)
	require.NoError(t, err)
	// the first 3 writes belong to the handshake/query; errors start after them
	startValidReader := 3
	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "write header",
			wantErr:     "block: write header block data for column int8 (timeout)",
			numberValid: startValidReader,
		},
		{
			name:        "write block data",
			wantErr:     "block: write block data for column int8 (timeout)",
			numberValid: startValidReader + 1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.WriterFunc = func(w io.Writer) io.Writer {
				return &writerErrorHelper{
					err:         errors.New("timeout"),
					w:           w,
					numberValid: tt.numberValid,
				}
			}
			c, err = ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.New[int8]()
			err = c.Insert(context.Background(),
				"insert into clickhouse_test_insert_column_error (int8) VALUES",
				col,
			)
			require.EqualError(t, errors.Unwrap(err), tt.wantErr)
			assert.True(t, c.IsClosed())
		})
	}
}

// TestInsertColumnErrorCompress is the same fault-injection exercise for the
// compressed (LZ4) write path.
func TestInsertColumnErrorCompress(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := ParseConfig(connString)
	config.Compress = CompressLZ4
	require.NoError(t, err)
	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err =
c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_compress`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_compress ( int8 Int8 ) Engine=Memory`)
	require.NoError(t, err)
	// the first 3 writes belong to the handshake/query; errors start after them
	startValidReader := 3
	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "write header",
			wantErr:     "write block info (timeout)",
			numberValid: startValidReader,
		},
		{
			name:        "flush block info",
			wantErr:     "flush block info (timeout)",
			numberValid: startValidReader + 1,
		},
		{
			name:        "flush data",
			wantErr:     "block: flush block data (timeout)",
			numberValid: startValidReader + 2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.WriterFunc = func(w io.Writer) io.Writer {
				return &writerErrorHelper{
					err:         errors.New("timeout"),
					w:           w,
					numberValid: tt.numberValid,
				}
			}
			c, err = ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.New[int8]()
			err = c.Insert(context.Background(),
				"insert into clickhouse_test_insert_column_error_compress (int8) VALUES",
				col,
			)
			require.EqualError(t, errors.Unwrap(err), tt.wantErr)
			assert.True(t, c.IsClosed())
		})
	}
}

// TestInsertColumnDataError injects write failures at each stage of
// serializing a LowCardinality(String) column (stype, dictionary, keys,
// indices, trailing block info) and asserts the exact wrapped message.
func TestInsertColumnDataError(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := ParseConfig(connString)
	require.NoError(t, err)
	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_lc`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_lc ( col LowCardinality(String) ) Engine=Memory`)
	require.NoError(t, err)
	// the first 3 writes belong to the handshake/query; errors start after them
	startValidReader := 3
	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "write header",
			wantErr:     "block: write header block data for column col (timeout)",
			numberValid: startValidReader,
		},
		{
			name:        "write stype",
			wantErr:     "block: write block data for column col (error writing stype: timeout)",
			numberValid: startValidReader + 1,
		},
		{
			name:        "write dictionarySize",
			wantErr:     "block: write block data for column col (error writing dictionarySize: timeout)",
			numberValid: startValidReader + 2,
		},
		{
			name:        "write dictionary",
			wantErr:     "block: write block data for column col (error writing dictionary: timeout)",
			numberValid: startValidReader + 3,
		},
		{
			name:        "write keys len",
			wantErr:     "block: write block data for column col (error writing keys len: timeout)",
			numberValid: startValidReader + 4,
		},
		{
			name:        "write indices",
			wantErr:     "block: write block data for column col (error writing indices: timeout)",
			numberValid: startValidReader + 5,
		},
		{
			name:        "write block info",
			wantErr:     "write block info (timeout)",
			numberValid: startValidReader + 6,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config.WriterFunc = func(w io.Writer) io.Writer {
				return &writerErrorHelper{
					err:         errors.New("timeout"),
					w:           w,
					numberValid: tt.numberValid,
				}
			}
			c, err = ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.NewString().LowCardinality()
			col.Append("test")
			err = c.Insert(context.Background(),
				"insert into clickhouse_test_insert_column_error_lc (col) VALUES",
				col,
			)
			require.EqualError(t, errors.Unwrap(err), tt.wantErr)
			assert.True(t, c.IsClosed())
		})
	}
}

// TestInsertColumnDataErrorValidate verifies that a plain String column is
// rejected up front when the table column is LowCardinality(String).
func TestInsertColumnDataErrorValidate(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := ParseConfig(connString)
	require.NoError(t, err)
	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_column_error_l_validate`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_column_error_l_validate ( col LowCardinality(String) ) Engine=Memory`)
	require.NoError(t, err)
	c, err = ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	col := column.NewString()
	col.Append("test")
	err = c.Insert(context.Background(),
		"insert into clickhouse_test_insert_column_error_l_validate (col) VALUES",
		col,
	)
	require.EqualError(t, err, "mismatch column type: ClickHouse Type: LowCardinality(String), column types: String")
	assert.True(t, c.IsClosed())
}

// TestInsertSelectStmt checks an INSERT ... SELECT statement (no client-side
// data blocks) followed by reading the populated table back.
func TestInsertSelectStmt(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := ParseConfig(connString)
	require.NoError(t, err)
	// test read column error
	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `DROP TABLE IF EXISTS clickhouse_test_insert_select`)
	require.NoError(t, err)
	err = c.Exec(context.Background(), `CREATE TABLE clickhouse_test_insert_select ( number Int64 ) Engine=Memory`)
	require.NoError(t, err)
	err = c.Insert(context.Background(), `INSERT INTO clickhouse_test_insert_select (
			number
		) select number from system.numbers limit 10`)
	require.NoError(t, err)
	colRead := column.New[int64]()
	selectStmt, err := c.Select(context.Background(), `SELECT number FROM clickhouse_test_insert_select`, colRead)
	require.NoError(t, err)
	var colData []int64
	for selectStmt.Next() {
		colData = colRead.Read(colData)
	}
	assert.Equal(t, []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, colData)
	require.NoError(t, selectStmt.Err())
}

================================================
FILE: internal/ctxwatch/context_watcher.go
================================================
package ctxwatch

import (
	"context"
	"sync"
)

// ContextWatcher watches a context and performs an action when the context is canceled. It can watch one context at a
// time.
type ContextWatcher struct {
	onCancel             func() // invoked (from the watcher goroutine) when the watched ctx is canceled
	onUnwatchAfterCancel func() // invoked by Unwatch if onCancel already ran

	unwatchChan chan struct{} // unbuffered; used both to stop the watcher and to join it

	lock              sync.Mutex // guards the two fields below
	watchInProgress   bool
	onCancelWasCalled bool
}

// NewContextWatcher returns a ContextWatcher. onCancel will be called when a watched context is canceled.
// OnUnwatchAfterCancel will be called when Unwatch is called and the watched context had already been canceled and
// onCancel called.
func NewContextWatcher(onCancel, onUnwatchAfterCancel func()) *ContextWatcher {
	cw := &ContextWatcher{
		onCancel:             onCancel,
		onUnwatchAfterCancel: onUnwatchAfterCancel,
		unwatchChan:          make(chan struct{}),
	}
	return cw
}

// Watch starts watching ctx. If ctx is canceled then the onCancel function passed to NewContextWatcher will be called.
func (cw *ContextWatcher) Watch(ctx context.Context) {
	cw.lock.Lock()
	defer cw.lock.Unlock()
	if cw.watchInProgress {
		panic("Watch already in progress")
	}
	cw.onCancelWasCalled = false
	// A nil Done channel means ctx can never be canceled (e.g. context.Background());
	// spawning a goroutine would be pointless, so only watch cancellable contexts.
	if ctx.Done() != nil {
		cw.watchInProgress = true
		go func() {
			select {
			case <-ctx.Done():
				cw.onCancel()
				cw.onCancelWasCalled = true
				// Block until Unwatch sends: the unbuffered channel hand-off both
				// joins this goroutine and publishes onCancelWasCalled to Unwatch.
				<-cw.unwatchChan
			case <-cw.unwatchChan:
				// Unwatch won the race; nothing was canceled.
			}
		}()
	} else {
		cw.watchInProgress = false
	}
}

// Unwatch stops watching the previously watched context. If the onCancel function passed to NewContextWatcher was
// called then onUnwatchAfterCancel will also be called.
func (cw *ContextWatcher) Unwatch() {
	cw.lock.Lock()
	defer cw.lock.Unlock()
	if cw.watchInProgress {
		// The send rendezvous with the watcher goroutine also makes its write of
		// onCancelWasCalled visible here (channel happens-before), so the read
		// below is race-free.
		cw.unwatchChan <- struct{}{}
		if cw.onCancelWasCalled {
			cw.onUnwatchAfterCancel()
		}
		cw.watchInProgress = false
	}
}

================================================
FILE: internal/ctxwatch/context_watcher_test.go
================================================
package ctxwatch_test

import (
	"context"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/vahid-sohrabloo/chconn/v2/internal/ctxwatch"
)

// TestContextWatcherContextCancelled: cancel fires onCancel, and the
// subsequent Unwatch fires the cleanup callback.
func TestContextWatcherContextCancelled(t *testing.T) {
	canceledChan := make(chan struct{})
	cleanupCalled := false
	cw := ctxwatch.NewContextWatcher(func() {
		canceledChan <- struct{}{}
	}, func() {
		cleanupCalled = true
	})
	ctx, cancel := context.WithCancel(context.Background())
	cw.Watch(ctx)
	cancel()
	select {
	case <-canceledChan:
	case <-time.NewTimer(time.Second).C:
		t.Fatal("Timed out waiting for cancel func to be called")
	}
	cw.Unwatch()
	require.True(t, cleanupCalled, "Cleanup func was not called")
}

// TestContextWatcherUnwatchdBeforeContextCancelled: neither callback may run
// if Unwatch happens before the cancel.
func TestContextWatcherUnwatchdBeforeContextCancelled(t *testing.T) {
	cw := ctxwatch.NewContextWatcher(func() {
		t.Error("cancel func should not have been called")
	}, func() {
		t.Error("cleanup func should not have been called")
	})
	ctx, cancel := context.WithCancel(context.Background())
	cw.Watch(ctx)
	cw.Unwatch()
	cancel()
}

// TestContextWatcherMultipleWatchPanics: a second Watch without an Unwatch
// must panic.
func TestContextWatcherMultipleWatchPanics(t *testing.T) {
	cw := ctxwatch.NewContextWatcher(func() {}, func() {})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cw.Watch(ctx)
	ctx2, cancel2 := context.WithCancel(context.Background())
	defer cancel2()
	require.Panics(t, func() { cw.Watch(ctx2) }, "Expected panic when Watch called multiple times")
}

// TestContextWatcherUnwatchWhenNotWatchingIsSafe: Unwatch is a no-op when
// nothing is being watched.
func TestContextWatcherUnwatchWhenNotWatchingIsSafe(t *testing.T) {
	cw := ctxwatch.NewContextWatcher(func() {}, func() {})
	cw.Unwatch() // unwatch when not / never watching
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cw.Watch(ctx)
	cw.Unwatch()
	cw.Unwatch() // double unwatch
}

// TestContextWatcherUnwatchIsConcurrencySafe: concurrent Unwatch calls must
// not race or deadlock (relies on -race to catch violations).
func TestContextWatcherUnwatchIsConcurrencySafe(t *testing.T) {
	cw := ctxwatch.NewContextWatcher(func() {}, func() {})
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	cw.Watch(ctx)
	go cw.Unwatch()
	go cw.Unwatch()
	<-ctx.Done()
}

// TestContextWatcherStress races Watch/cancel/Unwatch many times and checks
// the callback invariants.
//
//nolint:govet
func TestContextWatcherStress(t *testing.T) {
	var cancelFuncCalls int64
	var cleanupFuncCalls int64
	cw := ctxwatch.NewContextWatcher(func() {
		atomic.AddInt64(&cancelFuncCalls, 1)
	}, func() {
		atomic.AddInt64(&cleanupFuncCalls, 1)
	})
	cycleCount := 100000
	for i := 0; i < cycleCount; i++ {
		//nolint:govet
		ctx, cancel := context.WithCancel(context.Background())
		cw.Watch(ctx)
		if i%2 == 0 {
			cancel()
		}
		// Without time.Sleep, cw.Unwatch will almost always run before the cancel func which means cancel will never happen.
		// This gives us a better mix.
		if i%3 == 0 {
			time.Sleep(time.Nanosecond)
		}
		cw.Unwatch()
		if i%2 == 1 {
			cancel()
		}
	}
	actualCancelFuncCalls := atomic.LoadInt64(&cancelFuncCalls)
	actualCleanupFuncCalls := atomic.LoadInt64(&cleanupFuncCalls)
	if actualCancelFuncCalls == 0 {
		t.Fatal("actualCancelFuncCalls == 0")
	}
	maxCancelFuncCalls := int64(cycleCount) / 2
	if actualCancelFuncCalls > maxCancelFuncCalls {
		// NOTE(review): the arguments look swapped relative to the format string
		// ("no more than %d but was %d" is given (actual, max)) — verify intent.
		t.Errorf("cancel func calls should be no more than %d but was %d", actualCancelFuncCalls, maxCancelFuncCalls)
	}
	if actualCancelFuncCalls != actualCleanupFuncCalls {
		t.Errorf("cancel func calls (%d) should be equal to cleanup func calls (%d) but was not", actualCancelFuncCalls, actualCleanupFuncCalls)
	}
}

func BenchmarkContextWatcherUncancellable(b *testing.B) {
	cw := ctxwatch.NewContextWatcher(func() {}, func() {})
	for i := 0; i < b.N; i++ {
		cw.Watch(context.Background())
		cw.Unwatch()
	}
}

func BenchmarkContextWatcherCancelled(b *testing.B) {
	cw := ctxwatch.NewContextWatcher(func() {}, func() {})
	for i := 0; i < b.N; i++ {
		ctx, cancel := context.WithCancel(context.Background())
		cw.Watch(ctx)
		cancel()
		cw.Unwatch()
	}
}

func BenchmarkContextWatcherCancellable(b *testing.B) {
	cw := ctxwatch.NewContextWatcher(func() {}, func() {})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for i := 0; i < b.N; i++ {
		cw.Watch(ctx)
		cw.Unwatch()
	}
}

================================================
FILE: internal/helper/features.go
================================================
package helper

// Minimum ClickHouse server/protocol revisions at which each wire-protocol
// feature becomes available. Values mirror the ClickHouse native protocol.
const (
	DbmsMinRevisionWithClientInfo                   = 54032
	DbmsMinRevisionWithServerTimezone               = 54058
	DbmsMinRevisionWithQuotaKeyInClientInfo         = 54060
	DbmsMinRevisionWithServerDisplayName            = 54372
	DbmsMinRevisionWithVersionPatch                 = 54401
	DbmsMinRevisionWithClientWriteInfo              = 54420
	DbmsMinRevisionWithSettingsSerializedAsStrings  = 54429
	DbmsMinRevisionWithInterServerSecret            = 54441
	DbmsMinRevisionWithOpenTelemetry                = 54442
	DbmsMinProtocolVersionWithDistributedDepth      = 54448
	DbmsMinProtocolVersionWithInitialQueryStartTime = 54449
	DbmsMinProtocolVersionWithParallelReplicas      = 54453
	DbmsMinProtocolWithCustomSerialization          = 54454
	DbmsMinProtocolWithQuotaKey                     = 54458
	DbmsMinProtocolWithParameters                   = 54459
	DbmsMinProtocolWithServerQueryTimeInProgress    = 54460
)

================================================
FILE: internal/helper/strs.go
================================================
package helper

// String fragments of ClickHouse type names, plus their precomputed lengths,
// used by the validators in validator.go for cheap prefix checks.
const (
	TupleStr    = "Tuple("
	LenTupleStr = len(TupleStr)

	PointStr = "Point"
)

// PointMainTypeStr is the underlying type a Point column is read as.
var PointMainTypeStr = []byte("Tuple(Float64, Float64)")

const PolygonStr = "Polygon"

var PolygonMainTypeStr = []byte("Array(Array(Tuple(Float64, Float64)))")

const MultiPolygonStr = "MultiPolygon"

var MultiPolygonMainTypeStr = []byte("Array(Array(Array(Tuple(Float64, Float64))))")

const (
	ArrayStr     = "Array("
	LenArrayStr  = len(ArrayStr)
	ArrayTypeStr = "Array()"

	NestedStr         = "Nested("
	LenNestedStr      = len(NestedStr)
	NestedToArrayTube = "Array(Nested("

	RingStr = "Ring"
)

var RingMainTypeStr = []byte("Array(Tuple(Float64, Float64))")

const (
	Enum8Str    = "Enum8("
	Enum8StrLen = len(Enum8Str)

	Enum16Str    = "Enum16("
	Enum16StrLen = len(Enum16Str)

	DateTimeStr =
"DateTime("
	DateTimeStrLen = len(DateTimeStr)

	DateTime64Str    = "DateTime64("
	DateTime64StrLen = len(DateTime64Str)

	DecimalStr    = "Decimal("
	DecimalStrLen = len(DecimalStr)

	FixedStringStr    = "FixedString("
	FixedStringStrLen = len(FixedStringStr)

	SimpleAggregateStr    = "SimpleAggregateFunction("
	SimpleAggregateStrLen = len(SimpleAggregateStr)
)

const (
	LowCardinalityStr     = "LowCardinality("
	LenLowCardinalityStr  = len(LowCardinalityStr)
	LowCardinalityTypeStr = "LowCardinality()"

	LowCardinalityNullableStr     = "LowCardinality(Nullable("
	LenLowCardinalityNullableStr  = len(LowCardinalityNullableStr)
	LowCardinalityNullableTypeStr = "LowCardinality(Nullable())"
)

const (
	MapStr     = "Map("
	LenMapStr  = len(MapStr)
	MapTypeStr = "Map(, )"
)

const (
	NullableStr     = "Nullable("
	LenNullableStr  = len(NullableStr)
	NullableTypeStr = "Nullable()"
)

const (
	StringStr = "String"
)

================================================
FILE: internal/helper/validator.go
================================================
package helper

import (
	"bytes"
	"fmt"
	"strconv"
)

// IsEnum8 reports whether chType starts with "Enum8(".
func IsEnum8(chType []byte) bool {
	return len(chType) > Enum8StrLen && (string(chType[:Enum8StrLen]) == Enum8Str)
}

// ExtractEnum parses an enum body of the form `'a' = 1, 'b' = 2` into
// id->name and name->id maps.
func ExtractEnum(data []byte) (intToStringMap map[int16]string, stringToIntMap map[string]int16, err error) {
	enums := bytes.Split(data, []byte(", "))
	intToStringMap = make(map[int16]string)
	stringToIntMap = make(map[string]int16)
	for _, enum := range enums {
		parts := bytes.SplitN(enum, []byte(" = "), 2)
		if len(parts) != 2 {
			return nil, nil, fmt.Errorf("invalid enum: %s", enum)
		}
		// NOTE(review): bitSize 8 rejects ids outside int8 range even though the
		// maps are int16 — Enum16 values > 127 would fail here. Verify callers
		// only use this for Enum8, or widen the bitSize.
		id, err := strconv.ParseInt(string(parts[1]), 10, 8)
		if err != nil {
			return nil, nil, fmt.Errorf("invalid enum id: %s", parts[1])
		}
		// strip the surrounding single quotes from the name
		val := string(parts[0][1 : len(parts[0])-1])
		intToStringMap[int16(id)] = val
		stringToIntMap[val] = int16(id)
	}
	return intToStringMap, stringToIntMap, nil
}

// IsEnum16 reports whether chType starts with "Enum16(".
func IsEnum16(chType []byte) bool {
	return len(chType) > Enum16StrLen && (string(chType[:Enum16StrLen]) == Enum16Str)
}

// IsDateTimeWithParam reports whether chType is a parameterized DateTime(...).
func IsDateTimeWithParam(chType []byte) bool {
	return len(chType) > DateTimeStrLen && (string(chType[:DateTimeStrLen]) == DateTimeStr)
}

// IsDateTime64 reports whether chType starts with "DateTime64(".
func IsDateTime64(chType []byte) bool {
	return len(chType) > DateTime64StrLen && (string(chType[:DateTime64StrLen]) == DateTime64Str)
}

// IsFixedString reports whether chType starts with "FixedString(".
func IsFixedString(chType []byte) bool {
	return len(chType) > FixedStringStrLen && (string(chType[:FixedStringStrLen]) == FixedStringStr)
}

// IsDecimal reports whether chType starts with "Decimal(".
func IsDecimal(chType []byte) bool {
	return len(chType) > DecimalStrLen && (string(chType[:DecimalStrLen]) == DecimalStr)
}

// IsRing reports whether chType is exactly "Ring".
func IsRing(chType []byte) bool {
	return string(chType) == RingStr
}

// IsMultiPolygon reports whether chType is exactly "MultiPolygon".
func IsMultiPolygon(chType []byte) bool {
	return string(chType) == MultiPolygonStr
}

// IsNested reports whether chType starts with "Nested(".
func IsNested(chType []byte) bool {
	return len(chType) > LenNestedStr && string(chType[:LenNestedStr]) == NestedStr
}

// NestedToArrayType rewrites "Nested(...)" as "Array(Tuple(...))",
// which is how Nested columns are represented on the wire.
func NestedToArrayType(chType []byte) []byte {
	if IsNested(chType) {
		newChType := make([]byte, 0, len(chType)-LenNestedStr+LenArrayStr+LenTupleStr+1)
		newChType = append(newChType, "Array(Tuple("...)
		newChType = append(newChType, chType[LenNestedStr:]...)
		// close the extra Tuple( — the original trailing ')' closes Array(
		newChType = append(newChType, ')')
		return newChType
	}
	return chType
}

// IsArray reports whether chType starts with "Array(".
func IsArray(chType []byte) bool {
	return len(chType) > LenArrayStr && string(chType[:LenArrayStr]) == ArrayStr
}

// IsPolygon reports whether chType is exactly "Polygon".
func IsPolygon(chType []byte) bool {
	return string(chType) == PolygonStr
}

// IsString reports whether chType is exactly "String".
func IsString(chType []byte) bool {
	return string(chType) == StringStr
}

// IsLowCardinality reports whether chType starts with "LowCardinality(".
func IsLowCardinality(chType []byte) bool {
	return len(chType) > LenLowCardinalityStr && string(chType[:LenLowCardinalityStr]) == LowCardinalityStr
}

// IsNullableLowCardinality reports whether chType starts with
// "LowCardinality(Nullable(".
func IsNullableLowCardinality(chType []byte) bool {
	return len(chType) > LenLowCardinalityNullableStr &&
		string(chType[:LenLowCardinalityNullableStr]) == LowCardinalityNullableStr
}

// IsMap reports whether chType starts with "Map(".
func IsMap(chType []byte) bool {
	return len(chType) > LenMapStr && string(chType[:LenMapStr]) == MapStr
}

// IsNullable reports whether chType starts with "Nullable(".
func IsNullable(chType []byte) bool {
	return len(chType) > LenNullableStr && string(chType[:LenNullableStr]) == NullableStr
}

// IsPoint reports whether chType is exactly "Point".
func IsPoint(chType []byte) bool {
	return string(chType) == PointStr
}

// IsTuple reports whether chType starts with "Tuple(".
func IsTuple(chType []byte) bool {
	return len(chType) > LenTupleStr && string(chType[:LenTupleStr]) == TupleStr
}

// ColumnData is one "name type" pair extracted from a parenthesized type list.
type ColumnData struct {
	ChType, Name []byte
}

// TypesInParentheses splits the interior of e.g. "a Int8, b Array(Int8)" into
// ColumnData entries. It tracks nesting depth so commas inside parentheses do
// not split, and skips over backtick-quoted identifiers (which may contain
// commas and parentheses).
func TypesInParentheses(b []byte) ([]ColumnData, error) {
	var columns []ColumnData
	var openFunc int
	var hasBacktick bool
	cur := 0
	for i, char := range b {
		if char == '`' {
			if !hasBacktick {
				hasBacktick = true
				continue
			}
			// a backslash-escaped backtick does not close the quoted name
			if b[i-1] != '\\' {
				hasBacktick = false
			}
			continue
		}
		if hasBacktick {
			continue
		}
		if char == ',' {
			if openFunc == 0 {
				colData, err := SplitNameType(b[cur:i])
				if err != nil {
					return nil, err
				}
				columns = append(columns, colData)
				// add 2 to skip the ', '
				cur = i + 2
			}
			continue
		}
		if char == '(' {
			openFunc++
			continue
		}
		if char == ')' {
			openFunc--
			continue
		}
	}
	colData, err := SplitNameType(b[cur:])
	if err != nil {
		return nil, err
	}
	return append(columns, colData), nil
}

// SplitNameType splits a single "name type" (optionally with a backtick-quoted
// name) into its ColumnData. If no separating space is found, the whole input
// is treated as a bare type with no name.
func SplitNameType(b []byte) (ColumnData, error) {
	// for example: `date f` Array(String)
	if b[0] == '`' {
		b = b[1:]
		for i, char := range b {
			if char == '`' && b[i-1] != '\\' {
				// NOTE(review): the +1 offsets on Name look suspicious for an input
				// that starts directly with the name — verify against the callers
				// (they may pass a leading byte). TODO confirm.
				return ColumnData{
					Name:   b[1 : i+1],
					ChType: b[i+2:],
				}, nil
			}
		}
		return ColumnData{}, fmt.Errorf("cannot find closing backtick in %s", b)
	}
	for i, char := range b {
		// a '(' before any space means the whole input is a type, e.g. "Tuple(..."
		if char == '(' {
			break
		}
		if char == ' ' {
			// NOTE(review): same +1 offset concern as the backtick branch above.
			return ColumnData{
				Name:   b[1 : i+1],
				ChType: b[i+1:],
			}, nil
		}
	}
	// no name present: the whole input is the type
	return ColumnData{
		ChType: b,
	}, nil
}

// FilterSimpleAggregate unwraps "SimpleAggregateFunction(fn, T)" to its inner
// type T; any other input is returned unchanged.
func FilterSimpleAggregate(chType []byte) []byte {
	if len(chType) <= SimpleAggregateStrLen || (string(chType[:SimpleAggregateStrLen]) != SimpleAggregateStr) {
		return chType
	}
	chType = chType[SimpleAggregateStrLen:]
	for i, v := range chType {
		if v == ',' {
			// skip ", " and drop the trailing ')'
			return chType[i+2 : len(chType)-1]
		}
	}
	panic("Cannot found nested type of " + string(chType))
}

================================================
FILE: internal/readerwriter/compress_reader.go
================================================
package readerwriter

// copy from https://github.com/ClickHouse/ch-go/blob/4cde4e4bec24211c0bcdc6f385f4212d0ad522d9/compress/reader.go
// some changes to compatible with chconn

import (
	"encoding/binary"
	"fmt"
	"io"

	"github.com/go-faster/city"
	"github.com/klauspost/compress/zstd"
	"github.com/pierrec/lz4/v4"
)

// invalidCompressErr reports an unrecognized method byte in a block header.
type invalidCompressErr struct {
	method CompressMethod
}

func (e *invalidCompressErr) Error() string {
	return fmt.Sprintf("unknown compression method: 0x%02x ", e.method)
}

// compressReader decompresses the ClickHouse framed compression stream:
// each frame is a 16-byte CityHash128 checksum, a 9-byte header
// (method, raw size, data size), then the compressed payload.
type compressReader struct {
	reader io.Reader
	data   []byte // current decompressed block
	pos    int64  // read position within data
	raw    []byte // raw frame bytes (header + compressed payload)
	header []byte // scratch for the fixed-size frame header
	zstd   *zstd.Decoder
}

// NewCompressReader wrap the io.Reader
func NewCompressReader(r io.Reader) io.Reader {
	return &compressReader{
		zstd:   nil, // lazily initialized
		reader: r,
		header: make([]byte, headerSize),
	}
}

// Read serves bytes from the current decompressed block, pulling and
// decompressing the next frame when the block is exhausted.
func (r *compressReader) Read(buf []byte) (n int, err error) {
	if r.pos >= int64(len(r.data)) {
		if err := r.readBlock(); err != nil {
			return 0, fmt.Errorf("read block: %w", err)
		}
	}
	n = copy(buf, r.data[r.pos:])
	r.pos += int64(n)
	return n, nil
}

// readBlock reads next compressed data into raw and decompresses into data.
func (r *compressReader) readBlock() error {
	r.pos = 0
	_ = r.header[headerSize-1] // bounds-check hint
	if _, err := io.ReadFull(r.reader, r.header); err != nil {
		return fmt.Errorf("read header: %w", err)
	}
	var (
		// rawSize in the header includes the 9-byte compress header itself
		rawSize  = int(binary.LittleEndian.Uint32(r.header[hRawSize:])) - compressHeaderSize
		dataSize = int(binary.LittleEndian.Uint32(r.header[hDataSize:]))
	)
	// guard against corrupted/hostile sizes before allocating
	if dataSize < 0 || dataSize > maxDataSize {
		return fmt.Errorf("data size should be %d < %d < %d", 0, dataSize, maxDataSize)
	}
	if rawSize < 0 || rawSize > maxBlockSize {
		return fmt.Errorf("raw size should be %d < %d < %d", 0, rawSize, maxBlockSize)
	}
	r.data = append(r.data[:0], make([]byte, dataSize)...)
	r.raw = append(r.raw[:0], r.header...)
	r.raw = append(r.raw, make([]byte, rawSize)...)
	_ = r.raw[:rawSize+headerSize-1] // bounds-check hint
	if _, err := io.ReadFull(r.reader, r.raw[headerSize:]); err != nil {
		return fmt.Errorf("read raw: %w", err)
	}
	hGot := city.U128{
		Low:  binary.LittleEndian.Uint64(r.raw[0:8]),
		High: binary.LittleEndian.Uint64(r.raw[8:16]),
	}
	// checksum covers everything after the 16-byte hash (method byte onward)
	h := city.CH128(r.raw[hMethod:])
	if hGot != h {
		return &CorruptedDataErr{
			Actual:    h,
			Reference: hGot,
			RawSize:   rawSize,
			DataSize:  dataSize,
		}
	}
	//nolint:exhaustive
	switch m := CompressMethod(r.header[hMethod]); m {
	case CompressLZ4:
		n, err := lz4.UncompressBlock(r.raw[headerSize:], r.data)
		if err != nil {
			return fmt.Errorf("lz4 decompress: %w", err)
		}
		if n != dataSize {
			return fmt.Errorf("unexpected uncompressed data size: %d (actual) != %d (got in header)",
				n, dataSize,
			)
		}
	case CompressZSTD:
		if r.zstd == nil {
			// Lazily initializing to prevent spawning goroutines in NewReader.
			// See https://github.com/golang/go/issues/47056#issuecomment-997436820
			zstdReader, err := zstd.NewReader(nil,
				zstd.WithDecoderConcurrency(1),
				zstd.WithDecoderLowmem(true),
			)
			if err != nil {
				return fmt.Errorf("zstd new: %w", err)
			}
			r.zstd = zstdReader
		}
		data, err := r.zstd.DecodeAll(r.raw[headerSize:], r.data[:0])
		if err != nil {
			return fmt.Errorf("zstd decompress: %w", err)
		}
		if len(data) != dataSize {
			return fmt.Errorf("unexpected uncompressed data size: %d (actual) != %d (got in header)",
				len(data), dataSize,
			)
		}
		r.data = data
	case CompressChecksum:
		// checksum-only frame: payload is stored uncompressed
		copy(r.data, r.raw[headerSize:])
	default:
		return &invalidCompressErr{m}
	}
	return nil
}

================================================
FILE: internal/readerwriter/compress_writer.go
================================================
package readerwriter

// copy from https://github.com/ClickHouse/ch-go/blob/4cde4e4bec24211c0bcdc6f385f4212d0ad522d9/compress/writer.go
// some changes to compatible with chconn

import (
	"encoding/binary"
	"fmt"
	"io"

	"github.com/go-faster/city"
	"github.com/klauspost/compress/zstd"
	"github.com/pierrec/lz4/v4"
)

// compressWriter accumulates writes into a fixed-size buffer and emits one
// checksummed, compressed frame per Flush (or whenever the buffer fills).
type compressWriter struct {
	writer io.Writer
	// data uncompressed
	data []byte
	// data position
	pos int
	// data compressed
	zdata []byte
	// compression method
	method CompressMethod

	lz4  *lz4.Compressor
	zstd *zstd.Encoder
}

// NewCompressWriter wrap the io.Writer
func NewCompressWriter(w io.Writer, method byte) io.Writer {
	p := &compressWriter{
		writer: w,
		method: CompressMethod(method),
		data:   make([]byte, maxBlockSize),
	}
	return p
}

// Write buffers buf, flushing a full frame each time the internal buffer
// reaches maxBlockSize.
func (cw *compressWriter) Write(buf []byte) (int, error) {
	var n int
	for len(buf) > 0 {
		// Accumulate the data to be compressed.
		m := copy(cw.data[cw.pos:], buf)
		cw.pos += m
		buf = buf[m:]
		if cw.pos == len(cw.data) {
			err := cw.Flush()
			if err != nil {
				return n, err
			}
		}
		n += m
	}
	return n, nil
}

// Flush compresses the buffered data and writes one framed block
// (checksum + header + payload) to the underlying writer.
func (cw *compressWriter) Flush() error { if cw.pos == 0 { return nil } maxSize := lz4.CompressBlockBound(len(cw.data[:cw.pos])) cw.zdata = append(cw.zdata[:0], make([]byte, maxSize+headerSize)...) _ = cw.zdata[:headerSize] cw.zdata[hMethod] = byte(cw.method) var n int //nolint:exhaustive switch cw.method { case CompressLZ4: if cw.lz4 == nil { cw.lz4 = &lz4.Compressor{} } compressedSize, err := cw.lz4.CompressBlock(cw.data[:cw.pos], cw.zdata[headerSize:]) if err != nil { return fmt.Errorf("lz4 compress error: %v", err) } n = compressedSize case CompressZSTD: if cw.zstd == nil { zw, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedDefault), zstd.WithEncoderConcurrency(1), zstd.WithLowerEncoderMem(true), ) if err != nil { return fmt.Errorf("zstd new error: %v", err) } cw.zstd = zw } cw.zdata = cw.zstd.EncodeAll(cw.data[:cw.pos], cw.zdata[:headerSize]) n = len(cw.zdata) - headerSize case CompressChecksum: n = copy(cw.zdata[headerSize:], cw.data[:cw.pos]) } cw.zdata = cw.zdata[:n+headerSize] binary.LittleEndian.PutUint32(cw.zdata[hRawSize:], uint32(n+compressHeaderSize)) binary.LittleEndian.PutUint32(cw.zdata[hDataSize:], uint32(cw.pos)) h := city.CH128(cw.zdata[hMethod:]) binary.LittleEndian.PutUint64(cw.zdata[0:8], h.Low) binary.LittleEndian.PutUint64(cw.zdata[8:16], h.High) _, err := cw.writer.Write(cw.zdata) cw.pos = 0 return err } ================================================ FILE: internal/readerwriter/consts.go ================================================ package readerwriter import ( "fmt" "github.com/go-faster/city" ) // Method is compression codec. type CompressMethod byte const ( // ChecksumSize is 128bits for cityhash102 checksum ChecksumSize = 16 // CompressHeaderSize magic + compressed_size + uncompressed_size CompressHeaderSize = 1 + 4 + 4 // HeaderSize for compress header HeaderSize = ChecksumSize + CompressHeaderSize // BlockMaxSize 1MB BlockMaxSize = 1024 * 1024 * 128 ) // Possible compression methods. 
const ( CompressNone CompressMethod = 0x00 CompressChecksum CompressMethod = 0x02 CompressLZ4 CompressMethod = 0x82 CompressZSTD CompressMethod = 0x90 ) // Constants for compression encoding. // // See https://go-faster.org/docs/clickhouse/compression for reference. const ( checksumSize = 16 compressHeaderSize = 1 + 4 + 4 headerSize = checksumSize + compressHeaderSize // Limiting total data/block size to protect from possible OOM. maxDataSize = 1024 * 1024 * 2 // 2MB maxBlockSize = maxDataSize hRawSize = 17 hDataSize = 21 hMethod = 16 ) // CorruptedDataErr means that provided hash mismatch with calculated. type CorruptedDataErr struct { Actual city.U128 Reference city.U128 RawSize int DataSize int } func (c *CorruptedDataErr) Error() string { return fmt.Sprintf("corrupted data: %d (actual), %d (reference), compressed size: %d, data size: %d", c.Actual.High, c.Reference.High, c.RawSize, c.DataSize, ) } ================================================ FILE: internal/readerwriter/reader.go ================================================ package readerwriter import ( "encoding/binary" "io" ) // Reader is a helper to read data from reader type Reader struct { mainReader io.Reader input io.Reader compressReader io.Reader scratch [binary.MaxVarintLen64]byte } // NewReader get new Reader func NewReader(input io.Reader) *Reader { return &Reader{ input: input, mainReader: input, } } // SetCompress set compress statusp func (r *Reader) SetCompress(c bool) { if c { if r.compressReader == nil { r.compressReader = NewCompressReader(r.mainReader) } r.input = r.compressReader return } r.input = r.mainReader } // Uvarint read variable uint64 value func (r *Reader) Uvarint() (uint64, error) { return binary.ReadUvarint(r) } // Int32 read Int32 value func (r *Reader) Int32() (int32, error) { v, err := r.Uint32() if err != nil { return 0, err } return int32(v), nil } // Uint32 read Uint32 value func (r *Reader) Uint32() (uint32, error) { if _, err := io.ReadFull(r.input, 
r.scratch[:4]); err != nil { return 0, err } return binary.LittleEndian.Uint32(r.scratch[:4]), nil } // Uint64 read Uint64 value func (r *Reader) Uint64() (uint64, error) { if _, err := io.ReadFull(r.input, r.scratch[:8]); err != nil { return 0, err } return binary.LittleEndian.Uint64(r.scratch[:8]), nil } // FixedString read FixedString value func (r *Reader) FixedString(strlen int) ([]byte, error) { buf := make([]byte, strlen) _, err := io.ReadFull(r, buf) return buf, err } // String read String value func (r *Reader) String() (string, error) { strlen, err := r.Uvarint() if err != nil { return "", err } str, err := r.FixedString(int(strlen)) if err != nil { return "", err } return string(str), nil } // ByteString read string value as []byte func (r *Reader) ByteString() ([]byte, error) { strlen, err := r.Uvarint() if err != nil { return nil, err } if strlen == 0 { return []byte{}, nil } return r.FixedString(int(strlen)) } // ReadByte read a single byte func (r *Reader) ReadByte() (byte, error) { if _, err := r.input.Read(r.scratch[:1]); err != nil { return 0, err } return r.scratch[0], nil } // Read implement Read func (r *Reader) Read(buf []byte) (int, error) { return io.ReadFull(r.input, buf) } ================================================ FILE: internal/readerwriter/writer.go ================================================ package readerwriter import ( "bytes" "encoding/binary" "io" "reflect" "unsafe" ) // Writer is a helper to write data into bytes.Buffer type Writer struct { output *bytes.Buffer scratch [binary.MaxVarintLen64]byte } // NewWriter get new writer func NewWriter() *Writer { return &Writer{ output: &bytes.Buffer{}, } } // Uvarint write a variable uint64 value into writer func (w *Writer) Uvarint(v uint64) { ln := binary.PutUvarint(w.scratch[:binary.MaxVarintLen64], v) w.Write(w.scratch[:ln]) } // Int32 write Int32 value func (w *Writer) Int32(v int32) { w.Uint32(uint32(v)) } // Int64 write Int64 value func (w *Writer) Int64(v int64) { 
w.Uint64(uint64(v)) } // Uint8 write Uint8 value func (w *Writer) Uint8(v uint8) { w.output.WriteByte(v) } // Uint32 write Uint32 value func (w *Writer) Uint32(v uint32) { w.scratch[0] = byte(v) w.scratch[1] = byte(v >> 8) w.scratch[2] = byte(v >> 16) w.scratch[3] = byte(v >> 24) w.Write(w.scratch[:4]) } // Uint64 write Uint64 value func (w *Writer) Uint64(v uint64) { w.scratch[0] = byte(v) w.scratch[1] = byte(v >> 8) w.scratch[2] = byte(v >> 16) w.scratch[3] = byte(v >> 24) w.scratch[4] = byte(v >> 32) w.scratch[5] = byte(v >> 40) w.scratch[6] = byte(v >> 48) w.scratch[7] = byte(v >> 56) w.Write(w.scratch[:8]) } // String write string func (w *Writer) String(v string) { str := str2Bytes(v) w.Uvarint(uint64(len(str))) w.Write(str) } // ByteString write []byte func (w *Writer) ByteString(v []byte) { w.Uvarint(uint64(len(v))) w.Write(v) } // Write write raw []byte data func (w *Writer) Write(b []byte) { w.output.Write(b) } // WriteTo implement WriteTo func (w *Writer) WriteTo(wt io.Writer) (int64, error) { return w.output.WriteTo(wt) } // Reset reset all data func (w *Writer) Reset() { w.output.Reset() } // Output get raw *bytes.Buffer func (w *Writer) Output() *bytes.Buffer { return w.output } func str2Bytes(str string) []byte { header := (*reflect.SliceHeader)(unsafe.Pointer(&str)) header.Len = len(str) header.Cap = header.Len return *(*[]byte)(unsafe.Pointer(header)) } ================================================ FILE: ping.go ================================================ package chconn import ( "context" ) type pong struct{} // Check that connection to the server is alive. 
func (ch *conn) Ping(ctx context.Context) error {
	// Fast-fail if ctx is already done; otherwise watch it so cancellation
	// can interrupt the network round trip.
	if ctx != context.Background() {
		select {
		case <-ctx.Done():
			return newContextAlreadyDoneError(ctx)
		default:
		}
		ch.contextWatcher.Watch(ctx)
		defer ch.contextWatcher.Unwatch()
	}
	ch.writer.Uvarint(clientPing)
	// Any failure leaves the connection in an unknown protocol state, so it
	// must be closed on the way out.
	var hasError bool
	defer func() {
		if hasError {
			ch.Close()
		}
	}()
	if _, err := ch.writer.WriteTo(ch.writerTo); err != nil {
		hasError = true
		return &writeError{"ping: write packet type", preferContextOverNetTimeoutError(ctx, err)}
	}
	res, err := ch.receiveAndProcessData(emptyOnProgress)
	if err != nil {
		hasError = true
		return preferContextOverNetTimeoutError(ctx, err)
	}
	if _, ok := res.(*pong); !ok {
		hasError = true
		return &unexpectedPacket{expected: "serverPong", actual: res}
	}
	return nil
}



================================================
FILE: ping_test.go
================================================

package chconn

import (
	"context"
	"errors"
	"io"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestPing does a round trip against a real server from the env conn string.
func TestPing(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	conn, err := Connect(context.Background(), connString)
	require.NoError(t, err)
	require.NoError(t, conn.Ping(context.Background()))
	conn.Close()
}

// TestPingWriteError injects write/read failures at fixed offsets and checks
// the error text and that the connection is closed afterwards.
func TestPingWriteError(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := ParseConfig(connString)
	require.NoError(t, err)
	config.WriterFunc = func(w io.Writer) io.Writer {
		return &writerErrorHelper{
			err:         errors.New("timeout"),
			w:           w,
			numberValid: 1,
		}
	}
	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	err = c.Ping(context.Background())
	require.EqualError(t, err, "ping: write packet type (timeout)")
	require.EqualError(t, errors.Unwrap(err), "timeout")
	assert.True(t, c.IsClosed())
	config.WriterFunc = nil
	config.ReaderFunc = func(r io.Reader) io.Reader {
		return &readErrorHelper{
			err:         errors.New("timeout"),
			r:           r,
			numberValid: 13,
		}
	}
	c, err = ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	require.EqualError(t, c.Ping(context.Background()), "packet: read packet type (timeout)")
	assert.True(t, c.IsClosed())
}

// TestPingCtxError covers a pre-cancelled context (connection survives) and a
// deadline hit mid-write (connection is closed).
func TestPingCtxError(t *testing.T) {
	t.Parallel()
	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := ParseConfig(connString)
	require.NoError(t, err)
	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err = c.Ping(ctx)
	require.EqualError(t, err, "timeout: context already done: context canceled")
	require.EqualError(t, errors.Unwrap(err), "context already done: context canceled")
	assert.False(t, c.IsClosed())
	config.WriterFunc = func(w io.Writer) io.Writer {
		return &writerSlowHelper{
			w:     w,
			sleep: time.Second,
		}
	}
	c, err = ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)
	defer cancel()
	err = c.Ping(ctx)
	require.EqualError(t, errors.Unwrap(errors.Unwrap(err)), "context deadline exceeded")
	assert.True(t, c.IsClosed())
}



================================================
FILE: profile.go
================================================

package chconn

// Profile detail of profile select query
type Profile struct {
	Rows                      uint64
	Blocks                    uint64
	Bytes                     uint64
	RowsBeforeLimit           uint64
	AppliedLimit              uint8
	CalculatedRowsBeforeLimit uint8
}

func newProfile() *Profile {
	return &Profile{}
}

// read decodes a Profile packet from the wire, field by field in protocol order.
func (p *Profile) read(ch *conn) (err error) {
	if p.Rows, err = ch.reader.Uvarint(); err != nil {
		return &readError{"profile: read Rows", err}
	}
	if p.Blocks, err = ch.reader.Uvarint(); err != nil {
		return &readError{"profile: read Blocks", err}
	}
	if p.Bytes, err = ch.reader.Uvarint(); err != nil {
		return &readError{"profile: read Bytes", err}
	}
	if p.AppliedLimit, err = ch.reader.ReadByte(); err != nil {
		return &readError{"profile: read AppliedLimit", err}
	}
	if p.RowsBeforeLimit, err = ch.reader.Uvarint(); err != nil {
		return &readError{"profile: read RowsBeforeLimit", err}
	}
	if p.CalculatedRowsBeforeLimit, err = ch.reader.ReadByte(); err != nil {
		return &readError{"profile: read CalculatedRowsBeforeLimit", err}
	}
	return nil
}



================================================
FILE: profile_event.go
================================================

package chconn

import (
	"github.com/vahid-sohrabloo/chconn/v2/column"
)

// ProfileEvent details of a profile event of a select query.
type ProfileEvent struct {
	Host     *column.String
	Time     *column.Base[uint32]
	ThreadID *column.Base[uint64]
	Type     *column.Base[int8]
	Name     *column.String
	Value    *column.Base[int64]
}

func newProfileEvent() *ProfileEvent {
	return &ProfileEvent{
		Host:     column.NewString(),
		Time:     column.New[uint32](),
		ThreadID: column.New[uint64](),
		Type:     column.New[int8](),
		Name:     column.NewString(),
		Value:    column.New[int64](),
	}
}

// read decodes one profile-events block into the typed columns.
func (p ProfileEvent) read(c *conn) error {
	return c.block.readColumnsData(c, true, p.Host, p.Time, p.ThreadID, p.Type, p.Name, p.Value)
}



================================================
FILE: profile_test.go
================================================

package chconn

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vahid-sohrabloo/chconn/v2/column"
	"github.com/vahid-sohrabloo/chconn/v2/internal/helper"
)

// TestProfileReadError fails the reader at successive offsets inside the
// profile packet and checks each field's read error message.
func TestProfileReadError(t *testing.T) {
	startValidReader := 43

	config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
	require.NoError(t, err)
	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)
	if c.ServerInfo().Revision >= helper.DbmsMinProtocolWithServerQueryTimeInProgress {
		// todo we need to fix this for clickhouse 22.10 and above
		return
	}
	tests := []struct {
		name        string
		wantErr     string
		numberValid int
	}{
		{
			name:        "profile: read Rows",
			wantErr:     "profile: read Rows",
			numberValid: startValidReader,
		},
		{
			name:        "profile: read Blocks",
			wantErr:     "profile: read Blocks",
			numberValid: startValidReader + 1,
		},
		{
			name:        "profile: read Bytes",
			wantErr:     "profile: read Bytes",
			numberValid: startValidReader + 2,
		},
		{
			name:        "profile: read AppliedLimit",
			wantErr:     "profile: read AppliedLimit",
			numberValid: startValidReader + 3,
		},
		{
			name:        "profile: read RowsBeforeLimit",
			wantErr:     "profile: read RowsBeforeLimit",
			numberValid: startValidReader + 4,
		},
		{
			name:        "profile: read CalculatedRowsBeforeLimit",
			wantErr:     "profile: read CalculatedRowsBeforeLimit",
			numberValid: startValidReader + 5,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING"))
			require.NoError(t, err)
			if c.ServerInfo().Revision >= helper.DbmsMinProtocolWithServerQueryTimeInProgress {
				tt.numberValid++
			}
			config.ReaderFunc = func(r io.Reader) io.Reader {
				return &readErrorHelper{
					err:         errors.New("timeout"),
					r:           r,
					numberValid: tt.numberValid,
				}
			}
			c, err := ConnectConfig(context.Background(), config)
			require.NoError(t, err)
			col := column.New[uint64]()
			stmt, err := c.Select(context.Background(), "SELECT * FROM system.numbers LIMIT 1;", col)
			require.NoError(t, err)
			for stmt.Next() {
			}
			require.Error(t, stmt.Err())
			readErr, ok := stmt.Err().(*readError)
			require.True(t, ok)
			fmt.Println("readErr.msg:", readErr.msg)
			require.Equal(t, tt.wantErr, readErr.msg)
			require.EqualError(t, readErr.Unwrap(), "timeout")
			assert.True(t, c.IsClosed())
		})
	}
}



================================================
FILE: progress.go
================================================

package chconn

import "github.com/vahid-sohrabloo/chconn/v2/internal/helper"

// Progress details of progress select query
type Progress struct {
	ReadRows     uint64
	ReadBytes    uint64
	TotalRows    uint64
	WriterRows   uint64
	WrittenBytes uint64
	ElapsedNS    uint64
}

func newProgress() *Progress {
	return &Progress{}
}

// read decodes a Progress packet; the later fields are only present for
// sufficiently new server protocol revisions.
func (p *Progress) read(ch *conn) (err error) {
	if p.ReadRows, err = ch.reader.Uvarint(); err != nil {
		return &readError{"progress: read ReadRows", err}
	}
	if p.ReadBytes, err = ch.reader.Uvarint(); err != nil {
		return &readError{"progress: read ReadBytes", err}
	}
	if p.TotalRows, err = ch.reader.Uvarint(); err != nil {
		return &readError{"progress: read TotalRows", err}
	}
	if ch.serverInfo.Revision >= helper.DbmsMinRevisionWithClientWriteInfo {
		if p.WriterRows, err = ch.reader.Uvarint(); err != nil {
			return &readError{"progress: read WriterRows", err}
		}
		if p.WrittenBytes, err = ch.reader.Uvarint(); err != nil {
			return &readError{"progress: read WrittenBytes", err}
		}
	}
	if ch.serverInfo.Revision >= helper.DbmsMinProtocolWithServerQueryTimeInProgress {
		if p.ElapsedNS, err = ch.reader.Uvarint(); err != nil {
			return &readError{"progress: read ElapsedNS", err}
		}
	}
	return nil
}



================================================
FILE: select_stmt.go
================================================

package chconn

import (
	"bytes"
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/vahid-sohrabloo/chconn/v2/column"
	"github.com/vahid-sohrabloo/chconn/v2/internal/helper"
	"github.com/vahid-sohrabloo/chconn/v2/types"
)

// Select executes a query and return select stmt.
// NOTE: only use for select query
func (ch *conn) Select(ctx context.Context, query string, columns ...column.ColumnBasic) (SelectStmt, error) {
	return ch.SelectWithOption(ctx, query, nil, columns...)
}

// SelectWithOption executes a query with the query options and return select stmt.
// NOTE: only use for select query
func (ch *conn) SelectWithOption(
	ctx context.Context,
	query string,
	queryOptions *QueryOptions,
	columns ...column.ColumnBasic,
) (SelectStmt, error) {
	err := ch.lock()
	if err != nil {
		return nil, err
	}
	var hasError bool
	defer func() {
		if hasError {
			ch.Close()
		}
	}()
	if ctx != context.Background() {
		select {
		case <-ctx.Done():
			// NOTE(review): ch.lock() has already succeeded here, but this
			// early return neither unlocks nor closes the connection —
			// confirm the lock is released elsewhere on this path.
			return nil, newContextAlreadyDoneError(ctx)
		default:
		}
		ch.contextWatcher.Watch(ctx)
	}
	if queryOptions == nil {
		queryOptions = emptyQueryOptions
	}
	err = ch.sendQueryWithOption(query, queryOptions.QueryID, queryOptions.Settings, queryOptions.Parameters)
	if err != nil {
		hasError = true
		return nil, preferContextOverNetTimeoutError(ctx, err)
	}
	s := &selectStmt{
		conn:           ch,
		query:          query,
		queryOptions:   queryOptions,
		clientInfo:     nil,
		ctx:            ctx,
		columnsForRead: columns,
	}
	res, err := s.conn.receiveAndProcessData(nil)
	if err != nil {
		s.lastErr = err
		s.Close()
		return nil, err
	}
	// The server sends an empty (zero-row) data block first; it carries the
	// result schema used to set up the columns.
	if block, ok := res.(*block); ok {
		if block.NumRows == 0 {
			err = s.readEmptyBlock(block)
			if err != nil {
				return nil, err
			}
			return s, nil
		}
	}
	return nil, &unexpectedPacket{expected: "serverData with zero len", actual: res}
}

// SelectStmt is an interface for select statement
type SelectStmt interface {
	// Next read the next block of data for reading.
	// It returns true on success, or false if there is no next result row or an error happened while preparing it.
	// Err should be consulted to distinguish between the two cases.
	Next() bool
	// Err returns the error, if any, that was encountered during iteration.
	// Err may be called after an explicit or implicit Close.
	Err() error
	// RowsInBlock return number of rows in this current block
	RowsInBlock() int
	// Columns return the columns of this select statement.
	Columns() []column.ColumnBasic
	// Close close the statement and release the connection
	// If Next is called and returns false and there are no further blocks,
	// the Rows are closed automatically and it will suffice to check the result of Err.
	// Close is idempotent and does not affect the result of Err.
	Close()
}

type selectStmt struct {
	block          *block
	conn           *conn
	query          string
	queryOptions   *QueryOptions
	clientInfo     *ClientInfo
	lastErr        error
	closed         bool
	columnsForRead []column.ColumnBasic
	ctx            context.Context
	finishSelect   bool
	// validateData is true once the column count has been checked, so the
	// check runs only for the first data block.
	validateData bool
}

var _ SelectStmt = &selectStmt{}

// readEmptyBlock consumes a zero-row schema block: it either derives the
// columns from the server-reported types or reorders the caller-supplied
// named columns to match server order.
func (s *selectStmt) readEmptyBlock(b *block) error {
	err := b.readColumns(s.conn)
	if err != nil {
		s.lastErr = err
		s.Close()
		return err
	}
	if len(s.columnsForRead) == 0 {
		s.columnsForRead, err = s.getColumnsByChType(b)
		if err != nil {
			s.lastErr = err
			s.Close()
			return err
		}
	} else if len(s.columnsForRead[0].Name()) != 0 {
		// NOTE(review): unlike the other error paths in this method, this one
		// does not set lastErr or call Close — confirm that is intentional.
		s.columnsForRead, err = b.reorderColumns(s.columnsForRead)
		if err != nil {
			return err
		}
	}
	return nil
}

// Next advances to the next data block, transparently handling schema-only
// blocks and Profile/Progress/ProfileEvent packets via recursion.
func (s *selectStmt) Next() bool {
	// protect after close
	if s.closed {
		return false
	}
	s.conn.reader.SetCompress(false)
	res, err := s.conn.receiveAndProcessData(nil)
	if err != nil {
		s.lastErr = err
		s.Close()
		return false
	}
	if block, ok := res.(*block); ok {
		if block.NumRows == 0 {
			err = s.readEmptyBlock(block)
			if err != nil {
				return false
			}
			return s.Next()
		}
		s.block = block
		// Only the first non-empty block is validated against the column set.
		needValidateData := !s.validateData
		s.validateData = false
		if needValidateData {
			if errValidate := s.validate(); errValidate != nil {
				s.lastErr = errValidate
				s.Close()
				return false
			}
		}
		err = block.readColumnsData(s.conn, needValidateData, s.columnsForRead...)
		if err != nil {
			s.lastErr = preferContextOverNetTimeoutError(s.ctx, err)
			s.Close()
			return false
		}
		return true
	}
	if profile, ok := res.(*Profile); ok {
		if s.queryOptions.OnProfile != nil {
			s.queryOptions.OnProfile(profile)
		}
		return s.Next()
	}
	if progress, ok := res.(*Progress); ok {
		if s.queryOptions.OnProgress != nil {
			s.queryOptions.OnProgress(progress)
		}
		return s.Next()
	}
	if profileEvent, ok := res.(*ProfileEvent); ok {
		if s.queryOptions.OnProfileEvent != nil {
			s.queryOptions.OnProfileEvent(profileEvent)
		}
		return s.Next()
	}
	// nil result means end-of-stream: the select finished cleanly.
	if res == nil {
		s.finishSelect = true
		s.columnsForRead = nil
		s.Close()
		return false
	}
	s.lastErr = &unexpectedPacket{expected: "serverData", actual: res}
	s.Close()
	return false
}

// validate checks that the caller supplied exactly as many columns as the
// server is sending.
func (s *selectStmt) validate() error {
	if int(s.block.NumColumns) != len(s.columnsForRead) {
		return &ColumnNumberReadError{
			Read:      len(s.columnsForRead),
			Available: s.block.NumColumns,
		}
	}
	return nil
}

// RowsInBlock return number of rows in this current block
func (s *selectStmt) RowsInBlock() int {
	return int(s.block.NumRows)
}

// Err returns the error, if any, that was encountered during iteration.
// Err may be called after an explicit or implicit Close.
func (s *selectStmt) Err() error {
	return preferContextOverNetTimeoutError(s.ctx, s.lastErr)
}

// Close close the statement and release the connection
// If Next is called and returns false and there are no further blocks,
// the Rows are closed automatically and it will suffice to check the result of Err.
func (s *selectStmt) Close() { s.conn.reader.SetCompress(false) if !s.closed { s.closed = true s.conn.contextWatcher.Unwatch() s.conn.unlock() if s.Err() != nil || !s.finishSelect { s.conn.Close() } } } func (s *selectStmt) Columns() []column.ColumnBasic { return s.columnsForRead } func (s *selectStmt) getColumnsByChType(b *block) ([]column.ColumnBasic, error) { columns := make([]column.ColumnBasic, len(b.Columns)) for i, col := range b.Columns { columnByType, err := s.columnByType(col.ChType, 0, false, false) if err != nil { return nil, err } columnByType.SetName(col.Name) columnByType.SetType(col.ChType) err = columnByType.Validate() if err != nil { return nil, err } columns[i] = columnByType } return columns, nil } //nolint:funlen,gocyclo func (s *selectStmt) columnByType(chType []byte, arrayLevel int, nullable, lc bool) (column.ColumnBasic, error) { switch { case string(chType) == "Bool": return column.New[bool]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Int8" || helper.IsEnum8(chType): return column.New[int8]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Int16" || helper.IsEnum16(chType): return column.New[int16]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Int32": return column.New[int32]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Int64": return column.New[int64]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Int128": return column.New[types.Int128]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Int256": return column.New[types.Int256]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "UInt8": return column.New[uint8]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "UInt16": return column.New[uint16]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "UInt32": return column.New[uint32]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "UInt64": return column.New[uint64]().Elem(arrayLevel, nullable, lc), nil case 
string(chType) == "UInt128": return column.New[types.Uint128]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "UInt256": return column.New[types.Uint256]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Float32": return column.New[float32]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Float64": return column.New[float64]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "String": return column.NewString().Elem(arrayLevel, nullable, lc), nil case helper.IsFixedString(chType): strLen, err := strconv.Atoi(string(chType[helper.FixedStringStrLen : len(chType)-1])) if err != nil { return nil, fmt.Errorf("invalid fixed string length: %s: %w", string(chType), err) } return getFixedType(strLen, arrayLevel, nullable, lc) case string(chType) == "Date": if !s.queryOptions.UseGoTime { return column.New[types.Date]().Elem(arrayLevel, nullable, lc), nil } return column.NewDate[types.Date]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "Date32": if !s.queryOptions.UseGoTime { return column.New[types.Date32]().Elem(arrayLevel, nullable, lc), nil } return column.NewDate[types.Date32]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "DateTime" || helper.IsDateTimeWithParam(chType): if !s.queryOptions.UseGoTime { return column.New[types.DateTime]().Elem(arrayLevel, nullable, lc), nil } var params [][]byte if bytes.HasPrefix(chType, []byte("DateTime(")) { params = bytes.Split(chType[len("DateTime("):len(chType)-1], []byte(", ")) } col := column.NewDate[types.DateTime]() if len(params) > 0 && len(params[0]) >= 3 { if loc, err := time.LoadLocation(string(params[0][1 : len(params[0])-1])); err == nil { col.SetLocation(loc) } else if loc, err := time.LoadLocation(s.conn.serverInfo.Timezone); err == nil { col.SetLocation(loc) } } return col.Elem(arrayLevel, nullable, lc), nil case helper.IsDateTime64(chType): if !s.queryOptions.UseGoTime { return column.New[types.DateTime64]().Elem(arrayLevel, nullable, lc), nil 
} params := bytes.Split(chType[helper.DateTime64StrLen:len(chType)-1], []byte(", ")) if len(params) == 0 { panic("DateTime64 invalid params") } precision, err := strconv.Atoi(string(params[0])) if err != nil { panic("DateTime64 invalid precision: " + err.Error()) } col := column.NewDate[types.DateTime64]() col.SetPrecision(precision) if len(params) > 1 && len(params[1]) >= 3 { if loc, err := time.LoadLocation(string(params[1][1 : len(params[1])-1])); err == nil { col.SetLocation(loc) } else if loc, err := time.LoadLocation(s.conn.serverInfo.Timezone); err == nil { col.SetLocation(loc) } } return col.Elem(arrayLevel, nullable, lc), nil case helper.IsDecimal(chType): params := bytes.Split(chType[helper.DecimalStrLen:len(chType)-1], []byte(", ")) precision, _ := strconv.Atoi(string(params[0])) if precision <= 9 { return column.New[types.Decimal32]().Elem(arrayLevel, nullable, lc), nil } if precision <= 18 { return column.New[types.Decimal64]().Elem(arrayLevel, nullable, lc), nil } if precision <= 38 { return column.New[types.Decimal128]().Elem(arrayLevel, nullable, lc), nil } if precision <= 76 { return column.New[types.Decimal256]().Elem(arrayLevel, nullable, lc), nil } panic("Decimal invalid precision: " + string(chType)) case string(chType) == "UUID": return column.New[types.UUID]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "IPv4": return column.New[types.IPv4]().Elem(arrayLevel, nullable, lc), nil case string(chType) == "IPv6": return column.New[types.IPv6]().Elem(arrayLevel, nullable, lc), nil case helper.IsNullable(chType): return s.columnByType(chType[helper.LenNullableStr:len(chType)-1], arrayLevel, true, lc) case bytes.HasPrefix(chType, []byte("SimpleAggregateFunction(")): return s.columnByType(helper.FilterSimpleAggregate(chType), arrayLevel, nullable, lc) case helper.IsArray(chType): if arrayLevel == 3 { return nil, fmt.Errorf("max array level is 3") } if nullable { return nil, fmt.Errorf("array is not allowed in nullable") } if lc { return 
nil, fmt.Errorf("LowCardinality is not allowed in nullable") } return s.columnByType(chType[helper.LenArrayStr:len(chType)-1], arrayLevel+1, nullable, lc) case helper.IsLowCardinality(chType): return s.columnByType(chType[helper.LenLowCardinalityStr:len(chType)-1], arrayLevel, nullable, true) case helper.IsTuple(chType): columnsTuple, err := helper.TypesInParentheses(chType[helper.LenTupleStr : len(chType)-1]) if err != nil { return nil, fmt.Errorf("tuple invalid types: %w", err) } columns := make([]column.ColumnBasic, len(columnsTuple)) for i, c := range columnsTuple { col, err := s.columnByType(c.ChType, 0, false, false) if err != nil { return nil, err } col.SetName(c.Name) columns[i] = col } return column.NewTuple(columns...).Elem(arrayLevel), nil case helper.IsMap(chType): columnsMap, err := helper.TypesInParentheses(chType[helper.LenMapStr : len(chType)-1]) if err != nil { return nil, fmt.Errorf("map invalid types: %w", err) } if len(columnsMap) != 2 { return nil, fmt.Errorf("map must have 2 columns") } columns := make([]column.ColumnBasic, len(columnsMap)) for i, col := range columnsMap { col, err := s.columnByType(col.ChType, arrayLevel, nullable, lc) if err != nil { return nil, err } columns[i] = col } return column.NewMapBase(columns[0], columns[1]), nil case helper.IsNested(chType): return s.columnByType(helper.NestedToArrayType(chType), arrayLevel, nullable, lc) } return nil, fmt.Errorf("unknown type: %s", chType) } //nolint:funlen,gocyclo func getFixedType(fixedLen, arrayLevel int, nullable, lc bool) (column.ColumnBasic, error) { switch fixedLen { case 1: return column.New[[1]byte]().Elem(arrayLevel, nullable, lc), nil case 2: return column.New[[2]byte]().Elem(arrayLevel, nullable, lc), nil case 3: return column.New[[3]byte]().Elem(arrayLevel, nullable, lc), nil case 4: return column.New[[4]byte]().Elem(arrayLevel, nullable, lc), nil case 5: return column.New[[5]byte]().Elem(arrayLevel, nullable, lc), nil case 6: return 
column.New[[6]byte]().Elem(arrayLevel, nullable, lc), nil
	case 7:
		return column.New[[7]byte]().Elem(arrayLevel, nullable, lc), nil
	case 8:
		return column.New[[8]byte]().Elem(arrayLevel, nullable, lc), nil
	case 9:
		return column.New[[9]byte]().Elem(arrayLevel, nullable, lc), nil
	case 10:
		return column.New[[10]byte]().Elem(arrayLevel, nullable, lc), nil
	case 11:
		return column.New[[11]byte]().Elem(arrayLevel, nullable, lc), nil
	case 12:
		return column.New[[12]byte]().Elem(arrayLevel, nullable, lc), nil
	case 13:
		return column.New[[13]byte]().Elem(arrayLevel, nullable, lc), nil
	case 14:
		return column.New[[14]byte]().Elem(arrayLevel, nullable, lc), nil
	case 15:
		return column.New[[15]byte]().Elem(arrayLevel, nullable, lc), nil
	case 16:
		return column.New[[16]byte]().Elem(arrayLevel, nullable, lc), nil
	case 17:
		return column.New[[17]byte]().Elem(arrayLevel, nullable, lc), nil
	case 18:
		return column.New[[18]byte]().Elem(arrayLevel, nullable, lc), nil
	case 19:
		return column.New[[19]byte]().Elem(arrayLevel, nullable, lc), nil
	case 20:
		return column.New[[20]byte]().Elem(arrayLevel, nullable, lc), nil
	case 21:
		return column.New[[21]byte]().Elem(arrayLevel, nullable, lc), nil
	case 22:
		return column.New[[22]byte]().Elem(arrayLevel, nullable, lc), nil
	case 23:
		return column.New[[23]byte]().Elem(arrayLevel, nullable, lc), nil
	case 24:
		return column.New[[24]byte]().Elem(arrayLevel, nullable, lc), nil
	case 25:
		return column.New[[25]byte]().Elem(arrayLevel, nullable, lc), nil
	case 26:
		return column.New[[26]byte]().Elem(arrayLevel, nullable, lc), nil
	case 27:
		return column.New[[27]byte]().Elem(arrayLevel, nullable, lc), nil
	case 28:
		return column.New[[28]byte]().Elem(arrayLevel, nullable, lc), nil
	case 29:
		return column.New[[29]byte]().Elem(arrayLevel, nullable, lc), nil
	case 30:
		return column.New[[30]byte]().Elem(arrayLevel, nullable, lc), nil
	case 31:
		return column.New[[31]byte]().Elem(arrayLevel, nullable, lc), nil
	case 32:
		return column.New[[32]byte]().Elem(arrayLevel, nullable, lc), nil
	case 33:
		return column.New[[33]byte]().Elem(arrayLevel, nullable, lc), nil
	case 34:
		return column.New[[34]byte]().Elem(arrayLevel, nullable, lc), nil
	case 35:
		return column.New[[35]byte]().Elem(arrayLevel, nullable, lc), nil
	case 36:
		return column.New[[36]byte]().Elem(arrayLevel, nullable, lc), nil
	case 37:
		return column.New[[37]byte]().Elem(arrayLevel, nullable, lc), nil
	case 38:
		return column.New[[38]byte]().Elem(arrayLevel, nullable, lc), nil
	case 39:
		return column.New[[39]byte]().Elem(arrayLevel, nullable, lc), nil
	case 40:
		return column.New[[40]byte]().Elem(arrayLevel, nullable, lc), nil
	case 41:
		return column.New[[41]byte]().Elem(arrayLevel, nullable, lc), nil
	case 42:
		return column.New[[42]byte]().Elem(arrayLevel, nullable, lc), nil
	case 43:
		return column.New[[43]byte]().Elem(arrayLevel, nullable, lc), nil
	case 44:
		return column.New[[44]byte]().Elem(arrayLevel, nullable, lc), nil
	case 45:
		return column.New[[45]byte]().Elem(arrayLevel, nullable, lc), nil
	case 46:
		return column.New[[46]byte]().Elem(arrayLevel, nullable, lc), nil
	case 47:
		return column.New[[47]byte]().Elem(arrayLevel, nullable, lc), nil
	case 48:
		return column.New[[48]byte]().Elem(arrayLevel, nullable, lc), nil
	case 49:
		return column.New[[49]byte]().Elem(arrayLevel, nullable, lc), nil
	case 50:
		return column.New[[50]byte]().Elem(arrayLevel, nullable, lc), nil
	case 51:
		return column.New[[51]byte]().Elem(arrayLevel, nullable, lc), nil
	case 52:
		return column.New[[52]byte]().Elem(arrayLevel, nullable, lc), nil
	case 53:
		return column.New[[53]byte]().Elem(arrayLevel, nullable, lc), nil
	case 54:
		return column.New[[54]byte]().Elem(arrayLevel, nullable, lc), nil
	case 55:
		return column.New[[55]byte]().Elem(arrayLevel, nullable, lc), nil
	case 56:
		return column.New[[56]byte]().Elem(arrayLevel, nullable, lc), nil
	case 57:
		return column.New[[57]byte]().Elem(arrayLevel, nullable, lc), nil
	case 58:
		return column.New[[58]byte]().Elem(arrayLevel, nullable, lc), nil
	case 59:
		return column.New[[59]byte]().Elem(arrayLevel, nullable, lc), nil
	case 60:
		return column.New[[60]byte]().Elem(arrayLevel, nullable, lc), nil
	case 61:
		return column.New[[61]byte]().Elem(arrayLevel, nullable, lc), nil
	case 62:
		return column.New[[62]byte]().Elem(arrayLevel, nullable, lc), nil
	case 63:
		return column.New[[63]byte]().Elem(arrayLevel, nullable, lc), nil
	case 64:
		return column.New[[64]byte]().Elem(arrayLevel, nullable, lc), nil
	case 65:
		return column.New[[65]byte]().Elem(arrayLevel, nullable, lc), nil
	case 66:
		return column.New[[66]byte]().Elem(arrayLevel, nullable, lc), nil
	case 67:
		return column.New[[67]byte]().Elem(arrayLevel, nullable, lc), nil
	case 68:
		return column.New[[68]byte]().Elem(arrayLevel, nullable, lc), nil
	case 69:
		return column.New[[69]byte]().Elem(arrayLevel, nullable, lc), nil
	case 70:
		return column.New[[70]byte]().Elem(arrayLevel, nullable, lc), nil
	}
	return nil, fmt.Errorf("fixed length %d is not supported", fixedLen)
}



================================================
FILE: select_stmt_test.go
================================================

package chconn

import (
	"context"
	"errors"
	"io"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/vahid-sohrabloo/chconn/v2/column"
	"github.com/vahid-sohrabloo/chconn/v2/internal/helper"
	"github.com/vahid-sohrabloo/chconn/v2/types"
)

// TestSelectError covers the uninitialized-connection, write-failure and
// column-count-mismatch error paths of Select.
func TestSelectError(t *testing.T) {
	t.Parallel()

	connString := os.Getenv("CHX_TEST_TCP_CONN_STRING")
	config, err := ParseConfig(connString)
	require.NoError(t, err)

	c, err := ConnectConfig(context.Background(), config)
	require.NoError(t, err)

	c.(*conn).status = connStatusUninitialized
	res, err := c.Select(context.Background(), "select * from system.numbers limit 5")
	require.Nil(t, res)
	require.EqualError(t, err, "conn uninitialized")
	require.EqualError(t, c.(*conn).lock(), "conn uninitialized")
	c.Close()

	config.WriterFunc = func(w io.Writer) io.Writer {
		return &writerErrorHelper{
			err: errors.New("timeout"),
			w:   w,
numberValid: 1, } } c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) res, err = c.Select(context.Background(), "select * from system.numbers limit 5") require.EqualError(t, err, "write block info (timeout)") require.Nil(t, res) assert.True(t, c.IsClosed()) config.WriterFunc = nil c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) colNumber := column.New[int64]() res, err = c.Select(context.Background(), "select number,toNullable(number) from system.numbers limit 5", colNumber) require.NoError(t, err) for res.Next() { } assert.False(t, res.Next()) require.EqualError(t, res.Err(), "read 1 column(s), but available 2 column(s)") assert.True(t, c.IsClosed()) } func TestSelectCtxError(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) cancel() res, err := c.Select(ctx, "select * from system.numbers limit 1") require.EqualError(t, err, "timeout: context already done: context canceled") require.Nil(t, res) assert.False(t, c.IsClosed()) config.WriterFunc = func(w io.Writer) io.Writer { return &writerSlowHelper{ w: w, sleep: time.Second, } } c, err = ConnectConfig(context.Background(), config) require.NoError(t, err) ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() res, err = c.Select(ctx, "select * from system.numbers") require.EqualError(t, errors.Unwrap(err), "context deadline exceeded") require.Nil(t, res) assert.True(t, c.IsClosed()) } func TestSelectProgress(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) colSleep := column.New[uint8]() colNumber := column.New[uint64]() res, err 
:= c.SelectWithOption(context.Background(), "SELECT sleep(1), * FROM system.numbers LIMIT 1", &QueryOptions{ OnProgress: func(p *Progress) { }, OnProfile: func(p *Profile) { }, OnProfileEvent: func(p *ProfileEvent) { }, }, colSleep, colNumber, ) require.NotNil(t, res) require.NoError(t, err) for res.Next() { } require.NoError(t, res.Err()) c.Close() } func TestSelectParameters(t *testing.T) { t.Parallel() connString := os.Getenv("CHX_TEST_TCP_CONN_STRING") config, err := ParseConfig(connString) require.NoError(t, err) c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) colA := column.New[int32]() colAS := column.New[int32]().Array() colB := column.NewString() colBS := column.NewString().Array() colC := column.NewDate[types.DateTime]() colD := column.NewMap[string, uint8](column.NewString(), column.New[uint8]()) colE := column.New[uint32]() colES := column.New[uint32]().Array() colF32 := column.New[float32]() colF32S := column.New[float32]().Array() colF64 := column.New[float64]() colF64S := column.New[float64]().Array() res, err := c.SelectWithOption(context.Background(), `SELECT {a: Int32}, {as: Array(Int32)}, {b: String}, {bs: Array(String)}, {c: DateTime}, {d: Map(String, UInt8)}, {e: UInt32}, {es: Array(UInt32)}, {f32: Float32}, {f64: Float64}, {f32s: Array(Float32)}, {f64s: Array(Float64)} `, &QueryOptions{ Parameters: NewParameters( IntParameter("a", 13), IntSliceParameter("as", []int32{-15, -16}), StringParameter("b", "str'"), StringSliceParameter("bs", []string{"str", "str2\\'"}), StringParameter("c", "2022-08-04 18:30:53"), StringParameter("d", `{'a': 1, 'b': 2}`), UintParameter("e", uint64(14)), UintSliceParameter("es", []uint32{15, 16}), Float32Parameter("f32", float32(1.5)), Float64Parameter("f64", float64(1.8)), Float32SliceParameter("f32s", []float32{1.5, 1.6}), Float64SliceParameter("f64s", []float64{1.8, 1.9}), ), }, colA, colAS, colB, colBS, colC, colD, colE, colES, colF32, colF64, colF32S, colF64S, ) if err != nil && 
err.Error() == "parameters are not supported by the server" { t.SkipNow() } require.NoError(t, err) require.NotNil(t, res) for res.Next() { } require.NoError(t, res.Err()) require.Len(t, colA.Data(), 1) require.Len(t, colAS.Data(), 1) require.Len(t, colB.Data(), 1) require.Len(t, colBS.Data(), 1) require.Len(t, colC.Data(), 1) require.Len(t, colD.Data(), 1) require.Len(t, colE.Data(), 1) require.Len(t, colES.Data(), 1) assert.Equal(t, int32(13), colA.Data()[0]) assert.Equal(t, []int32{-15, -16}, colAS.Data()[0]) assert.Equal(t, "str'", colB.Data()[0]) assert.Equal(t, []string{"str", "str2\\'"}, colBS.Data()[0]) assert.Equal(t, "2022-08-04 18:30:53", colC.Data()[0].Format("2006-01-02 15:04:05")) assert.Equal(t, map[string]uint8{ "a": 1, "b": 2, }, colD.Data()[0]) assert.Equal(t, uint32(14), colE.Data()[0]) assert.Equal(t, []uint32{15, 16}, colES.Data()[0]) assert.Equal(t, float32(1.5), colF32.Data()[0]) assert.Equal(t, float64(1.8), colF64.Data()[0]) assert.Equal(t, []float32{1.5, 1.6}, colF32S.Data()[0]) assert.Equal(t, []float64{1.8, 1.9}, colF64S.Data()[0]) c.Close() } func TestSelectProgressError(t *testing.T) { startValidReader := 33 tests := []struct { name string wantErr string numberValid int minRevision uint64 }{ { name: "read ReadRows", wantErr: "progress: read ReadRows (timeout)", numberValid: startValidReader, }, { name: "read ReadBytes", wantErr: "progress: read ReadBytes (timeout)", numberValid: startValidReader + 1, }, { name: "read TotalRows ", wantErr: "progress: read TotalRows (timeout)", numberValid: startValidReader + 2, }, { name: "read WriterRows", wantErr: "progress: read WriterRows (timeout)", numberValid: startValidReader + 3, }, { name: "read WrittenBytes", wantErr: "progress: read WrittenBytes (timeout)", numberValid: startValidReader + 4, }, { name: "read ElapsedNS", wantErr: "progress: read ElapsedNS (timeout)", numberValid: startValidReader + 5, minRevision: helper.DbmsMinProtocolWithServerQueryTimeInProgress, }, } for _, tt := range 
tests { t.Run(tt.name, func(t *testing.T) { config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: tt.numberValid, } } c, err := ConnectConfig(context.Background(), config) require.NoError(t, err) if c.ServerInfo().Revision < tt.minRevision { c.Close() return } colSleep := column.New[uint8]() colNumber := column.New[uint64]() res, err := c.SelectWithOption(context.Background(), "SELECT sleep(1), * FROM system.numbers LIMIT 1", &QueryOptions{ OnProgress: func(p *Progress) { }, }, colSleep, colNumber, ) require.NoError(t, err) require.NotNil(t, res) for res.Next() { } assert.EqualError(t, res.Err(), tt.wantErr) }) } } func TestGetFixedColumnType(t *testing.T) { tests := []struct { name string len int col column.ColumnBasic }{ { name: "fixed 1", len: 1, col: column.New[[1]byte](), }, { name: "fixed 2", len: 2, col: column.New[[2]byte](), }, { name: "fixed 3", len: 3, col: column.New[[3]byte](), }, { name: "fixed 4", len: 4, col: column.New[[4]byte](), }, { name: "fixed 5", len: 5, col: column.New[[5]byte](), }, { name: "fixed 6", len: 6, col: column.New[[6]byte](), }, { name: "fixed 7", len: 7, col: column.New[[7]byte](), }, { name: "fixed 8", len: 8, col: column.New[[8]byte](), }, { name: "fixed 9", len: 9, col: column.New[[9]byte](), }, { name: "fixed 10", len: 10, col: column.New[[10]byte](), }, { name: "fixed 11", len: 11, col: column.New[[11]byte](), }, { name: "fixed 12", len: 12, col: column.New[[12]byte](), }, { name: "fixed 13", len: 13, col: column.New[[13]byte](), }, { name: "fixed 14", len: 14, col: column.New[[14]byte](), }, { name: "fixed 15", len: 15, col: column.New[[15]byte](), }, { name: "fixed 16", len: 16, col: column.New[[16]byte](), }, { name: "fixed 17", len: 17, col: column.New[[17]byte](), }, { name: "fixed 18", len: 18, col: column.New[[18]byte](), }, { name: "fixed 19", len: 19, col: 
column.New[[19]byte](), }, { name: "fixed 20", len: 20, col: column.New[[20]byte](), }, { name: "fixed 21", len: 21, col: column.New[[21]byte](), }, { name: "fixed 22", len: 22, col: column.New[[22]byte](), }, { name: "fixed 23", len: 23, col: column.New[[23]byte](), }, { name: "fixed 24", len: 24, col: column.New[[24]byte](), }, { name: "fixed 25", len: 25, col: column.New[[25]byte](), }, { name: "fixed 26", len: 26, col: column.New[[26]byte](), }, { name: "fixed 27", len: 27, col: column.New[[27]byte](), }, { name: "fixed 28", len: 28, col: column.New[[28]byte](), }, { name: "fixed 29", len: 29, col: column.New[[29]byte](), }, { name: "fixed 30", len: 30, col: column.New[[30]byte](), }, { name: "fixed 31", len: 31, col: column.New[[31]byte](), }, { name: "fixed 32", len: 32, col: column.New[[32]byte](), }, { name: "fixed 33", len: 33, col: column.New[[33]byte](), }, { name: "fixed 34", len: 34, col: column.New[[34]byte](), }, { name: "fixed 35", len: 35, col: column.New[[35]byte](), }, { name: "fixed 36", len: 36, col: column.New[[36]byte](), }, { name: "fixed 37", len: 37, col: column.New[[37]byte](), }, { name: "fixed 38", len: 38, col: column.New[[38]byte](), }, { name: "fixed 39", len: 39, col: column.New[[39]byte](), }, { name: "fixed 40", len: 40, col: column.New[[40]byte](), }, { name: "fixed 41", len: 41, col: column.New[[41]byte](), }, { name: "fixed 42", len: 42, col: column.New[[42]byte](), }, { name: "fixed 43", len: 43, col: column.New[[43]byte](), }, { name: "fixed 44", len: 44, col: column.New[[44]byte](), }, { name: "fixed 45", len: 45, col: column.New[[45]byte](), }, { name: "fixed 46", len: 46, col: column.New[[46]byte](), }, { name: "fixed 47", len: 47, col: column.New[[47]byte](), }, { name: "fixed 48", len: 48, col: column.New[[48]byte](), }, { name: "fixed 49", len: 49, col: column.New[[49]byte](), }, { name: "fixed 50", len: 50, col: column.New[[50]byte](), }, { name: "fixed 51", len: 51, col: column.New[[51]byte](), }, { name: "fixed 52", 
len: 52, col: column.New[[52]byte](), }, { name: "fixed 53", len: 53, col: column.New[[53]byte](), }, { name: "fixed 54", len: 54, col: column.New[[54]byte](), }, { name: "fixed 55", len: 55, col: column.New[[55]byte](), }, { name: "fixed 56", len: 56, col: column.New[[56]byte](), }, { name: "fixed 57", len: 57, col: column.New[[57]byte](), }, { name: "fixed 58", len: 58, col: column.New[[58]byte](), }, { name: "fixed 59", len: 59, col: column.New[[59]byte](), }, { name: "fixed 60", len: 60, col: column.New[[60]byte](), }, { name: "fixed 61", len: 61, col: column.New[[61]byte](), }, { name: "fixed 62", len: 62, col: column.New[[62]byte](), }, { name: "fixed 63", len: 63, col: column.New[[63]byte](), }, { name: "fixed 64", len: 64, col: column.New[[64]byte](), }, { name: "fixed 65", len: 65, col: column.New[[65]byte](), }, { name: "fixed 66", len: 66, col: column.New[[66]byte](), }, { name: "fixed 67", len: 67, col: column.New[[67]byte](), }, { name: "fixed 68", len: 68, col: column.New[[68]byte](), }, { name: "fixed 69", len: 69, col: column.New[[69]byte](), }, { name: "fixed 70", len: 70, col: column.New[[70]byte](), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { f, err := getFixedType(tt.len, 0, false, false) require.NoError(t, err) assert.IsType(t, f, tt.col) }) } } ================================================ FILE: server_info.go ================================================ package chconn import ( "fmt" "github.com/vahid-sohrabloo/chconn/v2/internal/helper" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) // ServerInfo detail of server info type ServerInfo struct { Name string Revision uint64 MinorVersion uint64 MajorVersion uint64 ServerDisplayName string ServerVersionPatch uint64 Timezone string } func (srv *ServerInfo) read(r *readerwriter.Reader) (err error) { if srv.Name, err = r.String(); err != nil { return &readError{"ServerInfo: could not read server name", err} } if srv.MajorVersion, err = r.Uvarint(); err != 
nil { return &readError{"ServerInfo: could not read server major version", err} } if srv.MinorVersion, err = r.Uvarint(); err != nil { return &readError{"ServerInfo: could not read server minor version", err} } if srv.Revision, err = r.Uvarint(); err != nil { return &readError{"ServerInfo: could not read server revision", err} } if srv.Revision >= helper.DbmsMinRevisionWithServerTimezone { if srv.Timezone, err = r.String(); err != nil { return &readError{"ServerInfo: could not read server timezone", err} } } if srv.Revision >= helper.DbmsMinRevisionWithServerDisplayName { if srv.ServerDisplayName, err = r.String(); err != nil { return &readError{"ServerInfo: could not read server display name", err} } } if srv.Revision >= helper.DbmsMinRevisionWithVersionPatch { if srv.ServerVersionPatch, err = r.Uvarint(); err != nil { return &readError{"ServerInfo: could not read server version patch", err} } } return nil } func (srv *ServerInfo) String() string { return fmt.Sprintf("%s %d.%d.%d (%s) %s %d", srv.Name, srv.MajorVersion, srv.MinorVersion, srv.Revision, srv.Timezone, srv.ServerDisplayName, srv.ServerVersionPatch) } // ServerInfo get server info func (ch *conn) ServerInfo() *ServerInfo { return ch.serverInfo } ================================================ FILE: server_info_test.go ================================================ package chconn import ( "context" "errors" "io" "os" "testing" "github.com/stretchr/testify/require" ) func TestServerInfoError(t *testing.T) { startValidReader := 1 tests := []struct { name string wantErr string numberValid int }{ { name: "server name", wantErr: "ServerInfo: could not read server name", numberValid: startValidReader, }, { name: "server major version", wantErr: "ServerInfo: could not read server major version", numberValid: startValidReader + 2, }, { name: "server minor version", wantErr: "ServerInfo: could not read server minor version", numberValid: startValidReader + 3, }, { name: "server revision", wantErr: 
"ServerInfo: could not read server revision", numberValid: startValidReader + 4, }, { name: "server timezone", wantErr: "ServerInfo: could not read server timezone", numberValid: startValidReader + 7, }, { name: "server display name", wantErr: "ServerInfo: could not read server display name", numberValid: startValidReader + 9, }, { name: "server version patch", wantErr: "ServerInfo: could not read server version patch", numberValid: startValidReader + 11, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { config, err := ParseConfig(os.Getenv("CHX_TEST_TCP_CONN_STRING")) require.NoError(t, err) config.ReaderFunc = func(r io.Reader) io.Reader { return &readErrorHelper{ err: errors.New("timeout"), r: r, numberValid: tt.numberValid, } } _, err = ConnectConfig(context.Background(), config) require.Error(t, err) readErr, ok := err.(*readError) require.True(t, ok) require.Equal(t, readErr.msg, tt.wantErr) require.EqualError(t, readErr.Unwrap(), "timeout") }) } } ================================================ FILE: settings.go ================================================ package chconn import ( "strconv" "strings" "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter" ) // Setting is a setting for the clickhouse query. // // The list of setting is here: https://clickhouse.com/docs/en/operations/settings/settings/ // Some of settings doesn't have effect. for example `http_zlib_compression_level` // because chconn use TCP connection to send data not HTTP. type Setting struct { Name, Value string Important, Custom, Obsolete bool } const ( settingFlagImportant = 0x01 settingFlagCustom = 0x02 settingFlagObsolete = 0x04 ) // Settings is a list of settings for the clickhouse query. 
type Settings []Setting func (st Setting) write(w *readerwriter.Writer) { w.String(st.Name) var flag uint8 if st.Important { flag |= settingFlagImportant } if st.Custom { flag |= settingFlagCustom } if st.Obsolete { flag |= settingFlagObsolete } w.Uint8(flag) w.String(st.Value) } func (s Settings) write(w *readerwriter.Writer) { for _, st := range s { st.write(w) } } // Parameters is a list of params for the clickhouse query. type Parameters struct { params []Setting } type Parameter func() Setting func NewParameters(input ...Parameter) *Parameters { params := make([]Setting, len(input)) for i, p := range input { params[i] = p() } return &Parameters{ params: params, } } // IntParameter get int query parameter. func IntParameter[T ~int | ~int8 | ~int16 | ~int32 | ~int64](name string, v T) Parameter { return func() Setting { return Setting{ Name: name, Value: "'" + strconv.FormatInt(int64(v), 10) + "'", Custom: true, } } } // IntSliceParameter get int query parameter. func IntSliceParameter[T ~int | ~int8 | ~int16 | ~int32 | ~int64](name string, v []T) Parameter { return func() Setting { var b strings.Builder b.WriteString("[") for i, v := range v { if i > 0 { b.WriteString(",") } b.WriteString(strconv.FormatInt(int64(v), 10)) } b.WriteString("]") return Setting{ Name: name, Value: "'" + b.String() + "'", Custom: true, } } } // UintParameter get uint query parameter. func UintParameter[T ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64](name string, v T) Parameter { return func() Setting { return Setting{ Name: name, Value: "'" + strconv.FormatUint(uint64(v), 10) + "'", Custom: true, } } } // UintSliceParameter get uint slice query parameter. 
func UintSliceParameter[T ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64](name string, v []T) Parameter { return func() Setting { var b strings.Builder b.WriteString("[") for i, v := range v { if i > 0 { b.WriteString(",") } b.WriteString(strconv.FormatUint(uint64(v), 10)) } b.WriteString("]") return Setting{ Name: name, Value: "'" + b.String() + "'", Custom: true, } } } // Float32Parameter get float32 query parameter. func Float32Parameter[T ~float32](name string, v T) Parameter { return func() Setting { return Setting{ Name: name, Value: "'" + strconv.FormatFloat(float64(v), 'f', -1, 32) + "'", Custom: true, } } } // Float32SliceParameter get float32 slice query parameter. func Float32SliceParameter[T ~float32](name string, v []T) Parameter { return func() Setting { var b strings.Builder b.WriteString("[") for i, v := range v { if i > 0 { b.WriteString(",") } b.WriteString(strconv.FormatFloat(float64(v), 'f', -1, 32)) } b.WriteString("]") return Setting{ Name: name, Value: "'" + b.String() + "'", Custom: true, } } } // Float64Parameter get float64 query parameter. func Float64Parameter[T ~float64](name string, v T) Parameter { return func() Setting { return Setting{ Name: name, Value: "'" + strconv.FormatFloat(float64(v), 'f', -1, 64) + "'", Custom: true, } } } // Float64SliceParameter get float64 slice query parameter. func Float64SliceParameter[T ~float64](name string, v []T) Parameter { return func() Setting { var b strings.Builder b.WriteString("[") for i, v := range v { if i > 0 { b.WriteString(",") } b.WriteString(strconv.FormatFloat(float64(v), 'f', -1, 64)) } b.WriteString("]") return Setting{ Name: name, Value: "'" + b.String() + "'", Custom: true, } } } func addSlashes(str string) string { var tmpRune []rune for _, ch := range str { switch ch { case '\\', '\'': tmpRune = append(tmpRune, '\\', ch) default: tmpRune = append(tmpRune, ch) } } return string(tmpRune) } // StringParameter get string query parameter. 
func StringParameter(name, v string) Parameter { return func() Setting { return Setting{ Name: name, Value: "'" + addSlashes(v) + "'", Custom: true, } } } // StringSliceParameter get string array query parameter. func StringSliceParameter(name string, v []string) Parameter { return func() Setting { var b strings.Builder b.WriteString("[") for i, v := range v { if i > 0 { b.WriteString(",") } b.WriteString("'" + addSlashes(v) + "'") } b.WriteString("]") return Setting{ Name: name, Value: "'" + addSlashes(b.String()) + "'", Custom: true, } } } func (p *Parameters) Params() []Setting { return p.params } func (p *Parameters) hasParam() bool { return p != nil && len(p.params) > 0 } func (p *Parameters) write(w *readerwriter.Writer) { if p == nil { return } for _, st := range p.params { st.write(w) } } ================================================ FILE: sqlbuilder/injection.go ================================================ // sqlbuilder is a builder for SQL statements for clickhouse. // copy from https://github.com/huandu/go-sqlbuilder // change for chconn package sqlbuilder import ( "bytes" "strings" ) // injection is a helper type to manage injected SQLs in all builders. type injection struct { markerSQLs map[injectionMarker][]string } type injectionMarker int // newInjection creates a new injection. func newInjection() *injection { return &injection{ markerSQLs: map[injectionMarker][]string{}, } } // SQL adds sql to injection's sql list. // All sqls inside injection is ordered by marker in ascending order. func (injection *injection) SQL(marker injectionMarker, sql string) { injection.markerSQLs[marker] = append(injection.markerSQLs[marker], sql) } // WriteTo joins all SQL strings at the same marker value with blank (" ") // and writes the joined value to buf. 
func (injection *injection) WriteTo(buf *bytes.Buffer, marker injectionMarker) { sqls := injection.markerSQLs[marker] empty := buf.Len() == 0 if len(sqls) == 0 { return } if !empty { buf.WriteByte(' ') } s := strings.Join(sqls, " ") buf.WriteString(s) if empty { buf.WriteByte(' ') } } ================================================ FILE: sqlbuilder/select.go ================================================ // sqlbuilder is a builder for SQL statements for clickhouse. // copy from https://github.com/huandu/go-sqlbuilder // change for chconn package sqlbuilder import ( "bytes" "fmt" "strconv" "strings" "github.com/vahid-sohrabloo/chconn/v2" ) const ( selectMarkerInit injectionMarker = iota selectMarkerAfterSelect selectMarkerAfterFrom selectMarkerAfterArrayJoin selectMarkerAfterJoin selectMarkerAfterPreWhere selectMarkerAfterWhere selectMarkerAfterGroupBy selectMarkerAfterOrderBy selectMarkerAfterLimit selectMarkerAfterFor ) // JoinOption is the option in JOIN. type JoinOption string // Join options. const ( InnerJoin JoinOption = "INNER" LeftJoin JoinOption = "LEFT" LeftOuterJoin JoinOption = "LEFT OUTER" LeftSemiJoin JoinOption = "LEFT SEMI" LeftAntiJoin JoinOption = "LEFT ANTI" RightJoin JoinOption = "RIGHT" RightOuterJoin JoinOption = "RIGHT OUTER" RightSemiJoin JoinOption = "RIGHT SEMI" RightAntiJoin JoinOption = "RIGHT ANTI" FullJoin JoinOption = "FULL" FullOuterJoin JoinOption = "FULL OUTER" CrossJoin JoinOption = "CROSS" ) func NewSelectBuilder() *SelectBuilder { return &SelectBuilder{ limit: -1, offset: -1, injection: newInjection(), } } // SelectBuilder is a builder to build SELECT. 
type SelectBuilder struct { parameters []chconn.Parameter distinct bool final bool tables []string selectCols []string leftArrayJoin bool arrayJoin []string joinOptions []JoinOption joinTables []string joinExprs [][]string whereExprs []string preWhereExprs []string havingExprs []string groupByCols []string orderByCols []string limit int offset int injection *injection marker injectionMarker } // var _ Builder = new(SelectBuilder) // Select sets columns in SELECT. func Select(col ...string) *SelectBuilder { return NewSelectBuilder().Select(col...) } // Select sets columns in SELECT. func (sb *SelectBuilder) Select(col ...string) *SelectBuilder { sb.selectCols = col sb.marker = selectMarkerAfterSelect return sb } // Select add columns in SELECT. func (sb *SelectBuilder) Column(col ...string) *SelectBuilder { sb.selectCols = append(sb.selectCols, col...) sb.marker = selectMarkerAfterSelect return sb } // Distinct marks this SELECT as DISTINCT. func (sb *SelectBuilder) Distinct() *SelectBuilder { sb.distinct = true sb.marker = selectMarkerAfterSelect return sb } // Final marks this SELECT as FINAL. func (sb *SelectBuilder) Final() *SelectBuilder { sb.final = true sb.marker = selectMarkerAfterSelect return sb } // From sets table names in SELECT. func (sb *SelectBuilder) From(table ...string) *SelectBuilder { sb.tables = table sb.marker = selectMarkerAfterFrom return sb } // arrayJoin sets expressions of Array Join in SELECT. // // It builds a ARRAY JOIN expression like // // Array JOIN onExpr[0], onExpr[1] ... func (sb *SelectBuilder) ArrayJoin(onExpr ...string) *SelectBuilder { sb.marker = selectMarkerAfterArrayJoin sb.arrayJoin = append(sb.arrayJoin, onExpr...) return sb } // LeftArrayJoin marks this SELECT as LEFT ARRAY JOIN. func (sb *SelectBuilder) LeftArrayJoin() *SelectBuilder { sb.leftArrayJoin = true return sb } // Join sets expressions of JOIN in SELECT. // // It builds a JOIN expression like // // JOIN table ON onExpr[0] AND onExpr[1] ... 
func (sb *SelectBuilder) Join(table string, onExpr ...string) *SelectBuilder { sb.marker = selectMarkerAfterJoin return sb.JoinWithOption("", table, onExpr...) } // JoinWithOption sets expressions of JOIN with an option. // // It builds a JOIN expression like // // option JOIN table ON onExpr[0] AND onExpr[1] ... // // Here is a list of supported options. // - FullJoin: FULL JOIN // - FullOuterJoin: FULL OUTER JOIN // - InnerJoin: INNER JOIN // - LeftJoin: LEFT JOIN // - LeftOuterJoin: LEFT OUTER JOIN // - RightJoin: RIGHT JOIN // - RightOuterJoin: RIGHT OUTER JOIN func (sb *SelectBuilder) JoinWithOption(option JoinOption, table string, onExpr ...string) *SelectBuilder { sb.joinOptions = append(sb.joinOptions, option) sb.joinTables = append(sb.joinTables, table) sb.joinExprs = append(sb.joinExprs, onExpr) sb.marker = selectMarkerAfterJoin return sb } // Where sets expressions of WHERE in SELECT. func (sb *SelectBuilder) Where(andExpr ...string) *SelectBuilder { sb.whereExprs = append(sb.whereExprs, andExpr...) sb.marker = selectMarkerAfterWhere return sb } // PreWhere sets expressions of PREWHERE in SELECT. func (sb *SelectBuilder) PreWhere(andExpr ...string) *SelectBuilder { sb.marker = selectMarkerAfterPreWhere sb.preWhereExprs = append(sb.preWhereExprs, andExpr...) return sb } func (sb *SelectBuilder) Parameters(p chconn.Parameter) *SelectBuilder { sb.parameters = append(sb.parameters, p) return sb } // Having sets expressions of HAVING in SELECT. func (sb *SelectBuilder) Having(andExpr ...string) *SelectBuilder { sb.havingExprs = append(sb.havingExprs, andExpr...) sb.marker = selectMarkerAfterGroupBy return sb } // GroupBy sets columns of GROUP BY in SELECT. func (sb *SelectBuilder) GroupBy(col ...string) *SelectBuilder { sb.groupByCols = append(sb.groupByCols, col...) sb.marker = selectMarkerAfterGroupBy return sb } // OrderBy sets columns of ORDER BY in SELECT. 
func (sb *SelectBuilder) OrderBy(col ...string) *SelectBuilder { sb.orderByCols = append(sb.orderByCols, col...) sb.marker = selectMarkerAfterOrderBy return sb } // Limit sets the LIMIT in SELECT. func (sb *SelectBuilder) Limit(limit int) *SelectBuilder { sb.limit = limit sb.marker = selectMarkerAfterLimit return sb } // Offset sets the LIMIT offset in SELECT. func (sb *SelectBuilder) Offset(offset int) *SelectBuilder { sb.offset = offset sb.marker = selectMarkerAfterLimit return sb } // As returns an AS expression. func As(name, alias string) string { return fmt.Sprintf("%s AS %s", name, alias) } // String returns the compiled SELECT string. func (sb *SelectBuilder) String() string { s, _ := sb.Build() return s } // Build returns compiled SELECT string and args. // They can be used in `Select` directly. func (sb *SelectBuilder) Build() (sql string, params *chconn.Parameters) { buf := &bytes.Buffer{} sb.injection.WriteTo(buf, selectMarkerInit) buf.WriteString("SELECT ") if sb.distinct { buf.WriteString("DISTINCT ") } buf.WriteString(strings.Join(sb.selectCols, ", ")) sb.injection.WriteTo(buf, selectMarkerAfterSelect) buf.WriteString(" FROM ") buf.WriteString(strings.Join(sb.tables, ", ")) sb.injection.WriteTo(buf, selectMarkerAfterFrom) if sb.final { buf.WriteString(" FINAL") } if len(sb.arrayJoin) > 0 { if sb.leftArrayJoin { buf.WriteString(" LEFT") } buf.WriteString(" ARRAY JOIN ") buf.WriteString(strings.Join(sb.arrayJoin, " , ")) sb.injection.WriteTo(buf, selectMarkerAfterArrayJoin) } for i := range sb.joinTables { if option := sb.joinOptions[i]; option != "" { buf.WriteByte(' ') buf.WriteString(string(option)) } buf.WriteString(" JOIN ") buf.WriteString(sb.joinTables[i]) if exprs := sb.joinExprs[i]; len(exprs) > 0 { buf.WriteString(" ON ") buf.WriteString(strings.Join(sb.joinExprs[i], " AND ")) } } if len(sb.joinTables) > 0 { sb.injection.WriteTo(buf, selectMarkerAfterJoin) } if len(sb.preWhereExprs) > 0 { buf.WriteString(" PREWHERE ") 
buf.WriteString(strings.Join(sb.preWhereExprs, " AND ")) sb.injection.WriteTo(buf, selectMarkerAfterPreWhere) } if len(sb.whereExprs) > 0 { buf.WriteString(" WHERE ") buf.WriteString(strings.Join(sb.whereExprs, " AND ")) sb.injection.WriteTo(buf, selectMarkerAfterWhere) } if len(sb.groupByCols) > 0 { buf.WriteString(" GROUP BY ") buf.WriteString(strings.Join(sb.groupByCols, ", ")) if len(sb.havingExprs) > 0 { buf.WriteString(" HAVING ") buf.WriteString(strings.Join(sb.havingExprs, " AND ")) } sb.injection.WriteTo(buf, selectMarkerAfterGroupBy) } if len(sb.orderByCols) > 0 { buf.WriteString(" ORDER BY ") buf.WriteString(strings.Join(sb.orderByCols, ", ")) sb.injection.WriteTo(buf, selectMarkerAfterOrderBy) } if sb.limit >= 0 { buf.WriteString(" LIMIT ") buf.WriteString(strconv.Itoa(sb.limit)) } if sb.offset >= 0 { buf.WriteString(" OFFSET ") buf.WriteString(strconv.Itoa(sb.offset)) } if sb.limit >= 0 { sb.injection.WriteTo(buf, selectMarkerAfterLimit) } return buf.String(), chconn.NewParameters(sb.parameters...) } // SQL adds an arbitrary sql to current position. 
func (sb *SelectBuilder) SQL(sql string) *SelectBuilder { sb.injection.SQL(sb.marker, sql) return sb } ================================================ FILE: sqlbuilder/select_test.go ================================================ package sqlbuilder import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vahid-sohrabloo/chconn/v2" ) func TestSelectBuilder(t *testing.T) { sb := Select("id", "name", As("COUNT(*)", "t")).Distinct() sb.Column("age", "birthday") sb.From("user").Final() sb.SQL("/* before */") sb.ArrayJoin("roles").LeftArrayJoin() sb.SQL("/* after */") sb.PreWhere("id > 0") sb.Where( "id > {id: Int32}", "name LIKE {name: String}", ) sb.Parameters(chconn.IntParameter("id", 1)) sb.Parameters(chconn.StringParameter("name", "vahid")) sb.Join("contract c", "u.id = c.user_id", "c.status = {status: Array(Int64)}", ) sb.Parameters(chconn.IntSliceParameter("status", []int64{1, 2, 3})) sb.JoinWithOption(RightOuterJoin, "person p", "u.id = p.user_id", "p.surname = {surname: String}", ) sb.Parameters(chconn.StringParameter("surname", "sohrabloo")) sb.GroupBy("status").Having("status > 0") sb.OrderBy("modified_at ASC", "created_at DESC") sb.Limit(10).Offset(5) s, args := sb.Build() assert.Equal(t, "SELECT DISTINCT id, name, COUNT(*) AS t, age, birthday /* before */ FROM user FINAL "+ "LEFT ARRAY JOIN roles /* after */ "+ "JOIN contract c ON u.id = c.user_id AND c.status = {status: Array(Int64)} "+ "RIGHT OUTER JOIN person p ON u.id = p.user_id AND p.surname = {surname: String} "+ "PREWHERE id > 0 "+ "WHERE id > {id: Int32} AND name LIKE {name: String} "+ "GROUP BY status HAVING status > 0 "+ "ORDER BY modified_at ASC, created_at DESC "+ "LIMIT 10 OFFSET 5", s, ) require.Len(t, args.Params(), 4) assert.Equal(t, "id", args.Params()[0].Name) assert.Equal(t, "'1'", args.Params()[0].Value) assert.Equal(t, "name", args.Params()[1].Name) assert.Equal(t, "'vahid'", args.Params()[1].Value) assert.Equal(t, "status", 
args.Params()[2].Name) assert.Equal(t, "'[1,2,3]'", args.Params()[2].Value) assert.Equal(t, "surname", args.Params()[3].Name) assert.Equal(t, "'sohrabloo'", args.Params()[3].Value) } ================================================ FILE: types/Int256.go ================================================ package types import ( "math/big" ) // Note, Zero and Max are functions just to make read-only values. // We cannot define constants for structures, and global variables // are unacceptable because it will be possible to change them. // Zero is the lowest possible Int256 value. func Int256Zero() Int256 { return Int256From64(0) } // Max is the largest possible Int256 value. func Int256Max() Int256 { return Int256{ Lo: Uint128Max(), Hi: Int128Max(), } } // Int256 is an unsigned 256-bit number. // All methods are immutable, works just like standard uint64. type Int256 struct { Lo Uint128 // lower 128-bit half Hi Int128 // upper 128-bit half } // From128 converts 128-bit value v to a Int256 value. // Upper 128-bit half will be zero. func Int256From128(v Int128) Int256 { var hi Int128 if v.Hi < 0 { hi = Int128{Lo: 0, Hi: -1} v = v.Neg() } return Int256{Lo: Uint128{ Lo: v.Lo, Hi: uint64(v.Hi), }, Hi: hi} } // From64 converts 64-bit value v to a Int256 value. // Upper 128-bit half will be zero. func Int256From64(v int64) Int256 { return Int256From128(Int128From64(v)) } // FromBig converts *big.Int to 256-bit Int256 value ignoring overflows. // If input integer is nil or negative then return Zero. // If input integer overflows 256-bit then return Max. func Int256FromBig(i *big.Int) Int256 { u, _ := Int256FromBigEx(i) return u } // FromBigEx converts *big.Int to 256-bit Int256 value (eXtended version). // Provides ok successful flag as a second return value. // If input integer is negative or overflows 256-bit then ok=false. // If input is nil then zero 256-bit returned. 
func Int256FromBigEx(i *big.Int) (Int256, bool) { switch { case i == nil: return Int256Zero(), true // assuming nil === 0 case i.BitLen() > 256: return Int256Max(), false // value overflows 256-bit! } neg := false if i.Sign() == -1 { i = new(big.Int).Neg(i) neg = true } t := new(big.Int) lolo := i.Uint64() lohi := t.Rsh(i, 64).Uint64() hilo := t.Rsh(i, 128).Uint64() hihi := int64(t.Rsh(i, 192).Uint64()) val := Int256{ Lo: Uint128{Lo: lolo, Hi: lohi}, Hi: Int128{Lo: hilo, Hi: hihi}, } if neg { val = val.Neg() } return val, true } // Big returns 256-bit value as a *big.Int. // //nolint:dupl func (u Int256) Big() *big.Int { t := new(big.Int) i := new(big.Int).SetInt64(u.Hi.Hi) i = i.Lsh(i, 64) i = i.Or(i, t.SetUint64(u.Hi.Lo)) i = i.Lsh(i, 64) i = i.Or(i, t.SetUint64(u.Lo.Hi)) i = i.Lsh(i, 64) i = i.Or(i, t.SetUint64(u.Lo.Lo)) return i } // Equals returns true if two 256-bit values are equal. // Int256 values can be compared directly with == operator // but use of the Equals method is preferred for consistency. func (u Int256) Equals(v Int256) bool { return u.Lo.Equals(v.Lo) && u.Hi.Equals(v.Hi) } // Neg returns the additive inverse of an Int256 func (u Int256) Neg() (z Int256) { z.Hi = u.Hi.Neg() z.Lo.Lo = -u.Lo.Lo z.Lo.Hi = -u.Lo.Hi // TODO, I'm not sure here. 
if z.Lo.Hi > 0 || z.Lo.Lo > 0 { z.Hi.Lo-- } return z } ================================================ FILE: types/date_type.go ================================================ package types import ( "time" ) type Date uint16 const minDate32 = int32(-25567) // 1900-01-01 00:00:00 +0000 UTC type Date32 int32 type DateTime uint32 const minDateTime64 = int64(-2208988800) // 1900-01-01 00:00:00 +0000 UTC type DateTime64 int64 const daySeconds = 24 * 60 * 60 func TimeToDate(t time.Time) Date { if t.Unix() <= 0 { return 0 } _, offset := t.Zone() return Date((t.Unix() + int64(offset)) / daySeconds) } func (d Date) FromTime(v time.Time, precision int) Date { return TimeToDate(v) } func (d Date) ToTime(loc *time.Location, precision int) time.Time { return time.Unix(d.Unix(), 0).UTC() } func (d Date) Unix() int64 { return daySeconds * int64(d) } func (d Date32) Unix() int64 { return daySeconds * int64(d) } func (d Date32) FromTime(v time.Time, precision int) Date32 { return TimeToDate32(v) } func (d Date32) ToTime(loc *time.Location, precision int) time.Time { return time.Unix(d.Unix(), 0).UTC() } func TimeToDate32(t time.Time) Date32 { _, offset := t.Zone() d := int32((t.Unix() + int64(offset)) / daySeconds) if d <= minDate32 { return Date32(minDate32) } return Date32(d) } func TimeToDateTime(t time.Time) DateTime { if t.Unix() <= 0 { return 0 } return DateTime(t.Unix()) } func (d DateTime) FromTime(v time.Time, precision int) DateTime { return TimeToDateTime(v) } func (d DateTime) ToTime(loc *time.Location, precision int) time.Time { return time.Unix(int64(d), 0).In(loc) } var precisionFactor = [...]int64{ 1000000000, 100000000, 10000000, 1000000, 100000, 10000, 1000, 100, 10, 1, } func TimeToDateTime64(t time.Time, precision int) DateTime64 { if t.Unix() <= minDateTime64 { return DateTime64(minDateTime64) } return DateTime64(t.UnixNano() / precisionFactor[precision]) } func (d DateTime64) FromTime(v time.Time, precision int) DateTime64 { return TimeToDateTime64(v, 
precision) } func (d DateTime64) ToTime(loc *time.Location, precision int) time.Time { if d == 0 { return time.Time{} } nsec := int64(d) * precisionFactor[precision] return time.Unix(nsec/1e9, nsec%1e9).In(loc) } ================================================ FILE: types/decimal.go ================================================ package types // Decimal32 represents a 32-bit decimal number. type Decimal32 int32 // Decimal64 represents a 64-bit decimal number. type Decimal64 int64 // Decimal128 represents a 128-bit decimal number. type Decimal128 Int128 // Decimal256 represents a 256-bit decimal number. type Decimal256 Int256 // Table of powers of 10 for fast casting from floating types to decimal type // representations. var factors10 = []float64{ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, } // Float64 converts decimal number to float64. func (d Decimal32) Float64(scale int) float64 { return float64(d) / factors10[scale] } // Float64 converts decimal number to float64. func (d Decimal64) Float64(scale int) float64 { return float64(d) / factors10[scale] } // Decimal32FromFloat64 converts float64 to decimal32 number. func Decimal32FromFloat64(f float64, scale int) Decimal32 { return Decimal32(f * factors10[scale]) } // Decimal64FromFloat64 converts float64 to decimal64 number. 
func Decimal64FromFloat64(f float64, scale int) Decimal64 { return Decimal64(f * factors10[scale]) } ================================================ FILE: types/decimal_test.go ================================================ package types import ( "testing" "github.com/stretchr/testify/assert" ) func TestDecimal(t *testing.T) { d32 := Decimal32(12_234) assert.Equal(t, d32.Float64(3), float64(12.234)) d64 := Decimal64(12_234) assert.Equal(t, d64.Float64(3), float64(12.234)) assert.Equal(t, Decimal32FromFloat64(12.2334, 3), Decimal32(12233)) assert.Equal(t, Decimal64FromFloat64(12.2334, 3), Decimal64(12233)) } ================================================ FILE: types/int128.go ================================================ package types import ( "math" "math/big" ) // Note, Zero and Max are functions just to make read-only values. // We cannot define constants for structures, and global variables // are unacceptable because it will be possible to change them. // Zero is the lowest possible Int128 value. func Int128Zero() Int128 { return Int128From64(0) } // Max is the largest possible Int128 value. func Int128Max() Int128 { return Int128{ Lo: math.MaxUint64, Hi: math.MaxInt64, } } // Int128 is an unsigned 128-bit number. // All methods are immutable, works just like standard uint64. type Int128 struct { Lo uint64 // lower 64-bit half Hi int64 // upper 64-bit half } // Note, there in no New(lo, hi) just not to confuse // which half goes first: lower or upper. // Use structure initialization Int128{Lo: ..., Hi: ...} instead. // From64 converts 64-bit value v to a Int128 value. // Upper 64-bit half will be zero. func Int128From64(v int64) Int128 { var hi int64 if v < 0 { hi = -1 } return Int128{Lo: uint64(v), Hi: hi} } // FromBig converts *big.Int to 128-bit Int128 value ignoring overflows. // If input integer is nil or negative then return Zero. // If input interger overflows 128-bit then return Max. 
func Int128FromBig(i *big.Int) Int128 { u, _ := Int128FromBigEx(i) return u } // FromBigEx converts *big.Int to 128-bit Int128 value (eXtended version). // Provides ok successful flag as a second return value. // If input integer is negative or overflows 128-bit then ok=false. // If input is nil then zero 128-bit returned. func Int128FromBigEx(i *big.Int) (Int128, bool) { switch { case i == nil: return Int128Zero(), true // assuming nil === 0 case i.BitLen() > 128: return Int128Max(), false // value overflows 128-bit! } neg := false if i.Sign() == -1 { i = new(big.Int).Neg(i) neg = true } // Note, actually result of big.Int.Uint64 is undefined // if stored value is greater than 2^64 // but we assume that it just gets lower 64 bits. t := new(big.Int) lo := i.Uint64() hi := int64(t.Rsh(i, 64).Uint64()) val := Int128{ Lo: lo, Hi: hi, } if neg { return val.Neg(), true } return val, true } // Big returns 128-bit value as a *big.Int. func (u Int128) Big() *big.Int { i := new(big.Int).SetInt64(u.Hi) i = i.Lsh(i, 64) i = i.Or(i, new(big.Int).SetUint64(u.Lo)) return i } // Equals returns true if two 128-bit values are equal. // Int128 values can be compared directly with == operator // but use of the Equals method is preferred for consistency. func (u Int128) Equals(v Int128) bool { return (u.Lo == v.Lo) && (u.Hi == v.Hi) } // Neg returns the additive inverse of an Int128 func (u Int128) Neg() (z Int128) { z.Hi = -u.Hi z.Lo = -u.Lo if z.Lo > 0 { z.Hi-- } return z } ================================================ FILE: types/int128_test.go ================================================ package types import ( "math/big" "testing" "github.com/stretchr/testify/assert" ) // TestUint128 unit tests for various Int128 helpers. 
func TestInt128(t *testing.T) { t.Run("FromBig", func(t *testing.T) { if got := Int128FromBig(nil); !got.Equals(Int128Zero()) { t.Fatalf("Int128FromBig(nil) does not equal to 0, got %#x", got) } if got := Int128FromBig(new(big.Int).Lsh(big.NewInt(1), 129)); !got.Equals(Int128Max()) { t.Fatalf("Int128FromBig(2^129) does not equal to Max(), got %#x", got) } }) t.Run("ToBig", func(t *testing.T) { i := new(big.Int).SetInt64(-124) assert.Equal(t, Int128FromBig(i).Big().String(), "-124") int128From64 := Int128From64(-124) assert.Equal(t, int128From64.Big().String(), "-124") }) } ================================================ FILE: types/int256_test.go ================================================ package types import ( "math/big" "testing" "github.com/stretchr/testify/assert" ) // TestUint256 unit tests for various Int256 helpers. func TestInt256(t *testing.T) { t.Run("FromBig", func(t *testing.T) { if got := Int256FromBig(nil); !got.Equals(Int256Zero()) { t.Fatalf("FromBig(nil) does not equal to 0, got %#x", got) } if got := Int256FromBig(new(big.Int).Lsh(big.NewInt(1), 257)); !got.Equals(Int256Max()) { t.Fatalf("FromBig(2^129) does not equal to Max(), got %#x", got) } }) t.Run("ToBig", func(t *testing.T) { i := new(big.Int).SetInt64(124) assert.Equal(t, Int256FromBig(i).Big().String(), "124") int256From64 := Int256From64(124) assert.Equal(t, int256From64.Big().String(), "124") }) } ================================================ FILE: types/ip_test.go ================================================ package types import ( "net/netip" "testing" "github.com/stretchr/testify/assert" ) func TestIP(t *testing.T) { ipv4 := IPv4FromAddr(netip.AddrFrom4([4]byte{1, 2, 3, 4})) assert.Equal(t, ipv4.NetIP().As4(), [4]byte{1, 2, 3, 4}) ipv6 := IPv6FromAddr(netip.AddrFrom16([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) assert.Equal(t, ipv6.NetIP().As16(), [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) } 
================================================ FILE: types/ipv4.go ================================================ package types import "net/netip" // IPv4 is a compatible type for IPv4 address in clickhouse. // // clickhouse use Little endian for IPv4. but golang use big endian type IPv4 [4]byte func (ip IPv4) NetIP() netip.Addr { return netip.AddrFrom4([4]byte{ip[3], ip[2], ip[1], ip[0]}) } func IPv4FromAddr(ipAddr netip.Addr) IPv4 { ip := ipAddr.As4() return IPv4{ip[3], ip[2], ip[1], ip[0]} } ================================================ FILE: types/ipv6.go ================================================ package types import "net/netip" type IPv6 [16]byte func (ip IPv6) NetIP() netip.Addr { return netip.AddrFrom16(ip) } func IPv6FromAddr(ipAddr netip.Addr) IPv6 { return IPv6(ipAddr.As16()) } ================================================ FILE: types/tuple.go ================================================ package types type Point Tuple2[float64, float64] type Tuple2[T1, T2 any] struct { Col1 T1 Col2 T2 } type Tuple3[T1, T2, T3 any] struct { Col1 T1 Col2 T2 Col3 T3 } type Tuple4[T1, T2, T3, T4 any] struct { Col1 T1 Col2 T2 Col3 T3 Col4 T4 } type Tuple5[T1, T2, T3, T4, T5 any] struct { Col1 T1 Col2 T2 Col3 T3 Col4 T4 Col5 T5 } ================================================ FILE: types/uint128.go ================================================ package types import ( "math" "math/big" ) // Note, Zero and Max are functions just to make read-only values. // We cannot define constants for structures, and global variables // are unacceptable because it will be possible to change them. // Zero is the lowest possible Uint128 value. func Uint128Zero() Uint128 { return Uint128From64(0) } // Max is the largest possible Uint128 value. func Uint128Max() Uint128 { return Uint128{ Lo: math.MaxUint64, Hi: math.MaxUint64, } } // Uint128 is an unsigned 128-bit number. // All methods are immutable, works just like standard uint64. 
type Uint128 struct { Lo uint64 // lower 64-bit half Hi uint64 // upper 64-bit half } // Note, there in no New(lo, hi) just not to confuse // which half goes first: lower or upper. // Use structure initialization Uint128{Lo: ..., Hi: ...} instead. // From64 converts 64-bit value v to a Uint128 value. // Upper 64-bit half will be zero. func Uint128From64(v uint64) Uint128 { return Uint128{Lo: v} } // FromBig converts *big.Int to 128-bit Uint128 value ignoring overflows. // If input integer is nil or negative then return Zero. // If input interger overflows 128-bit then return Max. func Uint128FromBig(i *big.Int) Uint128 { u, _ := Uint128FromBigEx(i) return u } // FromBigEx converts *big.Int to 128-bit Uint128 value (eXtended version). // Provides ok successful flag as a second return value. // If input integer is negative or overflows 128-bit then ok=false. // If input is nil then zero 128-bit returned. func Uint128FromBigEx(i *big.Int) (Uint128, bool) { switch { case i == nil: return Uint128Zero(), true // assuming nil === 0 case i.Sign() < 0: return Uint128Zero(), false // value cannot be negative! case i.BitLen() > 128: return Uint128Max(), false // value overflows 128-bit! } // Note, actually result of big.Int.Uint64 is undefined // if stored value is greater than 2^64 // but we assume that it just gets lower 64 bits. t := new(big.Int) lo := i.Uint64() hi := t.Rsh(i, 64).Uint64() return Uint128{ Lo: lo, Hi: hi, }, true } // Big returns 128-bit value as a *big.Int. func (u Uint128) Big() *big.Int { i := new(big.Int).SetUint64(u.Hi) i = i.Lsh(i, 64) i = i.Or(i, new(big.Int).SetUint64(u.Lo)) return i } // Equals returns true if two 128-bit values are equal. // Uint128 values can be compared directly with == operator // but use of the Equals method is preferred for consistency. 
func (u Uint128) Equals(v Uint128) bool { return (u.Lo == v.Lo) && (u.Hi == v.Hi) } ================================================ FILE: types/uint128_test.go ================================================ package types import ( "math/big" "testing" "github.com/stretchr/testify/assert" ) // TestUint128 unit tests for various Uint128 helpers. func TestUint128(t *testing.T) { t.Run("FromBig", func(t *testing.T) { if got := Uint128FromBig(nil); !got.Equals(Uint128Zero()) { t.Fatalf("Uint128FromBig(nil) does not equal to 0, got %#x", got) } if got := Uint128FromBig(big.NewInt(-1)); !got.Equals(Uint128Zero()) { t.Fatalf("Uint128FromBig(-1) does not equal to 0, got %#x", got) } if got := Uint256FromBig(big.NewInt(124)).Big().String(); got != "124" { t.Fatalf("Uint256FromBig(big.NewInt(124)) does not equal to 0, got %#x", got) } if got := Uint128FromBig(new(big.Int).Lsh(big.NewInt(1), 129)); !got.Equals(Uint128Max()) { t.Fatalf("Uint128FromBig(2^129) does not equal to Max(), got %#x", got) } }) t.Run("ToBig", func(t *testing.T) { i := new(big.Int).SetInt64(124) assert.Equal(t, Uint256FromBig(i).Big().String(), "124") Uint256From64 := Uint256From64(124) assert.Equal(t, Uint256From64.Big().String(), "124") }) } ================================================ FILE: types/uint256.go ================================================ package types import ( "math/big" ) // Note, Zero and Max are functions just to make read-only values. // We cannot define constants for structures, and global variables // are unacceptable because it will be possible to change them. // Zero is the lowest possible Uint256 value. func Uint256Zero() Uint256 { return Uint256From64(0) } // Max is the largest possible Uint256 value. func Uint256Max() Uint256 { return Uint256{ Lo: Uint128Max(), Hi: Uint128Max(), } } // Uint256 is an unsigned 256-bit number. // All methods are immutable, works just like standard uint64. 
type Uint256 struct { Lo Uint128 // lower 128-bit half Hi Uint128 // upper 128-bit half } // From128 converts 128-bit value v to a Uint256 value. // Upper 128-bit half will be zero. func Uint256From128(v Uint128) Uint256 { return Uint256{Lo: v} } // From64 converts 64-bit value v to a Uint256 value. // Upper 128-bit half will be zero. func Uint256From64(v uint64) Uint256 { return Uint256From128(Uint128From64(v)) } // FromBig converts *big.Int to 256-bit Uint256 value ignoring overflows. // If input integer is nil or negative then return Zero. // If input interger overflows 256-bit then return Max. func Uint256FromBig(i *big.Int) Uint256 { u, _ := Uint256FromBigEx(i) return u } // FromBigEx converts *big.Int to 256-bit Uint256 value (eXtended version). // Provides ok successful flag as a second return value. // If input integer is negative or overflows 256-bit then ok=false. // If input is nil then zero 256-bit returned. func Uint256FromBigEx(i *big.Int) (Uint256, bool) { switch { case i == nil: return Uint256Zero(), true // assuming nil === 0 case i.Sign() < 0: return Uint256Zero(), false // value cannot be negative! case i.BitLen() > 256: return Uint256Max(), false // value overflows 256-bit! } // Note, actually result of big.Int.Uint64 is undefined // if stored value is greater than 2^64 // but we assume that it just gets lower 64 bits. t := new(big.Int) lolo := i.Uint64() lohi := t.Rsh(i, 64).Uint64() hilo := t.Rsh(i, 128).Uint64() hihi := t.Rsh(i, 192).Uint64() return Uint256{ Lo: Uint128{Lo: lolo, Hi: lohi}, Hi: Uint128{Lo: hilo, Hi: hihi}, }, true } // Big returns 256-bit value as a *big.Int. // //nolint:dupl func (u Uint256) Big() *big.Int { t := new(big.Int) i := new(big.Int).SetUint64(u.Hi.Hi) i = i.Lsh(i, 64) i = i.Or(i, t.SetUint64(u.Hi.Lo)) i = i.Lsh(i, 64) i = i.Or(i, t.SetUint64(u.Lo.Hi)) i = i.Lsh(i, 64) i = i.Or(i, t.SetUint64(u.Lo.Lo)) return i } // Equals returns true if two 256-bit values are equal. 
// Uint256 values can be compared directly with == operator // but use of the Equals method is preferred for consistency. func (u Uint256) Equals(v Uint256) bool { return u.Lo.Equals(v.Lo) && u.Hi.Equals(v.Hi) } ================================================ FILE: types/uint256_test.go ================================================ package types import ( "math/big" "testing" ) // TestUint256 unit tests for various Uint256 helpers. func TestUint256(t *testing.T) { t.Run("FromBig", func(t *testing.T) { if got := Uint256FromBig(nil); !got.Equals(Uint256Zero()) { t.Fatalf("Uint256FromBig(nil) does not equal to 0, got %#x", got) } if got := Uint256FromBig(big.NewInt(-1)); !got.Equals(Uint256Zero()) { t.Fatalf("Uint256FromBig(-1) does not equal to 0, got %#x", got) } if got := Uint256FromBig(big.NewInt(124)).Big().String(); got != "124" { t.Fatalf("Uint256FromBig(big.NewInt(124)) does not equal to 0, got %#x", got) } if got := Uint256FromBig(new(big.Int).Lsh(big.NewInt(1), 257)); !got.Equals(Uint256Max()) { t.Fatalf("Uint256FromBig(2^129) does not equal to Max(), got %#x", got) } }) } ================================================ FILE: types/uuid.go ================================================ package types type UUID [16]byte func UUIDFromBigEndian(b [16]byte) UUID { var val [16]byte val[0], val[7] = b[7], b[0] val[1], val[6] = b[6], b[1] val[2], val[5] = b[5], b[2] val[3], val[4] = b[4], b[3] val[8], val[15] = b[15], b[8] val[9], val[14] = b[14], b[9] val[10], val[13] = b[13], b[10] val[11], val[12] = b[12], b[11] return val } func (u UUID) BigEndian() [16]byte { return UUIDFromBigEndian(u) } ================================================ FILE: types/uuid_test.go ================================================ package types import ( "testing" "github.com/google/uuid" "github.com/stretchr/testify/assert" ) func TestUUID(t *testing.T) { u := uuid.New() uuidData := UUIDFromBigEndian(u) assert.Equal(t, uuidData.BigEndian(), [16]byte(u)) }