Repository: gookit/slog Branch: master Commit: f12a8b6e1523 Files: 90 Total size: 366.7 KB Directory structure: gitextract_ub2bczst/ ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ └── bug_report.md │ ├── changelog.yml │ ├── dependabot.yml │ ├── revive.toml │ └── workflows/ │ ├── go.yml │ └── release.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── README.zh-CN.md ├── _example/ │ ├── bench_loglibs.md │ ├── bench_loglibs_test.go │ ├── demos/ │ │ ├── demo1.go │ │ ├── simple.go │ │ └── slog_all_level.go │ ├── diff-with-zap-zerolog.md │ ├── go.mod │ ├── handler/ │ │ └── grouped.go │ ├── issue100/ │ │ └── issue100_test.go │ ├── issue111/ │ │ └── main.go │ ├── issue137/ │ │ └── main.go │ ├── pprof/ │ │ └── main.go │ └── refer/ │ └── main.go ├── benchmark2_test.go ├── benchmark_test.go ├── bufwrite/ │ ├── bufio_writer.go │ ├── bufwrite_test.go │ └── line_writer.go ├── common.go ├── common_test.go ├── example_test.go ├── formatter.go ├── formatter_json.go ├── formatter_test.go ├── formatter_text.go ├── go.mod ├── go.sum ├── handler/ │ ├── README.md │ ├── buffer.go │ ├── buffer_test.go │ ├── builder.go │ ├── config.go │ ├── config_test.go │ ├── console.go │ ├── console_test.go │ ├── email.go │ ├── example_test.go │ ├── file.go │ ├── file_test.go │ ├── handler.go │ ├── handler_test.go │ ├── rotatefile.go │ ├── rotatefile_test.go │ ├── syslog.go │ ├── syslog_test.go │ ├── write_close_flusher.go │ ├── write_close_syncer.go │ ├── write_closer.go │ ├── writer.go │ └── writer_test.go ├── handler.go ├── handler_test.go ├── internal/ │ └── util.go ├── issues_test.go ├── logger.go ├── logger_sub.go ├── logger_test.go ├── logger_write.go ├── processor.go ├── processor_test.go ├── record.go ├── record_test.go ├── rotatefile/ │ ├── README.md │ ├── cleanup.go │ ├── cleanup_test.go │ ├── config.go │ ├── config_test.go │ ├── issues_test.go │ ├── rotatefile.go │ ├── rotatefile_test.go │ ├── util.go │ ├── util_test.go │ ├── writer.go │ └── writer_test.go ├── slog.go ├── slog_test.go ├── 
sugared.go ├── util.go └── util_test.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a report to help us improve title: '' labels: '' assignees: inhere --- **System (please complete the following information):** - OS: `linux` [e.g. linux, macOS] - GO Version: `1.13` [e.g. `1.13`] - Pkg Version: `1.1.1` [e.g. `1.1.1`] **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** ```go // go code ``` **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Additional context** Add any other context about the problem here. ================================================ FILE: .github/changelog.yml ================================================ title: '## Change Log' # style allow: simple, markdown(mkdown), ghr(gh-release) style: gh-release # group names names: [Refactor, Fixed, Feature, Update, Other] repo_url: https://github.com/gookit/slog filters: # message length should >= 12 - name: msg_len min_len: 12 # message words should >= 3 - name: words_len min_len: 3 - name: keyword keyword: format code exclude: true - name: keywords keywords: format code, action test exclude: true # group match rules # not matched will use 'Other' group. 
rules: - name: Refactor start_withs: [refactor, break] contains: ['refactor:'] - name: Fixed start_withs: [fix] contains: ['fix:'] - name: Feature start_withs: [feat, new] contains: [feature] - name: Update start_withs: [update, 'up:'] contains: [] ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: gomod directory: "/" schedule: interval: daily open-pull-requests-limit: 10 - package-ecosystem: "github-actions" directory: "/" schedule: # Check for updates to GitHub Actions every weekday interval: "daily" ================================================ FILE: .github/revive.toml ================================================ ignoreGeneratedHeader = false # Sets the default severity to "warning" #severity = "error" severity = "warning" confidence = 0.8 errorCode = 0 warningCode = 0 [rule.blank-imports] [rule.context-as-argument] [rule.context-keys-type] [rule.dot-imports] [rule.error-return] [rule.error-strings] [rule.error-naming] [rule.exported] severity = "warning" [rule.if-return] [rule.increment-decrement] [rule.var-naming] [rule.var-declaration] [rule.package-comments] [rule.range] [rule.receiver-naming] [rule.time-naming] [rule.unexported-return] [rule.indent-error-flow] [rule.errorf] [rule.argument-limit] arguments = [4] [rule.function-result-limit] arguments = [3] [rule.empty-block] [rule.confusing-naming] [rule.superfluous-else] [rule.unused-parameter] [rule.unreachable-code] [rule.unnecessary-stmt] [rule.struct-tag] [rule.atomic] [rule.empty-lines] [rule.duplicated-imports] [rule.import-shadowing] [rule.confusing-results] [rule.modifies-parameter] [rule.redefines-builtin-id] ================================================ FILE: .github/workflows/go.yml ================================================ name: Unit-Tests on: pull_request: paths: - 'go.mod' - '**.go' - '**.yml' push: paths: - 'go.mod' - '**.go' - '**.yml' jobs: test: name: Test 
on go ${{ matrix.go_version }} and ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: go_version: [1.24, 1.23, 1.22, 1.21, 1.19, stable] os: [ubuntu-latest, windows-latest] # , macOS-latest steps: - name: Check out code uses: actions/checkout@v6 - name: Setup Go SDK uses: actions/setup-go@v6 timeout-minutes: 3 with: go-version: ${{ matrix.go_version }} - name: Tidy go mod run: go mod tidy # https://github.com/actions/setup-go # - name: Use Go ${{ matrix.go_version }} # timeout-minutes: 3 # uses: actions/setup-go@v3 # with: # go-version: ${{ matrix.go_version }} # - name: Revive check # uses: docker://morphy/revive-action:v2 # if: ${{ matrix.os == 'ubuntu-latest' && matrix.go_version == 'stable' }} # with: # config: .github/revive.toml # # Exclude patterns, separated by semicolons (optional) # exclude: "./internal/..." - name: Run staticcheck uses: reviewdog/action-staticcheck@v1 if: ${{ github.event_name == 'pull_request'}} with: github_token: ${{ secrets.github_token }} # Change reviewdog reporter if you need [github-pr-check,github-check,github-pr-review]. reporter: github-pr-check # Report all results. [added,diff_context,file,nofilter]. filter_mode: added # Exit with 1 when it find at least one finding. fail_on_error: true - name: Run unit tests # run: go test -v -cover ./... run: go test -coverprofile="profile.cov" ./... - name: Send coverage uses: shogo82148/actions-goveralls@v1 if: ${{ matrix.os == 'ubuntu-latest' }} with: path-to-profile: profile.cov flag-name: Go-${{ matrix.go_version }} parallel: true shallow: true # notifies that all test jobs are finished. 
# https://github.com/shogo82148/actions-goveralls finish: needs: test runs-on: ubuntu-latest steps: - uses: shogo82148/actions-goveralls@v1 with: shallow: true parallel-finished: true ================================================ FILE: .github/workflows/release.yml ================================================ name: Tag-release on: push: tags: - v* jobs: release: name: Release new version runs-on: ubuntu-latest timeout-minutes: 10 steps: - name: Checkout uses: actions/checkout@v6 with: fetch-depth: 0 - name: Setup ENV # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable run: | echo "RELEASE_TAG=${GITHUB_REF:10}" >> $GITHUB_ENV echo "RELEASE_NAME=$GITHUB_WORKFLOW" >> $GITHUB_ENV - name: Generate changelog run: | curl https://github.com/gookit/gitw/releases/latest/download/chlog-linux-amd64 -L -o /usr/local/bin/chlog chmod a+x /usr/local/bin/chlog chlog -c .github/changelog.yml -o changelog.md prev last # https://github.com/softprops/action-gh-release - name: Create release and upload assets uses: softprops/action-gh-release@v2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: name: ${{ env.RELEASE_TAG }} tag_name: ${{ env.RELEASE_TAG }} body_path: changelog.md token: ${{ secrets.GITHUB_TOKEN }} # files: macos-chlog.exe ================================================ FILE: .gitignore ================================================ *.log *.swp .idea *.patch *.tmp # Go template # Binaries for programs and plugins *.exe *.exe~ *.dll *.so *.dylib # Test binary, build with `go test -c` *.test *.log.* *~ # Output of the go coverage tool, specifically when used with LiteIDE *.out .DS_Store *.prof # shell script /*.bash /*.sh /*.zsh /*.pid go.work changelog.md testdata _example/go.sum .xenv.* ================================================ FILE: LICENSE ================================================ The MIT License (MIT) Copyright (c) 2016 inhere Permission is hereby granted, 
free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: Makefile ================================================ # Make does not offer a recursive wildcard function, so here's one: # from https://github.com/jenkins-x-plugins/jx-gitops/blob/main/Makefile rwildcard=$(wildcard $1$2) $(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2)) SHELL := /bin/bash NAME := slog BUILD_TARGET = testdata MAIN_SRC_FILE=cmd/main.go GO :=go ORG := gookit REV := $(shell git rev-parse --short HEAD 2> /dev/null || echo 'unknown') ORG_REPO := $(ORG)/$(NAME) RELEASE_ORG_REPO := $(ORG_REPO) ROOT_PACKAGE := github.com/$(ORG_REPO) GO_VERSION := $(shell $(GO) version | sed -e 's/^[^0-9.]*\([0-9.]*\).*/\1/') GO_DEPENDENCIES := $(call rwildcard,pkg/,*.go) $(call rwildcard,cmd/,*.go) BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2> /dev/null || echo 'unknown') BUILD_DATE := $(shell date +%Y%m%d-%H:%M:%S) CGO_ENABLED = 0 REPORTS_DIR=$(BUILD_TARGET)/reports GOTEST := $(GO) test # set dev version unless VERSION is 
explicitly set via environment VERSION ?= $(shell echo "$$(git for-each-ref refs/tags/ --count=1 --sort=-version:refname --format='%(refname:short)' 2>/dev/null)-dev+$(REV)" | sed 's/^v//') # Build flags for setting build-specific configuration at build time - defaults to empty #BUILD_TIME_CONFIG_FLAGS ?= "" # Full build flags used when building binaries. Not used for test compilation/execution. BUILDFLAGS := -ldflags \ " -X $(ROOT_PACKAGE)/pkg/cmd/version.Version=$(VERSION)\ -X github.com/jenkins-x-plugins/jx-gitops/pkg/cmd/version.Version=$(VERSION)\ -X $(ROOT_PACKAGE)/pkg/cmd/version.Revision='$(REV)'\ -X $(ROOT_PACKAGE)/pkg/cmd/version.Branch='$(BRANCH)'\ -X $(ROOT_PACKAGE)/pkg/cmd/version.BuildDate='$(BUILD_DATE)'\ -X $(ROOT_PACKAGE)/pkg/cmd/version.GoVersion='$(GO_VERSION)'\ $(BUILD_TIME_CONFIG_FLAGS)" # Some tests expect default values for version.*, so just use the config package values there. TEST_BUILDFLAGS := -ldflags "$(BUILD_TIME_CONFIG_FLAGS)" ifdef DEBUG BUILDFLAGS := -gcflags "all=-N -l" $(BUILDFLAGS) endif ifdef PARALLEL_BUILDS BUILDFLAGS += -p $(PARALLEL_BUILDS) GOTEST += -p $(PARALLEL_BUILDS) else # -p 4 seems to work well for people GOTEST += -p 4 endif ifdef DISABLE_TEST_CACHING GOTEST += -count=1 endif TEST_PACKAGE ?= ./... COVER_OUT:=$(REPORTS_DIR)/cover.out COVERFLAGS=-coverprofile=$(COVER_OUT) --covermode=count --coverpkg=./... 
.PHONY: list list: ## List all make targets @$(MAKE) -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort .PHONY: help .DEFAULT_GOAL := help help: @echo -e "Some useful commands for develop\n" @grep -h -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' full: check ## Build and run the tests check: build test ## Build and run the tests get-test-deps: ## Install test dependencies get install github.com/axw/gocov/gocov get install gopkg.in/matm/v1/gocov-html print-version: ## Print version @echo $(VERSION) build: $(GO_DEPENDENCIES) clean ## Build jx-labs binary for current OS go mod download CGO_ENABLED=$(CGO_ENABLED) $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/$(NAME) $(MAIN_SRC_FILE) label: $(GO_DEPENDENCIES) CGO_ENABLED=$(CGO_ENABLED) $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/jx-label fns/label/main.go build-all: $(GO_DEPENDENCIES) build make-reports-dir ## Build all files - runtime, all tests etc. CGO_ENABLED=$(CGO_ENABLED) $(GOTEST) -run=nope -tags=integration -failfast -short ./... $(BUILDFLAGS) tidy-deps: ## Cleans up dependencies $(GO) mod tidy # mod tidy only takes compile dependencies into account, let's make sure we capture tooling dependencies as well @$(MAKE) install-generate-deps pprof-web: ## generate pprof file and start an web-ui cd ./_example; go mod tidy; go run ./pprof #go tool pprof rux_prof_data.prof go tool pprof -http=:8080 ./_example/rux_prof_data.prof .PHONY: make-reports-dir make-reports-dir: mkdir -p $(REPORTS_DIR) test: ## Run tests with the "unit" build tag KUBECONFIG=/cluster/connections/not/allowed CGO_ENABLED=$(CGO_ENABLED) $(GOTEST) --tags=unit -failfast -short ./... 
$(TEST_BUILDFLAGS) test-coverage : make-reports-dir ## Run tests and coverage for all tests with the "unit" build tag CGO_ENABLED=$(CGO_ENABLED) $(GOTEST) --tags=unit $(COVERFLAGS) -failfast -short ./... $(TEST_BUILDFLAGS) test-report: make-reports-dir get-test-deps test-coverage ## Create the test report @gocov convert $(COVER_OUT) | gocov report test-report-html: make-reports-dir get-test-deps test-coverage ## Create the test report in HTML format @gocov convert $(COVER_OUT) | gocov-html > $(REPORTS_DIR)/cover.html && open $(REPORTS_DIR)/cover.html test-bench: ## run bench test report in _example dir cd ./_example; go mod tidy; \ go test -v -cpu=4 -run=none -bench=. -benchtime=10s -benchmem bench_loglibs_test.go install: $(GO_DEPENDENCIES) ## Install the binary GOBIN=${GOPATH}/bin $(GO) install $(BUILDFLAGS) $(MAIN_SRC_FILE) linux: ## Build for Linux CGO_ENABLED=$(CGO_ENABLED) GOOS=linux GOARCH=amd64 $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/linux/$(NAME) $(MAIN_SRC_FILE) chmod +x build/linux/$(NAME) arm: ## Build for ARM CGO_ENABLED=$(CGO_ENABLED) GOOS=linux GOARCH=arm $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/arm/$(NAME) $(MAIN_SRC_FILE) chmod +x build/arm/$(NAME) win: ## Build for Windows CGO_ENABLED=$(CGO_ENABLED) GOOS=windows GOARCH=amd64 $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/win/$(NAME)-windows-amd64.exe $(MAIN_SRC_FILE) darwin: ## Build for OSX CGO_ENABLED=$(CGO_ENABLED) GOOS=darwin GOARCH=amd64 $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/darwin/$(NAME) $(MAIN_SRC_FILE) chmod +x build/darwin/$(NAME) .PHONY: release release: clean linux test release-all: release linux win darwin promoter: cd promote && go build main.go .PHONY: goreleaser goreleaser: step-go-releaser --organisation=$(ORG) --revision=$(REV) --branch=$(BRANCH) --build-date=$(BUILD_DATE) --go-version=$(GO_VERSION) --root-package=$(ROOT_PACKAGE) --version=$(VERSION) --timeout 200m .PHONY: clean clean: ## Clean the generated artifacts rm -rf build release dist get-fmt-deps: ## 
Install test dependencies get install golang.org/x/tools/cmd/goimports .PHONY: fmt fmt: importfmt ## Format the code $(eval FORMATTED = $(shell $(GO) fmt ./...)) @if [ "$(FORMATTED)" == "" ]; \ then \ echo "All Go files properly formatted"; \ else \ echo "Fixed formatting for: $(FORMATTED)"; \ fi .PHONY: importfmt importfmt: get-fmt-deps @echo "Formatting the imports..." goimports -w $(GO_DEPENDENCIES) .PHONY: lint lint: ## Lint the code ./hack/gofmt.sh ./hack/linter.sh ./hack/generate.sh .PHONY: all all: fmt build test lint generate-refdocs install-refdocs: $(GO) get github.com/jenkins-x/gen-crd-api-reference-docs generate-refdocs: install-refdocs gen-crd-api-reference-docs -config "hack/configdocs/config.json" \ -template-dir hack/configdocs/templates \ -api-dir "./pkg/apis/gitops/v1alpha1" \ -out-file docs/config.md generate-scheduler-refdocs: install-refdocs gen-crd-api-reference-docs -config "hack/configdocs/config.json" \ -template-dir hack/configdocs/templates \ -api-dir "./pkg/apis/scheduler/v1alpha1" \ -out-file docs/scheduler-config.md bin/docs: go build $(LDFLAGS) -v -o bin/docs cmd/docs/*.go .PHONY: docs docs: bin/docs generate-refdocs generate-scheduler-refdocs ## update docs @echo "Generating docs" @./bin/docs --target=./docs/cmd @./bin/docs --target=./docs/man/man1 --kind=man @rm -f ./bin/docs ================================================ FILE: README.md ================================================ # slog ![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/gookit/slog?style=flat-square) [![GoDoc](https://pkg.go.dev/badge/github.com/gookit/slog.svg)](https://pkg.go.dev/github.com/gookit/slog) [![Go Report Card](https://goreportcard.com/badge/github.com/gookit/slog)](https://goreportcard.com/report/github.com/gookit/slog) [![Unit-Tests](https://github.com/gookit/slog/workflows/Unit-Tests/badge.svg)](https://github.com/gookit/slog/actions) [![GitHub tag (latest 
SemVer)](https://img.shields.io/github/tag/gookit/slog)](https://github.com/gookit/slog) [![Coverage Status](https://coveralls.io/repos/github/gookit/slog/badge.svg?branch=master)](https://coveralls.io/github/gookit/slog?branch=master) 📑 Lightweight, structured, extensible, configurable logging library written in Golang. **Output in console:** ![console-log-all-level](_example/images/console-log-all-level.png) ## Features - Simple, directly available without configuration - Support common log level processing. - eg: `trace` `debug` `info` `notice` `warn` `error` `fatal` `panic` - Support any extension of `Handler` `Formatter` as needed - Supports adding multiple `Handler` log processing at the same time, outputting logs to different places - Support to custom log message `Formatter` - Built-in `json` `text` two log record formatting `Formatter` - Support to custom build log messages `Handler` - The built-in `handler.Config` `handler.Builder` can easily and quickly build the desired log handler - Has built-in common log write handler program - `console` output logs to the console, supports color output - `writer` output logs to the specified `io.Writer` - `file` output log to the specified file, optionally enable `buffer` to buffer writes - `simple` output log to the specified file, write directly to the file without buffering - `rotate_file` outputs logs to the specified file, and supports splitting files by time and size, and `buffer` buffered writing is enabled by default - See [./handler](./handler) folder for more built-in implementations - Benchmark performance test please see [Benchmarks](#benchmarks) ### Output logs to file - Support enabling `buffer` for log writing - Support splitting log files by `time` and `size` - Support configuration to compress log files via `gzip` - Support clean old log files by `BackupNum` `BackupTime` ### `rotatefile` subpackage - The `rotatefile` subpackage is a stand-alone tool library with file splitting, cleaning, and 
compressing backups - `rotatefile.Writer` can also be directly wrapped and used in other logging libraries. For example: `log`, `glog`, `zap`, etc. - `rotatefile.FilesClear` is an independent file cleaning backup tool, which can be used in other places (such as other program log cleaning such as PHP) - For more usage, please see [rotatefile](rotatefile/README.md) ### Use slog in GORM Please see https://github.com/gookit/slog/issues/127#issuecomment-2827745713 ## [中文说明](README.zh-CN.md) 中文说明请阅读 [README.zh-CN](README.zh-CN.md) ## GoDoc - [Godoc for github](https://pkg.go.dev/github.com/gookit/slog?tab=doc) ## Install ```bash go get github.com/gookit/slog ``` ## Quick Start `slog` is very simple to use and can be used without any configuration ```go package main import ( "github.com/gookit/slog" ) func main() { slog.Info("info log message") slog.Warn("warning log message") slog.Infof("info log %s", "message") slog.Debugf("debug %s", "message") } ``` **Output:** ```text [2020/07/16 12:19:33] [application] [INFO] [main.go:7] info log message [2020/07/16 12:19:33] [application] [WARNING] [main.go:8] warning log message [2020/07/16 12:19:33] [application] [INFO] [main.go:9] info log message [2020/07/16 12:19:33] [application] [DEBUG] [main.go:10] debug message ``` ### Console Color You can enable color on output logs to console. 
_This is default_ ```go package main import ( "github.com/gookit/slog" ) func main() { slog.Configure(func(logger *slog.SugaredLogger) { f := logger.Formatter.(*slog.TextFormatter) f.EnableColor = true }) slog.Trace("this is a simple log message") slog.Debug("this is a simple log message") slog.Info("this is a simple log message") slog.Notice("this is a simple log message") slog.Warn("this is a simple log message") slog.Error("this is a simple log message") slog.Fatal("this is a simple log message") } ``` **Output:** ![](_example/images/console-color-log.png) ### Change log output style Above is the `Formatter` setting that changed the default logger. > You can also create your own logger and append `ConsoleHandler` to support printing logs to the console: ```go h := handler.NewConsoleHandler(slog.AllLevels) l := slog.NewWithHandlers(h) l.Trace("this is a simple log message") l.Debug("this is a simple log message") ``` Change the default logger log output style: ```go h.Formatter().(*slog.TextFormatter).SetTemplate(slog.NamedTemplate) ``` **Output:** ![](_example/images/console-color-log1.png) > Note: `slog.TextFormatter` uses a template string to format the output log, so the new field output needs to adjust the template at the same time. ### Use JSON Format `slog` also has a built-in `Formatter` for JSON format. If not specified, the default is to use `TextFormatter` to format log records. 
```go package main import ( "github.com/gookit/slog" ) func main() { // use JSON formatter slog.SetFormatter(slog.NewJSONFormatter()) slog.Info("info log message") slog.Warn("warning log message") slog.WithData(slog.M{ "key0": 134, "key1": "abc", }).Infof("info log %s", "message") r := slog.WithFields(slog.M{ "category": "service", "IP": "127.0.0.1", }) r.Infof("info %s", "message") r.Debugf("debug %s", "message") } ``` **Output:** ```text {"channel":"application","data":{},"datetime":"2020/07/16 13:23:33","extra":{},"level":"INFO","message":"info log message"} {"channel":"application","data":{},"datetime":"2020/07/16 13:23:33","extra":{},"level":"WARNING","message":"warning log message"} {"channel":"application","data":{"key0":134,"key1":"abc"},"datetime":"2020/07/16 13:23:33","extra":{},"level":"INFO","message":"info log message"} {"IP":"127.0.0.1","category":"service","channel":"application","datetime":"2020/07/16 13:23:33","extra":{},"level":"INFO","message":"info message"} {"IP":"127.0.0.1","category":"service","channel":"application","datetime":"2020/07/16 13:23:33","extra":{},"level":"DEBUG","message":"debug message"} ``` ## Introduction - `Logger` - log dispatcher. One logger can register multiple `Handler`, `Processor` - `Record` - log records, each log is a `Record` instance. - `Processor` - enables extended processing of log records. It is called before the log `Record` is processed by the `Handler`. - You can use it to perform additional operations on `Record`, such as: adding fields, adding extended information, etc. - `Handler` - log handler, each log will be processed by `Handler.Handle()`. - Here you can send logs to console, file, remote server, etc. - `Formatter` - logging data formatting process. - Usually set in `Handler`, it can be used to format log records, convert records into text, JSON, etc., `Handler` then writes the formatted data to the specified place. - `Formatter` is not required. 
You can do without it and handle logging directly in `Handler.Handle()`. **Simple structure of log scheduler**: ```text Processors Logger --{ Handlers --|- Handler0 With Formatter0 |- Handler1 With Formatter1 |- Handler2 (can also without Formatter) |- ... more ``` > Note: Be sure to remember to add `Handler`, `Processor` to the logger instance and log records will be processed by `Handler`. ### Processor `Processor` interface: ```go // Processor interface definition type Processor interface { // Process record Process(record *Record) } // ProcessorFunc definition type ProcessorFunc func(record *Record) // Process record func (fn ProcessorFunc) Process(record *Record) { fn(record) } ``` > You can use it to perform additional operations on the Record before the log `Record` reaches the `Handler` for processing, such as: adding fields, adding extended information, etc. Add processor to logger: ```go slog.AddProcessor(slog.AddHostname()) // or l := slog.New() l.AddProcessor(slog.AddHostname()) ``` The built-in processor `slog.AddHostname` is used here as an example, which can add a new field `hostname` on each log record. ```go slog.AddProcessor(slog.AddHostname()) slog.Info("message") ``` Output, including new fields `"hostname":"InhereMac"`: ```json {"channel":"application","level":"INFO","datetime":"2020/07/17 12:01:35","hostname":"InhereMac","data":{},"extra":{},"message":"message"} ``` ### Handler `Handler` interface: > You can customize any `Handler` you want, just implement the `slog.Handler` interface. ```go // Handler interface definition type Handler interface { io.Closer Flush() error // IsHandling Checks whether the given record will be handled by this handler. IsHandling(level Level) bool // Handle a log record. // all records may be passed to this method, and the handler should discard // those that it does not want to handle. 
Handle(*Record) error } ``` ### Formatter `Formatter` interface: ```go // Formatter interface type Formatter interface { Format(record *Record) ([]byte, error) } ``` Function wrapper type: ```go // FormatterFunc wrapper definition type FormatterFunc func(r *Record) ([]byte, error) // Format a log record func (fn FormatterFunc) Format(r *Record) ([]byte, error) { return fn(r) } ``` **JSON formatter** ```go type JSONFormatter struct { // Fields exported log fields. Fields []string // Aliases for output fields. you can change export field name. // item: `"field" : "output name"` // eg: {"message": "msg"} export field will display "msg" Aliases StringMap // PrettyPrint will indent all json logs PrettyPrint bool // TimeFormat the time format layout. default is time.RFC3339 TimeFormat string } ``` **Text formatter** Default templates: ```go const DefaultTemplate = "[{{datetime}}] [{{channel}}] [{{level}}] [{{caller}}] {{message}} {{data}} {{extra}}\n" const NamedTemplate = "{{datetime}} channel={{channel}} level={{level}} [file={{caller}}] message={{message}} data={{data}}\n" ``` Change template: ```go myTemplate := "[{{datetime}}] [{{level}}] {{message}}" f := slog.NewTextFormatter() f.SetTemplate(myTemplate) ``` ## Custom logger Custom `Processor` and `Formatter` are relatively simple, just implement a corresponding method. ### Create new logger `slog.Info, slog.Warn` and other methods use the default logger and output logs to the console by default. You can create a brand-new instance of `slog.Logger`: **Method 1**: ```go l := slog.New() // add handlers ... h1 := handler.NewConsoleHandler(slog.AllLevels) l.AddHandlers(h1) ``` **Method 2**: ```go l := slog.NewWithName("myLogger") // add handlers ... 
h1 := handler.NewConsoleHandler(slog.AllLevels) l.AddHandlers(h1) ``` **Method 3**: ```go package main import ( "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func main() { l := slog.NewWithHandlers(handler.NewConsoleHandler(slog.AllLevels)) l.Info("message") } ``` ### Create custom Handler You only need to implement the `slog.Handler` interface to create a custom `Handler`. You can quickly assemble your own Handler through the built-in `handler.LevelsWithFormatter` `handler.LevelWithFormatter` and other fragments of slog. Examples: > Use `handler.LevelsWithFormatter`, only need to implement `Close, Flush, Handle` methods ```go type MyHandler struct { handler.LevelsWithFormatter Output io.Writer } func (h *MyHandler) Handle(r *slog.Record) error { // you can write log message to file or send to remote. return nil } func (h *MyHandler) Flush() error { return nil } func (h *MyHandler) Close() error { return nil } ``` Add `Handler` to the logger to use: ```go // add to default logger slog.AddHandler(&MyHandler{}) // or, add to custom logger: l := slog.New() l.AddHandler(&MyHandler{}) ``` ## Use the built-in handlers [./handler](handler) package has built-in common log handlers, which can basically meet most scenarios. ```go // Output logs to console, allow render color. 
func NewConsoleHandler(levels []slog.Level) *ConsoleHandler // Send logs to email func NewEmailHandler(from EmailOption, toAddresses []string) *EmailHandler // Send logs to syslog func NewSysLogHandler(priority syslog.Priority, tag string) (*SysLogHandler, error) // A simple handler implementation that outputs logs to a given io.Writer func NewSimpleHandler(out io.Writer, level slog.Level) *SimpleHandler ``` **Output log to file**: ```go // Output log to the specified file, without buffering by default func NewFileHandler(logfile string, fns ...ConfigFn) (h *SyncCloseHandler, err error) // Output logs to the specified file in JSON format, without buffering by default func JSONFileHandler(logfile string, fns ...ConfigFn) (*SyncCloseHandler, error) // Buffered output log to specified file func NewBuffFileHandler(logfile string, buffSize int, fns ...ConfigFn) (*SyncCloseHandler, error) ``` > TIP: `NewFileHandler` `JSONFileHandler` can also enable write buffering by passing in fns `handler.WithBuffSize(buffSize)` **Output log to file and rotate automatically**: ```go // Automatic rotating according to file size func NewSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error) // Automatic rotating according to time func NewTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) // It supports configuration to rotate according to size and time. // The default setting file size is 20M, and the default automatic splitting time is 1 hour (EveryHour). func NewRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) ``` > TIP: By passing in `fns ...ConfigFn`, more options can be set, such as log file retention time, log write buffer size, etc. For detailed settings, see the `handler.Config` structure ### Logs to file Output log to the specified file, `buffer` buffered writing is not enabled by default. 
Buffering can also be enabled by passing in a parameter. ```go package mypkg import ( "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func main() { defer slog.MustClose() // DangerLevels contains: slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel h1 := handler.MustFileHandler("/tmp/error.log", handler.WithLogLevels(slog.DangerLevels)) // custom log format // f := h1.Formatter().(*slog.TextFormatter) f := slog.AsTextFormatter(h1.Formatter()) f.SetTemplate("your template format\n") // NormalLevels contains: slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel h2 := handler.MustFileHandler("/tmp/info.log", handler.WithLogLevels(slog.NormalLevels)) // register handlers slog.PushHandler(h1) slog.PushHandler(h2) // add logs slog.Info("info message text") slog.Error("error message text") } ``` > **Note**: If write buffering `buffer` is enabled, be sure to call `logger.Close()` at the end of the program to flush the contents of the buffer to the file. ### Log to file with automatic rotating `slog/handler` also has a built-in output log to a specified file, and supports splitting files by time and size at the same time. By default, `buffer` buffered writing is enabled ```go func Example_rotateFileHandler() { h1 := handler.MustRotateFile("/tmp/error.log", handler.EveryHour, handler.WithLogLevels(slog.DangerLevels)) h2 := handler.MustRotateFile("/tmp/info.log", handler.EveryHour, handler.WithLogLevels(slog.NormalLevels)) slog.PushHandler(h1) slog.PushHandler(h2) // add logs slog.Info("info message") slog.Error("error message") } ``` Example of file name sliced by time: ```text time-rotate-file.log time-rotate-file.log.20201229_155753 time-rotate-file.log.20201229_155754 ``` Example of a filename cut by size, in the format `filename.log.HIS_000N`. 
For example: ```text size-rotate-file.log size-rotate-file.log.122915_0001 size-rotate-file.log.122915_0002 ``` ### Use rotatefile on another logger `rotatefile.Writer` can also be used with other logging packages, such as: `log`, `glog`, etc. For example, using `rotatefile` on golang `log`: ```go package main import ( "log" "github.com/gookit/slog/rotatefile" ) func main() { logFile := "testdata/go_logger.log" writer, err := rotatefile.NewConfig(logFile).Create() if err != nil { panic(err) } log.SetOutput(writer) log.Println("log message") } ``` ### Quickly create a Handler based on config This is config struct for create a Handler: ```go // Config struct type Config struct { // Logfile for write logs Logfile string `json:"logfile" yaml:"logfile"` // LevelMode for filter log record. default LevelModeList LevelMode uint8 `json:"level_mode" yaml:"level_mode"` // Level value. use on LevelMode = LevelModeValue Level slog.Level `json:"level" yaml:"level"` // Levels for log record Levels []slog.Level `json:"levels" yaml:"levels"` // UseJSON for format logs UseJSON bool `json:"use_json" yaml:"use_json"` // BuffMode type name. allow: line, bite BuffMode string `json:"buff_mode" yaml:"buff_mode"` // BuffSize for enable buffer, unit is bytes. set 0 to disable buffer BuffSize int `json:"buff_size" yaml:"buff_size"` // RotateTime for rotate file, unit is seconds. RotateTime rotatefile.RotateTime `json:"rotate_time" yaml:"rotate_time"` // MaxSize on rotate file by size, unit is bytes. MaxSize uint64 `json:"max_size" yaml:"max_size"` // Compress determines if the rotated log files should be compressed using gzip. // The default is not to perform compression. Compress bool `json:"compress" yaml:"compress"` // BackupNum max number for keep old files. // 0 is not limit, default is 20. BackupNum uint `json:"backup_num" yaml:"backup_num"` // BackupTime max time for keep old files. unit is hours // 0 is not limit, default is a week. 
BackupTime uint `json:"backup_time" yaml:"backup_time"` // RenameFunc build filename for rotate file RenameFunc func(filepath string, rotateNum uint) string } ``` **Examples**: ```go testFile := "testdata/error.log" h := handler.NewEmptyConfig( handler.WithLogfile(testFile), handler.WithBuffSize(1024*8), handler.WithRotateTimeString("1hour"), handler.WithLogLevels(slog.DangerLevels), ). CreateHandler() l := slog.NewWithHandlers(h) ``` **About BuffMode** `Config.BuffMode` The name of the BuffMode type to use. Allow: line, bite - `BuffModeBite`: Buffer by bytes, when the number of bytes in the buffer reaches the specified size, write the contents of the buffer to the file - `BuffModeLine`: Buffer by line, when the buffer size is reached, always ensure that a complete line of log content is written to the file (to avoid log content being truncated) ### Use Builder to quickly create Handler Use `handler.Builder` to easily and quickly create Handler instances. ```go testFile := "testdata/info.log" h := handler.NewBuilder(). WithLogfile(testFile). WithLogLevels(slog.NormalLevels). WithBuffSize(1024*8). WithRotateTime(rotatefile.Every30Min). WithCompress(true). Build() l := slog.NewWithHandlers(h) ``` ## Extension packages Package `bufwrite`: - `bufwrite.BufIOWriter` additionally implements `Sync(), Close()` methods by wrapping go's `bufio.Writer`, which is convenient to use - `bufwrite.LineWriter` refer to the implementation of `bufio.Writer` in go, which can support flushing the buffer by line, which is more useful for writing log files Package `rotatefile`: - `rotatefile.Writer` implements automatic cutting of log files according to size and specified time, and also supports automatic cleaning of log files - `handler/rotate_file` is to use it to cut the log file ### Use rotatefile on other log package Of course, the rotatefile.Writer can be use on other log package, such as: `log`, `glog` and more. 
Examples, use rotatefile on golang `log`: ```go package main import ( "log" "github.com/gookit/slog/rotatefile" ) func main() { logFile := "testdata/another_logger.log" writer, err := rotatefile.NewConfig(logFile).Create() if err != nil { panic(err) } log.SetOutput(writer) log.Println("log message") } ``` ## Testing and benchmark ### Unit tests run unit tests: ```bash go test ./... ``` ### Benchmarks Benchmark code at [_example/bench_loglibs_test.go](_example/bench_loglibs_test.go) ```bash make test-bench ``` Benchmarks for `slog` and other log packages: > **Note**: test and record ad 2023.04.13 ```shell goos: darwin goarch: amd64 cpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz BenchmarkZapNegative BenchmarkZapNegative-4 8381674 1429 ns/op 216 B/op 3 allocs/op BenchmarkZapSugarNegative BenchmarkZapSugarNegative-4 8655980 1383 ns/op 104 B/op 4 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 14173719 849.8 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 27456256 451.2 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkLogrusNegative-4 2550771 4784 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogNegative >>>> BenchmarkGookitSlogNegative-4 8798220 1375 ns/op 120 B/op 3 allocs/op BenchmarkZapPositive BenchmarkZapPositive-4 10302483 1167 ns/op 192 B/op 1 allocs/op BenchmarkZapSugarPositive BenchmarkZapSugarPositive-4 3833311 3154 ns/op 344 B/op 7 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 14120524 846.7 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 27152686 434.9 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 2601892 4691 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive >>>> BenchmarkGookitSlogPositive-4 8997104 1340 ns/op 120 B/op 3 allocs/op PASS ok command-line-arguments 167.095s ``` ## Gookit packages - [gookit/ini](https://github.com/gookit/ini) Go config management, use INI files - [gookit/rux](https://github.com/gookit/rux) Simple 
and fast request router for golang HTTP - [gookit/gcli](https://github.com/gookit/gcli) Build CLI application, tool library, running CLI commands - [gookit/slog](https://github.com/gookit/slog) Lightweight, extensible, configurable logging library written in Go - [gookit/color](https://github.com/gookit/color) A command-line color library with true color support, universal API methods and Windows support - [gookit/event](https://github.com/gookit/event) Lightweight event manager and dispatcher implements by Go - [gookit/cache](https://github.com/gookit/cache) Generic cache use and cache manager for golang. support File, Memory, Redis, Memcached. - [gookit/config](https://github.com/gookit/config) Go config management. support JSON, YAML, TOML, INI, HCL, ENV and Flags - [gookit/filter](https://github.com/gookit/filter) Provide filtering, sanitizing, and conversion of golang data - [gookit/validate](https://github.com/gookit/validate) Use for data validation and filtering. support Map, Struct, Form data - [gookit/goutil](https://github.com/gookit/goutil) Some utils for the Go: string, array/slice, map, format, cli, env, filesystem, test and more - More, please see https://github.com/gookit ## Acknowledgment The projects is heavily inspired by follow packages: - https://github.com/phuslu/log - https://github.com/golang/glog - https://github.com/sirupsen/logrus - https://github.com/Seldaek/monolog - https://github.com/syyongx/llog - https://github.com/uber-go/zap - https://github.com/rs/zerolog - https://github.com/natefinch/lumberjack ## LICENSE [MIT](LICENSE) ================================================ FILE: README.zh-CN.md ================================================ # slog ![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/gookit/slog?style=flat-square) [![GoDoc](https://pkg.go.dev/badge/github.com/gookit/slog.svg)](https://pkg.go.dev/github.com/gookit/slog) [![Go Report 
Card](https://goreportcard.com/badge/github.com/gookit/slog)](https://goreportcard.com/report/github.com/gookit/slog) [![Unit-Tests](https://github.com/gookit/slog/workflows/Unit-Tests/badge.svg)](https://github.com/gookit/slog/actions) [![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gookit/slog)](https://github.com/gookit/slog) [![Coverage Status](https://coveralls.io/repos/github/gookit/slog/badge.svg?branch=master)](https://coveralls.io/github/gookit/slog?branch=master) 📑 Go 实现的一个易于使用的,结构化的,易扩展、可配置的日志库。 **控制台日志效果:** ![console-log-all-level](_example/images/console-log-all-level.png) ## 功能特色 - 简单,无需配置,开箱即用 - 支持常用的日志级别处理 - 如: `trace` `debug` `info` `notice` `warn` `error` `fatal` `panic` - 可以任意扩展自己需要的 `Handler` `Formatter` - 支持同时添加多个 `Handler` 日志处理,输出日志到不同的地方 - 支持自定义构建 `Handler` 处理器 - 内置的 `handler.Config` `handler.Builder`,可以方便快捷的构建想要的日志处理器 - 支持自定义 `Formatter` 格式化处理 - 内置了 `json` `text` 两个日志记录格式化 `Formatter` - 已经内置了常用的日志处理器 - `console` 输出日志到控制台,支持色彩输出 - `writer` 输出日志到指定的 `io.Writer` - `file` 输出日志到指定文件,可选启用 `buffer` 缓冲写入 - `simple` 输出日志到指定文件,无缓冲直接写入文件 - `rotate_file` 输出日志到指定文件,并且同时支持按时间、按大小分割文件,默认启用 `buffer` 缓冲写入 - 更多内置实现请查看 [./handler](./handler) 文件夹 - 基准性能测试请看 [Benchmarks](#benchmarks) ### 输出日志到文件 - 支持启用 `buffer` 缓冲日志写入 - 支持按时间、按大小自动分割文件 - 支持配置通过 `gzip` 压缩日志文件 - 支持清理旧日志文件 配置: `BackupNum` `BackupTime` ### `rotatefile` 子包 - `rotatefile` 子包是一个拥有文件分割,清理,压缩备份的独立工具库 - `rotatefile.Writer` 也可以直接包装使用用在其他日志库。例如:`log`、`glog`、`zap` 等等 - `rotatefile.FilesClear` 是一个独立的文件清理备份工具, 可以用在其他地方(如 PHP等其他程序日志清理) - 更多使用请查看 [rotatefile](rotatefile/README.md) ### GORM 中使用 slog 请查看 https://github.com/gookit/slog/issues/127#issuecomment-2827745713 ## [English](README.md) English instructions please see [./README](README.md) ## GoDoc - [Godoc for github](https://pkg.go.dev/github.com/gookit/slog?tab=doc) ## 安装 ```bash go get github.com/gookit/slog ``` ## 快速开始 `slog` 使用非常简单,无需任何配置即可使用。 ```go package main import ( "github.com/gookit/slog" ) func main() { slog.Info("info log 
message") slog.Warn("warning log message") slog.Infof("info log %s", "message") slog.Debugf("debug %s", "message") } ``` **输出预览:** ```text [2020/07/16 12:19:33] [application] [INFO] [main.go:7] info log message [2020/07/16 12:19:33] [application] [WARNING] [main.go:8] warning log message [2020/07/16 12:19:33] [application] [INFO] [main.go:9] info log message [2020/07/16 12:19:33] [application] [DEBUG] [main.go:10] debug message ``` ### 启用控制台颜色 您可以在输出控制台日志时启用颜色输出,将会根据不同级别打印不同色彩。 ```go package main import ( "github.com/gookit/slog" ) func main() { slog.Configure(func(logger *slog.SugaredLogger) { f := logger.Formatter.(*slog.TextFormatter) f.EnableColor = true }) slog.Trace("this is a simple log message") slog.Debug("this is a simple log message") slog.Info("this is a simple log message") slog.Notice("this is a simple log message") slog.Warn("this is a simple log message") slog.Error("this is a simple log message") slog.Fatal("this is a simple log message") } ``` **输出预览:** ![](_example/images/console-color-log.png) ### 更改日志输出样式 上面是更改了默认logger的 `Formatter` 设置。 > 你也可以创建自己的logger,并追加 `ConsoleHandler` 来支持打印日志到控制台: ```go h := handler.NewConsoleHandler(slog.AllLevels) l := slog.NewWithHandlers(h) l.Trace("this is a simple log message") l.Debug("this is a simple log message") ``` 更改默认的logger日志输出样式: ```go h.Formatter().(*slog.TextFormatter).SetTemplate(slog.NamedTemplate) ``` **输出预览:** ![](_example/images/console-color-log1.png) > 注意:`slog.TextFormatter` 使用模板字符串来格式化输出日志,因此新增字段输出需要同时调整模板。 ### 使用JSON格式 slog 也内置了 JSON 格式的 `Formatter`。若不特别指定,默认都是使用 `TextFormatter` 格式化日志记录。 ```go package main import ( "github.com/gookit/slog" ) func main() { // use JSON formatter slog.SetFormatter(slog.NewJSONFormatter()) slog.Info("info log message") slog.Warn("warning log message") slog.WithData(slog.M{ "key0": 134, "key1": "abc", }).Infof("info log %s", "message") r := slog.WithFields(slog.M{ "category": "service", "IP": "127.0.0.1", }) r.Infof("info %s", "message") r.Debugf("debug %s", 
"message") } ``` **输出预览:** ```text {"channel":"application","data":{},"datetime":"2020/07/16 13:23:33","extra":{},"level":"INFO","message":"info log message"} {"channel":"application","data":{},"datetime":"2020/07/16 13:23:33","extra":{},"level":"WARNING","message":"warning log message"} {"channel":"application","data":{"key0":134,"key1":"abc"},"datetime":"2020/07/16 13:23:33","extra":{},"level":"INFO","message":"info log message"} {"IP":"127.0.0.1","category":"service","channel":"application","datetime":"2020/07/16 13:23:33","extra":{},"level":"INFO","message":"info message"} {"IP":"127.0.0.1","category":"service","channel":"application","datetime":"2020/07/16 13:23:33","extra":{},"level":"DEBUG","message":"debug message"} ``` ## 架构说明 - `Logger` - 日志调度器. 一个logger可以注册多个 `Handler`,`Processor` - `Record` - 日志记录,每条日志就是一个 `Record` 实例。 - `Processor` - 可以对日志记录进行扩展处理。它在日志 `Record` 被 `Handler` 处理之前调用。 - 你可以使用它对 `Record` 进行额外的操作,比如:新增字段,添加扩展信息等 - `Handler` - 日志处理器,每条日志都会经过 `Handler.Handle()` 处理。 - 在这里你可以将日志发送到 控制台,文件,远程服务器等等。 - `Formatter` - 日志记录数据格式化处理。 - 通常设置于 `Handler` 中,可以用于格式化日志记录,将记录转成文本,JSON等,`Handler` 再将格式化后的数据写入到指定的地方。 - `Formatter` 不是必须的。你可以不使用它,直接在 `Handler.Handle()` 中对日志记录进行处理。 **日志调度器简易结构**: ```text Processors Logger --{ Handlers --|- Handler0 With Formatter0 |- Handler1 With Formatter1 |- Handler2 (can also without Formatter) |- ... 
more ``` > 注意:一定要记得将 `Handler`, `Processor` 添加注册到 logger 实例上,日志记录才会经过 `Handler` 处理。 ### Processor 定义 `Processor` 接口定义如下: ```go // Processor interface definition type Processor interface { // Process record Process(record *Record) } // ProcessorFunc definition type ProcessorFunc func(record *Record) // Process record func (fn ProcessorFunc) Process(record *Record) { fn(record) } ``` > 你可以使用它在日志 `Record` 到达 `Handler` 处理之前,对Record进行额外的操作,比如:新增字段,添加扩展信息等 添加 processor 到 logger: ```go slog.AddProcessor(mypkg.AddHostname()) // or l := slog.New() l.AddProcessor(mypkg.AddHostname()) ``` 这里使用内置的processor `slog.AddHostname` 作为示例,它可以在每条日志记录上添加新字段 `hostname`。 ```go slog.AddProcessor(slog.AddHostname()) slog.Info("message") ``` 输出效果,包含新增字段 `"hostname":"InhereMac"`: ```json {"channel":"application","level":"INFO","datetime":"2020/07/17 12:01:35","hostname":"InhereMac","data":{},"extra":{},"message":"message"} ``` ### Handler 定义 `Handler` 接口定义如下: > 你可以自定义任何想要的 `Handler`,只需要实现 `slog.Handler` 接口即可。 ```go // Handler interface definition type Handler interface { io.Closer Flush() error // IsHandling Checks whether the given record will be handled by this handler. IsHandling(level Level) bool // Handle a log record. // all records may be passed to this method, and the handler should discard // those that it does not want to handle. Handle(*Record) error } ``` ### Formatter 定义 `Formatter` 接口定义如下: ```go // Formatter interface type Formatter interface { Format(record *Record) ([]byte, error) } ``` 函数包装类型: ```go // FormatterFunc wrapper definition type FormatterFunc func(r *Record) ([]byte, error) // Format a log record func (fn FormatterFunc) Format(r *Record) ([]byte, error) { return fn(r) } ``` **JSON格式化Formatter** ```go type JSONFormatter struct { // Fields exported log fields. Fields []string // Aliases for output fields. you can change export field name. 
// item: `"field" : "output name"` // eg: {"message": "msg"} export field will display "msg" Aliases StringMap // PrettyPrint will indent all json logs PrettyPrint bool // TimeFormat the time format layout. default is time.RFC3339 TimeFormat string } ``` **Text格式化formatter** 默认模板: ```go const DefaultTemplate = "[{{datetime}}] [{{channel}}] [{{level}}] [{{caller}}] {{message}} {{data}} {{extra}}\n" const NamedTemplate = "{{datetime}} channel={{channel}} level={{level}} [file={{caller}}] message={{message}} data={{data}}\n" ``` 更改模板: ```go myTemplate := "[{{datetime}}] [{{level}}] {{message}}" f := slog.NewTextFormatter() f.SetTemplate(myTemplate) ``` ## 自定义日志 自定义 Processor 和 自定义 Formatter 都比较简单,实现一个对应方法即可。 ### 创建自定义Logger实例 `slog.Info, slog.Warn` 等方法,使用的默认logger,并且默认输出日志到控制台。 你可以创建一个全新的 `slog.Logger` 实例: **方式1**: ```go l := slog.New() // add handlers ... h1 := handler.NewConsoleHandler(slog.AllLevels) l.AddHandlers(h1) ``` **方式2**: ```go l := slog.NewWithName("myLogger") // add handlers ... h1 := handler.NewConsoleHandler(slog.AllLevels) l.AddHandlers(h1) ``` **方式3**: ```go package main import ( "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func main() { l := slog.NewWithHandlers(handler.NewConsoleHandler(slog.AllLevels)) l.Info("message") } ``` ### 创建自定义 Handler 你只需要实现 `slog.Handler` 接口即可创建自定义 `Handler`。你可以通过 slog内置的 `handler.LevelsWithFormatter` `handler.LevelWithFormatter`等片段快速的组装自己的 Handler。 示例: > 使用了 `handler.LevelsWithFormatter`, 只还需要实现 `Close, Flush, Handle` 方法即可 ```go type MyHandler struct { handler.LevelsWithFormatter Output io.Writer } func (h *MyHandler) Handle(r *slog.Record) error { // you can write log message to file or send to remote. 
} func (h *MyHandler) Flush() error {} func (h *MyHandler) Close() error {} ``` 将 `Handler` 添加到 logger即可使用: ```go // 添加到默认 logger slog.AddHander(&MyHandler{}) // 或者添加到自定义 logger: l := slog.New() l.AddHander(&MyHandler{}) ``` ## 使用内置处理器 [./handler](handler) 包已经内置了常用的日志 Handler,基本上可以满足绝大部分场景。 ```go // 输出日志到控制台 func NewConsoleHandler(levels []slog.Level) *ConsoleHandler // 发送日志到email邮箱 func NewEmailHandler(from EmailOption, toAddresses []string) *EmailHandler // 发送日志到系统的syslog func NewSysLogHandler(priority syslog.Priority, tag string) (*SysLogHandler, error) // 一个简单的handler实现,输出日志到给定的 io.Writer func NewSimpleHandler(out io.Writer, level slog.Level) *SimpleHandler ``` **输出日志到文件**: ```go // 输出日志到指定文件,默认不带缓冲 func NewFileHandler(logfile string, fns ...ConfigFn) (h *SyncCloseHandler, err error) // 输出日志到指定文件且格式为JSON,默认不带缓冲 func JSONFileHandler(logfile string, fns ...ConfigFn) (*SyncCloseHandler, error) // 带缓冲的输出日志到指定文件 func NewBuffFileHandler(logfile string, buffSize int, fns ...ConfigFn) (*SyncCloseHandler, error) ``` > TIP: `NewFileHandler` `JSONFileHandler` 也可以通过传入 fns `handler.WithBuffSize(buffSize)` 启用写入缓冲 **输出日志到文件并自动切割**: ```go // 根据文件大小进行自动切割 func NewSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error) // 根据时间进行自动切割 func NewTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) // 同时支持配置根据大小和时间进行切割, 默认设置文件大小是 20M,默认自动分割时间是 1小时(EveryHour)。 func NewRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) ``` > TIP: 通过传入 `fns ...ConfigFn` 可以设置更多选项,比如 日志文件保留时间, 日志写入缓冲大小等。 详细设置请看 `handler.Config` 结构体 ### 输出日志到文件 输出日志到指定文件,默认不启用 `buffer` 缓冲写入。 也可以通过传入参数启用缓冲。 ```go package mypkg import ( "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func main() { defer slog.MustClose() // DangerLevels 包含: slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel h1 := handler.MustFileHandler("/tmp/error.log", handler.WithLogLevels(slog.DangerLevels)) // 
配置日志格式 // f := h1.Formatter().(*slog.TextFormatter) f := slog.AsTextFormatter(h1.Formatter()) f.SetTemplate("your template format\n") // NormalLevels 包含: slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel h2 := handler.MustFileHandler("/tmp/info.log", handler.WithLogLevels(slog.NormalLevels)) // 注册 handler 到 logger(调度器) slog.PushHandler(h1) slog.PushHandler(h2) // add logs slog.Info("info message text") slog.Error("error message text") } ``` > 提示: 如果启用了写入缓冲 `buffer`,一定要在程序结束时调用 `logger.Close()/MustClose()` 刷出缓冲区的内容到文件并关闭句柄。 ### 带自动切割的日志处理器 `slog/handler` 也内置了输出日志到指定文件,并且同时支持按时间、按大小分割文件,默认启用 `buffer` 缓冲写入 ```go func Example_rotateFileHandler() { h1 := handler.MustRotateFile("/tmp/error.log", handler.EveryHour, handler.WithLogLevels(slog.DangerLevels)) h2 := handler.MustRotateFile("/tmp/info.log", handler.EveryHour, handler.WithLogLevels(slog.NormalLevels)) slog.PushHandler(h1) slog.PushHandler(h2) // add logs slog.Info("info message") slog.Error("error message") } ``` 按时间切割文件示例: ```text time-rotate-file.log time-rotate-file.log.20201229_155753 time-rotate-file.log.20201229_155754 ``` 按大小进行切割的文件名示例, 格式 `filename.log.yMD_000N`. 
例如: ```text size-rotate-file.log size-rotate-file.log.122915_00001 size-rotate-file.log.122915_00002 ``` 启用gzip压缩旧的日志文件: ```go h1 := handler.MustRotateFile("/tmp/error.log", handler.EveryHour, handler.WithLogLevels(slog.DangerLevels), handler.WithCompress(true), ) ``` ```text size-rotate-file.log.122915_00001.gz size-rotate-file.log.122915_00002.gz ``` ### 根据配置快速创建Handler实例 ```go // Config struct type Config struct { // Logfile for write logs Logfile string `json:"logfile" yaml:"logfile"` // LevelMode 筛选日志记录的过滤级别,默认为 LevelModeList LevelMode uint8 `json:"level_mode" yaml:"level_mode"` // Level 筛选日志记录的级别值。当 LevelMode = LevelModeValue 时生效 Level slog.Level `json:"level" yaml:"level"` // Levels 日志记录的级别列表。当 LevelMode = LevelModeList 时生效 Levels []slog.Level `json:"levels" yaml:"levels"` // UseJSON 是否以 JSON 格式输出日志 UseJSON bool `json:"use_json" yaml:"use_json"` // BuffMode 使用的buffer缓冲模式. allow: line, bite BuffMode string `json:"buff_mode" yaml:"buff_mode"` // BuffSize 开启缓冲时的缓冲区大小,单位为字节。设置为 0 时禁用缓冲 BuffSize int `json:"buff_size" yaml:"buff_size"` // RotateTime 用于按时间切割文件,单位是秒。 RotateTime rotatefile.RotateTime `json:"rotate_time" yaml:"rotate_time"` // MaxSize 用于按大小旋转切割文件,单位是字节。 MaxSize uint64 `json:"max_size" yaml:"max_size"` // Compress 是否对切割后的日志进行 gzip 压缩。 默认为不压缩 Compress bool `json:"compress" yaml:"compress"` // BackupNum 日志清理,保留旧文件的最大数量。 // 0 不限制,默认为 20。 BackupNum uint `json:"backup_num" yaml:"backup_num"` // BackupTime 日志清理,保留旧文件的最长时间。单位是小时 // 0 不进行清理,默认为一周。 BackupTime uint `json:"backup_time" yaml:"backup_time"` // RenameFunc build filename for rotate file RenameFunc func(filepath string, rotateNum uint) string } ``` **Examples**: ```go testFile := "testdata/error.log" h := handler.NewEmptyConfig( handler.WithLogfile(testFile), handler.WithBuffSize(1024*8), handler.WithRotateTimeString("1hour"), handler.WithLogLevels(slog.DangerLevels), ). 
CreateHandler() l := slog.NewWithHandlers(h) ``` **BuffMode说明** `Config.BuffMode` 使用的 BuffMode 类型名称。允许的值:line、bite - `BuffModeLine`:按行缓冲,到达缓冲大小时,始终保证一行完整日志内容写入文件(可以避免日志内容被截断) - `BuffModeBite`:按字节缓冲,当缓冲区的字节数达到指定的大小时,将缓冲区的内容写入文件 ### 使用Builder快速创建Handler实例 使用 `handler.Builder` 可以方便快速的创建Handler实例。 ```go testFile := "testdata/info.log" h := handler.NewBuilder(). WithLogfile(testFile). WithLogLevels(slog.NormalLevels). WithBuffSize(1024*8). WithRotateTime(rotatefile.Every30Min). WithCompress(true). Build() l := slog.NewWithHandlers(h) ``` ## 扩展工具包 `bufwrite` 包: - `bufwrite.BufIOWriter` 通过包装go的 `bufio.Writer` 额外实现了 `Sync(), Close()` 方法,方便使用 - `bufwrite.LineWriter` 参考go的 `bufio.Writer` 实现, 可以支持按行刷出缓冲,对于写日志文件更有用 `rotatefile` 包: - `rotatefile.Writer` 实现对日志文件按大小和指定时间进行自动切割,同时也支持自动清理日志文件 - `handler/rotate_file` 即是通过使用它对日志文件进行切割处理 ### 在其他日志包上使用 rotatefile `rotatefile.Writer` 也可以用在其他日志包上,例如:`log`、`glog` 等等。 例如,在 golang `log` 上使用 rotatefile: ```go package main import ( "log" "github.com/gookit/slog/rotatefile" ) func main() { logFile := "testdata/another_logger.log" writer, err := rotatefile.NewConfig(logFile).Create() if err != nil { panic(err) } log.SetOutput(writer) log.Println("log message") } ``` ## 测试以及性能 ### 单元测试 运行单元测试 ```bash go test -v ./... 
``` ### 性能压测 Benchmark code at [_example/bench_loglibs_test.go](_example/bench_loglibs_test.go) ```bash make test-bench ``` Benchmarks for `slog` and other log packages: > **Note**: test and record ad 2023.04.13 ```shell goos: darwin goarch: amd64 cpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz BenchmarkZapNegative BenchmarkZapNegative-4 8381674 1429 ns/op 216 B/op 3 allocs/op BenchmarkZapSugarNegative BenchmarkZapSugarNegative-4 8655980 1383 ns/op 104 B/op 4 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 14173719 849.8 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 27456256 451.2 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkLogrusNegative-4 2550771 4784 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogNegative >>>> BenchmarkGookitSlogNegative-4 8798220 1375 ns/op 120 B/op 3 allocs/op BenchmarkZapPositive BenchmarkZapPositive-4 10302483 1167 ns/op 192 B/op 1 allocs/op BenchmarkZapSugarPositive BenchmarkZapSugarPositive-4 3833311 3154 ns/op 344 B/op 7 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 14120524 846.7 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 27152686 434.9 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 2601892 4691 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive >>>> BenchmarkGookitSlogPositive-4 8997104 1340 ns/op 120 B/op 3 allocs/op PASS ok command-line-arguments 167.095s ``` ## Gookit packages - [gookit/ini](https://github.com/gookit/ini) Go config management, use INI files - [gookit/rux](https://github.com/gookit/rux) Simple and fast request router for golang HTTP - [gookit/gcli](https://github.com/gookit/gcli) Build CLI application, tool library, running CLI commands - [gookit/slog](https://github.com/gookit/slog) Lightweight, extensible, configurable logging library written in Go - [gookit/color](https://github.com/gookit/color) A command-line color library with true color support, universal API methods and 
Windows support - [gookit/event](https://github.com/gookit/event) Lightweight event manager and dispatcher implements by Go - [gookit/cache](https://github.com/gookit/cache) Generic cache use and cache manager for golang. support File, Memory, Redis, Memcached. - [gookit/config](https://github.com/gookit/config) Go config management. support JSON, YAML, TOML, INI, HCL, ENV and Flags - [gookit/filter](https://github.com/gookit/filter) Provide filtering, sanitizing, and conversion of golang data - [gookit/validate](https://github.com/gookit/validate) Use for data validation and filtering. support Map, Struct, Form data - [gookit/goutil](https://github.com/gookit/goutil) Some utils for the Go: string, array/slice, map, format, cli, env, filesystem, test and more - More, please see https://github.com/gookit ## Acknowledgment 实现参考了以下项目,非常感谢它们 - https://github.com/phuslu/log - https://github.com/golang/glog - https://github.com/sirupsen/logrus - https://github.com/Seldaek/monolog - https://github.com/syyongx/llog - https://github.com/uber-go/zap - https://github.com/rs/zerolog - https://github.com/natefinch/lumberjack ## LICENSE [MIT](LICENSE) ================================================ FILE: _example/bench_loglibs.md ================================================ # Log libs benchmarks Run benchmark: `make test-bench` > **Note**: on each test will update all package to latest. 
## v0.5.5 - 2023.11.30 ```shell goos: darwin goarch: amd64 cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz BenchmarkZapNegative BenchmarkZapNegative-4 14441875 821.2 ns/op 216 B/op 3 allocs/op BenchmarkZapSugarNegative BenchmarkZapSugarNegative-4 13870006 916.1 ns/op 104 B/op 4 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 34721730 359.2 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 39690291 314.4 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkLogrusNegative-4 5605184 2161 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogNegative BenchmarkGookitSlogNegative-4 14375598 819.2 ns/op 256 B/op 4 allocs/op BenchmarkZapPositive BenchmarkZapPositive-4 15237236 788.5 ns/op 192 B/op 1 allocs/op BenchmarkZapSugarPositive BenchmarkZapSugarPositive-4 6592038 1910 ns/op 344 B/op 7 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 33931623 366.1 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 38740174 309.4 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 5697038 2197 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive BenchmarkGookitSlogPositive-4 14531062 814.6 ns/op 256 B/op 4 allocs/op PASS ok command-line-arguments 159.849s ``` ## v0.5.1 - 2023.04.13 > **Note**: test and record ad 2023.04.13 ```shell goos: darwin goarch: amd64 cpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz BenchmarkZapNegative BenchmarkZapNegative-4 8381674 1429 ns/op 216 B/op 3 allocs/op BenchmarkZapSugarNegative BenchmarkZapSugarNegative-4 8655980 1383 ns/op 104 B/op 4 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 14173719 849.8 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 27456256 451.2 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkLogrusNegative-4 2550771 4784 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogNegative BenchmarkGookitSlogNegative-4 8798220 1375 ns/op 120 B/op 3 allocs/op BenchmarkZapPositive 
BenchmarkZapPositive-4 10302483 1167 ns/op 192 B/op 1 allocs/op BenchmarkZapSugarPositive BenchmarkZapSugarPositive-4 3833311 3154 ns/op 344 B/op 7 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 14120524 846.7 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 27152686 434.9 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 2601892 4691 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive BenchmarkGookitSlogPositive-4 8997104 1340 ns/op 120 B/op 3 allocs/op PASS ok command-line-arguments 167.095s ``` ## v0.3.5 - 2022.11.08 > **Note**: test and record ad 2022.11.08 ```shell % make test-bench goos: darwin goarch: amd64 cpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz BenchmarkZapNegative BenchmarkZapNegative-4 123297997 110.4 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 891508806 13.36 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 811990076 14.74 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkLogrusNegative-4 242633541 49.40 ns/op 16 B/op 1 allocs/op BenchmarkGookitSlogNegative BenchmarkGookitSlogNegative-4 29102253 422.8 ns/op 125 B/op 4 allocs/op BenchmarkZapPositive BenchmarkZapPositive-4 9772791 1194 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 13944360 856.8 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 27839614 431.2 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 2621076 4583 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive BenchmarkGookitSlogPositive-4 8908768 1359 ns/op 149 B/op 5 allocs/op PASS ok command-line-arguments 149.379s ``` ## v0.3.0 > **Note**: test and record ad 2022.04.27 ```shell % make test-bench goos: darwin goarch: amd64 cpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz BenchmarkZapNegative BenchmarkZapNegative-4 128133166 93.97 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 
909583207 13.41 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 784099310 15.24 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkLogrusNegative-4 289939296 41.60 ns/op 16 B/op 1 allocs/op BenchmarkGookitSlogNegative BenchmarkGookitSlogNegative-4 29131203 417.4 ns/op 125 B/op 4 allocs/op BenchmarkZapPositive BenchmarkZapPositive-4 9910075 1219 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 13966810 871.0 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 26743148 446.2 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 2658482 4481 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive BenchmarkGookitSlogPositive-4 8349562 1441 ns/op 165 B/op 6 allocs/op PASS ok command-line-arguments 146.669s ``` ### beta 2022.04.17 > **Note**: test and record ad 2022.04.17 ```shell $ go test -v -cpu=4 -run=none -bench=. -benchtime=10s -benchmem bench_loglibs_test.go goos: darwin goarch: amd64 cpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz BenchmarkZapNegative BenchmarkZapNegative-4 130808992 91.91 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 914445844 13.19 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 792539167 15.32 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkLogrusNegative-4 289393606 40.61 ns/op 16 B/op 1 allocs/op BenchmarkGookitSlogNegative BenchmarkGookitSlogNegative-4 29522170 405.3 ns/op 125 B/op 4 allocs/op BenchmarkZapPositive BenchmarkZapPositive-4 9113048 1283 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 14691699 797.0 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 27634338 424.5 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 2734669 4363 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive BenchmarkGookitSlogPositive-4 7740348 1563 ns/op 165 B/op 6 allocs/op PASS ok 
command-line-arguments 145.175s ``` ## v0.2.1 > **Note**: test and record ad 2022.04.17 ```shell $ go test -v -cpu=4 -run=none -bench=. -benchtime=10s -benchmem bench_loglibs_test.go goos: darwin goarch: amd64 cpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz BenchmarkZapNegative BenchmarkZapNegative-4 125500471 125.8 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 839046109 13.71 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 757766400 15.56 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkLogrusNegative-4 253178256 47.12 ns/op 16 B/op 1 allocs/op BenchmarkGookitSlogNegative BenchmarkGookitSlogNegative-4 30091606 401.9 ns/op 45 B/op 3 allocs/op BenchmarkZapPositive BenchmarkZapPositive-4 9761935 1216 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 13860344 837.1 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 27666529 447.8 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 2705653 4403 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive BenchmarkGookitSlogPositive-4 1836384 6882 ns/op 680 B/op 11 allocs/op PASS ok command-line-arguments 156.038s ``` ## v0.2.0 > record ad 2022.02.26 ```shell $ go test -v -cpu=4 -run=none -bench=. 
-benchtime=10s -benchmem bench_loglibs_test.go goos: windows goarch: amd64 cpu: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz BenchmarkZapNegative BenchmarkZapNegative-4 139243226 86.39 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 1000000000 8.302 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkPhusLogNegative-4 1000000000 8.989 ns/op 0 B/op 0 allocs/op BenchmarkLogrusNegative BenchmarkGookitSlogNegative-4 38300540 323.3 ns/op 221 B/op 5 allocs/op BenchmarkZapPositive BenchmarkZapPositive-4 14453001 828.1 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 28671724 420.9 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 45619569 261.9 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 5092164 2366 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive BenchmarkGookitSlogPositive-4 3184557 3754 ns/op 856 B/op 13 allocs/op PASS ok command-line-arguments 135.460s ``` ## v0.1.5 > record ad 2022.02.26 ```shell $ go test -v -cpu=4 -run=none -bench=. 
-benchtime=10s -benchmem bench_loglibs_test.go goos: windows goarch: amd64 cpu: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz BenchmarkZapNegative BenchmarkZapNegative-4 137676860 86.43 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogNegative BenchmarkZeroLogNegative-4 1000000000 8.284 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogNegative BenchmarkZapPositive-4 14250313 831.7 ns/op 192 B/op 1 allocs/op BenchmarkZeroLogPositive BenchmarkZeroLogPositive-4 28183436 426.0 ns/op 0 B/op 0 allocs/op BenchmarkPhusLogPositive BenchmarkPhusLogPositive-4 44034984 258.7 ns/op 0 B/op 0 allocs/op BenchmarkLogrusPositive BenchmarkLogrusPositive-4 5005593 2421 ns/op 608 B/op 17 allocs/op BenchmarkGookitSlogPositive BenchmarkGookitSlogPositive-4 1714084 7029 ns/op 4480 B/op 45 allocs/op PASS ok command-line-arguments 138.199s ``` ================================================ FILE: _example/bench_loglibs_test.go ================================================ package main import ( "io" goslog "log/slog" "testing" "github.com/gookit/slog" "github.com/gookit/slog/handler" phuslu "github.com/phuslu/log" "github.com/rs/zerolog" "github.com/sirupsen/logrus" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) // In _example/ dir, run: // // go test -v -cpu=4 -run=none -bench=. 
-benchtime=10s -benchmem bench_loglibs_test.go // // code refer: // // https://github.com/phuslu/log var msg = "The quick brown fox jumps over the lazy dog" func BenchmarkGoSlogNegative(b *testing.B) { logger := goslog.New(goslog.NewTextHandler(io.Discard, &goslog.HandlerOptions{ Level: goslog.LevelInfo, })) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info(msg, goslog.String("rate", "15"), goslog.Int("low", 16), goslog.Float64("high", 123.2)) } } func BenchmarkZapNegative(b *testing.B) { logger := zap.New(zapcore.NewCore( zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()), zapcore.AddSync(io.Discard), zapcore.InfoLevel, )) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info(msg, zap.String("rate", "15"), zap.Int("low", 16), zap.Float32("high", 123.2)) } } func BenchmarkZapSugarNegative(b *testing.B) { logger := zap.New(zapcore.NewCore( zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()), zapcore.AddSync(io.Discard), // zapcore.AddSync(os.Stdout), zapcore.InfoLevel, )).Sugar() // logger.Info("rate", "15", "low", 16, "high", 123.2, msg) // return b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } } func BenchmarkZeroLogNegative(b *testing.B) { logger := zerolog.New(io.Discard).With().Timestamp().Logger().Level(zerolog.InfoLevel) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info().Str("rate", "15").Int("low", 16).Float32("high", 123.2).Msg(msg) } } func BenchmarkPhusLogNegative(b *testing.B) { logger := phuslu.Logger{Level: phuslu.InfoLevel, Writer: phuslu.IOWriter{Writer: io.Discard}} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info().Str("rate", "15").Int("low", 16).Float32("high", 123.2).Msg(msg) } } // "github.com/sirupsen/logrus" func BenchmarkLogrusNegative(b *testing.B) { logger := logrus.New() logger.Out = io.Discard logger.Level = logrus.InfoLevel b.ReportAllocs() b.ResetTimer() for i := 0; i < 
b.N; i++ { logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } } func BenchmarkGookitSlogNegative(b *testing.B) { logger := slog.NewWithHandlers( handler.NewIOWriter(io.Discard, []slog.Level{slog.InfoLevel}), // handler.NewIOWriter(os.Stdout, []slog.Level{slog.InfoLevel}), ) logger.ReportCaller = false // logger.Info("rate", "15", "low", 16, "high", 123.2, msg) // return b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } } func BenchmarkZapPositive(b *testing.B) { logger := zap.New(zapcore.NewCore( zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), zapcore.AddSync(io.Discard), zapcore.InfoLevel, )) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info(msg, zap.String("rate", "15"), zap.Int("low", 16), zap.Float32("high", 123.2)) } } func BenchmarkZapSugarPositive(b *testing.B) { logger := zap.New(zapcore.NewCore( zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()), zapcore.AddSync(io.Discard), zapcore.InfoLevel, )).Sugar() b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info(msg, zap.String("rate", "15"), zap.Int("low", 16), zap.Float32("high", 123.2)) } } func BenchmarkZeroLogPositive(b *testing.B) { logger := zerolog.New(io.Discard).With().Timestamp().Logger().Level(zerolog.InfoLevel) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info().Str("rate", "15").Int("low", 16).Float32("high", 123.2).Msg(msg) } } func BenchmarkPhusLogPositive(b *testing.B) { logger := phuslu.Logger{Level: phuslu.InfoLevel, Writer: phuslu.IOWriter{Writer: io.Discard}} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info().Str("rate", "15").Int("low", 16).Float32("high", 123.2).Msg(msg) } } func BenchmarkLogrusPositive(b *testing.B) { logger := logrus.New() logger.Out = io.Discard logger.Level = logrus.InfoLevel b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } } 
func BenchmarkGookitSlogPositive(b *testing.B) { logger := slog.NewWithHandlers( handler.NewIOWriter(io.Discard, []slog.Level{slog.InfoLevel}), ) logger.ReportCaller = false b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } } ================================================ FILE: _example/demos/demo1.go ================================================ package main import ( log "github.com/gookit/slog" ) const simplestTemplate = "[{{datetime}}] [{{level}}] {{message}} {{data}} {{extra}}" func init() { log.GetFormatter().(*log.TextFormatter).SetTemplate(simplestTemplate) log.SetLogLevel(log.ErrorLevel) log.Errorf("Test") } func main() { } ================================================ FILE: _example/demos/simple.go ================================================ package main import "github.com/gookit/slog" // profile run: // // go build -gcflags '-m -l' simple.go func main() { // stackIt() // _ = stackIt2() slogTest() } //go:noinline func stackIt() int { y := 2 return y * 2 } //go:noinline func stackIt2() *int { y := 2 res := y * 2 return &res } func slogTest() { var msg = "The quick brown fox jumps over the lazy dog" slog.Info("rate", "15", "low", 16, "high", 123.2, msg) // slog.WithFields(slog.M{ // "omg": true, // "number": 122, // }).Infof("slog %s", "message message") } ================================================ FILE: _example/demos/slog_all_level.go ================================================ package main import ( "errors" "github.com/gookit/goutil/errorx" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) // run: go run ./_example/slog_all_level.go func main() { l := slog.NewWithConfig(func(l *slog.Logger) { l.DoNothingOnPanicFatal() }) l.AddHandler(handler.NewConsoleHandler(slog.AllLevels)) printAllLevel(l, "this is a", "log", "message") } func printAllLevel(l *slog.Logger, args ...any) { l.Debug(args...) l.Info(args...) l.Warn(args...) l.Error(args...) l.Print(args...) 
l.Fatal(args...) l.Panic(args...) l.Trace(args...) l.Notice(args...) l.ErrorT(errors.New("a error object")) l.ErrorT(errorx.New("error with stack info")) } ================================================ FILE: _example/diff-with-zap-zerolog.md ================================================ # diff with zap, zerolog 是的,zap 非常快速。 但是有一点问题: - 配置起来稍显复杂 - 没有内置切割文件处理和文件清理 - 自定义扩展性不是很好 Yes, zap is very fast. But there is a little problem: - Slightly complicated to configure - No built-in cutting file handling, file cleanup - Custom extensibility is not very good ================================================ FILE: _example/go.mod ================================================ module slog_example go 1.19 require ( github.com/golang/glog v1.2.5 github.com/gookit/goutil v0.7.4 github.com/gookit/slog v0.6.0 github.com/phuslu/log v1.0.119 github.com/rs/zerolog v1.34.0 github.com/sirupsen/logrus v1.9.3 github.com/syyongx/llog v0.0.0-20200222114215-e8f9f86ac0a3 go.uber.org/zap v1.27.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) require ( github.com/gookit/color v1.6.0 // indirect github.com/gookit/gsr v0.1.1 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect ) replace github.com/gookit/slog => ../ ================================================ FILE: _example/handler/grouped.go ================================================ package handler import "github.com/gookit/slog" /******************************************************************************** * Grouped Handler ********************************************************************************/ // GroupedHandler definition type GroupedHandler struct { 
handlers []slog.Handler // Levels for log message Levels []slog.Level // IgnoreErr on handling messages IgnoreErr bool } // NewGroupedHandler create new GroupedHandler func NewGroupedHandler(handlers []slog.Handler) *GroupedHandler { return &GroupedHandler{ handlers: handlers, } } // IsHandling Check if the current level can be handling func (h *GroupedHandler) IsHandling(level slog.Level) bool { for _, l := range h.Levels { if l == level { return true } } return false } // Handle log record func (h *GroupedHandler) Handle(record *slog.Record) (err error) { for _, handler := range h.handlers { err = handler.Handle(record) if !h.IgnoreErr && err != nil { return err } } return } // Close log handlers func (h *GroupedHandler) Close() error { for _, handler := range h.handlers { err := handler.Close() if !h.IgnoreErr && err != nil { return err } } return nil } // Flush log records func (h *GroupedHandler) Flush() error { for _, handler := range h.handlers { err := handler.Flush() if !h.IgnoreErr && err != nil { return err } } return nil } ================================================ FILE: _example/issue100/issue100_test.go ================================================ package main import ( "fmt" "testing" "time" "github.com/gookit/slog" "github.com/gookit/slog/handler" "go.uber.org/zap" "go.uber.org/zap/zapcore" "gopkg.in/natefinch/lumberjack.v2" ) type Obj struct { a int b int64 c string d bool } var ( str1 = "str1" str2 = "str222222222222" int1 = 1 int2 = 2 obj = Obj{1, 2, "3", true} ) func TestZapSugar(t *testing.T) { w := zapcore.AddSync(&lumberjack.Logger{ Filename: "./zap-sugar.log", MaxSize: 500, // megabytes MaxBackups: 3, MaxAge: 28, // days }) core := zapcore.NewCore( zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()), w, zap.InfoLevel, ) logger := zap.New(core) sugar := logger.Sugar() sugar.Info("message is msg") count := 100000 start := time.Now().UnixNano() for n := count; n > 0; n-- { sugar.Info("message is msg") } end := 
time.Now().UnixNano() fmt.Printf("\n zap sugar no format\n total cost %d ns\n avg cost %d ns \n count %d \n", end-start, (end-start)/int64(count), count) start = time.Now().UnixNano() for n := count; n > 0; n-- { sugar.Infof("message is %d %d %s %s %#v", int1, int2, str1, str2, obj) } end = time.Now().UnixNano() fmt.Printf("\n zap sugar format\n total cost %d ns\n avg cost %d ns \n count %d \n", end-start, (end-start)/int64(count), count) sugar.Sync() } func TestZapLog(t *testing.T) { w := zapcore.AddSync(&lumberjack.Logger{ Filename: "./zap.log", MaxSize: 500, // megabytes MaxBackups: 3, MaxAge: 28, // days }) core := zapcore.NewCore( zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()), w, zap.InfoLevel, ) logger := zap.New(core) count := 100000 start := time.Now().UnixNano() for n := count; n > 0; n-- { logger.Info("message is msg") } end := time.Now().UnixNano() fmt.Printf("\n zap no format\n total cost %d ns\n avg cost %d ns \n count %d \n", end-start, (end-start)/int64(count), count) start = time.Now().UnixNano() for n := count; n > 0; n-- { logger.Info("failed to fetch URL", // Structured context as strongly typed Field values. 
zap.Int("int1", int1), zap.Int("int2", int2), zap.String("str", str1), zap.String("str2", str2), zap.Any("backoff", obj), ) } end = time.Now().UnixNano() fmt.Printf("\n zap format\n total cost %d ns\n avg cost %d ns \n count %d \n", end-start, (end-start)/int64(count), count) logger.Sync() } func TestSlog(t *testing.T) { h1, err := handler.NewEmptyConfig( handler.WithLogfile("./slog-info.log"), // 路径 handler.WithRotateTime(handler.EveryHour), // 日志分割间隔 handler.WithLogLevels(slog.AllLevels), // 日志level handler.WithBuffSize(4*1024*1024), // buffer大小 handler.WithCompress(true), // 是否压缩旧日志 zip handler.WithBackupNum(24*3), // 保留旧日志数量 handler.WithBuffMode(handler.BuffModeBite), // handler.WithRenameFunc(), //RenameFunc build filename for rotate file ).CreateHandler() if err != nil { fmt.Printf("Create slog handler err: %#v", err) return } f := slog.AsTextFormatter(h1.Formatter()) myTplt := "[{{datetime}}] [{{level}}] [{{caller}}] {{message}}\n" f.SetTemplate(myTplt) logs := slog.NewWithHandlers(h1) count := 100000 start := time.Now().UnixNano() for i := 0; i < count; i++ { logs.Info("message is msg") } end := time.Now().UnixNano() fmt.Printf("\n slog no format \n total cost %d ns\n avg cost %d ns \n count %d \n", end-start, (end-start)/int64(count), count) start = time.Now().UnixNano() for n := count; n > 0; n-- { logs.Infof("message is %d %d %s %s %#v", int1, int2, str1, str2, obj) } end = time.Now().UnixNano() fmt.Printf("\n slog format \n total cost %d ns\n avg cost %d ns \n count %d \n", end-start, (end-start)/int64(count), count) logs.MustClose() } ================================================ FILE: _example/issue111/main.go ================================================ package main import ( "fmt" "os" "time" "github.com/gookit/goutil/syncs" "github.com/gookit/goutil/timex" "github.com/gookit/slog" "github.com/gookit/slog/handler" "github.com/gookit/slog/rotatefile" ) const pth = "./logs/main.log" func main() { log := slog.New() h, err := 
handler.NewTimeRotateFileHandler( pth, rotatefile.RotateTime(30), handler.WithBuffSize(0), handler.WithBackupNum(5), handler.WithCompress(true), func(c *handler.Config) { c.DebugMode = true }, ) if err != nil { panic(err) } log.AddHandler(h) fmt.Println("Start...(can be stop by CTRL+C)", timex.NowDate()) go func() { for { select { case <-time.After(time.Second): log.Info("Log " + time.Now().String()) } } }() syncs.WaitCloseSignals(func(sig os.Signal) { fmt.Println("\nGot signal:", sig) fmt.Println("Close logger ...") log.MustClose() }) fmt.Println("Exited at", timex.NowDate()) } ================================================ FILE: _example/issue137/main.go ================================================ package main import ( "fmt" "path" "time" "github.com/gookit/slog" "github.com/gookit/slog/handler" "github.com/gookit/slog/rotatefile" ) type GLogConfig137 struct { Level string `yaml:"Level"` Pattern string `yaml:"Pattern"` TimeField string `yaml:"TimeField"` TimeFormat string `yaml:"TimeFormat"` Template string `yaml:"Template"` RotateTimeFormat string `yaml:"RotateTimeFormat"` } type LogRotateConfig137 struct { Filepath string `yaml:"filepath"` RotateMode rotatefile.RotateMode `yaml:"rotate_mode"` RotateTime rotatefile.RotateTime `yaml:"rotate_time"` MaxSize uint64 `yaml:"max_size"` BackupNum uint `yaml:"backup_num"` BackupTime uint `yaml:"backup_time"` Compress bool `yaml:"compress"` TimeFormat string `yaml:"time_format"` BuffSize int `yaml:"buff_size"` BuffMode string `yaml:"buff_mode"` } type LogConfig137 struct { GLogConfig GLogConfig137 `yaml:"GLogConfig"` LogRotate LogRotateConfig137 `yaml:"LogRotate"` ErrorLogRotate LogRotateConfig137 `yaml:"ErrorLogRotate"` } func main() { slog.DebugMode = true logConfig := LogConfig137{ GLogConfig: GLogConfig137{ Level: "debug", Pattern: "development", TimeField: "time", TimeFormat: "2006-01-02 15:04:05.000", Template: "{{datetime}}\t{{level}}\t{{channel}}\t[{{caller}}]\t{{message}}\t{{data}}\t{{extra}}\n", 
RotateTimeFormat: "20060102", }, LogRotate: LogRotateConfig137{ Filepath: "testdata/info137c2.log", RotateMode: 0, RotateTime: 86400, MaxSize: 512, BackupNum: 3, BackupTime: 72, Compress: true, TimeFormat: "20060102", BuffSize: 512, BuffMode: "line", }, ErrorLogRotate: LogRotateConfig137{ Filepath: "testdata/err137c2.log", RotateMode: 0, RotateTime: 86400, MaxSize: 512, BackupNum: 3, BackupTime: 72, Compress: true, TimeFormat: "20060102", BuffSize: 512, BuffMode: "line", }, } tpl := logConfig.GLogConfig.Template // slog.DefaultChannelName = "gookit" slog.DefaultTimeFormat = logConfig.GLogConfig.TimeFormat slog.Configure(func(l *slog.SugaredLogger) { l.Level = slog.TraceLevel l.DoNothingOnPanicFatal() l.ChannelName = "gookit" }) slog.GetFormatter().(*slog.TextFormatter).SetTemplate(tpl) slog.GetFormatter().(*slog.TextFormatter).TimeFormat = slog.DefaultTimeFormat rotatefile.DefaultFilenameFn = func(filepath string, rotateNum uint) string { suffix := time.Now().Format(logConfig.GLogConfig.RotateTimeFormat) // eg: /tmp/error.log => /tmp/error_20250302_01.log // 将文件名扩展名取出来, 然后在扩展名中间加入下划线+日期+下划线+序号+扩展名的形式 ext := path.Ext(filepath) filename := filepath[:len(filepath)-len(ext)] return filename + fmt.Sprintf("_%s_%02d", suffix, rotateNum) + ext } h1 := handler.MustRotateFile(logConfig.ErrorLogRotate.Filepath, logConfig.ErrorLogRotate.RotateTime, // handler.WithFilePerm(os.ModeAppend|os.ModePerm), handler.WithLevelMode(slog.LevelModeList), handler.WithLogLevels(slog.DangerLevels), handler.WithMaxSize(logConfig.ErrorLogRotate.MaxSize), handler.WithBackupNum(logConfig.ErrorLogRotate.BackupNum), handler.WithBackupTime(logConfig.ErrorLogRotate.BackupTime), handler.WithCompress(logConfig.ErrorLogRotate.Compress), handler.WithBuffSize(logConfig.ErrorLogRotate.BuffSize), handler.WithBuffMode(logConfig.ErrorLogRotate.BuffMode), handler.WithRotateMode(logConfig.ErrorLogRotate.RotateMode), ) h1.Formatter().(*slog.TextFormatter).SetTemplate(tpl) h2 := 
handler.MustRotateFile(logConfig.LogRotate.Filepath, logConfig.LogRotate.RotateTime, // handler.WithFilePerm(os.ModeAppend|os.ModePerm), handler.WithLevelMode(slog.LevelModeList), handler.WithLogLevels(slog.AllLevels), handler.WithMaxSize(logConfig.LogRotate.MaxSize), handler.WithBackupNum(logConfig.LogRotate.BackupNum), handler.WithBackupTime(logConfig.LogRotate.BackupTime), handler.WithCompress(logConfig.LogRotate.Compress), handler.WithBuffSize(logConfig.LogRotate.BuffSize), handler.WithBuffMode(logConfig.LogRotate.BuffMode), handler.WithRotateMode(logConfig.LogRotate.RotateMode), ) h2.Formatter().(*slog.TextFormatter).SetTemplate(tpl) slog.PushHandlers(h1, h2) // add logs for i := 0; i < 20; i++ { slog.Infof("hi, this is a example information ... message text. log index=%d", i) slog.WithValue("test137", "some value").Warn("测试滚动多个文件,同时设置了清理日志文件") } slog.MustClose() time.Sleep(time.Second * 2) } ================================================ FILE: _example/pprof/main.go ================================================ package main import ( "fmt" "io" "log" "os" "runtime/pprof" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) // run serve: // // go run ./_examples/pprof // // see prof on cli: // // go tool pprof pprof/cpu_prof_data.out // // see prof on web: // // go tool pprof -http=:8080 pprof/cpu_prof_data.out func main() { logger := slog.NewWithHandlers( handler.NewIOWriter(io.Discard, slog.NormalLevels), ) times := 10000 fmt.Println("start profile, run times:", times) cpuProfile := "cpu_prof_data.out" f, err := os.Create(cpuProfile) if err != nil { log.Fatal(err) } err = pprof.StartCPUProfile(f) if err != nil { log.Fatal(err) } defer pprof.StopCPUProfile() var msg = "The quick brown fox jumps over the lazy dog" for i := 0; i < times; i++ { logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } fmt.Println("see prof on web:\n go tool pprof -http=:8080", cpuProfile) } ================================================ FILE: _example/refer/main.go 
================================================ package main import ( "flag" "log" "time" "github.com/golang/glog" "github.com/gookit/slog" "github.com/sirupsen/logrus" "github.com/syyongx/llog" "go.uber.org/zap" "github.com/rs/zerolog" zlog "github.com/rs/zerolog/log" ) func main() { // for glog flag.Parse() // -- log log.Println("raw log message") // -- glog glog.Infof("glog %s", "message message") // -- llog llog.NewLogger("llog test").Info("llog message message") // -- slog slog.Debug("slog message message") slog.WithFields(slog.M{ "omg": true, "number": 122, }).Infof("slog %s", "message message") // -- logrus logrus.Debug("logrus message message") logrus.WithFields(logrus.Fields{ "omg": true, "number": 122, }).Warn("The group's number increased tremendously!") // -- zerolog zerolog.TimeFieldFormat = zerolog.TimeFormatUnix zlog.Debug(). Str("Scale", "833 cents"). Float64("Interval", 833.09). Msg("zerolog message") zlog.Print("zerolog hello") // slog.Infof("log %s", "message") url := "/path/to/some" // -- zap logger, _ := zap.NewProduction() defer logger.Sync() // flushes buffer, if any sugar := logger.Sugar() sugar.Infow("failed to fetch URL", // Structured context as loosely typed key-value pairs. "url", url, "attempt", 3, "backoff", time.Second, ) sugar.Infof("zap log. Failed to fetch URL: %s", url) } ================================================ FILE: benchmark2_test.go ================================================ package slog import ( "fmt" "io" "testing" "github.com/gookit/goutil/dump" ) func TestLogger_newRecord_AllocTimes(_ *testing.T) { l := Std() l.Output = io.Discard defer l.Reset() // output: 0 times fmt.Println("Alloc Times:", int(testing.AllocsPerRun(100, func() { // logger.Info("rate", "15", "low", 16, "high", 123.2, msg) r := l.newRecord() // do something... 
l.releaseRecord(r) }))) } func Test_AllocTimes_formatArgsWithSpaces_oneElem(_ *testing.T) { // string Alloc Times: 0 fmt.Println("string Alloc Times:", int(testing.AllocsPerRun(10, func() { // logger.Info("rate", "15", "low", 16, "high", 123.2, msg) formatArgsWithSpaces([]any{"msg"}) }))) // int Alloc Times: 1 fmt.Println("int Alloc Times:", int(testing.AllocsPerRun(10, func() { formatArgsWithSpaces([]any{2343}) }))) // float Alloc Times: 2 fmt.Println("float Alloc Times:", int(testing.AllocsPerRun(10, func() { formatArgsWithSpaces([]any{123.2}) }))) } func Test_AllocTimes_formatArgsWithSpaces_manyElem(_ *testing.T) { // Alloc Times: 1 // TIP: // `float` will alloc 2 times memory // `int <0`, `int > 100` will alloc 1 times memory fmt.Println("Alloc Times:", int(testing.AllocsPerRun(50, func() { formatArgsWithSpaces([]any{ "rate", -23, true, 106, "high", 123.2, }) }))) } func Test_AllocTimes_stringsPool(_ *testing.T) { l := Std() l.Output = io.Discard l.LowerLevelName = true defer l.Reset() var ln, cp int // output: 0 times fmt.Println("Alloc Times:", int(testing.AllocsPerRun(100, func() { // logger.Info("rate", "15", "low", 16, "high", 123.2, msg) // oldnew := stringsPool.Get().([]string) // defer stringsPool.Put(oldnew) oldnew := make([]string, 0, len(map[string]string{"a": "b"})*2+1) oldnew = append(oldnew, "a") oldnew = append(oldnew, "b") oldnew = append(oldnew, "c") // oldnew = append(oldnew, "d") ln = len(oldnew) cp = cap(oldnew) }))) dump.P(ln, cp) } func TestLogger_Info_oneElem_AllocTimes(_ *testing.T) { l := Std() // l.Output = io.Discard l.ReportCaller = false l.LowerLevelName = true // 启用 color 会导致多次(10次左右)内存分配 l.Formatter.(*TextFormatter).EnableColor = false defer l.Reset() // output: 2 times fmt.Println("Alloc Times:", int(testing.AllocsPerRun(5, func() { // l.Info("rate", "15", "low", 16, "high", 123.2, "msg") l.Info("msg") }))) } func TestLogger_Info_moreElem_AllocTimes(_ *testing.T) { l := NewStdLogger() // l.Output = io.Discard l.ReportCaller = 
false l.LowerLevelName = true // 启用 color 会导致多次(10次左右)内存分配 l.Formatter.(*TextFormatter).EnableColor = false defer l.Reset() // output: 5 times fmt.Println("Alloc Times:", int(testing.AllocsPerRun(5, func() { l.Info("rate", "15", "low", 16, "high", 123.2, "msg") }))) // output: 5 times fmt.Println("Alloc Times:", int(testing.AllocsPerRun(5, func() { l.Info("rate", "15", "low", 16, "high") // l.Info("msg") }))) } ================================================ FILE: benchmark_test.go ================================================ package slog_test import ( "io" "testing" "github.com/gookit/goutil/dump" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) // go test -v -cpu=4 -run=none -bench=. -benchtime=10s -benchmem bench_test.go // // code refer: // // https://github.com/phuslu/log var msg = "The quick brown fox jumps over the lazy dog" func BenchmarkGookitSlogNegative(b *testing.B) { logger := slog.NewWithHandlers( handler.NewIOWriter(io.Discard, []slog.Level{slog.ErrorLevel}), ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } } func TestLogger_Info_Negative(t *testing.T) { logger := slog.NewWithHandlers( handler.NewIOWriter(io.Discard, []slog.Level{slog.ErrorLevel}), ) logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } func BenchmarkGookitSlogPositive(b *testing.B) { logger := slog.NewWithHandlers( handler.NewIOWriter(io.Discard, slog.NormalLevels), ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } } func BenchmarkTextFormatter_Format(b *testing.B) { r := newLogRecord("TEST_LOG_MESSAGE") f := slog.NewTextFormatter() // 1284 ns/op 456 B/op 11 allocs/op // On use DefaultTemplate // 304.4 ns/op 200 B/op 2 allocs/op // f.SetTemplate("{{datetime}} {{message}}") // 271.3 ns/op 200 B/op 2 allocs/op // f.SetTemplate("{{datetime}}") // f.SetTemplate("{{message}}") dump.P(f.Template()) b.ReportAllocs() b.ResetTimer() 
for i := 0; i < b.N; i++ { _, err := f.Format(r) if err != nil { panic(err) } } } func TestLogger_Info_Positive(t *testing.T) { logger := slog.NewWithHandlers( handler.NewIOWriter(io.Discard, slog.NormalLevels), ) logger.Info("rate", "15", "low", 16, "high", 123.2, msg) } ================================================ FILE: bufwrite/bufio_writer.go ================================================ // Package bufwrite provides buffered io.Writer with sync and close methods. package bufwrite import ( "bufio" "io" ) // BufIOWriter wrap the bufio.Writer, implements the Sync() Close() methods type BufIOWriter struct { bufio.Writer // backup the bufio.Writer.wr writer io.Writer } // NewBufIOWriterSize instance with size func NewBufIOWriterSize(w io.Writer, size int) *BufIOWriter { return &BufIOWriter{ writer: w, Writer: *bufio.NewWriterSize(w, size), } } // NewBufIOWriter instance func NewBufIOWriter(w io.Writer) *BufIOWriter { return NewBufIOWriterSize(w, defaultBufSize) } // Close implements the io.Closer func (w *BufIOWriter) Close() error { if err := w.Flush(); err != nil { return err } // is closer if c, ok := w.writer.(io.Closer); ok { return c.Close() } return nil } // Sync implements the Syncer func (w *BufIOWriter) Sync() error { return w.Flush() } ================================================ FILE: bufwrite/bufwrite_test.go ================================================ package bufwrite_test import ( "bytes" "testing" "github.com/gookit/goutil/errorx" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog/bufwrite" ) func TestNewBufIOWriter_WriteString(t *testing.T) { w := new(bytes.Buffer) bw := bufwrite.NewBufIOWriterSize(w, 12) _, err := bw.WriteString("hello, ") assert.NoErr(t, err) assert.Eq(t, 0, w.Len()) _, err = bw.WriteString("worlds. oh") assert.NoErr(t, err) assert.Eq(t, "hello, world", w.String()) // different the LineWriter assert.NoErr(t, bw.Close()) assert.Eq(t, "hello, worlds. 
oh", w.String()) } type closeWriter struct { errOnWrite bool errOnClose bool writeNum int } func (w *closeWriter) Close() error { if w.errOnClose { return errorx.Raw("close error") } return nil } func (w *closeWriter) Write(p []byte) (n int, err error) { if w.errOnWrite { return w.writeNum, errorx.Raw("write error") } if w.writeNum > 0 { return w.writeNum, nil } return len(p), nil } func TestBufIOWriter_Close_error(t *testing.T) { bw := bufwrite.NewBufIOWriterSize(&closeWriter{errOnWrite: true}, 24) _, err := bw.WriteString("hi") assert.NoErr(t, err) // flush write error err = bw.Close() assert.Err(t, err) assert.Eq(t, "write error", err.Error()) bw = bufwrite.NewBufIOWriterSize(&closeWriter{errOnClose: true}, 24) // close error err = bw.Close() assert.Err(t, err) assert.Eq(t, "close error", err.Error()) } func TestBufIOWriter_Sync(t *testing.T) { w := new(bytes.Buffer) bw := bufwrite.NewBufIOWriter(w) _, err := bw.WriteString("hello") assert.NoErr(t, err) assert.Eq(t, 0, w.Len()) assert.Eq(t, "", w.String()) assert.NoErr(t, bw.Sync()) assert.Eq(t, "hello", w.String()) } func TestNewLineWriter(t *testing.T) { w := new(bytes.Buffer) bw := bufwrite.NewLineWriter(w) assert.True(t, bw.Size() > 0) assert.NoErr(t, bw.Flush()) _, err := bw.WriteString("hello") assert.NoErr(t, err) assert.Eq(t, "", w.String()) assert.NoErr(t, bw.Sync()) assert.Eq(t, "hello", w.String()) bw.Reset(w) } func TestLineWriter_Write_error(t *testing.T) { w := &closeWriter{errOnWrite: true} bw := bufwrite.NewLineWriterSize(w, 6) t.Run("flush err on write", func(t *testing.T) { w1 := &closeWriter{} bw.Reset(w1) n, err := bw.WriteString("hi") // write ok assert.NoErr(t, err) assert.Equal(t, 2, n) // fire flush w1.errOnWrite = true _, err = bw.WriteString("hello, tom") assert.Err(t, err) assert.Eq(t, "write error", err.Error()) }) _, err := bw.WriteString("hello, tom") assert.Err(t, err) assert.Eq(t, "write error", err.Error()) // get old error w.errOnWrite = false _, err = bw.WriteString("hello, 
wo") assert.Err(t, err) assert.Eq(t, "write error", err.Error()) bw.Reset(w) _, err = bw.WriteString("hello") assert.NoErr(t, err) } func TestLineWriter_Flush_error(t *testing.T) { t.Run("write ok but n < b.n", func(t *testing.T) { w := &closeWriter{} bw := bufwrite.NewLineWriterSize(w, 6) _, err := bw.WriteString("hi!") assert.NoErr(t, err) // err: write n < b.n w.writeNum = 1 err = bw.Flush() assert.Err(t, err) assert.Eq(t, "short write", err.Error()) }) t.Run("write err and n < b.n", func(t *testing.T) { w := &closeWriter{} bw := bufwrite.NewLineWriterSize(w, 6) _, err := bw.WriteString("hi!") assert.NoErr(t, err) // err: write n < b.n w.writeNum = 1 w.errOnWrite = true err = bw.Flush() assert.Err(t, err) assert.Eq(t, "write error", err.Error()) }) w := &closeWriter{} bw := bufwrite.NewLineWriterSize(w, 6) _, err := bw.WriteString("hello") assert.NoErr(t, err) // error on flush w.errOnWrite = true err = bw.Flush() assert.Err(t, err) assert.Eq(t, "write error", err.Error()) // err: write n < b.n w.writeNum = 2 err = bw.Flush() assert.Err(t, err) w.writeNum = 0 // get old error w.errOnWrite = false err = bw.Flush() assert.Err(t, err) assert.Eq(t, "write error", err.Error()) bw.Reset(w) _, err = bw.WriteString("hello") assert.NoErr(t, err) } func TestLineWriter_Close_error(t *testing.T) { w := &closeWriter{} bw := bufwrite.NewLineWriterSize(w, 8) _, err := bw.WriteString("hello") assert.NoErr(t, err) // error on flush w.errOnWrite = true err = bw.Close() assert.Err(t, err) assert.Eq(t, "write error", err.Error()) w = &closeWriter{errOnClose: true} bw = bufwrite.NewLineWriterSize(w, 8) err = bw.Close() assert.Err(t, err) assert.Eq(t, "close error", err.Error()) } func TestNewLineWriterSize(t *testing.T) { w := new(bytes.Buffer) bw := bufwrite.NewLineWriterSize(w, 12) _, err := bw.WriteString("hello, ") assert.NoErr(t, err) assert.Eq(t, 0, w.Len()) assert.True(t, bw.Size() > 0) _, err = bw.WriteString("worlds. oh") assert.NoErr(t, err) assert.Eq(t, "hello, worlds. 
oh", w.String()) // different the BufIOWriter _, err = bw.WriteString("...") assert.NoErr(t, err) assert.NoErr(t, bw.Close()) assert.Eq(t, "hello, worlds. oh...", w.String()) w.Reset() bw = bufwrite.NewLineWriterSize(bw, 8) assert.Eq(t, 12, bw.Size()) bw = bufwrite.NewLineWriterSize(w, -12) assert.True(t, bw.Size() > 12) } ================================================ FILE: bufwrite/line_writer.go ================================================ package bufwrite import ( "io" ) const ( defaultBufSize = 1024 * 8 ) // LineWriter implements buffering for an io.Writer object. // If an error occurs writing to a LineWriter, no more data will be // accepted and all subsequent writes, and Flush, will return the error. // After all data has been written, the client should call the // Flush method to guarantee all data has been forwarded to // the underlying io.Writer. // // from bufio.Writer. // // Change: // // always keep write full line. more difference please see Write type LineWriter struct { err error buf []byte n int wr io.Writer } // NewLineWriterSize returns a new LineWriter whose buffer has at least the specified // size. If the argument io.Writer is already a LineWriter with large enough // size, it returns the underlying LineWriter. func NewLineWriterSize(w io.Writer, size int) *LineWriter { // Is it already a LineWriter? b, ok := w.(*LineWriter) if ok && len(b.buf) >= size { return b } if size <= 0 { size = defaultBufSize } return &LineWriter{ buf: make([]byte, size), wr: w, } } // NewLineWriter returns a new LineWriter whose buffer has the default size. func NewLineWriter(w io.Writer) *LineWriter { return NewLineWriterSize(w, defaultBufSize) } // Size returns the size of the underlying buffer in bytes. func (b *LineWriter) Size() int { return len(b.buf) } // Reset discards any unflushed buffered data, clears any error, and // resets b to write its output to w. 
func (b *LineWriter) Reset(w io.Writer) { b.n = 0 b.wr = w b.err = nil b.buf = b.buf[:0] } // Close implements the io.Closer func (b *LineWriter) Close() error { if err := b.Flush(); err != nil { return err } // is closer if c, ok := b.wr.(io.Closer); ok { return c.Close() } return nil } // Sync implements the Syncer func (b *LineWriter) Sync() error { return b.Flush() } // Flush writes any buffered data to the underlying io.Writer. // // TIP: please add lock before calling the method. func (b *LineWriter) Flush() error { if b.err != nil { return b.err } if b.n == 0 { return nil } n, err := b.wr.Write(b.buf[0:b.n]) if n < b.n && err == nil { err = io.ErrShortWrite } if err != nil { if n > 0 && n < b.n { copy(b.buf[0:b.n-n], b.buf[n:b.n]) } b.n -= n b.err = err return err } b.n = 0 return nil } // Available returns how many bytes are unused in the buffer. func (b *LineWriter) Available() int { return len(b.buf) - b.n } // Buffered returns the number of bytes that have been written into the current buffer. func (b *LineWriter) Buffered() int { return b.n } // Write writes the contents of p into the buffer. // It returns the number of bytes written. // If nn < len(p), it also returns an error explaining // why the writing is short. func (b *LineWriter) Write(p []byte) (nn int, err error) { // NOTE: 原来的 bufio.Writer#Write 会造成 p 写了一部分到 b.wr, 还有一部分在 b.buf, // 如果现在外部工具从 b.wr 收集数据,会收集到一行无法解析的数据(例如每个p是一行json日志) // for len(p) > b.Available() && b.err == nil { // var n int // if b.Buffered() == 0 { // // Large write, empty buffer. // // Write directly from p to avoid copy. 
// n, b.err = b.wr.Write(p) // } else { // n = copy(b.buf[b.n:], p) // b.n += n // b.Flush() // } // nn += n // p = p[n:] // } // UP: 改造一下逻辑,如果 len(p) > b.Available() 就将buf 和 p 都写入 b.wr if len(p) > b.Available() && b.err == nil { nn = b.Buffered() if nn > 0 { _ = b.Flush() if b.err != nil { return nn, b.err } } var n int n, b.err = b.wr.Write(p) if b.err != nil { return nn, b.err } nn += n return nn, nil } if b.err != nil { return nn, b.err } n := copy(b.buf[b.n:], p) b.n += n nn += n return nn, nil } // WriteString to the writer func (b *LineWriter) WriteString(s string) (int, error) { return b.Write([]byte(s)) } ================================================ FILE: common.go ================================================ package slog import ( "errors" "strconv" "strings" "time" "github.com/gookit/goutil/envutil" "github.com/gookit/goutil/strutil" "github.com/gookit/gsr" ) // SLogger interface type SLogger interface { gsr.Logger Log(level Level, v ...any) Logf(level Level, format string, v ...any) } // LoggerFn func type LoggerFn func(l *Logger) // // log level definitions // region Log level // Level type type Level uint32 // String get level name func (l Level) String() string { return LevelName(l) } // Name get level name. eg: INFO, DEBUG ... func (l Level) Name() string { return LevelName(l) } // LowerName get lower level name. eg: info, debug ... func (l Level) LowerName() string { if n, ok := lowerLevelNames[l]; ok { return n } return "unknown" } // ShouldHandling compare level, if current level <= l, it will be record. 
func (l Level) ShouldHandling(curLevel Level) bool { return curLevel <= l } // MarshalJSON implement the JSON Marshal interface [encoding/json.Marshaler] func (l Level) MarshalJSON() ([]byte, error) { return []byte(`"` + l.String() + `"`), nil } // UnmarshalJSON implement the JSON Unmarshal interface [encoding/json.Unmarshaler] func (l *Level) UnmarshalJSON(data []byte) error { s, err := strconv.Unquote(string(data)) if err != nil { return err } *l, err = StringToLevel(s) return err } // Levels level list type Levels []Level // Contains given level func (ls Levels) Contains(level Level) bool { for _, l := range ls { if l == level { return true } } return false } // These are the different logging levels. You can set the logging level to log handler const ( // PanicLevel level, the highest level of severity. will call panic() if the logging level <= PanicLevel. PanicLevel Level = 100 // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the // logging level <= FatalLevel. FatalLevel Level = 200 // ErrorLevel level. Runtime errors. Used for errors that should definitely be noted. // Commonly used for hooks to send errors to an error tracking service. ErrorLevel Level = 300 // WarnLevel level. Non-critical entries that deserve eyes. WarnLevel Level = 400 // NoticeLevel level Uncommon events NoticeLevel Level = 500 // InfoLevel level. Examples: User logs in, SQL logs. InfoLevel Level = 600 // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel Level = 700 // TraceLevel level. Designates finer-grained informational events than the Debug. 
TraceLevel Level = 800 ) // // some common definitions // region common types // StringMap string map short name type StringMap = map[string]string // M short name of map[string]any type M map[string]any // String map to string func (m M) String() string { return mapToString(m) } // ClockFn func type ClockFn func() time.Time // Now implements the Clocker func (fn ClockFn) Now() time.Time { return fn() } // region CallerFlagMode // CallerFlagMode Defines the Caller backtrace information mode. type CallerFlagMode = uint8 // NOTICE: you must set `Logger.ReportCaller=true` for reporting caller. // then config the Logger.CallerFlag by follow flags. const ( // CallerFlagFnlFcn report short func name with filename and with line. // eg: "logger_test.go:48,TestLogger_ReportCaller" CallerFlagFnlFcn CallerFlagMode = iota // CallerFlagFull full func name with filename and with line. // eg: "github.com/gookit/slog_test.TestLogger_ReportCaller(),logger_test.go:48" CallerFlagFull // CallerFlagFunc full package with func name. // eg: "github.com/gookit/slog_test.TestLogger_ReportCaller" CallerFlagFunc // CallerFlagFcLine full package with func name and with line. // eg: "github.com/gookit/slog_test.TestLogger_ReportCaller:48" CallerFlagFcLine // CallerFlagPkg report full package name. // eg: "github.com/gookit/slog_test" CallerFlagPkg // CallerFlagPkgFnl report full package name + filename + line. // eg: "github.com/gookit/slog_test,logger_test.go:48" CallerFlagPkgFnl // CallerFlagFpLine report full filepath with line. // eg: "/work/go/gookit/slog/logger_test.go:48" CallerFlagFpLine // CallerFlagFnLine report filename with line. // eg: "logger_test.go:48" CallerFlagFnLine // CallerFlagFcName only report func name. 
// eg: "TestLogger_ReportCaller" CallerFlagFcName ) var ( // FieldKeyData define the key name for Record.Data FieldKeyData = "data" // FieldKeyTime key name FieldKeyTime = "time" // FieldKeyDate key name FieldKeyDate = "date" // FieldKeyDatetime key name FieldKeyDatetime = "datetime" // FieldKeyTimestamp key name FieldKeyTimestamp = "timestamp" // FieldKeyCaller the field key name for report caller. // // For caller style please see CallerFlagFull, CallerFlagFunc and more. // // NOTICE: you must set `Logger.ReportCaller=true` for reporting caller FieldKeyCaller = "caller" // FieldKeyLevel name FieldKeyLevel = "level" // FieldKeyError Define the key when adding errors using WithError. FieldKeyError = "error" // FieldKeyExtra key name FieldKeyExtra = "extra" // FieldKeyChannel name FieldKeyChannel = "channel" // FieldKeyMessage name FieldKeyMessage = "message" ) // region Global variables var ( // DefaultChannelName for log record DefaultChannelName = "application" // DefaultTimeFormat define DefaultTimeFormat = "2006/01/02T15:04:05.000" // DebugMode enable debug mode for logger. use for local development. DebugMode = envutil.GetBool("OPEN_SLOG_DEBUG", false) // DoNothingOnExit handle func. use for testing. DoNothingOnExit = func(code int) {} // DoNothingOnPanic handle func. use for testing. 
DoNothingOnPanic = func(v any) {} // DefaultPanicFn handle func DefaultPanicFn = func(v any) { panic(v) } // DefaultClockFn create func DefaultClockFn = ClockFn(func() time.Time { return time.Now() }) ) var ( // PrintLevel for use Logger.Print / Printf / Println PrintLevel = InfoLevel // AllLevels exposing all logging levels AllLevels = Levels{ PanicLevel, FatalLevel, ErrorLevel, WarnLevel, NoticeLevel, InfoLevel, DebugLevel, TraceLevel, } // DangerLevels define the commonly danger log levels DangerLevels = Levels{PanicLevel, FatalLevel, ErrorLevel, WarnLevel} // NormalLevels define the commonly normal log levels NormalLevels = Levels{InfoLevel, NoticeLevel, DebugLevel, TraceLevel} // LevelNames all level mapping name LevelNames = map[Level]string{ PanicLevel: "PANIC", FatalLevel: "FATAL", ErrorLevel: "ERROR", WarnLevel: "WARNING", NoticeLevel: "NOTICE", InfoLevel: "INFO", DebugLevel: "DEBUG", TraceLevel: "TRACE", } // lower level name. lowerLevelNames = buildLowerLevelName() // empty time for reset record. 
emptyTime = time.Time{} ) // region Global functions // LevelName match func LevelName(l Level) string { if n, ok := LevelNames[l]; ok { return n } return "UNKNOWN" } // LevelByName convert name to level, fallback to InfoLevel if not match func LevelByName(ln string) Level { l, err := StringToLevel(ln) if err != nil { return InfoLevel } return l } // Name2Level convert name to level func Name2Level(s string) (Level, error) { return StringToLevel(s) } // StringToLevel parse and convert string value to Level func StringToLevel(s string) (Level, error) { switch strings.ToLower(s) { case "panic": return PanicLevel, nil case "fatal": return FatalLevel, nil case "err", "error": return ErrorLevel, nil case "warn", "warning": return WarnLevel, nil case "note", "notice": return NoticeLevel, nil case "info", "": // make the zero value useful return InfoLevel, nil case "debug": return DebugLevel, nil case "trace": return TraceLevel, nil } // is int value, try to parse as int if strutil.IsInt(s) { iVal := strutil.SafeInt(s) return Level(iVal), nil } return 0, errors.New("slog: invalid log level name: " + s) } // // exit handle logic // // global exit handler var exitHandlers = make([]func(), 0) func runExitHandlers() { defer func() { if err := recover(); err != nil { printStderr("slog: run exit handler(global) recovered, error:", err) } }() for _, handler := range exitHandlers { handler() } } // ExitHandlers get all global exitHandlers func ExitHandlers() []func() { return exitHandlers } // RegisterExitHandler register an exit-handler on global exitHandlers func RegisterExitHandler(handler func()) { exitHandlers = append(exitHandlers, handler) } // PrependExitHandler prepend register an exit-handler on global exitHandlers func PrependExitHandler(handler func()) { exitHandlers = append([]func(){handler}, exitHandlers...) 
} // ResetExitHandlers reset all exitHandlers func ResetExitHandlers(applyToStd bool) { exitHandlers = make([]func(), 0) if applyToStd { std.ResetExitHandlers() } } ================================================ FILE: common_test.go ================================================ package slog_test import ( "bytes" "fmt" "testing" "github.com/gookit/goutil/byteutil" "github.com/gookit/goutil/dump" "github.com/gookit/goutil/errorx" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/gsr" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) var ( testData1 = slog.M{"key0": "val0", "age": 23} // testData2 = slog.M{"key0": "val0", "age": 23, "sub": slog.M{ // "subKey0": 345, // }} ) func TestDefine_basic(t *testing.T) { assert.NotEmpty(t, slog.NoTimeFields) assert.NotEmpty(t, slog.FieldKeyDate) assert.NotEmpty(t, slog.FieldKeyTime) assert.NotEmpty(t, slog.FieldKeyCaller) assert.NotEmpty(t, slog.FieldKeyError) } func TestM_String(t *testing.T) { m := slog.M{ "k0": 12, "k1": "abc", "k2": true, "k3": 23.45, "k4": []int{12, 23}, "k5": []string{"ab", "bc"}, "k6": map[string]any{ "k6-1": 23, "k6-2": "def", }, } fmt.Println(m) dump.P(m.String(), m) assert.NotEmpty(t, m.String()) } func TestLevelName_func(t *testing.T) { for level, wantName := range slog.LevelNames { realName := slog.LevelName(level) assert.Eq(t, wantName, realName) } assert.Eq(t, "UNKNOWN", slog.LevelName(20)) // LevelByName assert.Eq(t, slog.InfoLevel, slog.LevelByName("info")) assert.Eq(t, slog.InfoLevel, slog.LevelByName("invalid")) } func TestName2Level(t *testing.T) { for wantLevel, name := range slog.LevelNames { level, err := slog.Name2Level(name) assert.NoErr(t, err) assert.Eq(t, wantLevel, level) } // special names tests := map[slog.Level]string{ slog.WarnLevel: "warn", slog.ErrorLevel: "err", slog.InfoLevel: "", } for wantLevel, name := range tests { level, err := slog.Name2Level(name) assert.NoErr(t, err) assert.Eq(t, wantLevel, level) } level, err := slog.Name2Level("unknown") 
assert.Err(t, err) assert.Eq(t, slog.Level(0), level) level, err = slog.StringToLevel("300") assert.NoErr(t, err) assert.Eq(t, slog.ErrorLevel, level) } func TestLevel_methods(t *testing.T) { t.Run("ShouldHandling", func(t *testing.T) { assert.True(t, slog.InfoLevel.ShouldHandling(slog.ErrorLevel)) assert.False(t, slog.InfoLevel.ShouldHandling(slog.TraceLevel)) assert.True(t, slog.DebugLevel.ShouldHandling(slog.InfoLevel)) assert.False(t, slog.DebugLevel.ShouldHandling(slog.TraceLevel)) }) t.Run("Name", func(t *testing.T) { assert.Eq(t, "INFO", slog.InfoLevel.Name()) assert.Eq(t, "INFO", slog.InfoLevel.String()) assert.Eq(t, "info", slog.InfoLevel.LowerName()) assert.Eq(t, "unknown", slog.Level(330).LowerName()) }) t.Run("encoding", func(t *testing.T) { // MarshalJSON bs, err := slog.InfoLevel.MarshalJSON() assert.NoErr(t, err) assert.Eq(t, `"INFO"`, string(bs)) // UnmarshalJSON level := slog.Level(0) assert.Eq(t, "UNKNOWN", level.Name()) err = level.UnmarshalJSON([]byte(`"warn"`)) assert.NoErr(t, err) assert.Eq(t, "WARNING", level.Name()) assert.Err(t, level.UnmarshalJSON([]byte(`a`))) }) } func TestLevels_Contains(t *testing.T) { assert.True(t, slog.DangerLevels.Contains(slog.ErrorLevel)) assert.False(t, slog.DangerLevels.Contains(slog.InfoLevel)) assert.True(t, slog.NormalLevels.Contains(slog.InfoLevel)) assert.False(t, slog.NormalLevels.Contains(slog.PanicLevel)) } func newLogRecord(msg string) *slog.Record { r := &slog.Record{ Channel: slog.DefaultChannelName, Level: slog.InfoLevel, Message: msg, Time: slog.DefaultClockFn.Now(), Data: map[string]any{ "data_key0": "value", "username": "inhere", }, Extra: map[string]any{ "source": "linux", "extra_key0": "hello", }, // Caller: goinfo.GetCallerInfo(), } r.Init(true) return r } type closedBuffer struct { bytes.Buffer } func newBuffer() *closedBuffer { return &closedBuffer{} } func (w *closedBuffer) Close() error { return nil } func (w *closedBuffer) StringReset() string { s := w.Buffer.String() w.Reset() return s } 
// // region test handler // type testHandler struct { slog.FormatterWrapper byteutil.Buffer errOnHandle bool errOnClose bool errOnFlush bool // hooks beforeFormat func(r *slog.Record) beforeWrite func(r *slog.Record) callOnFlush func() // NOTE: 如果设置为true,默认会让 error,fatal 等信息提前被reset丢弃掉. // see Logger.writeRecord() resetOnFlush bool } // built in test, will collect logs to buffer func newTestHandler() *testHandler { return &testHandler{} } func (h *testHandler) IsHandling(_ slog.Level) bool { return true } func (h *testHandler) Close() error { if h.errOnClose { return errorx.Raw("close error") } h.Reset() return nil } func (h *testHandler) Flush() error { if h.errOnFlush { return errorx.Raw("flush error") } if h.callOnFlush != nil { h.callOnFlush() } if h.resetOnFlush { h.Reset() } return nil } func (h *testHandler) Handle(r *slog.Record) error { if h.errOnHandle { return errorx.Raw("handle error") } if h.beforeFormat != nil { h.beforeFormat(r) } bs, err := h.Format(r) if err != nil { return err } if h.beforeWrite != nil { h.beforeWrite(r) } h.Write(bs) return nil } // // region test formatter // type testFormatter struct { errOnFormat bool } func newTestFormatter(errOnFormat ...bool) *testFormatter { return &testFormatter{ errOnFormat: len(errOnFormat) > 0 && errOnFormat[0], } } func (f testFormatter) Format(r *slog.Record) ([]byte, error) { if f.errOnFormat { return nil, errorx.Raw("format error") } return []byte(r.Message), nil } // // region test logger // func newLogger() *slog.Logger { return slog.NewWithConfig(func(l *slog.Logger) { l.ReportCaller = true l.DoNothingOnPanicFatal() }) } // newTestLogger create a logger for test, will write logs to buffer func newTestLogger() (*closedBuffer, *slog.Logger) { l := slog.NewWithConfig(func(l *slog.Logger) { l.DoNothingOnPanicFatal() l.CallerFlag = slog.CallerFlagFull }) w := newBuffer() h := handler.NewIOWriter(w, slog.AllLevels) // fmt.Print("Template:", h.TextFormatter().Template()) 
l.SetHandlers([]slog.Handler{h}) return w, l } func printAllLevelLogs(l gsr.Logger, args ...any) { l.Debug(args...) l.Info(args...) l.Warn(args...) l.Error(args...) l.Print(args...) l.Println(args...) l.Fatal(args...) l.Fatalln(args...) l.Panic(args...) l.Panicln(args...) sl, ok := l.(*slog.Logger) if ok { sl.Trace(args...) sl.Notice(args...) sl.ErrorT(errorx.Raw("a error object")) sl.ErrorT(errorx.New("error with stack info")) } } func printfAllLevelLogs(l gsr.Logger, tpl string, args ...any) { l.Printf(tpl, args...) l.Debugf(tpl, args...) l.Infof(tpl, args...) l.Warnf(tpl, args...) l.Errorf(tpl, args...) l.Panicf(tpl, args...) l.Fatalf(tpl, args...) if sl, ok := l.(*slog.Logger); ok { sl.Noticef(tpl, args...) sl.Tracef(tpl, args...) } } ================================================ FILE: example_test.go ================================================ package slog_test import ( "fmt" "sync" "time" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func Example_quickStart() { slog.Info("info log message") slog.Warn("warning log message") slog.Infof("info log %s", "message") slog.Debugf("debug %s", "message") } func Example_configSlog() { slog.Configure(func(logger *slog.SugaredLogger) { f := logger.Formatter.(*slog.TextFormatter) f.EnableColor = true }) slog.Trace("this is a simple log message") slog.Debug("this is a simple log message") slog.Info("this is a simple log message") slog.Notice("this is a simple log message") slog.Warn("this is a simple log message") slog.Error("this is a simple log message") slog.Fatal("this is a simple log message") } func Example_useJSONFormat() { // use JSON formatter slog.SetFormatter(slog.NewJSONFormatter()) slog.Info("info log message") slog.Warn("warning log message") slog.WithData(slog.M{ "key0": 134, "key1": "abc", }).Infof("info log %s", "message") r := slog.WithFields(slog.M{ "category": "service", "IP": "127.0.0.1", }) r.Infof("info %s", "message") r.Debugf("debug %s", "message") } func ExampleNew() { mylog := 
slog.New() levels := slog.AllLevels mylog.AddHandler(handler.MustFileHandler("app.log", handler.WithLogLevels(levels))) mylog.Info("info log message") mylog.Warn("warning log message") mylog.Infof("info log %s", "message") } func ExampleFlushDaemon() { wg := sync.WaitGroup{} wg.Add(1) go slog.FlushDaemon(func() { fmt.Println("flush daemon stopped") slog.MustClose() wg.Done() }) go func() { // mock app running time.Sleep(time.Second * 2) // stop daemon fmt.Println("stop flush daemon") slog.StopDaemon() }() // wait for stop wg.Wait() } ================================================ FILE: formatter.go ================================================ package slog import "runtime" // // Formatter interface // // Formatter interface type Formatter interface { // Format you can format record and write result to record.Buffer Format(record *Record) ([]byte, error) } // FormatterFunc wrapper definition type FormatterFunc func(r *Record) ([]byte, error) // Format a log record func (fn FormatterFunc) Format(r *Record) ([]byte, error) { return fn(r) } // Formattable interface type Formattable interface { // Formatter get the log formatter Formatter() Formatter // SetFormatter set the log formatter SetFormatter(Formatter) } // FormattableTrait alias of FormatterWrapper type FormattableTrait = FormatterWrapper // FormatterWrapper use for format log record. // // Default will use the TextFormatter type FormatterWrapper struct { // if not set, default uses the TextFormatter formatter Formatter } // Formatter get formatter. 
if not set, will return TextFormatter func (f *FormatterWrapper) Formatter() Formatter { if f.formatter == nil { f.formatter = NewTextFormatter() } return f.formatter } // SetFormatter to handler func (f *FormatterWrapper) SetFormatter(formatter Formatter) { f.formatter = formatter } // Format log record to bytes func (f *FormatterWrapper) Format(record *Record) ([]byte, error) { return f.Formatter().Format(record) } // CallerFormatFn caller format func type CallerFormatFn func(rf *runtime.Frame) (cs string) // AsTextFormatter util func func AsTextFormatter(f Formatter) *TextFormatter { if tf, ok := f.(*TextFormatter); ok { return tf } panic("slog: cannot cast input as *TextFormatter") } // AsJSONFormatter util func func AsJSONFormatter(f Formatter) *JSONFormatter { if jf, ok := f.(*JSONFormatter); ok { return jf } panic("slog: cannot cast input as *JSONFormatter") } ================================================ FILE: formatter_json.go ================================================ package slog import ( "encoding/json" "github.com/valyala/bytebufferpool" ) var ( // DefaultFields default log export fields for json formatter. DefaultFields = []string{ FieldKeyDatetime, FieldKeyChannel, FieldKeyLevel, FieldKeyCaller, FieldKeyMessage, FieldKeyData, FieldKeyExtra, } // NoTimeFields log export fields without time NoTimeFields = []string{ FieldKeyChannel, FieldKeyLevel, FieldKeyMessage, FieldKeyData, FieldKeyExtra, } ) // JSONFormatter definition type JSONFormatter struct { // Fields set exported common log fields. default is DefaultFields Fields []string // Aliases for output fields. you can change the export field name. // // - item: `"field" : "output name"` // // eg: {"message": "msg"} export field will display "msg" Aliases StringMap // PrettyPrint will indent all JSON logs PrettyPrint bool // TimeFormat the time format layout. default is DefaultTimeFormat TimeFormat string // CallerFormatFunc the caller format layout. 
default is defined by CallerFlag CallerFormatFunc CallerFormatFn } // NewJSONFormatter create new JSONFormatter func NewJSONFormatter(fn ...func(f *JSONFormatter)) *JSONFormatter { f := &JSONFormatter{ // Aliases: make(StringMap, 0), Fields: DefaultFields, TimeFormat: DefaultTimeFormat, } if len(fn) > 0 { fn[0](f) } return f } // Configure current formatter func (f *JSONFormatter) Configure(fn func(*JSONFormatter)) *JSONFormatter { fn(f) return f } // AddField for export func (f *JSONFormatter) AddField(name string) *JSONFormatter { f.Fields = append(f.Fields, name) return f } var jsonPool bytebufferpool.Pool // Format a log record to JSON bytes func (f *JSONFormatter) Format(r *Record) ([]byte, error) { logData := make(M, len(f.Fields)) // TODO perf: use buf write build JSON string. for _, field := range f.Fields { outName, ok := f.Aliases[field] if !ok { outName = field } switch { case field == FieldKeyDatetime: logData[outName] = r.Time.Format(f.TimeFormat) case field == FieldKeyTimestamp: logData[outName] = r.timestamp() case field == FieldKeyCaller && r.Caller != nil: logData[outName] = formatCaller(r.Caller, r.CallerFlag, f.CallerFormatFunc) case field == FieldKeyLevel: logData[outName] = r.LevelName() case field == FieldKeyChannel: logData[outName] = r.Channel case field == FieldKeyMessage: logData[outName] = r.Message case field == FieldKeyData: logData[outName] = r.Data case field == FieldKeyExtra: logData[outName] = r.Extra // default: // logData[outName] = r.Fields[field] } } // exported custom record fields for field, value := range r.Fields { fieldKey := field if _, has := logData[field]; has { fieldKey = "fields." + field } logData[fieldKey] = value } // sort.Interface() buf := jsonPool.Get() // buf.Reset() defer jsonPool.Put(buf) // buf := r.NewBuffer() // buf.Reset() // buf.Grow(256) encoder := json.NewEncoder(buf) if f.PrettyPrint { encoder.SetIndent("", " ") } // has been added newline in Encode(). 
err := encoder.Encode(logData) return buf.Bytes(), err } ================================================ FILE: formatter_test.go ================================================ package slog_test import ( "fmt" "runtime" "strings" "testing" "github.com/gookit/goutil/byteutil" "github.com/gookit/goutil/dump" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func TestFormattableTrait_Formatter(t *testing.T) { ft := &slog.FormattableTrait{} tf := slog.AsTextFormatter(ft.Formatter()) assert.NotNil(t, tf) assert.Panics(t, func() { slog.AsJSONFormatter(ft.Formatter()) }) ft.SetFormatter(slog.NewJSONFormatter()) jf := slog.AsJSONFormatter(ft.Formatter()) assert.NotNil(t, jf) assert.Panics(t, func() { slog.AsTextFormatter(ft.Formatter()) }) } func TestFormattable_Format(t *testing.T) { r := newLogRecord("TEST_LOG_MESSAGE format") f := &slog.FormattableTrait{} assert.Eq(t, "slog: TEST_LOG_MESSAGE format", r.GoString()) bts, err := f.Format(r) assert.NoErr(t, err) str := string(bts) assert.Contains(t, str, "TEST_LOG_MESSAGE format") fn := slog.FormatterFunc(func(r *slog.Record) ([]byte, error) { return []byte(r.Message), nil }) bts, err = fn.Format(r) assert.NoErr(t, err) str = string(bts) assert.Contains(t, str, "TEST_LOG_MESSAGE format") } func TestNewTextFormatter(t *testing.T) { f := slog.NewTextFormatter() dump.Println(f.Fields()) assert.Contains(t, f.Fields(), "datetime") assert.Len(t, f.Fields(), strings.Count(slog.DefaultTemplate, "{{")) f.SetTemplate(slog.NamedTemplate) dump.Println(f.Fields()) assert.Contains(t, f.Fields(), "datetime") assert.Len(t, f.Fields(), strings.Count(slog.NamedTemplate, "{{")) f.WithEnableColor(true) assert.True(t, f.EnableColor) f1 := slog.NewTextFormatter() f1.Configure(func(f *slog.TextFormatter) { f.FullDisplay = true }) assert.True(t, f1.FullDisplay) t.Run("CallerFormatFunc", func(t *testing.T) { buf := byteutil.NewBuffer() h := handler.IOWriterWithMaxLevel(buf, slog.DebugLevel) 
h.SetFormatter(slog.TextFormatterWith(func(f *slog.TextFormatter) { f.CallerFormatFunc = func(rf *runtime.Frame) string { return "custom_caller" } })) l := slog.NewWithHandlers(h) l.Debug("test message") assert.Contains(t, buf.String(), "custom_caller") }) } func TestTextFormatter_Format(t *testing.T) { r := newLogRecord("TEST_LOG_MESSAGE") f := slog.NewTextFormatter() bs, err := f.Format(r) logTxt := string(bs) fmt.Println(f.Template(), logTxt) assert.NoErr(t, err) assert.NotEmpty(t, logTxt) assert.NotContains(t, logTxt, "{{") assert.NotContains(t, logTxt, "}}") } func TestTextFormatter_ColorRenderFunc(t *testing.T) { f := slog.NewTextFormatter() f.WithEnableColor(true) f.ColorRenderFunc = func(field, s string, l slog.Level) string { return fmt.Sprintf("NO-%s-NO", s) } r := newLogRecord("TEST_LOG_MESSAGE") bts, err := f.Format(r) assert.NoErr(t, err) str := string(bts) assert.StrContains(t, str, "[NO-info-NO]") assert.StrContains(t, str, "NO-TEST_LOG_MESSAGE-NO") } func TestTextFormatter_LimitLevelNameLen(t *testing.T) { f := slog.TextFormatterWith(slog.LimitLevelNameLen(4)) h := handler.ConsoleWithMaxLevel(slog.TraceLevel) h.SetFormatter(f) th := newTestHandler() th.resetOnFlush = false th.SetFormatter(f) l := slog.NewWithHandlers(h, th) l.DoNothingOnPanicFatal() for _, level := range slog.AllLevels { l.Logf(level, "a %s test message", level.String()) } assert.NoErr(t, l.LastErr()) str := th.ResetAndGet() assert.StrContains(t, str, "[PANI]") assert.StrContains(t, str, "[FATA]") assert.StrContains(t, str, "[ERRO]") assert.StrContains(t, str, "[TRAC]") } func TestTextFormatter_LimitLevelNameLen2(t *testing.T) { // set to max length. 
f := slog.TextFormatterWith(slog.LimitLevelNameLen(7)) h := handler.ConsoleWithMaxLevel(slog.TraceLevel) h.SetFormatter(f) th := newTestHandler() th.resetOnFlush = false th.SetFormatter(f) l := slog.NewWithHandlers(h, th) l.DoNothingOnPanicFatal() for _, level := range slog.AllLevels { l.Logf(level, "a %s test message", level.String()) } assert.NoErr(t, l.LastErr()) str := th.ResetAndGet() assert.StrContains(t, str, "[PANIC ]") assert.StrContains(t, str, "[FATAL ]") assert.StrContains(t, str, "[ERROR ]") assert.StrContains(t, str, "[WARNING]") } func TestNewJSONFormatter(t *testing.T) { f := slog.NewJSONFormatter() f.AddField(slog.FieldKeyTimestamp) h := handler.ConsoleWithLevels(slog.AllLevels) h.SetFormatter(f) l := slog.NewWithHandlers(h) fields := slog.M{ "field1": 123, "field2": "abc", "message": "field name is same of message", // will be as fields.message } l.WithFields(fields).Info("info", "message") t.Run("CallerFormatFunc", func(t *testing.T) { h.SetFormatter(slog.NewJSONFormatter(func(f *slog.JSONFormatter) { f.CallerFormatFunc = func(rf *runtime.Frame) string { return rf.Function } })) l.WithFields(fields).Info("info", "message") }) // PrettyPrint=true t.Run("PrettyPrint", func(t *testing.T) { l = slog.New() h = handler.ConsoleWithMaxLevel(slog.DebugLevel) f = slog.NewJSONFormatter(func(f *slog.JSONFormatter) { f.Aliases = slog.StringMap{ "level": "levelName", } f.PrettyPrint = true }) h.SetFormatter(f) l.AddHandler(h) l.WithFields(fields). SetData(slog.M{"key1": "val1"}). SetExtra(slog.M{"ext1": "val1"}). 
package slog

import (
	"github.com/gookit/color"
	"github.com/gookit/goutil/arrutil"
	"github.com/valyala/bytebufferpool"
)

// built-in text log templates
const (
	// DefaultTemplate is the default render template for TextFormatter.
	DefaultTemplate = "[{{datetime}}] [{{channel}}] [{{level}}] [{{caller}}] {{message}} {{data}} {{extra}}\n"
	// NamedTemplate renders most fields as "name=value" pairs.
	NamedTemplate = "{{datetime}} channel={{channel}} level={{level}} [file={{caller}}] message={{message}} data={{data}}\n"
)

// ColorTheme for format log to console. maps each log level to a console color.
var ColorTheme = map[Level]color.Color{
	PanicLevel:  color.FgRed,
	FatalLevel:  color.FgRed,
	ErrorLevel:  color.FgMagenta,
	WarnLevel:   color.FgYellow,
	NoticeLevel: color.OpBold,
	InfoLevel:   color.FgGreen,
	DebugLevel:  color.FgCyan,
	// TraceLevel:  color.FgLightGreen,
)

// TextFormatter definition. renders a Record to text by a "{{field}}" template.
type TextFormatter struct {
	// template text template for render output log messages
	template string
	// fields list, parsed from template string.
	//
	// NOTE: contains no-field items in the list. eg: ["level", "}}"]
	fields []string

	// TimeFormat the time format layout. default is DefaultTimeFormat
	TimeFormat string
	// EnableColor enable color on print log to terminal
	EnableColor bool
	// ColorTheme setting on render color on terminal
	ColorTheme map[Level]color.Color
	// FullDisplay whether to display when record.Data, record.Extra, etc. are empty
	FullDisplay bool
	// EncodeFunc data encode for Record.Data, Record.Extra, etc.
	//
	// Default is encode by EncodeToString()
	EncodeFunc func(v any) string
	// CallerFormatFunc the caller format layout. default is defined by CallerFlag
	CallerFormatFunc CallerFormatFn
	// LevelFormatFunc custom the level name format.
	LevelFormatFunc func(s string) string

	// ColorRenderFunc custom color render func.
	//
	// - `s`: level name or message
	//
	// NOTE(review): the first parameter name "filed" looks like a typo of
	// "field". Parameter names in a func type are documentation-only, so it
	// is left unchanged here.
	ColorRenderFunc func(filed, s string, l Level) string

	// TODO BeforeFunc call it before format, update fields or other
	// BeforeFunc func(r *Record)
}

// TextFormatterFn config func definition for TextFormatter.
type TextFormatterFn func(*TextFormatter)

// NewTextFormatter create new TextFormatter.
// uses DefaultTemplate when no template argument is given.
func NewTextFormatter(template ...string) *TextFormatter {
	var fmtTpl string
	if len(template) > 0 {
		fmtTpl = template[0]
	} else {
		fmtTpl = DefaultTemplate
	}

	f := &TextFormatter{
		// default options
		ColorTheme: ColorTheme,
		TimeFormat: DefaultTimeFormat,
		// EnableColor: color.SupportColor(),
		// EncodeFunc: func(v any) string {
		// 	return fmt.Sprint(v)
		// },
		EncodeFunc: EncodeToString,
	}
	f.SetTemplate(fmtTpl)

	return f
}

// TextFormatterWith create new TextFormatter with options
func TextFormatterWith(fns ...TextFormatterFn) *TextFormatter {
	return NewTextFormatter().WithOptions(fns...)
}

// LimitLevelNameLen limit the rendered length of the level name
func LimitLevelNameLen(length int) TextFormatterFn {
	return func(f *TextFormatter) {
		f.LevelFormatFunc = func(s string) string {
			return FormatLevelName(s, length)
		}
	}
}

// Configure the formatter with the given func
func (f *TextFormatter) Configure(fn TextFormatterFn) *TextFormatter {
	return f.WithOptions(fn)
}

// WithOptions apply option func(s) on the formatter
func (f *TextFormatter) WithOptions(fns ...TextFormatterFn) *TextFormatter {
	for _, fn := range fns {
		fn(f)
	}
	return f
}

// SetTemplate set the log format template and update the parsed field list
func (f *TextFormatter) SetTemplate(fmtTpl string) {
	f.template = fmtTpl
	f.fields = parseTemplateToFields(fmtTpl)
}

// Template get the current template string
func (f *TextFormatter) Template() string {
	return f.template
}

// WithEnableColor enable color on print log to terminal
func (f *TextFormatter) WithEnableColor(enable bool) *TextFormatter {
	f.EnableColor = enable
	return f
}
'z' { ss = append(ss, s) } } return ss } var textPool bytebufferpool.Pool // Format a log record // //goland:noinspection GoUnhandledErrorResult func (f *TextFormatter) Format(r *Record) ([]byte, error) { f.beforeFormat() buf := textPool.Get() defer textPool.Put(buf) // record formatted custom fields var formattedFields []string for _, field := range f.fields { // is not field name. eg: "}}] " if field[0] < 'a' || field[0] > 'z' { // remove left "}}" if len(field) > 1 && field[0:2] == "}}" { buf.WriteString(field[2:]) } else { buf.WriteString(field) } continue } switch { case field == FieldKeyDatetime: buf.B = r.Time.AppendFormat(buf.B, f.TimeFormat) case field == FieldKeyTimestamp: buf.WriteString(r.timestamp()) case field == FieldKeyCaller && r.Caller != nil: buf.WriteString(formatCaller(r.Caller, r.CallerFlag, f.CallerFormatFunc)) case field == FieldKeyLevel: buf.WriteString(f.renderColorText(field, r.LevelName(), r.Level)) case field == FieldKeyChannel: buf.WriteString(r.Channel) case field == FieldKeyMessage: buf.WriteString(f.renderColorText(field, r.Message, r.Level)) case field == FieldKeyData: if f.FullDisplay || len(r.Data) > 0 { buf.WriteString(f.EncodeFunc(r.Data)) } case field == FieldKeyExtra: if f.FullDisplay || len(r.Extra) > 0 { buf.WriteString(f.EncodeFunc(r.Extra)) } default: if _, ok := r.Fields[field]; ok { formattedFields = append(formattedFields, field) buf.WriteString(f.EncodeFunc(r.Fields[field])) } else { buf.WriteString(field) } } } // UP: check not configured fields in template. 
if fLen := len(r.Fields); fLen > 0 && fLen != len(formattedFields) { unformattedFields := make(map[string]any) for k, v := range r.Fields { if !arrutil.StringsContains(formattedFields, k) { unformattedFields[k] = v } } buf.WriteString("UN-CONFIGURED FIELDS: ") buf.WriteString(f.EncodeFunc(unformattedFields)) buf.WriteByte('\n') } // return buf.Bytes(), nil return buf.B, nil } func (f *TextFormatter) beforeFormat() { // if f.BeforeFunc == nil {} if f.EncodeFunc == nil { f.EncodeFunc = EncodeToString } if f.ColorTheme == nil { f.ColorTheme = ColorTheme } } func (f *TextFormatter) renderColorText(field, s string, l Level) string { // custom level name format if f.LevelFormatFunc != nil && field == FieldKeyLevel { s = f.LevelFormatFunc(s) } if !f.EnableColor { return s } // custom color render func if f.ColorRenderFunc != nil { return f.ColorRenderFunc(field, s, l) } // output colored logs for console output if theme, ok := f.ColorTheme[l]; ok { return theme.Render(s) } return s } ================================================ FILE: go.mod ================================================ module github.com/gookit/slog go 1.19 require ( github.com/gookit/color v1.6.0 github.com/gookit/goutil v0.7.4 github.com/gookit/gsr v0.1.1 github.com/valyala/bytebufferpool v1.0.0 ) require ( github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect ) ================================================ FILE: go.sum ================================================ github.com/gookit/assert v0.1.1 h1:lh3GcawXe/p+cU7ESTZ5Ui3Sm/x8JWpIis4/1aF0mY0= github.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA= github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs= github.com/gookit/goutil v0.7.4 h1:OWgUngToNz+bPlX5aP+EMG31DraEU63uvKMwwT3vseM= github.com/gookit/goutil v0.7.4/go.mod 
h1:vJS9HXctYTCLtCsZot5L5xF+O1oR17cDYO9R0HxBmnU= github.com/gookit/gsr v0.1.1 h1:TaHD3M7qa6lcAf9D2J4mGNg+QjgDtD1bw7uctF8RXOM= github.com/gookit/gsr v0.1.1/go.mod h1:7wv4Y4WCnil8+DlDYHBjidzrEzfHhXEoFjEA0pPPWpI= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E= golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= ================================================ FILE: handler/README.md ================================================ # Handlers Package handler provide useful common log handlers. 
eg: file, console, multi_file, rotate_file, stream, syslog, email ```text handler -> buffered -> rotated -> writer(os.File) ``` ## Built-in handlers - `handler.ConsoleHandler` Console handler - `handler.FileHandler` File handler - `handler.StreamHandler` Stream handler - `handler.SyslogHandler` Syslog handler - `handler.EmailHandler` Email handler - `handler.FlushCloseHandler` Flush and close handler ## Go Docs Docs generated by: `go doc ./handler` ### Handler Functions ```go func LineBuffOsFile(f *os.File, bufSize int, levels []slog.Level) slog.Handler func LineBuffWriter(w io.Writer, bufSize int, levels []slog.Level) slog.Handler func LineBufferedFile(logfile string, bufSize int, levels []slog.Level) (slog.Handler, error) type ConsoleHandler = IOWriterHandler func ConsoleWithLevels(levels []slog.Level) *ConsoleHandler func ConsoleWithMaxLevel(level slog.Level) *ConsoleHandler func NewConsole(levels []slog.Level) *ConsoleHandler func NewConsoleHandler(levels []slog.Level) *ConsoleHandler func NewConsoleWithLF(lf slog.LevelFormattable) *ConsoleHandler type EmailHandler struct{ ... } func NewEmailHandler(from EmailOption, toAddresses []string) *EmailHandler type EmailOption struct{ ... } type FlushCloseHandler struct{ ... } func FlushCloserWithLevels(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler func FlushCloserWithMaxLevel(out FlushCloseWriter, maxLevel slog.Level) *FlushCloseHandler func NewBuffered(w io.WriteCloser, bufSize int, levels ...slog.Level) *FlushCloseHandler func NewBufferedHandler(w io.WriteCloser, bufSize int, levels ...slog.Level) *FlushCloseHandler func NewFlushCloseHandler(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler func NewFlushCloser(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler func NewFlushCloserWithLF(out FlushCloseWriter, lf slog.LevelFormattable) *FlushCloseHandler type IOWriterHandler struct{ ... 
} func IOWriterWithLevels(out io.Writer, levels []slog.Level) *IOWriterHandler func IOWriterWithMaxLevel(out io.Writer, maxLevel slog.Level) *IOWriterHandler func NewIOWriter(out io.Writer, levels []slog.Level) *IOWriterHandler func NewIOWriterHandler(out io.Writer, levels []slog.Level) *IOWriterHandler func NewIOWriterWithLF(out io.Writer, lf slog.LevelFormattable) *IOWriterHandler func NewSimpleHandler(out io.Writer, maxLevel slog.Level) *IOWriterHandler func SimpleWithLevels(out io.Writer, levels []slog.Level) *IOWriterHandler type SimpleHandler = IOWriterHandler func NewHandler(out io.Writer, maxLevel slog.Level) *SimpleHandler func NewSimple(out io.Writer, maxLevel slog.Level) *SimpleHandler type SyncCloseHandler struct{ ... } func JSONFileHandler(logfile string, fns ...ConfigFn) (*SyncCloseHandler, error) func MustFileHandler(logfile string, fns ...ConfigFn) *SyncCloseHandler func MustRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) *SyncCloseHandler func MustSimpleFile(filepath string, maxLv ...slog.Level) *SyncCloseHandler func MustSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) *SyncCloseHandler func MustTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) *SyncCloseHandler func NewBuffFileHandler(logfile string, buffSize int, fns ...ConfigFn) (*SyncCloseHandler, error) func NewFileHandler(logfile string, fns ...ConfigFn) (h *SyncCloseHandler, err error) func NewRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) func NewRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) func NewSimpleFile(filepath string, maxLv ...slog.Level) (*SyncCloseHandler, error) func NewSimpleFileHandler(filePath string, maxLv ...slog.Level) (*SyncCloseHandler, error) func NewSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error) func NewSizeRotateFileHandler(logfile string, maxSize int, fns 
...ConfigFn) (*SyncCloseHandler, error) func NewSyncCloseHandler(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler func NewSyncCloser(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler func NewSyncCloserWithLF(out SyncCloseWriter, lf slog.LevelFormattable) *SyncCloseHandler func NewTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) func NewTimeRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) func SyncCloserWithLevels(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler func SyncCloserWithMaxLevel(out SyncCloseWriter, maxLevel slog.Level) *SyncCloseHandler type SysLogHandler struct{ ... } func NewSysLogHandler(priority syslog.Priority, tag string) (*SysLogHandler, error) type WriteCloserHandler struct{ ... } func NewWriteCloser(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler func NewWriteCloserHandler(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler func NewWriteCloserWithLF(out io.WriteCloser, lf slog.LevelFormattable) *WriteCloserHandler func WriteCloserWithLevels(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler func WriteCloserWithMaxLevel(out io.WriteCloser, maxLevel slog.Level) *WriteCloserHandler ``` ### Config Functions ```go type Builder struct{ ... } func NewBuilder() *Builder type Config struct{ ... 
} func NewConfig(fns ...ConfigFn) *Config func NewEmptyConfig(fns ...ConfigFn) *Config type ConfigFn func(c *Config) func WithBackupNum(n uint) ConfigFn func WithBackupTime(bt uint) ConfigFn func WithBuffMode(buffMode string) ConfigFn func WithBuffSize(buffSize int) ConfigFn func WithCompress(compress bool) ConfigFn func WithFilePerm(filePerm fs.FileMode) ConfigFn func WithLevelMode(mode slog.LevelMode) ConfigFn func WithLevelName(name string) ConfigFn func WithLevelNames(names []string) ConfigFn func WithLevelNamesString(names string) ConfigFn func WithLogLevel(level slog.Level) ConfigFn func WithLogLevels(levels slog.Levels) ConfigFn func WithLogfile(logfile string) ConfigFn func WithMaxLevelName(name string) ConfigFn func WithMaxSize(maxSize uint64) ConfigFn func WithRotateMode(m rotatefile.RotateMode) ConfigFn func WithRotateTime(rt rotatefile.RotateTime) ConfigFn func WithRotateTimeString(rt string) ConfigFn func WithTimeClock(clock rotatefile.Clocker) ConfigFn func WithUseJSON(useJSON bool) ConfigFn ``` ================================================ FILE: handler/buffer.go ================================================ package handler import ( "io" "os" "github.com/gookit/slog" "github.com/gookit/slog/bufwrite" ) // NewBuffered create new BufferedHandler func NewBuffered(w io.WriteCloser, bufSize int, levels ...slog.Level) *FlushCloseHandler { return NewBufferedHandler(w, bufSize, levels...) 
} // NewBufferedHandler create new BufferedHandler func NewBufferedHandler(w io.WriteCloser, bufSize int, levels ...slog.Level) *FlushCloseHandler { if len(levels) == 0 { levels = slog.AllLevels } out := bufwrite.NewBufIOWriterSize(w, bufSize) return FlushCloserWithLevels(out, levels) } // LineBufferedFile handler func LineBufferedFile(logfile string, bufSize int, levels []slog.Level) (slog.Handler, error) { cfg := NewConfig( WithLogfile(logfile), WithBuffSize(bufSize), WithLogLevels(levels), WithBuffMode(BuffModeLine), ) out, err := cfg.CreateWriter() if err != nil { return nil, err } return SyncCloserWithLevels(out, levels), nil } // LineBuffOsFile handler func LineBuffOsFile(f *os.File, bufSize int, levels []slog.Level) slog.Handler { if f == nil { panic("slog: the os file cannot be nil") } out := bufwrite.NewLineWriterSize(f, bufSize) return SyncCloserWithLevels(out, levels) } // LineBuffWriter handler func LineBuffWriter(w io.Writer, bufSize int, levels []slog.Level) slog.Handler { if w == nil { panic("slog: the io writer cannot be nil") } out := bufwrite.NewLineWriterSize(w, bufSize) return IOWriterWithLevels(out, levels) } // // --------- wrap a handler with buffer --------- // // FormatWriterHandler interface type FormatWriterHandler interface { slog.Handler // Formatter record formatter Formatter() slog.Formatter // Writer the output writer Writer() io.Writer } ================================================ FILE: handler/buffer_test.go ================================================ package handler_test import ( "os" "testing" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func TestNewBufferedHandler(t *testing.T) { logfile := "./testdata/buffer-os-file.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) file, err := handler.QuickOpenFile(logfile) assert.NoErr(t, err) assert.True(t, fsutil.IsFile(logfile)) bh := handler.NewBuffered(file, 128) // new logger l := 
slog.NewWithHandlers(bh) l.Info("buffered info message") bts, err := os.ReadFile(logfile) assert.NoErr(t, err) assert.Empty(t, bts) l.Warn("buffered warn message") bts, err = os.ReadFile(logfile) assert.NoErr(t, err) str := string(bts) assert.Contains(t, str, "[INFO]") err = l.FlushAll() assert.NoErr(t, err) } func TestLineBufferedFile(t *testing.T) { logfile := "./testdata/line-buff-file.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) h, err := handler.LineBufferedFile(logfile, 12, slog.AllLevels) assert.NoErr(t, err) assert.True(t, fsutil.IsFile(logfile)) r := newLogRecord("Test LineBufferedFile") err = h.Handle(r) assert.NoErr(t, err) bts, err := os.ReadFile(logfile) assert.NoErr(t, err) str := string(bts) assert.Contains(t, str, "[INFO]") assert.Contains(t, str, "Test LineBufferedFile") } func TestLineBuffOsFile(t *testing.T) { logfile := "./testdata/line-buff-os-file.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) file, err := fsutil.QuickOpenFile(logfile) assert.NoErr(t, err) h := handler.LineBuffOsFile(file, 12, slog.AllLevels) assert.NoErr(t, err) assert.True(t, fsutil.IsFile(logfile)) r := newLogRecord("Test LineBuffOsFile") err = h.Handle(r) assert.NoErr(t, err) bts, err := os.ReadFile(logfile) assert.NoErr(t, err) str := string(bts) assert.Contains(t, str, "[INFO]") assert.Contains(t, str, "Test LineBuffOsFile") assert.Panics(t, func() { handler.LineBuffOsFile(nil, 12, slog.AllLevels) }) } func TestLineBuffWriter(t *testing.T) { logfile := "./testdata/line-buff-writer.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) file, err := fsutil.QuickOpenFile(logfile) assert.NoErr(t, err) h := handler.LineBuffWriter(file, 12, slog.AllLevels) assert.NoErr(t, err) assert.True(t, fsutil.IsFile(logfile)) assert.Panics(t, func() { handler.LineBuffWriter(nil, 12, slog.AllLevels) }) r := newLogRecord("Test LineBuffWriter") err = h.Handle(r) assert.NoErr(t, err) bts, err := os.ReadFile(logfile) assert.NoErr(t, err) str := string(bts) 
package handler

import (
	"io"

	"github.com/gookit/slog"
	"github.com/gookit/slog/rotatefile"
)

//
// ---------------------------------------------------------------------------
// handler builder
// ---------------------------------------------------------------------------
//

// Builder struct for creating a handler. It embeds *Config for file and
// rotation settings, plus an optional pre-built Output writer.
type Builder struct {
	*Config
	Output io.Writer
}

// NewBuilder create a new Builder with an empty config.
func NewBuilder() *Builder {
	return &Builder{
		Config: NewEmptyConfig(),
	}
}

// WithOutput set the output writer for the builder.
func (b *Builder) WithOutput(w io.Writer) *Builder {
	b.Output = w
	return b
}

// With some config fn
//
// Deprecated: please use WithConfigFn()
func (b *Builder) With(fns ...ConfigFn) *Builder {
	return b.WithConfigFn(fns...)
}

// WithConfigFn apply config option funcs to the embedded Config.
func (b *Builder) WithConfigFn(fns ...ConfigFn) *Builder {
	b.Config.With(fns...)
	return b
}

// WithLogfile setting the log file path.
func (b *Builder) WithLogfile(logfile string) *Builder {
	b.Logfile = logfile
	return b
}

// WithLevelMode setting the level limit mode.
func (b *Builder) WithLevelMode(mode slog.LevelMode) *Builder {
	b.LevelMode = mode
	return b
}

// WithLogLevel setting max log level. also switches to max-level mode.
func (b *Builder) WithLogLevel(level slog.Level) *Builder {
	b.Level = level
	b.LevelMode = slog.LevelModeMax
	return b
}

// WithLogLevels setting the level list. also switches to level-list mode.
func (b *Builder) WithLogLevels(levels []slog.Level) *Builder {
	b.Levels = levels
	b.LevelMode = slog.LevelModeList
	return b
}

// WithBuffMode setting the buffer mode name.
func (b *Builder) WithBuffMode(bufMode string) *Builder {
	b.BuffMode = bufMode
	return b
}

// WithBuffSize setting the buffer size, unit is bytes.
func (b *Builder) WithBuffSize(bufSize int) *Builder {
	b.BuffSize = bufSize
	return b
}

// WithMaxSize setting the max size for a size-rotated file.
func (b *Builder) WithMaxSize(maxSize uint64) *Builder {
	b.MaxSize = maxSize
	return b
}

// WithRotateTime setting the rotate time.
func (b *Builder) WithRotateTime(rt rotatefile.RotateTime) *Builder {
	b.RotateTime = rt
	return b
}

// WithCompress setting whether rotated files are compressed.
func (b *Builder) WithCompress(compress bool) *Builder {
	b.Compress = compress
	return b
}

// WithUseJSON setting whether to use the JSON formatter.
func (b *Builder) WithUseJSON(useJSON bool) *Builder {
	b.UseJSON = useJSON
	return b
}

// Build slog handler from the Output writer or the Logfile path.
//
// NOTE: panics when neither Output nor Logfile is configured, or when the
// logfile writer cannot be created.
func (b *Builder) Build() slog.FormattableHandler {
	if b.Output != nil {
		return b.buildFromWriter(b.Output)
	}

	if b.Logfile != "" {
		w, err := b.CreateWriter()
		if err != nil {
			panic(err)
		}
		return b.buildFromWriter(w)
	}

	panic("slog: missing information for build slog handler")
}
// buildFromWriter builds a handler for w, choosing the most capable wrapper
// in priority order: SyncCloseWriter > FlushCloseWriter > io.WriteCloser >
// plain io.Writer. When BuffSize > 0 the writer is first wrapped with a
// buffer. The builder is reset on return, so each Builder produces one
// handler per configuration.
func (b *Builder) buildFromWriter(w io.Writer) (h slog.FormattableHandler) {
	defer b.reset()
	bufSize := b.BuffSize

	lf := b.newLevelFormattable()

	if scw, ok := w.(SyncCloseWriter); ok {
		if bufSize > 0 {
			scw = b.wrapBuffer(scw)
		}
		h = NewSyncCloserWithLF(scw, lf)
	} else if fcw, ok := w.(FlushCloseWriter); ok {
		if bufSize > 0 {
			fcw = b.wrapBuffer(fcw)
		}
		h = NewFlushCloserWithLF(fcw, lf)
	} else if wc, ok := w.(io.WriteCloser); ok {
		if bufSize > 0 {
			wc = b.wrapBuffer(wc)
		}
		h = NewWriteCloserWithLF(wc, lf)
	} else {
		if bufSize > 0 {
			w = b.wrapBuffer(w)
		}
		h = NewIOWriterWithLF(w, lf)
	}

	// use json format.
	if b.UseJSON {
		h.SetFormatter(slog.NewJSONFormatter())
	}
	return
}

// reset the builder state so a Builder instance can be reused.
func (b *Builder) reset() {
	b.Output = nil
	b.Config = NewEmptyConfig()
}
Level slog.Level `json:"level" yaml:"level"` // Levels list for writing. valid on LevelMode = LevelModeList Levels []slog.Level `json:"levels" yaml:"levels"` // UseJSON for format logs UseJSON bool `json:"use_json" yaml:"use_json"` // BuffMode type name. allow: line, bite // // Recommend use BuffModeLine(it's default) BuffMode string `json:"buff_mode" yaml:"buff_mode"` // BuffSize for enable buffer, unit is bytes. set 0 to disable buffer BuffSize int `json:"buff_size" yaml:"buff_size"` // RotateTime for a rotating file, unit is seconds. RotateTime rotatefile.RotateTime `json:"rotate_time" yaml:"rotate_time"` // RotateMode for a rotating file by time. default rotatefile.ModeRename RotateMode rotatefile.RotateMode `json:"rotate_mode" yaml:"rotate_mode"` // TimeClock for a rotating file by time. TimeClock rotatefile.Clocker `json:"-" yaml:"-"` // MaxSize for a rotating file by size, unit is bytes. MaxSize uint64 `json:"max_size" yaml:"max_size"` // Compress determines if the rotated log files should be compressed using gzip. // The default is not to perform compression. Compress bool `json:"compress" yaml:"compress"` // BackupNum max number for keep old files. // // 0 is not limit, default is 20. BackupNum uint `json:"backup_num" yaml:"backup_num"` // BackupTime max time for keep old files, unit is hours. // // 0 is not limit, default is a week. BackupTime uint `json:"backup_time" yaml:"backup_time"` // RenameFunc build filename for rotate file RenameFunc func(filepath string, rotateNum uint) string // CleanOnClose determines if the rotated log files should be cleaned up when close. CleanOnClose bool `json:"clean_on_close" yaml:"clean_on_close"` // DebugMode for debug on development. DebugMode bool } // NewEmptyConfig new config instance func NewEmptyConfig(fns ...ConfigFn) *Config { c := &Config{Levels: slog.AllLevels} return c.WithConfigFn(fns...) } // NewConfig new config instance with some default settings. 
// NewConfig new config instance with some default settings: all levels,
// line buff mode, hourly rotation, and default backup number/time.
func NewConfig(fns ...ConfigFn) *Config {
	c := &Config{
		Levels:   slog.AllLevels,
		BuffMode: BuffModeLine,
		BuffSize: DefaultBufferSize,
		// rotate file settings
		MaxSize:    rotatefile.DefaultMaxSize,
		RotateTime: rotatefile.EveryHour,
		// old files clean settings
		BackupNum:  rotatefile.DefaultBackNum,
		BackupTime: rotatefile.DefaultBackTime,
		DebugMode:  slog.DebugMode,
	}

	return c.WithConfigFn(fns...)
}

// FromJSON load config from JSON bytes.
func (c *Config) FromJSON(bts []byte) error {
	return json.Unmarshal(bts, c)
}

// With more config settings func
func (c *Config) With(fns ...ConfigFn) *Config {
	return c.WithConfigFn(fns...)
}

// WithConfigFn apply more config settings func, in the given order.
func (c *Config) WithConfigFn(fns ...ConfigFn) *Config {
	for _, fn := range fns {
		fn(c)
	}
	return c
}

// newLevelFormattable builds the level filter: a max-level limiter when
// LevelMode is LevelModeValue, otherwise a level-list limiter.
func (c *Config) newLevelFormattable() slog.LevelFormattable {
	if c.LevelMode == LevelModeValue {
		return slog.NewLvFormatter(c.Level)
	}
	return slog.NewLvsFormatter(c.Levels)
}

// CreateHandler quick create a handler by config
func (c *Config) CreateHandler() (*SyncCloseHandler, error) {
	output, err := c.CreateWriter()
	if err != nil {
		return nil, err
	}

	h := &SyncCloseHandler{
		Output: output,
		// with log level and formatter
		LevelFormattable: c.newLevelFormattable(),
	}

	if c.UseJSON {
		h.SetFormatter(slog.NewJSONFormatter())
	}
	return h, nil
}

// RotateWriter build rotate writer by config.
// Errors when both MaxSize and RotateTime are 0 (nothing to rotate on).
func (c *Config) RotateWriter() (output SyncCloseWriter, err error) {
	if c.MaxSize == 0 && c.RotateTime == 0 {
		return nil, errorx.E("slog: cannot create rotate writer, MaxSize and RotateTime both is 0")
	}
	return c.CreateWriter()
}

// CreateWriter build writer by config.
//
// Returns a rotating writer when MaxSize or RotateTime is set; otherwise a
// plain append-mode file writer. The result is wrapped with a buffer when
// BuffSize > 0.
func (c *Config) CreateWriter() (output SyncCloseWriter, err error) {
	if c.Logfile == "" {
		return nil, errorx.Raw("slog: logfile cannot be empty for create writer")
	}
	if c.FilePerm == 0 {
		c.FilePerm = rotatefile.DefaultFilePerm
	}

	// create a rotated writer by config.
	if c.MaxSize > 0 || c.RotateTime > 0 {
		rc := rotatefile.EmptyConfigWith()

		// has locked on logger.write()
		rc.CloseLock = true
		rc.Filepath = c.Logfile
		rc.FilePerm = c.FilePerm
		rc.DebugMode = c.DebugMode

		// copy settings
		rc.MaxSize = c.MaxSize
		rc.RotateTime = c.RotateTime
		rc.RotateMode = c.RotateMode
		rc.BackupNum = c.BackupNum
		rc.BackupTime = c.BackupTime
		rc.Compress = c.Compress
		rc.CleanOnClose = c.CleanOnClose

		if c.RenameFunc != nil {
			rc.RenameFunc = c.RenameFunc
		}
		if c.TimeClock != nil {
			rc.TimeClock = c.TimeClock
		}

		output, err = rc.Create()
	} else {
		// create a plain append-mode file writer
		output, err = fsutil.OpenAppendFile(c.Logfile, c.FilePerm)
	}

	if err != nil {
		return nil, err
	}

	// wrap buffer
	if c.BuffSize > 0 {
		output = c.wrapBuffer(output)
	}
	return
}

// flushSyncCloseWriter is a writer that can also flush and sync.
type flushSyncCloseWriter interface {
	FlushCloseWriter
	Sync() error
}

// wrapBuffer wrap a buffer for the writer. BuffModeLine uses a line-flushed
// writer; any other mode uses a plain buffered io writer.
func (c *Config) wrapBuffer(w io.Writer) (bw flushSyncCloseWriter) {
	if c.BuffMode == BuffModeLine {
		bw = bufwrite.NewLineWriterSize(w, c.BuffSize)
	} else {
		bw = bufwrite.NewBufIOWriterSize(w, c.BuffSize)
	}
	return bw
}

//
// ---------------------------------------------------------------------------
// global config func
// ---------------------------------------------------------------------------
//

// WithLogfile setting the log file path.
func WithLogfile(logfile string) ConfigFn {
	return func(c *Config) { c.Logfile = logfile }
}

// WithFilePerm setting the create permission for the log file.
func WithFilePerm(filePerm fs.FileMode) ConfigFn {
	return func(c *Config) { c.FilePerm = filePerm }
}

// WithLevelMode setting the level limit mode.
func WithLevelMode(lm slog.LevelMode) ConfigFn {
	return func(c *Config) { c.LevelMode = lm }
}

// WithLevelModeString setting the level limit mode by name string.
func WithLevelModeString(s string) ConfigFn {
	return func(c *Config) { c.LevelMode = slog.SafeToLevelMode(s) }
}

// WithLogLevel setting max log level. also switches to LevelModeValue.
func WithLogLevel(level slog.Level) ConfigFn {
	return func(c *Config) {
		c.Level = level
		c.LevelMode = LevelModeValue
	}
}
// WithLevelName setting max level by name.
func WithLevelName(name string) ConfigFn {
	return WithLogLevel(slog.LevelByName(name))
}

// WithMaxLevelName setting max level by name.
func WithMaxLevelName(name string) ConfigFn {
	return WithLogLevel(slog.LevelByName(name))
}

// WithLogLevels setting the level list. also switches to LevelModeList.
func WithLogLevels(levels slog.Levels) ConfigFn {
	return func(c *Config) {
		c.Levels = levels
		c.LevelMode = LevelModeList
	}
}

// WithLevelNamesString setting multi levels by level names string, multi names split by comma.
func WithLevelNamesString(names string) ConfigFn {
	return WithLevelNames(strings.Split(names, ","))
}

// WithLevelNames set multi levels by level names.
func WithLevelNames(names []string) ConfigFn {
	levels := make([]slog.Level, 0, len(names))
	for _, name := range names {
		levels = append(levels, slog.LevelByName(name))
	}
	return WithLogLevels(levels)
}

// WithRotateTime setting the rotated time
func WithRotateTime(rt rotatefile.RotateTime) ConfigFn {
	return func(c *Config) { c.RotateTime = rt }
}

// WithRotateTimeString setting the rotated time by string.
//
// eg: "1hour", "24h", "1day", "7d", "1m", "30s"
//
// NOTE: the returned ConfigFn panics on an invalid time string.
func WithRotateTimeString(s string) ConfigFn {
	return func(c *Config) {
		rt, err := rotatefile.StringToRotateTime(s)
		if err != nil {
			panic(err)
		}
		c.RotateTime = rt
	}
}

// WithRotateMode setting rotating mode rotatefile.RotateMode
func WithRotateMode(m rotatefile.RotateMode) ConfigFn {
	return func(c *Config) { c.RotateMode = m }
}

// WithRotateModeString setting rotatefile.RotateMode by string.
//
// NOTE: the returned ConfigFn panics on an invalid mode string.
func WithRotateModeString(s string) ConfigFn {
	return func(c *Config) {
		m, err := rotatefile.StringToRotateMode(s)
		if err != nil {
			panic(err)
		}
		c.RotateMode = m
	}
}

// WithTimeClock setting the clock used for rotating by time.
func WithTimeClock(clock rotatefile.Clocker) ConfigFn {
	return func(c *Config) { c.TimeClock = clock }
}

// WithBackupNum setting the max number of old files to keep.
func WithBackupNum(n uint) ConfigFn {
	return func(c *Config) { c.BackupNum = n }
}

// WithBackupTime setting backup time for old files, unit is hours.
func WithBackupTime(bt uint) ConfigFn {
	return func(c *Config) { c.BackupTime = bt }
}

// WithBuffMode setting buffer mode. allow: BuffModeLine, BuffModeBite
func WithBuffMode(buffMode string) ConfigFn {
	return func(c *Config) { c.BuffMode = buffMode }
}

// WithBuffSize setting buffer size, unit is bytes. set 0 to disable buffer.
func WithBuffSize(buffSize int) ConfigFn {
	return func(c *Config) { c.BuffSize = buffSize }
}

// WithMaxSize setting max size for a rotated file, unit is bytes.
func WithMaxSize(maxSize uint64) ConfigFn {
	return func(c *Config) { c.MaxSize = maxSize }
}

// WithCompress setting whether rotated files are gzip-compressed.
func WithCompress(compress bool) ConfigFn {
	return func(c *Config) { c.Compress = compress }
}

// WithUseJSON setting uses JSON format for logs.
func WithUseJSON(useJSON bool) ConfigFn {
	return func(c *Config) { c.UseJSON = useJSON }
}

// WithDebugMode enable debug mode.
//
// NOTE: unlike the other options here, this function is itself a ConfigFn
// (it takes *Config directly), so pass it without calling it.
func WithDebugMode(c *Config) {
	c.DebugMode = true
}
*handler.Config) { c.BackupTime = 23 c.RenameFunc = func(fpath string, num uint) string { return fpath + ".bak" } }, ). With(handler.WithBuffSize(129)). WithConfigFn(handler.WithLogLevel(slog.ErrorLevel)) assert.True(t, c.Compress) assert.Eq(t, 129, c.BuffSize) assert.Eq(t, handler.LevelModeValue, c.LevelMode) assert.Eq(t, slog.ErrorLevel, c.Level) assert.Eq(t, rotatefile.ModeCreate, c.RotateMode) c.With(handler.WithLevelModeString("max")) assert.Eq(t, slog.LevelModeMax, c.LevelMode) c.WithConfigFn(handler.WithLevelNames([]string{"info", "debug"})) assert.Eq(t, []slog.Level{slog.InfoLevel, slog.DebugLevel}, c.Levels) } func TestConfig_fromJSON(t *testing.T) { c := &handler.Config{} assert.Eq(t, slog.LevelModeList, c.LevelMode) assert.Eq(t, rotatefile.ModeRename, c.RotateMode) assert.NoErr(t, c.FromJSON([]byte(`{ "logfile": "testdata/config_test.log", "level": "debug", "level_mode": "max", "levels": ["info", "debug"], "buff_mode": "line", "buff_size": 128, "backup_num": 3, "backup_time": 3600, "rotate_mode": "create", "rotate_time": "1day" }`))) c.With(handler.WithDebugMode) dump.P(c) assert.Eq(t, slog.LevelModeMax, c.LevelMode) assert.Eq(t, rotatefile.ModeCreate, c.RotateMode) assert.Eq(t, "Every 1 Day", c.RotateTime.String()) } func TestWithLevelNamesString(t *testing.T) { c := handler.NewConfig(handler.WithLevelNamesString("info,error")) assert.Eq(t, []slog.Level{slog.InfoLevel, slog.ErrorLevel}, c.Levels) } func TestWithMaxLevelName(t *testing.T) { c := handler.NewConfig(handler.WithMaxLevelName("error")) assert.Eq(t, slog.ErrorLevel, c.Level) assert.Eq(t, handler.LevelModeValue, c.LevelMode) c1 := handler.NewConfig(handler.WithLevelName("warn")) assert.Eq(t, slog.WarnLevel, c1.Level) assert.Eq(t, handler.LevelModeValue, c1.LevelMode) } func TestWithRotateMode(t *testing.T) { c := handler.Config{} c.With(handler.WithRotateModeString("rename")) assert.Eq(t, rotatefile.ModeRename, c.RotateMode) assert.PanicsErrMsg(t, func() { 
c.With(handler.WithRotateModeString("invalid")) }, "rotatefile: invalid rotate mode: invalid") } func TestWithRotateTimeString(t *testing.T) { tests := []struct { input string expected rotatefile.RotateTime panics bool }{ {"1hours", rotatefile.RotateTime(3600), false}, {"24h", rotatefile.RotateTime(86400), false}, {"1day", rotatefile.RotateTime(86400), false}, {"7d", rotatefile.RotateTime(604800), false}, {"1m", rotatefile.RotateTime(60), false}, {"30s", rotatefile.RotateTime(30), false}, {"invalid", 0, true}, } for _, tt := range tests { t.Run(tt.input, func(t *testing.T) { c := &handler.Config{} if tt.panics { assert.Panics(t, func() { handler.WithRotateTimeString(tt.input)(c) }) } else { assert.NotPanics(t, func() { handler.WithRotateTimeString(tt.input)(c) }) assert.Eq(t, tt.expected, c.RotateTime) } }) } } func TestNewBuilder(t *testing.T) { testFile := "testdata/builder.log" assert.NoErr(t, fsutil.DeleteIfFileExist(testFile)) b := handler.NewBuilder(). WithLogfile(testFile). WithLogLevels(slog.AllLevels). WithBuffSize(128). WithBuffMode(handler.BuffModeBite). WithMaxSize(fmtutil.OneMByte * 3). WithRotateTime(rotatefile.Every30Min). WithCompress(true). With(func(c *handler.Config) { c.BackupNum = 3 }) assert.Eq(t, uint(3), b.BackupNum) assert.Eq(t, handler.BuffModeBite, b.BuffMode) assert.Eq(t, rotatefile.Every30Min, b.RotateTime) h := b.Build() assert.NotNil(t, h) assert.NoErr(t, h.Close()) b1 := handler.NewBuilder(). WithOutput(new(bytes.Buffer)). WithUseJSON(true). WithLogLevel(slog.ErrorLevel). 
WithLevelMode(handler.LevelModeValue) assert.Eq(t, handler.LevelModeValue, b1.LevelMode) assert.Eq(t, slog.ErrorLevel, b1.Level) h2 := b1.Build() assert.NotNil(t, h2) assert.Panics(t, func() { handler.NewBuilder().Build() }) } type simpleWriter struct { errOnWrite bool } func (w *simpleWriter) Write(p []byte) (n int, err error) { if w.errOnWrite { return 0, errorx.Raw("write error") } return len(p), nil } type closeWriter struct { errOnWrite bool errOnClose bool } func (w *closeWriter) Close() error { if w.errOnClose { return errorx.Raw("close error") } return nil } func (w *closeWriter) Write(p []byte) (n int, err error) { if w.errOnWrite { return 0, errorx.Raw("write error") } return len(p), nil } type flushCloseWriter struct { closeWriter errOnFlush bool } // Flush implement stdio.Flusher func (w *flushCloseWriter) Flush() error { if w.errOnFlush { return errorx.Raw("flush error") } return nil } type syncCloseWriter struct { closeWriter errOnSync bool } // Sync implement stdio.Syncer func (w *syncCloseWriter) Sync() error { if w.errOnSync { return errorx.Raw("sync error") } return nil } func TestNewBuilder_buildFromWriter(t *testing.T) { t.Run("FlushCloseWriter", func(t *testing.T) { out := &flushCloseWriter{} out.errOnFlush = true h := handler.NewBuilder(). WithOutput(out). WithConfigFn(func(c *handler.Config) { c.RenameFunc = func(fpath string, num uint) string { return fpath + ".bak" } }). Build() assert.Err(t, h.Flush()) // wrap buffer h = handler.NewBuilder(). WithOutput(out). WithBuffSize(128). Build() assert.NoErr(t, h.Close()) assert.NoErr(t, h.Flush()) }) t.Run("CloseWriter", func(t *testing.T) { h := handler.NewBuilder(). WithOutput(&closeWriter{errOnClose: true}). WithBuffSize(128). Build() assert.NotNil(t, h) assert.Err(t, h.Close()) }) t.Run("SimpleWriter", func(t *testing.T) { h := handler.NewBuilder(). WithOutput(&simpleWriter{errOnWrite: true}). WithBuffSize(128). 
Build() assert.NotNil(t, h) assert.NoErr(t, h.Close()) }) } ================================================ FILE: handler/console.go ================================================ package handler import ( "os" "github.com/gookit/color" "github.com/gookit/slog" ) /******************************************************************************** * console log handler ********************************************************************************/ // ConsoleHandler definition type ConsoleHandler = IOWriterHandler // NewConsoleWithLF create new ConsoleHandler and with custom slog.LevelFormattable func NewConsoleWithLF(lf slog.LevelFormattable) *ConsoleHandler { h := NewIOWriterWithLF(os.Stdout, lf) // default use text formatter f := slog.NewTextFormatter() // default enable color on console f.WithEnableColor(color.SupportColor()) h.SetFormatter(f) return h } // // ------------- Use max log level ------------- // // ConsoleWithMaxLevel create new ConsoleHandler and with max log level func ConsoleWithMaxLevel(level slog.Level) *ConsoleHandler { return NewConsoleWithLF(slog.NewLvFormatter(level)) } // // ------------- Use multi log levels ------------- // // NewConsole create new ConsoleHandler, alias of NewConsoleHandler func NewConsole(levels []slog.Level) *ConsoleHandler { return NewConsoleHandler(levels) } // ConsoleWithLevels create new ConsoleHandler and with limited log levels func ConsoleWithLevels(levels []slog.Level) *ConsoleHandler { return NewConsoleHandler(levels) } // NewConsoleHandler create new ConsoleHandler with limited log levels func NewConsoleHandler(levels []slog.Level) *ConsoleHandler { return NewConsoleWithLF(slog.NewLvsFormatter(levels)) } ================================================ FILE: handler/console_test.go ================================================ package handler_test import ( "testing" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func TestConsoleWithMaxLevel(t 
*testing.T) { l := slog.NewWithHandlers(handler.ConsoleWithMaxLevel(slog.InfoLevel)) l.DoNothingOnPanicFatal() for _, level := range slog.AllLevels { l.Log(level, "a test message") } assert.NoErr(t, l.LastErr()) } ================================================ FILE: handler/email.go ================================================ package handler import ( "net/smtp" "strconv" "github.com/gookit/slog" ) // EmailOption struct type EmailOption struct { SMTPHost string `json:"smtp_host"` // eg "smtp.gmail.com" SMTPPort int `json:"smtp_port"` // eg 587 FromAddr string `json:"from_addr"` // eg "yourEmail@gmail.com" Password string `json:"password"` } // EmailHandler struct type EmailHandler struct { NopFlushClose slog.LevelWithFormatter // From the sender email information From EmailOption // ToAddresses email list ToAddresses []string } // NewEmailHandler instance func NewEmailHandler(from EmailOption, toAddresses []string) *EmailHandler { h := &EmailHandler{ From: from, // to receivers ToAddresses: toAddresses, } // init default log level h.Level = slog.InfoLevel return h } // Handle a log record func (h *EmailHandler) Handle(r *slog.Record) error { msgBytes, err := h.Format(r) if err != nil { return err } var auth = smtp.PlainAuth("", h.From.FromAddr, h.From.Password, h.From.SMTPHost) addr := h.From.SMTPHost + ":" + strconv.Itoa(h.From.SMTPPort) return smtp.SendMail(addr, auth, h.From.FromAddr, h.ToAddresses, msgBytes) } ================================================ FILE: handler/example_test.go ================================================ package handler_test import ( "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func Example_fileHandler() { withLevels := handler.WithLogLevels(slog.Levels{slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel}) h1 := handler.MustFileHandler("/tmp/error.log", withLevels) withLevels = handler.WithLogLevels(slog.Levels{slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel}) h2 := 
handler.MustFileHandler("/tmp/info.log", withLevels) slog.PushHandler(h1) slog.PushHandler(h2) // add logs slog.Info("info message") slog.Error("error message") } func Example_rotateFileHandler() { h1 := handler.MustRotateFile("/tmp/error.log", handler.EveryHour, handler.WithLogLevels(slog.DangerLevels)) h2 := handler.MustRotateFile("/tmp/info.log", handler.EveryHour, handler.WithLogLevels(slog.NormalLevels)) slog.PushHandler(h1) slog.PushHandler(h2) // add logs slog.Info("info message") slog.Error("error message") } ================================================ FILE: handler/file.go ================================================ package handler import ( "github.com/gookit/goutil/x/basefn" "github.com/gookit/slog" ) // JSONFileHandler create new FileHandler with JSON formatter func JSONFileHandler(logfile string, fns ...ConfigFn) (*SyncCloseHandler, error) { return NewFileHandler(logfile, append(fns, WithUseJSON(true))...) } // NewBuffFileHandler create file handler with buff size func NewBuffFileHandler(logfile string, buffSize int, fns ...ConfigFn) (*SyncCloseHandler, error) { return NewFileHandler(logfile, append(fns, WithBuffSize(buffSize))...) } // MustFileHandler create file handler func MustFileHandler(logfile string, fns ...ConfigFn) *SyncCloseHandler { return basefn.Must(NewFileHandler(logfile, fns...)) } // NewFileHandler create new FileHandler func NewFileHandler(logfile string, fns ...ConfigFn) (h *SyncCloseHandler, err error) { return NewEmptyConfig(fns...).With(WithLogfile(logfile)).CreateHandler() } // // ------------- simple file handler ------------- // // MustSimpleFile new instance func MustSimpleFile(filepath string, maxLv ...slog.Level) *SyncCloseHandler { return basefn.Must(NewSimpleFileHandler(filepath, maxLv...)) } // NewSimpleFile new instance func NewSimpleFile(filepath string, maxLv ...slog.Level) (*SyncCloseHandler, error) { return NewSimpleFileHandler(filepath, maxLv...) 
} // NewSimpleFileHandler instance, default log level is InfoLevel // // Usage: // // h, err := NewSimpleFileHandler("/tmp/error.log") // // Custom formatter: // // h.SetFormatter(slog.NewJSONFormatter()) // slog.PushHandler(h) // slog.Info("log message") func NewSimpleFileHandler(filePath string, maxLv ...slog.Level) (*SyncCloseHandler, error) { file, err := QuickOpenFile(filePath) if err != nil { return nil, err } h := SyncCloserWithMaxLevel(file, basefn.FirstOr(maxLv, slog.InfoLevel)) return h, nil } ================================================ FILE: handler/file_test.go ================================================ package handler_test import ( "os" "testing" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) // const testSubFile = "./testdata/subdir/app.log" func TestNewFileHandler(t *testing.T) { testFile := "testdata/file.log" h, err := handler.NewFileHandler(testFile, handler.WithFilePerm(0644)) assert.NoErr(t, err) l := slog.NewWithHandlers(h) l.DoNothingOnPanicFatal() l.Info("info message") l.Warn("warn message") logAllLevel(l, "file handler message") assert.True(t, fsutil.IsFile(testFile)) str, err := fsutil.ReadStringOrErr(testFile) assert.NoErr(t, err) assert.Contains(t, str, "[INFO]") assert.Contains(t, str, "info message") assert.Contains(t, str, "[WARNING]") assert.Contains(t, str, "warn message") // assert.NoErr(t, os.Remove(testFile)) } func TestMustFileHandler(t *testing.T) { testFile := "testdata/file-must.log" h := handler.MustFileHandler(testFile) assert.NotEmpty(t, h.Writer()) r := newLogRecord("test file must handler") err := h.Handle(r) assert.NoErr(t, err) assert.NoErr(t, h.Close()) bts := fsutil.MustReadFile(testFile) str := string(bts) assert.Contains(t, str, `INFO`) assert.Contains(t, str, `test file must handler`) } func TestNewFileHandler_basic(t *testing.T) { testFile := "testdata/file-basic.log" assert.NoErr(t, 
fsutil.DeleteIfFileExist(testFile)) h, err := handler.NewFileHandler(testFile) assert.NoErr(t, err) assert.NotEmpty(t, h.Writer()) r := newLogRecord("test file handler") err = h.Handle(r) assert.NoErr(t, err) assert.NoErr(t, h.Close()) bts := fsutil.MustReadFile(testFile) str := string(bts) assert.Contains(t, str, `INFO`) assert.Contains(t, str, `test file handler`) } func TestNewBuffFileHandler(t *testing.T) { testFile := "testdata/file-buff.log" assert.NoErr(t, fsutil.DeleteIfFileExist(testFile)) h, err := handler.NewBuffFileHandler(testFile, 56) assert.NoErr(t, err) assert.NotEmpty(t, h.Writer()) r := newLogRecord("test file buff handler") err = h.Handle(r) assert.NoErr(t, err) assert.NoErr(t, h.Close()) bts := fsutil.MustReadFile(testFile) str := string(bts) assert.Contains(t, str, `INFO`) assert.Contains(t, str, `test file buff handler`) } func TestJSONFileHandler(t *testing.T) { testFile := "testdata/file-json.log" assert.NoErr(t, fsutil.DeleteIfFileExist(testFile)) h, err := handler.JSONFileHandler(testFile) assert.NoErr(t, err) r := newLogRecord("test json file handler") err = h.Handle(r) assert.NoErr(t, err) err = h.Close() assert.NoErr(t, err) bts := fsutil.MustReadFile(testFile) str := string(bts) assert.Contains(t, str, `"level":"INFO"`) assert.Contains(t, str, `"message":"test json file handler"`) } func TestSimpleFile(t *testing.T) { logfile := "./testdata/must-simple-file.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) h := handler.MustSimpleFile(logfile) assert.True(t, h.IsHandling(slog.InfoLevel)) // NewSimpleFile logfile = "./testdata/test-simple-file.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) h2, err := handler.NewSimpleFile(logfile) assert.NoErr(t, err) assert.True(t, h2.IsHandling(slog.InfoLevel)) } func TestNewSimpleFileHandler(t *testing.T) { logfile := "./testdata/simple-file.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) assert.False(t, fsutil.IsFile(logfile)) h, err := handler.NewSimpleFileHandler(logfile) 
assert.NoErr(t, err) l := slog.NewWithHandlers(h) l.Info("info message") l.Warn("warn message") assert.True(t, fsutil.IsFile(logfile)) // assert.NoErr(t, os.Remove(logfile)) bts, err := os.ReadFile(logfile) assert.NoErr(t, err) str := string(bts) assert.Contains(t, str, "[INFO]") assert.Contains(t, str, slog.WarnLevel.Name()) } ================================================ FILE: handler/handler.go ================================================ // Package handler provide useful common log handlers. // // eg: file, console, multi_file, rotate_file, stream, syslog, email package handler import ( "io" "os" "sync" "github.com/gookit/goutil/fsutil" "github.com/gookit/slog" ) // DefaultBufferSize sizes the buffer associated with each log file. It's large // so that log records can accumulate without the logging thread blocking // on disk I/O. The flushDaemon will block instead. var DefaultBufferSize = 8 * 1024 var ( // DefaultFilePerm perm and flags for create log file DefaultFilePerm os.FileMode = 0664 // DefaultFileFlags for create/open file DefaultFileFlags = os.O_CREATE | os.O_WRONLY | os.O_APPEND ) // FlushWriter is the interface satisfied by logging destinations. type FlushWriter interface { Flush() error // Writer the output writer io.Writer } // FlushCloseWriter is the interface satisfied by logging destinations. type FlushCloseWriter interface { Flush() error // WriteCloser the output writer io.WriteCloser } // SyncCloseWriter is the interface satisfied by logging destinations. // such as os.File type SyncCloseWriter interface { Sync() error // WriteCloser the output writer io.WriteCloser } /******************************************************************************** * Common parts for handler ********************************************************************************/ // LevelWithFormatter struct definition // // - support set log formatter // - only support set one log level // // Deprecated: please use slog.LevelWithFormatter instead. 
type LevelWithFormatter = slog.LevelWithFormatter // LevelsWithFormatter struct definition // // - support set log formatter // - support setting multi log levels // // Deprecated: please use slog.LevelsWithFormatter instead. type LevelsWithFormatter = slog.LevelsWithFormatter // NopFlushClose no operation. // // provide empty Flush(), Close() methods, useful for tests. type NopFlushClose struct{} // Flush logs to disk func (h *NopFlushClose) Flush() error { return nil } // Close handler func (h *NopFlushClose) Close() error { return nil } // LockWrapper struct type LockWrapper struct { sync.Mutex disable bool } // Lock it func (lw *LockWrapper) Lock() { if !lw.disable { lw.Mutex.Lock() } } // Unlock it func (lw *LockWrapper) Unlock() { if !lw.disable { lw.Mutex.Unlock() } } // EnableLock enable lock func (lw *LockWrapper) EnableLock(enable bool) { lw.disable = !enable } // LockEnabled status func (lw *LockWrapper) LockEnabled() bool { return !lw.disable } // QuickOpenFile like os.OpenFile func QuickOpenFile(filepath string) (*os.File, error) { return fsutil.OpenFile(filepath, DefaultFileFlags, DefaultFilePerm) } ================================================ FILE: handler/handler_test.go ================================================ package handler_test import ( "fmt" "testing" "github.com/gookit/goutil" "github.com/gookit/goutil/errorx" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) var ( sampleData = slog.M{ "name": "inhere", "age": 100, "skill": "go,php,java", } ) func TestMain(m *testing.M) { fmt.Println("TestMain: remove all test files in ./testdata") goutil.PanicErr(fsutil.RemoveSub("./testdata", fsutil.ExcludeNames(".keep"))) m.Run() } func TestConfig_CreateWriter(t *testing.T) { cfg := handler.NewEmptyConfig() w, err := cfg.CreateWriter() assert.Nil(t, w) assert.Err(t, err) h, err := cfg.CreateHandler() assert.Nil(t, h) assert.Err(t, err) logfile := 
"./testdata/file-by-config.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) cfg.With( handler.WithBuffMode(handler.BuffModeBite), handler.WithLogLevels(slog.NormalLevels), handler.WithLogfile(logfile), ) w, err = cfg.CreateWriter() assert.NoErr(t, err) _, err = w.Write([]byte("hello, config")) assert.NoErr(t, err) bts := fsutil.MustReadFile(logfile) str := string(bts) assert.Eq(t, str, "hello, config") assert.NoErr(t, w.Sync()) assert.NoErr(t, w.Close()) } func TestConfig_RotateWriter(t *testing.T) { cfg := handler.NewEmptyConfig() w, err := cfg.RotateWriter() assert.Nil(t, w) assert.Err(t, err) } func TestConsoleHandlerWithColor(t *testing.T) { l := slog.NewWithHandlers(handler.ConsoleWithLevels(slog.AllLevels)) l.DoNothingOnPanicFatal() l.Configure(func(l *slog.Logger) { l.ReportCaller = true }) logAllLevel(l, "this is a simple log message") // logfAllLevel() } func TestConsoleHandlerNoColor(t *testing.T) { h := handler.NewConsole(slog.AllLevels) // no color h.TextFormatter().EnableColor = false l := slog.NewWithHandlers(h) l.DoNothingOnPanicFatal() l.ReportCaller = true logAllLevel(l, "this is a simple log message") } func TestNewEmailHandler(t *testing.T) { from := handler.EmailOption{ SMTPHost: "smtp.gmail.com", SMTPPort: 587, FromAddr: "someone@gmail.com", } h := handler.NewEmailHandler(from, []string{ "another@gmail.com", }) assert.Eq(t, slog.InfoLevel, h.Level) // handle error h.SetFormatter(newTestFormatter(true)) assert.Err(t, h.Handle(newLogRecord("test email handler"))) } func TestLevelWithFormatter(t *testing.T) { lf := handler.LevelWithFormatter{Level: slog.InfoLevel} assert.True(t, lf.IsHandling(slog.ErrorLevel)) assert.True(t, lf.IsHandling(slog.InfoLevel)) assert.False(t, lf.IsHandling(slog.DebugLevel)) } func TestLevelsWithFormatter(t *testing.T) { lsf := handler.LevelsWithFormatter{Levels: slog.NormalLevels} assert.False(t, lsf.IsHandling(slog.ErrorLevel)) assert.True(t, lsf.IsHandling(slog.InfoLevel)) assert.True(t, 
lsf.IsHandling(slog.DebugLevel)) } func TestNopFlushClose_Flush(t *testing.T) { nfc := handler.NopFlushClose{} assert.NoErr(t, nfc.Flush()) assert.NoErr(t, nfc.Close()) } func TestLockWrapper_Lock(t *testing.T) { lw := &handler.LockWrapper{} assert.True(t, lw.LockEnabled()) lw.EnableLock(true) assert.True(t, lw.LockEnabled()) a := 1 lw.Lock() a++ lw.Unlock() assert.Eq(t, 2, a) } func logAllLevel(log slog.SLogger, msg string) { for _, level := range slog.AllLevels { log.Log(level, msg) } } func newLogRecord(msg string) *slog.Record { r := &slog.Record{ Channel: "handler_test", Level: slog.InfoLevel, Message: msg, Time: slog.DefaultClockFn.Now(), Data: sampleData, Extra: map[string]any{ "source": "linux", "extra_key0": "hello", "sub": slog.M{"sub_key1": "val0"}, }, } r.Init(false) return r } type testHandler struct { errOnHandle bool errOnFlush bool errOnClose bool } func newTestHandler() *testHandler { return &testHandler{} } // func (h testHandler) Reset() { // h.errOnHandle = false // h.errOnFlush = false // h.errOnClose = false // } func (h testHandler) IsHandling(_ slog.Level) bool { return true } func (h testHandler) Close() error { if h.errOnClose { return errorx.Raw("close error") } return nil } func (h testHandler) Flush() error { if h.errOnFlush { return errorx.Raw("flush error") } return nil } func (h testHandler) Handle(_ *slog.Record) error { if h.errOnHandle { return errorx.Raw("handle error") } return nil } type testFormatter struct { errOnFormat bool } func newTestFormatter(errOnFormat ...bool) *testFormatter { return &testFormatter{ errOnFormat: len(errOnFormat) > 0 && errOnFormat[0], } } func (f testFormatter) Format(r *slog.Record) ([]byte, error) { if f.errOnFormat { return nil, errorx.Raw("format error") } return []byte(r.Message), nil } ================================================ FILE: handler/rotatefile.go ================================================ package handler import ( "github.com/gookit/goutil/x/basefn" 
"github.com/gookit/slog/rotatefile" ) // NewRotateFileHandler instance. It supports splitting log files by time and size func NewRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) { cfg := NewConfig(fns...).With(WithLogfile(logfile), WithRotateTime(rt)) writer, err := cfg.RotateWriter() if err != nil { return nil, err } h := NewSyncCloseHandler(writer, cfg.Levels) return h, nil } // MustRotateFile handler instance, will panic on create error func MustRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) *SyncCloseHandler { return basefn.Must(NewRotateFileHandler(logfile, rt, fns...)) } // NewRotateFile instance. alias of NewRotateFileHandler() func NewRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) { return NewRotateFileHandler(logfile, rt, fns...) } // // --------------------------------------------------------------------------- // rotate file by size // --------------------------------------------------------------------------- // // MustSizeRotateFile instance func MustSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) *SyncCloseHandler { return basefn.Must(NewSizeRotateFileHandler(logfile, maxSize, fns...)) } // NewSizeRotateFile instance func NewSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error) { return NewSizeRotateFileHandler(logfile, maxSize, fns...) } // NewSizeRotateFileHandler instance, default close rotate by time. func NewSizeRotateFileHandler(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error) { // close rotate by time. fns = append(fns, WithMaxSize(uint64(maxSize))) return NewRotateFileHandler(logfile, 0, fns...) } // // --------------------------------------------------------------------------- // rotate log file by time // --------------------------------------------------------------------------- // // RotateTime rotate log file by time. 
// // EveryDay: // - "error.log.20201223" // // EveryHour, Every30Minutes, EveryMinute: // - "error.log.20201223_1500" // - "error.log.20201223_1530" // - "error.log.20201223_1523" // // Deprecated: please use rotatefile.RotateTime type RotateTime = rotatefile.RotateTime // Deprecated: Please use define constants on pkg rotatefile. e.g. rotatefile.EveryDay const ( EveryDay = rotatefile.EveryDay EveryHour = rotatefile.EveryDay Every30Minutes = rotatefile.Every30Min Every15Minutes = rotatefile.Every15Min EveryMinute = rotatefile.EveryMinute EverySecond = rotatefile.EverySecond // only use for tests ) // MustTimeRotateFile instance func MustTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) *SyncCloseHandler { return basefn.Must(NewTimeRotateFileHandler(logfile, rt, fns...)) } // NewTimeRotateFile instance func NewTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) { return NewTimeRotateFileHandler(logfile, rt, fns...) } // NewTimeRotateFileHandler instance, default close rotate by size func NewTimeRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) { // default close rotate by size: WithMaxSize(0) return NewRotateFileHandler(logfile, rt, append(fns, WithMaxSize(0))...) 
} ================================================ FILE: handler/rotatefile_test.go ================================================ package handler_test import ( "fmt" "os" "testing" "time" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/goutil/timex" "github.com/gookit/slog" "github.com/gookit/slog/handler" "github.com/gookit/slog/internal" "github.com/gookit/slog/rotatefile" ) func TestNewRotateFileHandler(t *testing.T) { // by size logfile := "./testdata/both-rotate-bysize.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) h, err := handler.NewRotateFile(logfile, handler.EveryMinute, handler.WithMaxSize(128)) assert.NoErr(t, err) assert.True(t, fsutil.IsFile(logfile)) l := slog.NewWithHandlers(h) l.ReportCaller = true for i := 0; i < 3; i++ { l.Info("info", "message", i) l.Warn("warn message", i) } l.MustClose() // by time logfile = "./testdata/both-rotate-bytime.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) h = handler.MustRotateFile(logfile, handler.EverySecond) assert.True(t, fsutil.IsFile(logfile)) l = slog.NewWithHandlers(h) for i := 0; i < 3; i++ { l.Info("info", "message", i) l.Warn("warn message", i) fmt.Println("second ", i+1) time.Sleep(time.Second * 1) } l.Error("error message") assert.NoErr(t, l.FlushAll()) } func TestNewSizeRotateFileHandler(t *testing.T) { t.Run("NewSizeRotateFile", func(t *testing.T) { logfile := "./testdata/size-rotate-file.log" assert.NoErr(t, fsutil.DeleteIfFileExist(logfile)) h, err := handler.NewSizeRotateFile(logfile, 468, handler.WithBuffSize(256)) assert.NoErr(t, err) assert.True(t, fsutil.IsFile(logfile)) l := slog.NewWithHandlers(h) l.ReportCaller = true l.CallerFlag = slog.CallerFlagFull for i := 0; i < 4; i++ { l.Info("this is a info", "message, index=", i) l.Warn("this is a warning message, index=", i) } assert.NoErr(t, l.Close()) checkLogFileContents(t, logfile) }) t.Run("MustSizeRotateFile", func(t *testing.T) { logfile := 
"./testdata/must-size-rotate-file.log" h := handler.MustSizeRotateFile(logfile, 128, handler.WithBuffSize(128)) h.SetFormatter(slog.NewJSONFormatter()) err := h.Handle(newLogRecord("this is a info message")) assert.NoErr(t, err) files := fsutil.Glob(internal.BuildGlobPattern(logfile)) assert.Len(t, files, 2) }) } func TestNewTimeRotateFileHandler_EveryDay(t *testing.T) { logfile := "./testdata/time-rotate_EveryDay.log" newFile := internal.AddSuffix2path(logfile, "20221116") clock := rotatefile.NewMockClock("2022-11-16 23:59:57") options := []handler.ConfigFn{ handler.WithBuffSize(128), handler.WithTimeClock(clock), } h := handler.MustTimeRotateFile(logfile, handler.EveryDay, options...) assert.True(t, fsutil.IsFile(logfile)) l := slog.NewWithHandlers(h) l.ReportCaller = true l.TimeClock = clock.Now for i := 0; i < 6; i++ { l.WithData(sampleData).Info("the th:", i, "info message") l.Warnf("the th:%d warning message text", i) fmt.Println("log number ", (i+1)*2) clock.Add(time.Second * 1) } l.MustClose() checkLogFileContents(t, logfile) checkLogFileContents(t, newFile) } func TestNewTimeRotateFileHandler_EveryHour(t *testing.T) { clock := rotatefile.NewMockClock("2022-04-28 20:59:58") logfile := "./testdata/time-rotate_EveryHour.log" newFile := internal.AddSuffix2path(logfile, timex.DateFormat(clock.Now(), "Ymd_H00")) options := []handler.ConfigFn{ handler.WithTimeClock(clock), handler.WithBuffSize(0), } h, err := handler.NewTimeRotateFile(logfile, rotatefile.EveryHour, options...) 
assert.NoErr(t, err) assert.True(t, fsutil.IsFile(logfile)) l := slog.NewWithHandlers(h) l.ReportCaller = true l.TimeClock = clock.Now for i := 0; i < 6; i++ { l.WithData(sampleData).Info("the th:", i, "info message") l.Warnf("the th:%d warning message text", i) fmt.Println("log number ", (i+1)*2) clock.Add(time.Second * 1) } l.MustClose() checkLogFileContents(t, logfile) checkLogFileContents(t, newFile) } func TestNewTimeRotateFileHandler_someSeconds(t *testing.T) { logfile := "./testdata/time-rotate-Seconds.log" assert.NoErr(t, fsutil.DeleteIfExist(logfile)) h, err := handler.NewTimeRotateFileHandler(logfile, handler.EverySecond) assert.NoErr(t, err) assert.True(t, fsutil.IsFile(logfile)) l := slog.NewWithHandlers(h) l.ReportCaller = true for i := 0; i < 3; i++ { l.Info("info", "message", i) l.Warn("warning message", i) fmt.Println("second ", i+1) time.Sleep(time.Second * 1) } l.MustClose() // assert.NoErr(t, os.Remove(fpath)) } func checkLogFileContents(t *testing.T, logfile string) { assert.True(t, fsutil.IsFile(logfile)) bts, err := os.ReadFile(logfile) assert.NoErr(t, err) str := string(bts) assert.Contains(t, str, "[INFO]") assert.Contains(t, str, "info message") assert.Contains(t, str, "[WARNING]") assert.Contains(t, str, "warning message") } ================================================ FILE: handler/syslog.go ================================================ //go:build !windows && !plan9 package handler import ( "log/syslog" "github.com/gookit/slog" ) // SysLogOpt for syslog handler type SysLogOpt struct { // Tag syslog tag Tag string // Priority syslog priority Priority syslog.Priority // Network syslog network Network string // Raddr syslog address Raddr string } // SysLogHandler struct type SysLogHandler struct { slog.LevelWithFormatter writer *syslog.Writer } // NewSysLogHandler instance func NewSysLogHandler(priority syslog.Priority, tag string) (*SysLogHandler, error) { return NewSysLog(&SysLogOpt{ Priority: priority, Tag: tag, }) } // NewSysLog 
handler instance with all custom options. func NewSysLog(opt *SysLogOpt) (*SysLogHandler, error) { slWriter, err := syslog.Dial(opt.Network, opt.Raddr, opt.Priority, opt.Tag) if err != nil { return nil, err } h := &SysLogHandler{ writer: slWriter, } // init default log level h.Level = slog.InfoLevel return h, nil } // Handle a log record func (h *SysLogHandler) Handle(record *slog.Record) error { bts, err := h.Formatter().Format(record) if err != nil { return err } s := string(bts) // write log by level switch record.Level { case slog.DebugLevel, slog.TraceLevel: return h.writer.Debug(s) case slog.NoticeLevel: return h.writer.Notice(s) case slog.WarnLevel: return h.writer.Warning(s) case slog.ErrorLevel: return h.writer.Err(s) case slog.FatalLevel: return h.writer.Crit(s) case slog.PanicLevel: return h.writer.Emerg(s) default: // as info level return h.writer.Info(s) } } // Close handler func (h *SysLogHandler) Close() error { return h.writer.Close() } // Flush handler func (h *SysLogHandler) Flush() error { return nil } ================================================ FILE: handler/syslog_test.go ================================================ //go:build !windows && !plan9 package handler_test import ( "log/syslog" "testing" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog/handler" ) func TestNewSysLogHandler(t *testing.T) { h, err := handler.NewSysLogHandler(syslog.LOG_INFO, "slog") assert.NoErr(t, err) err = h.Handle(newLogRecord("test syslog handler")) assert.NoErr(t, err) assert.NoErr(t, h.Flush()) assert.NoErr(t, h.Close()) } ================================================ FILE: handler/write_close_flusher.go ================================================ package handler import ( "github.com/gookit/slog" ) // FlushCloseHandler definition type FlushCloseHandler struct { slog.LevelFormattable Output FlushCloseWriter } // NewFlushCloserWithLF create new FlushCloseHandler, with custom slog.LevelFormattable func NewFlushCloserWithLF(out 
FlushCloseWriter, lf slog.LevelFormattable) *FlushCloseHandler { return &FlushCloseHandler{ Output: out, // init formatter and level handle LevelFormattable: lf, } } // // ------------- Use max log level ------------- // // FlushCloserWithMaxLevel create new FlushCloseHandler, with max log level func FlushCloserWithMaxLevel(out FlushCloseWriter, maxLevel slog.Level) *FlushCloseHandler { return NewFlushCloserWithLF(out, slog.NewLvFormatter(maxLevel)) } // // ------------- Use multi log levels ------------- // // NewFlushCloser create new FlushCloseHandler, alias of NewFlushCloseHandler() func NewFlushCloser(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler { return NewFlushCloseHandler(out, levels) } // FlushCloserWithLevels create new FlushCloseHandler, alias of NewFlushCloseHandler() func FlushCloserWithLevels(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler { return NewFlushCloseHandler(out, levels) } // NewFlushCloseHandler create new FlushCloseHandler // // Usage: // // buf := new(byteutil.Buffer) // h := handler.NewFlushCloseHandler(&buf, slog.AllLevels) // // f, err := os.OpenFile("my.log", ...) 
// h := handler.NewFlushCloseHandler(f, slog.AllLevels) func NewFlushCloseHandler(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler { return NewFlushCloserWithLF(out, slog.NewLvsFormatter(levels)) } // Close the handler func (h *FlushCloseHandler) Close() error { if err := h.Flush(); err != nil { return err } return h.Output.Close() } // Flush the handler func (h *FlushCloseHandler) Flush() error { return h.Output.Flush() } // Handle log record func (h *FlushCloseHandler) Handle(record *slog.Record) error { bts, err := h.Formatter().Format(record) if err != nil { return err } _, err = h.Output.Write(bts) return err } ================================================ FILE: handler/write_close_syncer.go ================================================ package handler import ( "io" "github.com/gookit/slog" ) // SyncCloseHandler definition type SyncCloseHandler struct { slog.LevelFormattable Output SyncCloseWriter } // NewSyncCloserWithLF create new SyncCloseHandler, with custom slog.LevelFormattable func NewSyncCloserWithLF(out SyncCloseWriter, lf slog.LevelFormattable) *SyncCloseHandler { return &SyncCloseHandler{ Output: out, // init formatter and level handle LevelFormattable: lf, } } // // ------------- Use max log level ------------- // // SyncCloserWithMaxLevel create new SyncCloseHandler, with max log level func SyncCloserWithMaxLevel(out SyncCloseWriter, maxLevel slog.Level) *SyncCloseHandler { return NewSyncCloserWithLF(out, slog.NewLvFormatter(maxLevel)) } // // ------------- Use multi log levels ------------- // // NewSyncCloser create new SyncCloseHandler, alias of NewSyncCloseHandler() func NewSyncCloser(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler { return NewSyncCloseHandler(out, levels) } // SyncCloserWithLevels create new SyncCloseHandler, alias of NewSyncCloseHandler() func SyncCloserWithLevels(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler { return NewSyncCloseHandler(out, levels) } // NewSyncCloseHandler 
create new SyncCloseHandler with limited log levels // // Usage: // // f, err := os.OpenFile("my.log", ...) // h := handler.NewSyncCloseHandler(f, slog.AllLevels) func NewSyncCloseHandler(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler { return NewSyncCloserWithLF(out, slog.NewLvsFormatter(levels)) } // Close the handler func (h *SyncCloseHandler) Close() error { if err := h.Flush(); err != nil { return err } return h.Output.Close() } // Flush the handler func (h *SyncCloseHandler) Flush() error { return h.Output.Sync() } // Writer of the handler func (h *SyncCloseHandler) Writer() io.Writer { return h.Output } // Handle log record func (h *SyncCloseHandler) Handle(record *slog.Record) error { bts, err := h.Formatter().Format(record) if err != nil { return err } _, err = h.Output.Write(bts) return err } ================================================ FILE: handler/write_closer.go ================================================ package handler import ( "io" "github.com/gookit/slog" ) // WriteCloserHandler definition type WriteCloserHandler struct { slog.LevelFormattable Output io.WriteCloser } // NewWriteCloserWithLF create new WriteCloserHandler and with custom slog.LevelFormattable func NewWriteCloserWithLF(out io.WriteCloser, lf slog.LevelFormattable) *WriteCloserHandler { return &WriteCloserHandler{ Output: out, // init formatter and level handle LevelFormattable: lf, } } // WriteCloserWithMaxLevel create new WriteCloserHandler and with max log level func WriteCloserWithMaxLevel(out io.WriteCloser, maxLevel slog.Level) *WriteCloserHandler { return NewWriteCloserWithLF(out, slog.NewLvFormatter(maxLevel)) } // // ------------- Use multi log levels ------------- // // WriteCloserWithLevels create a new instance and with limited log levels func WriteCloserWithLevels(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler { // h := &WriteCloserHandler{Output: out} // h.LimitLevels(levels) return NewWriteCloserHandler(out, levels) } // 
NewWriteCloser create a new instance func NewWriteCloser(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler { return NewWriteCloserHandler(out, levels) } // NewWriteCloserHandler create new WriteCloserHandler // // Usage: // // buf := new(bytes.Buffer) // h := handler.NewIOWriteCloserHandler(&buf, slog.AllLevels) // // f, err := os.OpenFile("my.log", ...) // h := handler.NewIOWriteCloserHandler(f, slog.AllLevels) func NewWriteCloserHandler(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler { return NewWriteCloserWithLF(out, slog.NewLvsFormatter(levels)) } // Close the handler func (h *WriteCloserHandler) Close() error { return h.Output.Close() } // Flush the handler func (h *WriteCloserHandler) Flush() error { return nil } // Handle log record func (h *WriteCloserHandler) Handle(record *slog.Record) error { bts, err := h.Formatter().Format(record) if err != nil { return err } _, err = h.Output.Write(bts) return err } ================================================ FILE: handler/writer.go ================================================ package handler import ( "io" "github.com/gookit/slog" ) // IOWriterHandler definition type IOWriterHandler struct { NopFlushClose slog.LevelFormattable Output io.Writer } // TextFormatter get the formatter func (h *IOWriterHandler) TextFormatter() *slog.TextFormatter { return h.Formatter().(*slog.TextFormatter) } // Handle log record func (h *IOWriterHandler) Handle(record *slog.Record) error { bts, err := h.Formatter().Format(record) if err != nil { return err } _, err = h.Output.Write(bts) return err } // NewIOWriterWithLF create new IOWriterHandler, with custom slog.LevelFormattable func NewIOWriterWithLF(out io.Writer, lf slog.LevelFormattable) *IOWriterHandler { return &IOWriterHandler{ Output: out, // init formatter and level handle LevelFormattable: lf, } } // // ------------- Use max log level ------------- // // IOWriterWithMaxLevel create new IOWriterHandler, with max log level // // Usage: // // buf 
:= new(bytes.Buffer) // h := handler.IOWriterWithMaxLevel(buf, slog.InfoLevel) // slog.AddHandler(h) // slog.Info("info message") func IOWriterWithMaxLevel(out io.Writer, maxLevel slog.Level) *IOWriterHandler { return NewIOWriterWithLF(out, slog.NewLvFormatter(maxLevel)) } // // ------------- Use multi log levels ------------- // // NewIOWriter create a new instance and with limited log levels func NewIOWriter(out io.Writer, levels []slog.Level) *IOWriterHandler { return NewIOWriterHandler(out, levels) } // IOWriterWithLevels create a new instance and with limited log levels func IOWriterWithLevels(out io.Writer, levels []slog.Level) *IOWriterHandler { return NewIOWriterHandler(out, levels) } // NewIOWriterHandler create new IOWriterHandler // // Usage: // // buf := new(bytes.Buffer) // h := handler.NewIOWriterHandler(&buf, slog.AllLevels) // // f, err := os.OpenFile("my.log", ...) // h := handler.NewIOWriterHandler(f, slog.AllLevels) func NewIOWriterHandler(out io.Writer, levels []slog.Level) *IOWriterHandler { return NewIOWriterWithLF(out, slog.NewLvsFormatter(levels)) } // SimpleHandler definition. alias of IOWriterHandler type SimpleHandler = IOWriterHandler // NewHandler create a new instance func NewHandler(out io.Writer, maxLevel slog.Level) *SimpleHandler { return NewSimpleHandler(out, maxLevel) } // NewSimple create a new instance func NewSimple(out io.Writer, maxLevel slog.Level) *SimpleHandler { return NewSimpleHandler(out, maxLevel) } // SimpleWithLevels create new simple handler, with log levels func SimpleWithLevels(out io.Writer, levels []slog.Level) *IOWriterHandler { return NewIOWriterHandler(out, levels) } // NewSimpleHandler create new SimpleHandler // // Usage: // // buf := new(bytes.Buffer) // h := handler.NewSimpleHandler(&buf, slog.InfoLevel) // // f, err := os.OpenFile("my.log", ...) 
// h := handler.NewSimpleHandler(f, slog.InfoLevel)
func NewSimpleHandler(out io.Writer, maxLevel slog.Level) *IOWriterHandler {
	return IOWriterWithMaxLevel(out, maxLevel)
}

================================================
FILE: handler/writer_test.go
================================================
package handler_test

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/gookit/goutil/fsutil"
	"github.com/gookit/goutil/testutil/assert"
	"github.com/gookit/goutil/x/fakeobj"
	"github.com/gookit/slog"
	"github.com/gookit/slog/handler"
)

// covers NewIOWriter: one record handled on normal levels lands in the buffer.
func TestNewIOWriter(t *testing.T) {
	w := new(bytes.Buffer)
	h := handler.NewIOWriter(w, slog.NormalLevels)
	assert.True(t, h.IsHandling(slog.NoticeLevel))

	r := newLogRecord("test io.writer handler")
	assert.NoErr(t, h.Handle(r))
	assert.NoErr(t, h.Flush())

	str := w.String()
	assert.Contains(t, str, "test io.writer handler")
	assert.NoErr(t, h.Close())
}

// covers NewSyncCloser: danger-level filtering, file write, and sync errors.
func TestNewSyncCloser(t *testing.T) {
	logfile := "./testdata/sync_closer.log"
	f, err := handler.QuickOpenFile(logfile)
	assert.NoErr(t, err)

	h := handler.NewSyncCloser(f, slog.DangerLevels)
	assert.True(t, h.IsHandling(slog.WarnLevel))
	assert.False(t, h.IsHandling(slog.InfoLevel))

	r := newLogRecord("test sync closer handler")
	r.Level = slog.ErrorLevel

	err = h.Handle(r)
	assert.NoErr(t, err)
	assert.NoErr(t, h.Flush())

	str := fsutil.ReadString(logfile)
	assert.Contains(t, str, "test sync closer handler")
	assert.NoErr(t, h.Close())

	// a writer whose Sync() fails must surface the error on Flush and Close
	t.Run("err on sync", func(t *testing.T) {
		w := &syncCloseWriter{}
		w.errOnSync = true

		h = handler.SyncCloserWithLevels(w, slog.NormalLevels)
		assert.Err(t, h.Flush())
		assert.Err(t, h.Close())
	})

	// test handle error
	h.SetFormatter(newTestFormatter(true))
	assert.Err(t, h.Handle(r))
}

// covers NewWriteCloser: write path, max-level variant, and formatter errors.
func TestNewWriteCloser(t *testing.T) {
	w := fakeobj.NewWriter()
	h := handler.NewWriteCloser(w, slog.NormalLevels)
	assert.True(t, h.IsHandling(slog.NoticeLevel))

	r := newLogRecord("test writeCloser handler")
	assert.NoErr(t, h.Handle(r))
	assert.NoErr(t, h.Flush())

	str := w.String()
	assert.Contains(t, str, "test writeCloser handler")
	assert.NoErr(t, h.Close())

	t.Run("use max level", func(t *testing.T) {
		h = handler.WriteCloserWithMaxLevel(w, slog.WarnLevel)
		r = newLogRecord("test max level")
		assert.False(t, h.IsHandling(r.Level))
		r.Level = slog.ErrorLevel
		assert.True(t, h.IsHandling(r.Level))
	})

	// test handle error
	t.Run("handle error", func(t *testing.T) {
		h = handler.WriteCloserWithLevels(w, slog.NormalLevels)
		h.SetFormatter(newTestFormatter(true))
		assert.Err(t, h.Handle(r))
	})
}

// covers NewFlushCloser: flush propagation, flush errors, level filtering.
func TestNewFlushCloser(t *testing.T) {
	w := fakeobj.NewWriter()
	h := handler.NewFlushCloser(w, slog.AllLevels)
	w.WriteString("before flush\n")

	r := newLogRecord("TestNewFlushCloser")
	assert.NoErr(t, h.Handle(r))

	str := w.ResetGet()
	assert.Contains(t, str, "TestNewFlushCloser")
	assert.NoErr(t, h.Flush())
	assert.NoErr(t, h.Close())

	t.Run("ErrOnFlush", func(t *testing.T) {
		w.ErrOnFlush = true
		assert.Err(t, h.Flush())
		assert.Err(t, h.Close())
	})

	t.Run("With max level", func(t *testing.T) {
		h = handler.FlushCloserWithMaxLevel(w, slog.WarnLevel)
		r = newLogRecord("test max level")
		assert.False(t, h.IsHandling(r.Level))
		assert.Empty(t, w.String())
		r.Level = slog.ErrorLevel
		assert.True(t, h.IsHandling(r.Level))
		assert.NoErr(t, h.Handle(r))
		assert.NotEmpty(t, w.String())
	})

	// test handle error
	h = handler.FlushCloserWithMaxLevel(w, slog.WarnLevel)
	h.SetFormatter(newTestFormatter(true))
	assert.Err(t, h.Handle(r))
}

// covers the NewSimple/NewHandler/SimpleWithLevels constructor aliases.
func TestNewSimpleHandler(t *testing.T) {
	buf := fakeobj.NewWriter()

	h := handler.NewSimple(buf, slog.InfoLevel)
	r := newLogRecord("test simple handler")
	assert.NoErr(t, h.Handle(r))

	s := buf.String()
	buf.Reset()
	fmt.Print(s)
	assert.Contains(t, s, "test simple handler")
	assert.NoErr(t, h.Flush())
	assert.NoErr(t, h.Close())

	h = handler.NewHandler(buf, slog.InfoLevel)
	r = newLogRecord("test simple handler2")
	assert.NoErr(t, h.Handle(r))

	s = buf.ResetGet()
	fmt.Print(s)
	assert.Contains(t, s, "test simple handler2")
	assert.NoErr(t, h.Flush())
	assert.NoErr(t, h.Close())

	h = handler.SimpleWithLevels(buf, slog.NormalLevels)
	r = newLogRecord("test simple handler with levels")
	assert.NoErr(t, h.Handle(r))

	s = buf.ResetGet()
	fmt.Print(s)
	assert.Contains(t, s, "test simple handler with levels")

	// handle error
	h.SetFormatter(newTestFormatter(true))
	assert.Err(t, h.Handle(r))
}

================================================
FILE: handler.go
================================================
package slog

import (
	"fmt"
	"io"
	"strconv"

	"github.com/gookit/goutil/strutil"
)

//
// Handler interface
//

// Handler interface definition
type Handler interface {
	// Closer Close handler.
	// You should first call Flush() on close logic.
	// Refer the FileHandler.Close() handle
	io.Closer
	// Flush and sync logs to disk file.
	Flush() error
	// IsHandling Checks whether the given record will be handled by this handler.
	IsHandling(level Level) bool
	// Handle a log record.
	//
	// All records may be passed to this method, and the handler should discard
	// those that it does not want to handle.
	Handle(*Record) error
}

// LevelFormattable support limit log levels and provide formatter
type LevelFormattable interface {
	Formattable
	IsHandling(level Level) bool
}

// FormattableHandler interface
type FormattableHandler interface {
	Handler
	Formattable
}

/********************************************************************************
 * Common parts for handler
 ********************************************************************************/

// LevelWithFormatter struct definition
//
// - support set log formatter
// - only support set max log level
type LevelWithFormatter struct {
	FormattableTrait
	// Level max for logging messages.
if current level <= Level will log messages Level Level } // NewLvFormatter create new LevelWithFormatter instance func NewLvFormatter(maxLv Level) *LevelWithFormatter { return &LevelWithFormatter{Level: maxLv} } // SetMaxLevel set max level for logging messages func (h *LevelWithFormatter) SetMaxLevel(maxLv Level) { h.Level = maxLv } // IsHandling Check if the current level can be handling func (h *LevelWithFormatter) IsHandling(level Level) bool { return h.Level.ShouldHandling(level) } // LevelsWithFormatter struct definition // // - support set log formatter // - support setting multi log levels type LevelsWithFormatter struct { FormattableTrait // Levels for logging messages Levels []Level } // NewLvsFormatter create new instance func NewLvsFormatter(levels []Level) *LevelsWithFormatter { return &LevelsWithFormatter{Levels: levels} } // SetLimitLevels set limit levels for log message func (h *LevelsWithFormatter) SetLimitLevels(levels []Level) { h.Levels = levels } // IsHandling Check if the current level can be handling func (h *LevelsWithFormatter) IsHandling(level Level) bool { for _, l := range h.Levels { if l == level { return true } } return false } // LevelMode define level mode for logging type LevelMode uint8 // MarshalJSON implement the JSON Marshal interface [encoding/json.Marshaler] func (m LevelMode) MarshalJSON() ([]byte, error) { return []byte(`"` + m.String() + `"`), nil } // UnmarshalJSON implement the JSON Unmarshal interface [encoding/json.Unmarshaler] func (m *LevelMode) UnmarshalJSON(data []byte) error { s, err := strconv.Unquote(string(data)) if err != nil { return err } *m, err = StringToLevelMode(s) return err } // String return string value func (m LevelMode) String() string { switch m { case LevelModeList: return "list" case LevelModeMax: return "max" default: return "unknown" } } const ( // LevelModeList use level list for limit record write LevelModeList LevelMode = iota // LevelModeMax use max level limit log record write 
LevelModeMax ) // SafeToLevelMode parse string value to LevelMode, fail return LevelModeList func SafeToLevelMode(s string) LevelMode { lm, err := StringToLevelMode(s) if err != nil { return LevelModeList } return lm } // StringToLevelMode parse string value to LevelMode func StringToLevelMode(s string) (LevelMode, error) { switch s { case "", "list", "list_level", "level_list": return LevelModeList, nil case "max", "max_level", "level_max": return LevelModeMax, nil default: // is int value, try to parse as int if strutil.IsInt(s) { iVal := strutil.SafeInt(s) if iVal >= 0 && iVal <= int(LevelModeMax) { return LevelMode(iVal), nil } } return 0, fmt.Errorf("slog: invalid level mode: %s", s) } } // LevelHandling struct definition type LevelHandling struct { // level check mode. default is LevelModeList lvMode LevelMode // max level for a log message. if the current level <= Level will log a message maxLevel Level // levels limit for log message levels []Level } // SetMaxLevel set max level for a log message func (h *LevelHandling) SetMaxLevel(maxLv Level) { h.lvMode = LevelModeMax h.maxLevel = maxLv } // SetLimitLevels set limit levels for log message func (h *LevelHandling) SetLimitLevels(levels []Level) { h.lvMode = LevelModeList h.levels = levels } // IsHandling Check if the current level can be handling func (h *LevelHandling) IsHandling(level Level) bool { if h.lvMode == LevelModeMax { return h.maxLevel.ShouldHandling(level) } for _, l := range h.levels { if l == level { return true } } return false } // LevelFormatting wrap level handling and log formatter type LevelFormatting struct { LevelHandling FormatterWrapper } // NewMaxLevelFormatting create new instance with max level func NewMaxLevelFormatting(maxLevel Level) *LevelFormatting { lf := &LevelFormatting{} lf.SetMaxLevel(maxLevel) return lf } // NewLevelsFormatting create new instance with levels func NewLevelsFormatting(levels []Level) *LevelFormatting { lf := &LevelFormatting{} lf.SetLimitLevels(levels) 
return lf } ================================================ FILE: handler_test.go ================================================ package slog_test import ( "testing" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog" ) func TestSafeToLevelMode(t *testing.T) { assert.Eq(t, slog.LevelModeList, slog.SafeToLevelMode("list")) assert.Eq(t, slog.LevelModeList, slog.SafeToLevelMode("0")) assert.Eq(t, slog.LevelModeMax, slog.SafeToLevelMode("1")) assert.Eq(t, slog.LevelModeList, slog.SafeToLevelMode("unknown")) mode := slog.SafeToLevelMode("max") assert.Eq(t, slog.LevelModeMax, mode) // MarshalJSON bs, err := mode.MarshalJSON() assert.Nil(t, err) assert.Eq(t, `"max"`, string(bs)) // UnmarshalJSON mode = slog.LevelMode(0) err = mode.UnmarshalJSON([]byte(`"max"`)) assert.Nil(t, err) assert.Eq(t, slog.LevelModeMax, mode) assert.Err(t, mode.UnmarshalJSON([]byte("ab"))) } func TestNewLvFormatter(t *testing.T) { lf := slog.NewLvFormatter(slog.InfoLevel) assert.True(t, lf.IsHandling(slog.ErrorLevel)) assert.True(t, lf.IsHandling(slog.InfoLevel)) assert.False(t, lf.IsHandling(slog.DebugLevel)) lf.SetMaxLevel(slog.DebugLevel) assert.True(t, lf.IsHandling(slog.DebugLevel)) } func TestNewLvsFormatter(t *testing.T) { lf := slog.NewLvsFormatter([]slog.Level{slog.InfoLevel, slog.ErrorLevel}) assert.True(t, lf.IsHandling(slog.InfoLevel)) assert.False(t, lf.IsHandling(slog.DebugLevel)) lf.SetLimitLevels([]slog.Level{slog.InfoLevel, slog.ErrorLevel, slog.DebugLevel}) assert.True(t, lf.IsHandling(slog.DebugLevel)) } func TestLevelFormatting(t *testing.T) { lf := slog.NewMaxLevelFormatting(slog.InfoLevel) assert.True(t, lf.IsHandling(slog.InfoLevel)) assert.False(t, lf.IsHandling(slog.TraceLevel)) // use levels lf = slog.NewLevelsFormatting([]slog.Level{slog.InfoLevel, slog.ErrorLevel}) assert.True(t, lf.IsHandling(slog.InfoLevel)) assert.True(t, lf.IsHandling(slog.ErrorLevel)) assert.False(t, lf.IsHandling(slog.TraceLevel)) // test level mode assert.Eq(t, "list", 
slog.LevelModeList.String()) assert.Eq(t, "max", slog.LevelModeMax.String()) assert.Eq(t, "unknown", slog.LevelMode(9).String()) } ================================================ FILE: internal/util.go ================================================ package internal import "path/filepath" // AddSuffix2path add suffix to file path. // // eg: "/path/to/error.log" => "/path/to/error.{suffix}.log" func AddSuffix2path(filePath, suffix string) string { ext := filepath.Ext(filePath) return filePath[:len(filePath)-len(ext)] + "." + suffix + ext } // BuildGlobPattern builds a glob pattern for the given logfile. NOTE: use for testing only. func BuildGlobPattern(logfile string) string { return logfile[:len(logfile)-4] + "*" } ================================================ FILE: issues_test.go ================================================ package slog_test import ( "context" "fmt" "sync" "testing" "time" "github.com/gookit/goutil/byteutil" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/goutil/timex" "github.com/gookit/slog" "github.com/gookit/slog/handler" "github.com/gookit/slog/rotatefile" ) // https://github.com/gookit/slog/issues/27 func TestIssues_27(t *testing.T) { defer slog.Reset() count := 0 for { if count >= 6 { break } slog.Infof("info log %d", count) time.Sleep(time.Second) count++ } } // https://github.com/gookit/slog/issues/31 func TestIssues_31(t *testing.T) { defer slog.Reset() defer slog.MustClose() // slog.DangerLevels equals slog.Levels{slog.PanicLevel, slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel} h1 := handler.MustFileHandler("testdata/error_issue31.log", handler.WithLogLevels(slog.DangerLevels)) infoLevels := slog.Levels{slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel} h2 := handler.MustFileHandler("testdata/info_issue31.log", handler.WithLogLevels(infoLevels)) slog.PushHandler(h1) slog.PushHandlers(h2) // add logs slog.Info("info message text") slog.Error("error message text") 
} // https://github.com/gookit/slog/issues/52 func TestIssues_52(t *testing.T) { testTemplate := "[{{datetime}}] [{{level}}] {{message}} {{data}} {{extra}}" slog.SetLogLevel(slog.ErrorLevel) slog.GetFormatter().(*slog.TextFormatter).SetTemplate(testTemplate) slog.Error("Error message") slog.Reset() fmt.Println() // dump.P(slog.GetFormatter()) } // https://github.com/gookit/slog/issues/75 func TestIssues_75(t *testing.T) { slog.Error("Error message 1") // set max level slog.SetLogLevel(slog.Level(0)) // slog.SetLogLevel(slog.PanicLevel) slog.Error("Error message 2") slog.Reset() // dump.P(slog.GetFormatter()) } // https://github.com/gookit/slog/issues/105 func TestIssues_105(t *testing.T) { t.Run("simple write", func(t *testing.T) { for i := 0; i < 10; i++ { slog.Error("simple error log", i) time.Sleep(time.Millisecond * 100) } }) // test concurrent write t.Run("concurrent write", func(t *testing.T) { wg := sync.WaitGroup{} for i := 0; i < 100; i++ { wg.Add(1) go func(i int) { slog.Error("concurrent error log", i) time.Sleep(time.Millisecond * 100) wg.Done() }(i) } wg.Wait() }) } // https://github.com/gookit/slog/issues/108 func TestIssues_108(t *testing.T) { buf1 := byteutil.NewBuffer() root := slog.NewWithName("root", func(l *slog.Logger) { l.ChannelName = l.Name() l.AddHandler(handler.NewSimple(buf1, slog.InfoLevel)) }) root.Info("root info message") root.Warn("root warn message") str := buf1.ResetGet() fmt.Println(str) assert.StrContains(t, str, "[root] [INFO") assert.StrContains(t, str, "[root] [WARN") buf2 := byteutil.NewBuffer() probe := slog.NewWithName("probe", func(l *slog.Logger) { l.ChannelName = l.Name() l.AddHandler(handler.NewSimple(buf2, slog.InfoLevel)) }) probe.Info("probe info message") probe.Warn("probe warn message") str = buf2.ResetGet() fmt.Println(str) assert.StrContains(t, str, "[probe] [INFO") assert.StrContains(t, str, "[probe] [WARN") } // https://github.com/gookit/slog/issues/121 // 当我配置按日期的方式来滚动日志时,当大于 1 天时只能按 1 天来滚动日志。 func 
TestIssues_121(t *testing.T) { seconds := timex.OneDaySec * 7 // 7天 logFile := "testdata/issue121_7day.log" clock := rotatefile.NewMockClock("2024-03-25 08:04:02") fh, err := handler.NewTimeRotateFileHandler( logFile, rotatefile.RotateTime(seconds), handler.WithLogLevels(slog.NormalLevels), handler.WithBuffSize(128), handler.WithBackupNum(20), handler.WithTimeClock(clock), handler.WithDebugMode, // debug mode // handler.WithCompress(log.compress), // handler.WithFilePerm(log.filePerm), ) assert.NoError(t, err) // create logger with handler and clock. l := slog.NewWithHandlers(fh).Config(func(sl *slog.Logger) { sl.TimeClock = clock.Now }) // add logs for i := 0; i < 50; i++ { l.Infof("hi, this is a exmple information ... message text. log index=%d", i) clock.Add(24 * timex.Hour) } l.MustClose() } // https://github.com/gookit/slog/issues/137 // 按日期滚动 如果当天时间节点的日志文件已存在 不会append 会直接替换 #137 func TestIssues_137(t *testing.T) { logFile := "testdata/issue137_case1.log" fsutil.MustSave(logFile, "hello, this is a log file content\n") l := slog.NewWithHandlers(handler.MustFileHandler(logFile)) // add logs for i := 0; i < 5; i++ { l.Infof("hi, this is a example information ... message text. 
log index=%d", i) } l.MustClose() // read file content content := fsutil.ReadString(logFile) assert.StrContains(t, content, "this is a log file content") assert.StrContains(t, content, "log index=4") } // https://github.com/gookit/slog/issues/139 // 自定义模板报 invalid memory address or nil pointer dereference #139 func TestIssues_139(t *testing.T) { myTemplate := "[{{datetime}}] [{{requestid}}] [{{level}}] {{message}}\n" textFormatter := &slog.TextFormatter{TimeFormat: "2006-01-02 15:04:05.000"} textFormatter.SetTemplate(myTemplate) // use func create // textFormatter := slog.NewTextFormatter(myTemplate).Configure(func(f *slog.TextFormatter) { // f.TimeFormat = "2006-01-02 15:04:05.000" // }) h1 := handler.NewConsoleHandler(slog.AllLevels) h1.SetFormatter(textFormatter) L := slog.New() L.AddHandlers(h1) // add processor <==== // L.AddProcessor(slog.ProcessorFunc(func(r *slog.Record) { // r.Fields["requestid"] = r.Ctx.Value("requestid") // })) L.AddProcessor(slog.AppendCtxKeys("requestid")) ctx := context.WithValue(context.Background(), "requestid", "111111") L.WithCtx(ctx).Info("test") } // https://github.com/gookit/slog/issues/144 // slog: failed to handle log, error: write ./logs/info.log: file already closed #144 func TestIssues_144(t *testing.T) { defer slog.MustClose() slog.Reset() // DangerLevels 包含: slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel h1 := handler.MustRotateFile("./testdata/logs/error_is144.log", rotatefile.EveryDay, handler.WithLogLevels(slog.DangerLevels), handler.WithCompress(true), ) // NormalLevels 包含: slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel h2 := handler.MustFileHandler("./testdata/logs/info_is144.log", handler.WithLogLevels(slog.NormalLevels)) // 注册 handler 到 logger(调度器) slog.PushHandlers(h1, h2) // add logs slog.Info("info message text") slog.Error("error message text") } // https://github.com/gookit/slog/issues/161 自定义level、caller的宽度 func TestIssues_161(t *testing.T) { // 这样是全局影响的 - 不推荐 // 
slog.LevelNames[slog.WarnLevel] = "WARNI" // slog.LevelNames[slog.InfoLevel] = "INFO " // slog.LevelNames[slog.NoticeLevel] = "NOTIC" l := slog.New() l.DoNothingOnPanicFatal() h := handler.ConsoleWithMaxLevel(slog.TraceLevel) // 通过 SetFormatter 设置格式化 LevelNameLen=5 h.SetFormatter(slog.TextFormatterWith(slog.LimitLevelNameLen(5))) l.AddHandler(h) for _, level := range slog.AllLevels { l.Logf(level, "a %s test message", level.String()) } assert.NoErr(t, l.LastErr()) } // https://github.com/gookit/slog/issues/163 func TestIssues_163(t *testing.T) { h, e := handler.NewRotateFile("testdata/app_iss163.log", rotatefile.EveryDay) assert.NoError(t, e) l := slog.NewWithHandlers(h) defer l.MustClose() l.Debugf("error %+v", e) l.Infof("2222") // TODO assert.FileExists("testdata/app_iss163.log") } ================================================ FILE: logger.go ================================================ package slog import ( "context" "sync" "time" "github.com/gookit/goutil" ) // Logger log dispatcher definition. // // The logger implements the `github.com/gookit/gsr.Logger` type Logger struct { name string // lock for writing logs mu sync.Mutex // logger latest error err error // mark logger is closed closed bool // log handlers for logger handlers []Handler processors []Processor // reusable empty record recordPool sync.Pool // handlers on exit. exitHandlers []func() quitDaemon chan struct{} // // logger options // // ChannelName log channel name, default is DefaultChannelName ChannelName string // FlushInterval flush interval time. default is defaultFlushInterval=30s FlushInterval time.Duration // LowerLevelName use lower level name LowerLevelName bool // ReportCaller on writing log record ReportCaller bool CallerSkip int // CallerFlag used to set caller traceback information in different modes CallerFlag CallerFlagMode // BackupArgs backup log input args to Record.Args BackupArgs bool // GlobalFields global fields. 
// will be added to all log records
	//
	// NOTE: add field need config Formatter template fields.
	GlobalFields map[string]any
	// TimeClock custom time clock, timezone
	TimeClock ClockFn

	// custom exit, panic handler.
	ExitFunc  func(code int)
	PanicFunc func(v any)
}

// New create a new logger
func New(fns ...LoggerFn) *Logger {
	return NewWithName("logger", fns...)
}

// NewWithHandlers create a new logger with handlers
func NewWithHandlers(hs ...Handler) *Logger {
	logger := NewWithName("logger")
	logger.AddHandlers(hs...)
	return logger
}

// NewWithConfig create a new logger with config func
func NewWithConfig(fns ...LoggerFn) *Logger {
	return NewWithName("logger", fns...)
}

// NewWithName create a new logger with name
func NewWithName(name string, fns ...LoggerFn) *Logger {
	logger := &Logger{
		name: name,
		// exit handle
		// ExitFunc:     os.Exit,
		PanicFunc:    DefaultPanicFn,
		exitHandlers: []func(){},
		// options
		ChannelName:  DefaultChannelName,
		ReportCaller: true,
		CallerSkip:   6,
		TimeClock:    DefaultClockFn,
		// flush interval time
		FlushInterval: defaultFlushInterval,
	}

	// pool factory: each pooled record is bound to this logger
	logger.recordPool.New = func() any {
		return newRecord(logger)
	}
	return logger.Config(fns...)
}

// NewRecord get new logger record
//
// Fetches a record from the pool, clears the reuse/freed flags and
// seeds Fields from the logger's GlobalFields map.
// NOTE(review): Fields is assigned the GlobalFields map itself, not a
// copy — confirm records never mutate it, or global fields would leak
// between records.
func (l *Logger) newRecord() *Record {
	r := l.recordPool.Get().(*Record)
	r.reuse = false
	r.freed = false
	r.Fields = l.GlobalFields
	return r
}

// releaseRecord resets a record and returns it to the pool,
// unless it is marked reuse (kept by the caller) or already freed.
func (l *Logger) releaseRecord(r *Record) {
	// must reset for each record
	r.Time = emptyTime
	r.Message = ""
	r.Caller = nil
	r.Fmt = ""
	r.Args = nil

	// reuse=true: will not be released
	if r.reuse || r.freed {
		return
	}

	// reset ctx data
	r.Ctx = nil
	r.Extra = nil
	r.Data = map[string]any{}
	r.Fields = map[string]any{}

	// reset flags
	r.inited = false
	r.reuse = false
	r.freed = true
	r.CallerSkip = l.CallerSkip
	l.recordPool.Put(r)
}

//
// ---------------------------------------------------------------------------
// region Configure logger
// ---------------------------------------------------------------------------
//

// Config current logger
func (l *Logger) Config(fns ...LoggerFn) *Logger {
	for _, fn := range fns {
		fn(l)
	}
	return l
}

// Configure current logger. alias of Config()
func (l *Logger) Configure(fn LoggerFn) *Logger { return l.Config(fn) }

// RegisterExitHandler register an exit-handler on global exitHandlers
func (l *Logger) RegisterExitHandler(handler func()) {
	l.exitHandlers = append(l.exitHandlers, handler)
}

// PrependExitHandler prepend register an exit-handler on global exitHandlers
func (l *Logger) PrependExitHandler(handler func()) {
	l.exitHandlers = append([]func(){handler}, l.exitHandlers...)
}

// ResetExitHandlers reset logger exitHandlers
func (l *Logger) ResetExitHandlers() {
	l.exitHandlers = make([]func(), 0)
}

// ExitHandlers get all exitHandlers of the logger
func (l *Logger) ExitHandlers() []func() {
	return l.exitHandlers
}

// SetName for logger
func (l *Logger) SetName(name string) { l.name = name }

// Name of the logger
func (l *Logger) Name() string { return l.name }

//
// ---------------------------------------------------------------------------
// region Management logger
// ---------------------------------------------------------------------------
//

const defaultFlushInterval = 30 * time.Second

// FlushDaemon run flush handle on daemon
//
// Flushes all handlers every FlushInterval until StopDaemon() closes
// quitDaemon; the optional onStops callbacks run once on shutdown.
//
// Usage, please refer to the FlushDaemon() on package.
func (l *Logger) FlushDaemon(onStops ...func()) {
	l.quitDaemon = make(chan struct{})
	if l.FlushInterval <= 0 {
		l.FlushInterval = defaultFlushInterval
	}

	// create a ticker
	tk := time.NewTicker(l.FlushInterval)
	defer tk.Stop()

	for {
		select {
		case <-tk.C:
			if err := l.lockAndFlushAll(); err != nil {
				printStderr("slog.FlushDaemon: daemon flush logs error: ", err)
			}
		case <-l.quitDaemon:
			for _, fn := range onStops {
				fn()
			}
			return
		}
	}
}

// StopDaemon stop flush daemon
//
// Panics if FlushDaemon() was never started (quitDaemon is nil).
func (l *Logger) StopDaemon() {
	if l.quitDaemon == nil {
		panic("cannot quit daemon, please call FlushDaemon() first")
	}
	close(l.quitDaemon)
}

// FlushTimeout flush logs on limit time.
//
// Runs the flush in a goroutine and gives up waiting (but does not
// cancel the flush) once timeout elapses.
//
// refer from glog package
func (l *Logger) FlushTimeout(timeout time.Duration) {
	done := make(chan bool, 1)
	go func() {
		if err := l.lockAndFlushAll(); err != nil {
			printStderr("slog.FlushTimeout: flush logs error: ", err)
		}
		done <- true
	}()

	select {
	case <-done:
	case <-time.After(timeout):
		printStderr("slog.FlushTimeout: flush took longer than timeout:", timeout)
	}
}

// Sync flushes buffered logs (if any). alias of the Flush()
//
// NOTE(review): this calls the package-level Flush(), which flushes the
// default std logger — not this logger instance (compare FlushDaemon,
// which uses l.lockAndFlushAll()). Looks unintentional; confirm.
func (l *Logger) Sync() error { return Flush() }

// Flush flushes all the logs and attempts to "sync" their data to disk.
// l.mu is held.
func (l *Logger) Flush() error { return l.lockAndFlushAll() } // MustFlush flush logs. will panic on error func (l *Logger) MustFlush() { goutil.PanicErr(l.lockAndFlushAll()) } // FlushAll flushes all the logs and attempts to "sync" their data to disk. // // alias of the Flush() func (l *Logger) FlushAll() error { return l.lockAndFlushAll() } // lockAndFlushAll is like flushAll but locks l.mu first. func (l *Logger) lockAndFlushAll() error { l.mu.Lock() l.flushAll() l.mu.Unlock() return l.err } // flush all without lock func (l *Logger) flushAll() { // flush from fatal down, in case there's trouble flushing. _ = l.VisitAll(func(handler Handler) error { if err := handler.Flush(); err != nil { l.err = err printStderr("slog: call handler.Flush() error:", err) } return nil }) } // MustClose close logger. will panic on error func (l *Logger) MustClose() { goutil.PanicErr(l.Close()) } // Close the logger, will flush all logs and close all handlers // // IMPORTANT: // // if enable async/buffer mode, please call the Close() before exit. func (l *Logger) Close() error { if l.closed { return nil } _ = l.VisitAll(func(handler Handler) error { if err := handler.Close(); err != nil { l.err = err printStderr("slog: call handler.Close() error:", err) } return nil }) l.closed = true return l.err } // VisitAll logger handlers func (l *Logger) VisitAll(fn func(handler Handler) error) error { for _, handler := range l.handlers { // TIP: you can return nil for ignore error if err := fn(handler); err != nil { return err } } return nil } // Reset the logger. 
will reset: handlers, processors, closed=false func (l *Logger) Reset() { l.closed = false l.ResetHandlers() l.ResetProcessors() } // ResetProcessors for the logger func (l *Logger) ResetProcessors() { l.processors = make([]Processor, 0) } // ResetHandlers for the logger func (l *Logger) ResetHandlers() { l.handlers = make([]Handler, 0) } // Exit logger handle func (l *Logger) Exit(code int) { l.runExitHandlers() // global exit handlers runExitHandlers() if l.ExitFunc != nil { l.ExitFunc(code) } } func (l *Logger) runExitHandlers() { defer func() { if err := recover(); err != nil { printStderr("slog: run exit handler recovered, error:", err) } }() for _, handler := range l.exitHandlers { handler() } } // DoNothingOnPanicFatal do nothing on panic or fatal level. TIP: useful on testing. func (l *Logger) DoNothingOnPanicFatal() { l.PanicFunc = DoNothingOnPanic l.ExitFunc = DoNothingOnExit } // HandlersNum returns the number of handlers func (l *Logger) HandlersNum() int { return len(l.handlers) } // LastErr get, will clear it after read. func (l *Logger) LastErr() error { err := l.err l.err = nil return err } // // --------------------------------------------------------------------------- // region Register handlers, processors // --------------------------------------------------------------------------- // // AddHandler to the logger func (l *Logger) AddHandler(h Handler) { l.PushHandlers(h) } // AddHandlers to the logger func (l *Logger) AddHandlers(hs ...Handler) { l.PushHandlers(hs...) } // PushHandler to the l. alias of AddHandler() func (l *Logger) PushHandler(h Handler) { l.PushHandlers(h) } // PushHandlers to the logger func (l *Logger) PushHandlers(hs ...Handler) { if len(hs) > 0 { l.handlers = append(l.handlers, hs...) 
} } // SetHandlers for the logger func (l *Logger) SetHandlers(hs []Handler) { l.handlers = hs } // AddProcessor to the logger func (l *Logger) AddProcessor(p Processor) { l.processors = append(l.processors, p) } // PushProcessor to the logger, alias of AddProcessor() func (l *Logger) PushProcessor(p Processor) { l.processors = append(l.processors, p) } // AddProcessors to the logger. alias of AddProcessor() func (l *Logger) AddProcessors(ps ...Processor) { l.processors = append(l.processors, ps...) } // SetProcessors for the logger func (l *Logger) SetProcessors(ps []Processor) { l.processors = ps } // -------------------------- New sub-logger ----------------------------- // NewSub return a new sub logger on the logger, can keep fields/data/ctx for sub logger. // // Usage: // // sl := logger.NewSub().KeepCtx(custom ctx). // KeepFields(slog.M{"ip": ...}). // KeepData(slog.M{"username": ...}) // defer sl.Release() // // sl.Info("some message") // sl.Warn("some message") func (l *Logger) NewSub() *SubLogger { return NewSubWith(l) } // // --------------------------------------------------------------------------- // region New record with logger // --------------------------------------------------------------------------- // // Record return a new record with logger, will release after writing log. func (l *Logger) Record() *Record { return l.newRecord() } // Reused return a new record with logger, but it can be reused. // if you want to release the record, please call the Record.Release() after write log. // // Usage: // // r := logger.Reused() // defer r.Release() // // // can write log multiple times // r.Info("some message1") // r.Warn("some message1") func (l *Logger) Reused() *Record { return l.newRecord().Reused() } // WithField new record with field // // TIP: add field need config Formatter template fields. 
func (l *Logger) WithField(name string, value any) *Record {
	r := l.newRecord()
	// defer l.releaseRecord(r)
	return r.WithField(name, value)
}

// WithFields new record with fields
//
// TIP: add field need config Formatter template fields.
func (l *Logger) WithFields(fields M) *Record {
	r := l.newRecord()
	// defer l.releaseRecord(r)
	return r.WithFields(fields)
}

// WithData new record with data
func (l *Logger) WithData(data M) *Record {
	return l.newRecord().WithData(data)
}

// WithValue new record with data value
func (l *Logger) WithValue(key string, value any) *Record {
	return l.newRecord().AddValue(key, value)
}

// WithExtra new record with extra data
func (l *Logger) WithExtra(ext M) *Record {
	return l.newRecord().SetExtra(ext)
}

// WithTime new record with time.Time
func (l *Logger) WithTime(t time.Time) *Record {
	r := l.newRecord()
	// defer l.releaseRecord(r)
	return r.WithTime(t)
}

// WithCtx new record with context.Context. alias of WithContext()
func (l *Logger) WithCtx(ctx context.Context) *Record {
	return l.WithContext(ctx)
}

// WithContext new record with context.Context
func (l *Logger) WithContext(ctx context.Context) *Record {
	r := l.newRecord()
	// defer l.releaseRecord(r)
	return r.WithContext(ctx)
}

//
// ---------------------------------------------------------------------------
// region Add log message
// ---------------------------------------------------------------------------
//

// log a message with level.
// CallerSkip+1 skips this internal wrapper frame when resolving the caller.
func (l *Logger) log(level Level, args []any) {
	r := l.newRecord()
	r.CallerSkip++
	r.log(level, args)
}

// logf a format message with level
func (l *Logger) logf(level Level, format string, args []any) {
	r := l.newRecord()
	r.CallerSkip++
	r.logf(level, format, args)
}

// logCtx a context message with level
func (l *Logger) logCtx(ctx context.Context, level Level, args []any) {
	r := l.newRecord()
	r.Ctx = ctx
	r.CallerSkip++
	r.log(level, args)
}

// logfCtx a format message with level, context
func (l *Logger) logfCtx(ctx context.Context, level Level, format string, args []any) {
	r := l.newRecord()
	r.Ctx = ctx
	r.CallerSkip++
	r.logf(level, format, args)
}

// Log a message with level
func (l *Logger) Log(level Level, args ...any) {
	l.log(level, args)
}

// Logf a format message with level
func (l *Logger) Logf(level Level, format string, args ...any) {
	l.logf(level, format, args)
}

// Print logs a message at level PrintLevel
func (l *Logger) Print(args ...any) {
	l.log(PrintLevel, args)
}

// Println logs a message at level PrintLevel.
// NOTE: body is identical to Print(); no extra newline handling here.
func (l *Logger) Println(args ...any) {
	l.log(PrintLevel, args)
}

// Printf logs a message at level PrintLevel
func (l *Logger) Printf(format string, args ...any) {
	l.logf(PrintLevel, format, args)
}

// Trace logs a message at level trace
func (l *Logger) Trace(args ...any) {
	l.log(TraceLevel, args)
}

// Tracef logs a message at level trace
func (l *Logger) Tracef(format string, args ...any) {
	l.logf(TraceLevel, format, args)
}

// TraceCtx logs a message at level trace with context
func (l *Logger) TraceCtx(ctx context.Context, args ...any) {
	l.logCtx(ctx, TraceLevel, args)
}

// TracefCtx logs a message at level trace with context
func (l *Logger) TracefCtx(ctx context.Context, format string, args ...any) {
	l.logfCtx(ctx, TraceLevel, format, args)
}

// Debug logs a message at level debug
func (l *Logger) Debug(args ...any) {
	l.log(DebugLevel, args)
}

// Debugf logs a message at level debug
func (l *Logger) Debugf(format string, args ...any) {
	l.logf(DebugLevel, format, args)
}

// DebugCtx logs a message at level debug with context
func (l *Logger) DebugCtx(ctx context.Context, args ...any) {
	l.logCtx(ctx, DebugLevel, args)
}

// DebugfCtx logs a message at level debug with context
func (l *Logger) DebugfCtx(ctx context.Context, format string, args ...any) {
	l.logfCtx(ctx, DebugLevel, format, args)
}

// Info logs a message at level Info
func (l *Logger) Info(args ...any) {
	l.log(InfoLevel, args)
}

// Infof logs a message at level Info
func (l *Logger) Infof(format string, args ...any) {
	l.logf(InfoLevel, format, args)
}

// InfoCtx logs a message at level Info with context
func (l *Logger) InfoCtx(ctx context.Context, args ...any) {
	l.logCtx(ctx, InfoLevel, args)
}

// InfofCtx logs a message at level Info with context
func (l *Logger) InfofCtx(ctx context.Context, format string, args ...any) {
	l.logfCtx(ctx, InfoLevel, format, args)
}

// Notice logs a message at level notice
func (l *Logger) Notice(args ...any) {
	l.log(NoticeLevel, args)
}

// Noticef logs a message at level notice
func (l *Logger) Noticef(format string, args ...any) {
	l.logf(NoticeLevel, format, args)
}

// NoticeCtx logs a message at level notice with context
func (l *Logger) NoticeCtx(ctx context.Context, args ...any) {
	l.logCtx(ctx, NoticeLevel, args)
}

// NoticefCtx logs a message at level notice with context
func (l *Logger) NoticefCtx(ctx context.Context, format string, args ...any) {
	l.logfCtx(ctx, NoticeLevel, format, args)
}

// Warn logs a message at level Warn
func (l *Logger) Warn(args ...any) {
	l.log(WarnLevel, args)
}

// Warnf logs a message at level Warn
func (l *Logger) Warnf(format string, args ...any) {
	l.logf(WarnLevel, format, args)
}

// WarnCtx logs a message at level Warn with context
func (l *Logger) WarnCtx(ctx context.Context, args ...any) {
	l.logCtx(ctx, WarnLevel, args)
}

// WarnfCtx logs a message at level Warn with context
func (l *Logger) WarnfCtx(ctx context.Context, format string, args ...any) {
	l.logfCtx(ctx, WarnLevel, format, args)
}

// Warning logs a message at level Warn, alias of Logger.Warn()
func (l *Logger) Warning(args ...any) {
	l.log(WarnLevel, args)
}

// Error logs a message at level error
func (l *Logger) Error(args ...any) {
	l.log(ErrorLevel, args)
}

// Errorf logs a message at level error
func (l *Logger) Errorf(format string, args ...any) {
	l.logf(ErrorLevel, format, args)
}

// ErrorT logs an error type at level error. no-op when err is nil.
func (l *Logger) ErrorT(err error) {
	if err != nil {
		l.log(ErrorLevel, []any{err})
	}
}

// ErrorCtx logs a message at level error with context
func (l *Logger) ErrorCtx(ctx context.Context, args ...any) {
	l.logCtx(ctx, ErrorLevel, args)
}

// ErrorfCtx logs a message at level error with context
func (l *Logger) ErrorfCtx(ctx context.Context, format string, args ...any) {
	l.logfCtx(ctx, ErrorLevel, format, args)
}

// Stack logs an error message and with call stack. TODO
// func EStack(args ...any) { std.log(ErrorLevel, args) }

// Fatal logs a message at level fatal
func (l *Logger) Fatal(args ...any) {
	l.log(FatalLevel, args)
}

// Fatalf logs a message at level fatal
func (l *Logger) Fatalf(format string, args ...any) {
	l.logf(FatalLevel, format, args)
}

// Fatalln logs a message at level fatal.
// NOTE: body is identical to Fatal(); no extra newline handling here.
func (l *Logger) Fatalln(args ...any) {
	l.log(FatalLevel, args)
}

// FatalCtx logs a message at level fatal with context
func (l *Logger) FatalCtx(ctx context.Context, args ...any) {
	l.logCtx(ctx, FatalLevel, args)
}

// FatalfCtx logs a message at level fatal with context
func (l *Logger) FatalfCtx(ctx context.Context, format string, args ...any) {
	l.logfCtx(ctx, FatalLevel, format, args)
}

// Panic logs a message at level panic
func (l *Logger) Panic(args ...any) {
	l.log(PanicLevel, args)
}

// Panicf logs a message at level panic
func (l *Logger) Panicf(format string, args ...any) {
	l.logf(PanicLevel, format, args)
}

// Panicln logs a message at level panic.
// NOTE: body is identical to Panic(); no extra newline handling here.
func (l *Logger) Panicln(args ...any) {
	l.log(PanicLevel, args)
}

// PanicCtx logs a message at level panic with context
func (l *Logger) PanicCtx(ctx context.Context, args ...any) {
	l.logCtx(ctx, PanicLevel, args)
}

// PanicfCtx logs a message at level panic with context
func (l *Logger) PanicfCtx(ctx context.Context, format string, args ...any) {
	l.logfCtx(ctx, PanicLevel, format, args)
}

================================================ FILE: logger_sub.go ================================================
package slog

import "context"

// SubLogger is a sub-logger, It can be used to keep a certain amount of contextual information and log multiple times.
// 可以用于保持一定的上下文信息多次记录日志。例如在循环中使用,或者作为方法参数传入。 // // Usage: // // sl := slog.NewSub().KeepCtx(custom ctx). // KeepFields(slog.M{"ip": ...}). // KeepData(slog.M{"username": ...}) // defer sl.Release() // // sl.Info("some message") type SubLogger struct { l *Logger // parent logger // Ctx keep context for all log records Ctx context.Context // Fields keep custom fields data for all log records Fields M // Data keep data for all log records Data M // Extra data. will keep for all log records Extra M } // NewSubWith returns a new SubLogger with parent logger. func NewSubWith(l *Logger) *SubLogger { return &SubLogger{l: l} } // KeepCtx keep context for all log records func (sub *SubLogger) KeepCtx(ctx context.Context) *SubLogger { sub.Ctx = ctx return sub } // KeepFields keep custom fields data for all log records func (sub *SubLogger) KeepFields(fields M) *SubLogger { sub.Fields = fields return sub } // KeepField keep custom field for all log records func (sub *SubLogger) KeepField(field string, value any) *SubLogger { if sub.Fields == nil { sub.Fields = make(M) } sub.Fields[field] = value return sub } // KeepData keep data for all log records func (sub *SubLogger) KeepData(data M) *SubLogger { sub.Data = data return sub } // KeepExtra keep extra data for all log records func (sub *SubLogger) KeepExtra(extra M) *SubLogger { sub.Extra = extra return sub } // Release releases the SubLogger. func (sub *SubLogger) Release() { sub.l = nil sub.Ctx = nil sub.Fields = nil sub.Data = nil sub.Extra = nil } func (sub *SubLogger) withKeepCtx() *Record { r := sub.l.WithContext(sub.Ctx) r.Data = sub.Data r.Extra = sub.Extra r.Fields = sub.Fields return r } // // --------------------------------------------------------------------------- // Add log message with level // --------------------------------------------------------------------------- // // Print logs a message at PrintLevel. 
will with sub logger's context, fields and data func (sub *SubLogger) Print(args ...any) { sub.withKeepCtx().Print(args...) } // Printf logs a message at PrintLevel. will with sub logger's context, fields and data func (sub *SubLogger) Printf(format string, args ...any) { sub.withKeepCtx().Printf(format, args...) } // Trace logs a message at TraceLevel. will with sub logger's context, fields and data func (sub *SubLogger) Trace(args ...any) { sub.withKeepCtx().Trace(args...) } // Tracef logs a formatted message at TraceLevel. will with sub logger's context, fields and data func (sub *SubLogger) Tracef(format string, args ...any) { sub.withKeepCtx().Tracef(format, args...) } // Debug logs a message at DebugLevel. will with sub logger's context, fields and data func (sub *SubLogger) Debug(args ...any) { sub.withKeepCtx().Debug(args...) } // Debugf logs a formatted message at DebugLevel. will with sub logger's context, fields and data func (sub *SubLogger) Debugf(format string, args ...any) { sub.withKeepCtx().Debugf(format, args...) } // Info logs a message at InfoLevel. will with sub logger's context, fields and data func (sub *SubLogger) Info(args ...any) { sub.withKeepCtx().Info(args...) } // Infof logs a formatted message at InfoLevel. will with sub logger's context, fields and data func (sub *SubLogger) Infof(format string, args ...any) { sub.withKeepCtx().Infof(format, args...) } // Notice logs a message at NoticeLevel. will with sub logger's context, fields and data func (sub *SubLogger) Notice(args ...any) { sub.withKeepCtx().Notice(args...) } // Noticef logs a formatted message at NoticeLevel. will with sub logger's context, fields and data func (sub *SubLogger) Noticef(format string, args ...any) { sub.withKeepCtx().Noticef(format, args...) } // Warn logs a message at WarnLevel. will with sub logger's context, fields and data func (sub *SubLogger) Warn(args ...any) { sub.withKeepCtx().Warn(args...) } // Warnf logs a formatted message at WarnLevel. 
will with sub logger's context, fields and data func (sub *SubLogger) Warnf(format string, args ...any) { sub.withKeepCtx().Warnf(format, args...) } // Error logs a message at ErrorLevel. will with sub logger's context, fields and data func (sub *SubLogger) Error(args ...any) { sub.withKeepCtx().Error(args...) } // Errorf logs a formatted message at ErrorLevel. will with sub logger's context, fields and data func (sub *SubLogger) Errorf(format string, args ...any) { sub.withKeepCtx().Errorf(format, args...) } // Fatal logs a message at FatalLevel. will with sub logger's context, fields and data func (sub *SubLogger) Fatal(args ...any) { sub.withKeepCtx().Fatal(args...) } // Fatalf logs a formatted message at FatalLevel. will with sub logger's context, fields and data func (sub *SubLogger) Fatalf(format string, args ...any) { sub.withKeepCtx().Fatalf(format, args...) } // Panic logs a message at PanicLevel. will with sub logger's context, fields and data func (sub *SubLogger) Panic(args ...any) { sub.withKeepCtx().Panic(args...) } // Panicf logs a formatted message at PanicLevel. will with sub logger's context, fields and data func (sub *SubLogger) Panicf(format string, args ...any) { sub.withKeepCtx().Panicf(format, args...) 
} ================================================ FILE: logger_test.go ================================================ package slog_test import ( "bytes" "context" "fmt" "testing" "time" "github.com/gookit/goutil/dump" "github.com/gookit/goutil/errorx" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/goutil/timex" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func TestLoggerBasic(t *testing.T) { l := slog.New() l.SetName("testName") assert.Eq(t, "testName", l.Name()) l = slog.NewWithName("testName") assert.Eq(t, "testName", l.Name()) } func TestLogger_PushHandler(t *testing.T) { l := slog.New().Configure(func(l *slog.Logger) { l.DoNothingOnPanicFatal() }) w1 := new(bytes.Buffer) h1 := handler.NewIOWriterHandler(w1, slog.DangerLevels) l.PushHandler(h1) w2 := new(bytes.Buffer) h2 := handler.NewIOWriterHandler(w2, slog.NormalLevels) l.PushHandlers(h2) l.Warning(slog.WarnLevel, "message") l.Logf(slog.TraceLevel, "%s message", slog.TraceLevel) assert.Contains(t, w1.String(), "WARNING message") assert.Contains(t, w2.String(), "TRACE message") assert.Contains(t, w2.String(), "TestLogger_PushHandler") assert.NoErr(t, l.Sync()) assert.NoErr(t, l.Flush()) l.MustFlush() assert.NoErr(t, l.Close()) l.MustClose() l.Reset() } func TestLogger_ReportCaller(t *testing.T) { l := slog.NewWithConfig(func(logger *slog.Logger) { logger.ReportCaller = true logger.CallerFlag = slog.CallerFlagFnLine }) var buf bytes.Buffer h := handler.NewIOWriterHandler(&buf, slog.AllLevels) h.SetFormatter(slog.NewJSONFormatter(func(f *slog.JSONFormatter) { f.Fields = append(f.Fields, slog.FieldKeyCaller) })) l.AddHandler(h) l.Info("message") str := buf.String() assert.Contains(t, str, `"caller":"logger_test.go`) } func TestLogger_Log(t *testing.T) { l := slog.NewWithConfig(func(l *slog.Logger) { l.ReportCaller = true l.DoNothingOnPanicFatal() }) l.AddHandler(handler.NewConsoleHandler(slog.AllLevels)) l.Log(slog.InfoLevel, "a", slog.InfoLevel, "message") l.WithField("newKey", 
"value").Fatalln("a fatal message") l.WithTime(timex.NowHourStart()).Panicln("a panic message") } func TestLogger_WithContext(t *testing.T) { var buf bytes.Buffer h := handler.NewIOWriterHandler(&buf, slog.AllLevels) l := newLogger() l.AddHandlers(h) ctx := context.Background() r := l.WithCtx(ctx) r.Info("with context") str := buf.String() assert.Contains(t, str, `with context`) } func TestLogger_panic(t *testing.T) { h := newTestHandler() h.errOnFlush = true l := slog.NewWithHandlers(h) assert.Panics(t, func() { l.MustFlush() }) err := l.LastErr() assert.Err(t, err) assert.Eq(t, "flush error", err.Error()) h.errOnClose = true assert.Panics(t, func() { l.MustClose() }) err = l.LastErr() assert.Err(t, err) assert.Eq(t, "close error", err.Error()) } func TestLogger_error(t *testing.T) { h := newTestHandler() l := slog.NewWithHandlers(h) err := l.VisitAll(func(h slog.Handler) error { return errorx.Raw("visit error") }) assert.Err(t, err) assert.Eq(t, "visit error", err.Error()) h.errOnClose = true err = l.Close() assert.Err(t, err) assert.Eq(t, "close error", err.Error()) } func TestLogger_panicLevel(t *testing.T) { w := new(bytes.Buffer) l := slog.NewWithHandlers(handler.NewIOWriter(w, slog.AllLevels)) // assert.PanicsWithValue(t, "slog: panic message", func() { assert.Panics(t, func() { l.Panicln("panicln message") }) assert.Contains(t, w.String(), "[PANIC]") assert.Contains(t, w.String(), "panicln message") w.Reset() assert.Panics(t, func() { l.Panicf("panicf message") }) assert.Contains(t, w.String(), "panicf message") w.Reset() assert.Panics(t, func() { l.Panic("panic message") }) assert.Contains(t, w.String(), "panic message") assert.NoErr(t, l.FlushAll()) } func TestLogger_log_allLevel(t *testing.T) { l := slog.NewWithConfig(func(l *slog.Logger) { l.ReportCaller = true l.DoNothingOnPanicFatal() }) l.AddHandler(handler.NewConsoleHandler(slog.AllLevels)) printAllLevelLogs(l, "this a", "log", "message") } func TestLogger_logf_allLevel(t *testing.T) { l := 
slog.NewWithConfig(func(l *slog.Logger) { l.ReportCaller = true l.CallerFlag = slog.CallerFlagFpLine l.DoNothingOnPanicFatal() }) l.AddHandler(handler.NewConsoleHandler(slog.AllLevels)) printfAllLevelLogs(l, "this a log %s", "message") } func TestLogger_write_error(t *testing.T) { h := newTestHandler() h.errOnHandle = true l := slog.NewWithHandlers(h) l.Info("a message") err := l.LastErr() assert.Err(t, err) assert.Eq(t, "handle error", err.Error()) } func TestLogger_AddWithCtx(t *testing.T) { h := newTestHandler() l := slog.NewWithHandlers(h) l.DoNothingOnPanicFatal() l.AddProcessor(slog.CtxKeysProcessor("data", "ctx1", "ctx2")) ctx := context.WithValue(context.Background(), "ctx1", "ctx1-value") ctx = context.WithValue(ctx, "ctx2", "ctx2-value") t.Run("normal", func(t *testing.T) { l.TraceCtx(ctx, "A message", "test") l.DebugCtx(ctx, "A message", "test") l.InfoCtx(ctx, "A message", "test") l.NoticeCtx(ctx, "A message", "test") l.WarnCtx(ctx, "A message", "test") l.ErrorCtx(ctx, "A message", "test") l.FatalCtx(ctx, "A message", "test") l.PanicCtx(ctx, "A message", "test") s := h.ResetGet() assert.StrContains(t, s, "ctx1-value") assert.StrContains(t, s, "ctx2-value") for _, level := range slog.AllLevels { assert.StrContains(t, s, level.Name()) } }) t.Run("with format", func(t *testing.T) { l.TracefCtx(ctx, "A message %s", "test") l.DebugfCtx(ctx, "A message %s", "test") l.InfofCtx(ctx, "A message %s", "test") l.NoticefCtx(ctx, "A message %s", "test") l.WarnfCtx(ctx, "A message %s", "test") l.ErrorfCtx(ctx, "A message %s", "test") l.PanicfCtx(ctx, "A message %s", "test") l.FatalfCtx(ctx, "A message %s", "test") s := h.ResetGet() assert.StrContains(t, s, "ctx1-value") assert.StrContains(t, s, "ctx2-value") for _, level := range slog.AllLevels { assert.StrContains(t, s, level.Name()) } }) } func TestLogger_GlobalFields(t *testing.T) { buf, l := newTestLogger() l.Config(func(l *slog.Logger) { l.GlobalFields = slog.M{ "global1": "test-app", "global2": "test-value2", } 
}) l.Info("A message") s := buf.StringReset() fmt.Print(s) assert.StrContains(t, s, "global1") assert.StrContains(t, s, "global2") } func TestLogger_option_BackupArgs(t *testing.T) { l := slog.New(func(l *slog.Logger) { l.BackupArgs = true l.CallerFlag = slog.CallerFlagPkgFnl }) var rFmt string var rArgs []any h := newTestHandler() h.beforeFormat = func(r *slog.Record) { rFmt = r.Fmt rArgs = r.Args } l.AddHandler(h) l.Info("str message1") assert.NotEmpty(t, rArgs) rFmt = "" rArgs = nil l.Infof("fmt %s", "message2") assert.NotEmpty(t, rFmt) assert.NotEmpty(t, rArgs) l.WithField("key", "value").Info("field message3") s := h.ResetGet() fmt.Println(s) assert.StrContains(t, s, "str message1") assert.StrContains(t, s, "fmt message2") assert.StrContains(t, s, "field message3") assert.StrContains(t, s, "UN-CONFIGURED FIELDS: {key:value}") } func TestLogger_FlushTimeout(t *testing.T) { h := newTestHandler() l := slog.NewWithHandlers(h) // test flush error h.errOnFlush = true l.FlushTimeout(time.Millisecond * 2) // test flush timeout h.errOnFlush = false h.callOnFlush = func() { time.Sleep(time.Millisecond * 25) } l.FlushTimeout(time.Millisecond * 20) assert.Panics(t, func() { l.StopDaemon() }) } func TestLogger_rewrite_record(t *testing.T) { h := newTestHandler() l := slog.NewWithHandlers(h) t.Run("Record rewrite", func(t *testing.T) { r := l.Record() r.Info("a message1") fmt.Printf("%+v\n", r) time.Sleep(time.Millisecond * 2) r.Warn("a message2") fmt.Printf("%+v\n", r) time.Sleep(time.Millisecond * 2) r.Warn("a message3") fmt.Printf("%+v\n", r) r.Release() dump.P(h.ResetGet()) }) t.Run("Reused rewrite", func(t *testing.T) { r := l.Reused() r.Info("A message1") fmt.Printf("%+v\n", r) time.Sleep(time.Millisecond * 2) r.Warn("A message2") fmt.Printf("%+v\n", r) r.Release() dump.P(h.ResetGet()) }) } func TestLogger_Sub(t *testing.T) { h := newTestHandler() l := slog.NewWithHandlers(h) l.DoNothingOnPanicFatal() l.AddProcessor(slog.CtxKeysProcessor("extra", "ctx1")) sub := 
l.NewSub(). KeepData(slog.M{"data1": "data1-value"}). KeepExtra(slog.M{"ext1": "ext1-value"}). KeepFields(slog.M{"field1": "field1-value"}). KeepCtx(context.WithValue(context.Background(), "ctx1", "ctx1-value")) assert.ContainsKey(t, sub.Data, "data1") assert.ContainsKey(t, sub.Extra, "ext1") assert.ContainsKey(t, sub.Fields, "field1") assert.Eq(t, "ctx1-value", sub.Ctx.Value("ctx1")) t.Run("normal", func(t *testing.T) { sub.Print("A message", "test") sub.Trace("A message", "test") sub.Debug("A message", "test") sub.Info("A message", "test") sub.Notice("A message", "test") sub.Warn("A message", "test") sub.Error("A message", "test") sub.Fatal("A message", "test") sub.Panic("A message", "test") s := h.ResetGet() assert.StrContains(t, s, "ctx1-value") assert.StrContains(t, s, "ext1-value") for _, level := range slog.AllLevels { assert.StrContains(t, s, level.Name()) } }) t.Run("formated", func(t *testing.T) { sub.Printf("A message %s", "test") sub.Tracef("A message %s", "test") sub.Debugf("A message %s", "test") sub.Infof("A message %s", "test") sub.Noticef("A message %s", "test") sub.Warnf("A message %s", "test") sub.Errorf("A message %s", "test") sub.Panicf("A message %s", "test") sub.Fatalf("A message %s", "test") s := h.ResetGet() assert.StrContains(t, s, "ctx1-value") assert.StrContains(t, s, "ext1-value") for _, level := range slog.AllLevels { assert.StrContains(t, s, level.Name()) } }) // Release sub.Release() assert.Nil(t, sub.Ctx) assert.Nil(t, sub.Data) assert.Nil(t, sub.Extra) assert.Nil(t, sub.Fields) } ================================================ FILE: logger_write.go ================================================ package slog // // --------------------------------------------------------------------------- // Do write log message // --------------------------------------------------------------------------- // // func (r *Record) logWrite(level Level) { // Will reduce memory allocation once // r.Message = strutil.Byte2str(message) // var buf 
*bytes.Buffer // buf = bufferPool.Get().(*bytes.Buffer) // defer bufferPool.Put(buf) // r.Buffer = buf // TODO release on here ?? // defer r.logger.releaseRecord(r) // r.logger.writeRecord(level, r) // r.Buffer = nil // } // Init something for record(eg: time, level name). func (r *Record) Init(lowerLevelName bool) { r.inited = true // use lower level name if lowerLevelName { r.levelName = r.Level.LowerName() } else { r.levelName = r.Level.Name() } // init log time if r.Time.IsZero() { r.Time = r.logger.TimeClock.Now() } // r.microSecond = r.Time.Nanosecond() / 1000 } // Init something for record. func (r *Record) beforeHandle(l *Logger) { // log caller. will alloc 3 times if l.ReportCaller { caller, ok := getCaller(r.CallerSkip) if ok { r.Caller = &caller } } // processing log record for i := range l.processors { l.processors[i].Process(r) } } // do write record to handlers, will add lock. func (l *Logger) writeRecord(level Level, r *Record) { l.mu.Lock() defer l.mu.Unlock() // reset init flag, useful for repeat use Record r.inited = false for _, handler := range l.handlers { if handler.IsHandling(level) { // init record, call processors if !r.inited { r.Init(l.LowerLevelName) r.beforeHandle(l) } // do write a log message by handler if err := handler.Handle(r); err != nil { l.err = err printStderr("slog: failed to handle log, error:", err) } } } // ---- after write log ---- r.Time = emptyTime // flush logs on level <= error level. 
	if level <= ErrorLevel {
		l.flushAll() // has been in lock
	}

	// panic/fatal handling: PanicFunc receives the record for panic level;
	// fatal level runs exit handlers and exits via Exit(1).
	if level <= PanicLevel {
		l.PanicFunc(r)
	} else if level <= FatalLevel {
		l.Exit(1)
	}
}

================================================ FILE: processor.go ================================================
package slog

import (
	"crypto/md5"
	"encoding/hex"
	"os"
	"runtime"

	"github.com/gookit/goutil/strutil"
)

//
// Processor interface
//

// Processor interface definition. A processor can mutate a Record
// (add fields/data/extra) before it is handled by the handlers.
type Processor interface {
	// Process record
	Process(record *Record)
}

// ProcessorFunc wrapper definition. Adapts a plain function to the Processor interface.
type ProcessorFunc func(record *Record)

// Process record
func (fn ProcessorFunc) Process(record *Record) {
	fn(record)
}

// ProcessableHandler interface for handlers that carry their own processors.
type ProcessableHandler interface {
	// AddProcessor add a processor
	AddProcessor(Processor)
	// ProcessRecord handle a record
	ProcessRecord(record *Record)
}

// Processable definition. Embeddable helper that stores a processor list.
type Processable struct {
	processors []Processor
}

// AddProcessor to the handler
func (p *Processable) AddProcessor(processor Processor) {
	p.processors = append(p.processors, processor)
}

// ProcessRecord process record, runs each processor in registration order.
func (p *Processable) ProcessRecord(r *Record) {
	// processing log record
	for _, processor := range p.processors {
		processor.Process(r)
	}
}

//
// there are some built-in processors
//

// AddHostname adds a "hostname" field to each record.
// The hostname is resolved once, when the processor is created.
func AddHostname() Processor {
	hostname, _ := os.Hostname()

	return ProcessorFunc(func(record *Record) {
		record.AddField("hostname", hostname)
	})
}

// AddUniqueID adds a random md5-hex ID to each record under fieldName.
//
// NOTE(review): the md5 hasher `hs` is shared by every invocation of the
// returned processor. Calls are serialized while attached to a single Logger
// (processing happens under l.mu), but sharing one AddUniqueID processor
// across multiple loggers or unlocked handlers looks racy — confirm.
func AddUniqueID(fieldName string) Processor {
	hs := md5.New()

	return ProcessorFunc(func(record *Record) {
		rb, _ := strutil.RandomBytes(32)
		hs.Write(rb)
		randomID := hex.EncodeToString(hs.Sum(nil))
		hs.Reset()

		record.AddField(fieldName, randomID)
	})
}

// MemoryUsage get memory usage.
var MemoryUsage ProcessorFunc = func(record *Record) { stat := new(runtime.MemStats) runtime.ReadMemStats(stat) record.SetExtraValue("memoryUsage", stat.Alloc) } // AppendCtxKeys append context keys to Record.Fields func AppendCtxKeys(keys ...string) Processor { return ProcessorFunc(func(record *Record) { if record.Ctx == nil { return } for _, key := range keys { if val := record.Ctx.Value(key); val != nil { record.AddField(key, val) } } }) } // CtxKeysProcessor append context keys to Record.Data, Record.Fields, Record.Extra // - dist: "data" | "fields" | "extra" func CtxKeysProcessor(dist string, keys ...string) Processor { return ProcessorFunc(func(r *Record) { if r.Ctx == nil { return } kvMap := map[string]any{} for _, key := range keys { if val := r.Ctx.Value(key); val != nil { kvMap[key] = val } } switch dist { case "field", "fields": r.AddFields(kvMap) case "ext", "extra": r.AddExtra(kvMap) default: r.AddData(kvMap) } }) } ================================================ FILE: processor_test.go ================================================ package slog_test import ( "context" "fmt" "os" "testing" "github.com/gookit/goutil/byteutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog" ) func TestLogger_AddProcessor(t *testing.T) { buf := new(byteutil.Buffer) l := slog.NewJSONSugared(buf, slog.InfoLevel) l.AddProcessor(slog.AddHostname()) l.Info("message") hostname, _ := os.Hostname() // {"channel":"application","data":{},"datetime":"2020/07/17 12:01:35","extra":{},"hostname":"InhereMac","level":"INFO","message":"message"} str := buf.String() buf.Reset() assert.Contains(t, str, `"level":"INFO"`) assert.Contains(t, str, `"message":"message"`) assert.Contains(t, str, fmt.Sprintf(`"hostname":"%s"`, hostname)) l.ResetProcessors() l.PushProcessor(slog.MemoryUsage) l.Info("message2") // {"channel":"application","data":{},"datetime":"2020/07/16 16:40:18","extra":{"memoryUsage":326072},"level":"INFO","message":"message2"} str = buf.String() 
buf.Reset() assert.Contains(t, str, `"message":"message2"`) assert.Contains(t, str, `"memoryUsage":`) l.ResetProcessors() l.SetProcessors([]slog.Processor{slog.AddUniqueID("requestId")}) l.Info("message3") str = buf.String() buf.Reset() assert.Contains(t, str, `"message":"message3"`) assert.Contains(t, str, `"requestId":`) fmt.Print(str) l.ResetProcessors() l.AddProcessors(slog.AppendCtxKeys("traceId", "userId")) l.Info("message4") str = buf.ResetAndGet() fmt.Print(str) assert.Contains(t, str, `"message":"message4"`) assert.NotContains(t, str, `"traceId"`) ctx := context.WithValue(context.Background(), "traceId", "traceId123abc456") l.WithCtx(ctx).Info("message5") str = buf.ResetAndGet() fmt.Print(str) assert.Contains(t, str, `"message":"message5"`) assert.Contains(t, str, `"traceId":"traceId123abc456"`) } func TestCtxKeysProcessor(t *testing.T) { // CtxKeysProcessor tr := newLogRecord("test message") tr.Extra = map[string]any{} // reset procFn := slog.CtxKeysProcessor("ext", "traceId") procFn.Process(tr) assert.Empty(t, tr.Extra) ctx := context.WithValue(context.Background(), "traceId", "traceId123abc456") tr = tr.WithCtx(ctx) procFn.Process(tr) assert.Equal(t, "traceId123abc456", tr.Extra["traceId"]) } func TestProcessable_AddProcessor(t *testing.T) { ps := &slog.Processable{} ps.AddProcessor(slog.MemoryUsage) r := newLogRecord("error message") ps.ProcessRecord(r) assert.NotEmpty(t, r.Extra) assert.Contains(t, r.Extra, "memoryUsage") } ================================================ FILE: record.go ================================================ package slog import ( "context" "fmt" "runtime" "strconv" "time" "github.com/gookit/goutil/strutil" ) // Record a log record definition type Record struct { logger *Logger // reuse flag. for reuse a Record, will not be released on after writing. // release the record need call Release() method. // // Reuse field: Ctx, Data, Fields, Extra // // NOTE, if you reuse a record, you must call Reused() method. 
reuse bool // Mark whether the current record is released to the pool. TODO freed bool // inited flag for record inited bool // Time for record log, if is empty will use now. // // TIP: Will be emptied after each use (write) Time time.Time // Level log level for record Level Level // level name cache from Level levelName string // Channel log channel name. eg: "order", "goods", "user" Channel string Message string // Ctx context.Context Ctx context.Context // Fields custom fields data. // Contains all the fields set by the user. Fields M // Data log context data Data M // Extra log extra data Extra M // Caller information Caller *runtime.Frame // CallerFlag value. default is equals to Logger.CallerFlag CallerFlag uint8 // CallerSkip value. default is equals to Logger.CallerSkip CallerSkip int // EnableStack enable stack info, default is false. TODO EnableStack bool // Buffer Can use Buffer on formatter // Buffer *bytes.Buffer // log input args backups, from log() and logf(). its dont use in formatter. Fmt string Args []any } func newRecord(logger *Logger) *Record { return &Record{ logger: logger, Channel: strutil.OrElse(logger.ChannelName, DefaultChannelName), // with some options CallerFlag: logger.CallerFlag, CallerSkip: logger.CallerSkip, // init map data field // Data: make(M, 2), // Extra: make(M, 0), // Fields: make(M, 0), } } // Reused set record is reused, will not be released on after writing. // so, MUST call Release() method after use completed. 
func (r *Record) Reused() *Record { r.reuse = true return r } // Release manual release record to pool func (r *Record) Release() { if r.reuse { r.reuse = false r.logger.releaseRecord(r) } } // // --------------------------------------------------------------------------- // Copy record with something // --------------------------------------------------------------------------- // // WithTime set the record time func (r *Record) WithTime(t time.Time) *Record { nr := r.Copy() nr.Time = t return nr } // WithCtx on record func (r *Record) WithCtx(ctx context.Context) *Record { return r.WithContext(ctx) } // WithContext on record func (r *Record) WithContext(ctx context.Context) *Record { nr := r.Copy() nr.Ctx = ctx return nr } // WithError on record func (r *Record) WithError(err error) *Record { return r.WithFields(M{FieldKeyError: err}) } // WithData on record func (r *Record) WithData(data M) *Record { nr := r.Copy() nr.Data = data return nr } // WithField with a new field to record // // Note: add field need config Formatter template fields. func (r *Record) WithField(name string, val any) *Record { return r.WithFields(M{name: val}) } // WithFields with new fields to record // // Note: add field need config Formatter template fields. 
func (r *Record) WithFields(fields M) *Record { nr := r.Copy() if nr.Fields == nil { nr.Fields = make(M, len(fields)) } for k, v := range fields { nr.Fields[k] = v } return nr } // Copy new record from old record func (r *Record) Copy() *Record { dataCopy := make(M, len(r.Data)) for k, v := range r.Data { dataCopy[k] = v } fieldsCopy := make(M, len(r.Fields)) for k, v := range r.Fields { fieldsCopy[k] = v } extraCopy := make(M, len(r.Extra)) for k, v := range r.Extra { extraCopy[k] = v } return &Record{ // reuse: true, // copy record is reused logger: r.logger, Channel: r.Channel, // Time: r.Time, Level: r.Level, levelName: r.levelName, CallerFlag: r.CallerFlag, CallerSkip: r.CallerSkip, Message: r.Message, Data: dataCopy, Extra: extraCopy, Fields: fieldsCopy, } } // // --------------------------------------------------------------------------- // Direct set value to record // --------------------------------------------------------------------------- // // SetCtx on record func (r *Record) SetCtx(ctx context.Context) *Record { return r.SetContext(ctx) } // SetContext on record func (r *Record) SetContext(ctx context.Context) *Record { r.Ctx = ctx return r } // SetData on record func (r *Record) SetData(data M) *Record { r.Data = data return r } // AddData on record func (r *Record) AddData(data M) *Record { if r.Data == nil { r.Data = data return r } for k, v := range data { r.Data[k] = v } return r } // WithValue add Data value to record. 
alias of AddValue func (r *Record) WithValue(key string, value any) *Record { return r.AddValue(key, value) } // AddValue add Data value to record func (r *Record) AddValue(key string, value any) *Record { if r.Data == nil { r.Data = make(M, 8) } r.Data[key] = value return r } // Value get Data value from record func (r *Record) Value(key string) any { if r.Data == nil { return nil } return r.Data[key] } // SetExtra information on record func (r *Record) SetExtra(data M) *Record { r.Extra = data return r } // AddExtra information on record func (r *Record) AddExtra(data M) *Record { if r.Extra == nil { r.Extra = data return r } for k, v := range data { r.Extra[k] = v } return r } // SetExtraValue on record func (r *Record) SetExtraValue(k string, v any) { if r.Extra == nil { r.Extra = make(M, 8) } r.Extra[k] = v } // SetTime on record func (r *Record) SetTime(t time.Time) *Record { r.Time = t return r } // AddField add new field to the record func (r *Record) AddField(name string, val any) *Record { if r.Fields == nil { r.Fields = make(M, 8) } r.Fields[name] = val return r } // AddFields add new fields to the record func (r *Record) AddFields(fields M) *Record { if r.Fields == nil { r.Fields = fields return r } for n, v := range fields { r.Fields[n] = v } return r } // SetFields to the record func (r *Record) SetFields(fields M) *Record { r.Fields = fields return r } // Field value gets from record func (r *Record) Field(key string) any { if r.Fields == nil { return nil } return r.Fields[key] } // // --------------------------------------------------------------------------- // Add log message with builder // TODO r.Build(InfoLevel).Str().Int().Float().Msg() // --------------------------------------------------------------------------- // // Object data on record TODO optimize performance // func (r *Record) Obj(obj fmt.Stringer) *Record { // r.Data = ctx // return r // } // Object data on record TODO optimize performance // func (r *Record) Any(v any) *Record { // 
r.Data = ctx
// 	return r
// }

// func (r *Record) Str(message string) {
// 	r.logWrite(level, []byte(message))
// }

// func (r *Record) Int(val int) {
// 	r.logWrite(level, []byte(message))
// }

//
// ---------------------------------------------------------------------------
// Add log message with level
// ---------------------------------------------------------------------------
//

// log stores level and a space-joined message on the record, writes it
// through the logger, then returns the record to the pool.
func (r *Record) log(level Level, args []any) {
	r.Level = level
	if r.logger.BackupArgs {
		r.Args = args
	}

	// r.Message = strutil.Byte2str(formatArgsWithSpaces(args)) // will reduce memory allocation once
	r.Message = formatArgsWithSpaces(args)

	// write the log, then release the record
	r.logger.writeRecord(level, r)
	r.logger.releaseRecord(r)
}

// logf is the printf-style variant of log.
func (r *Record) logf(level Level, format string, args []any) {
	if r.logger.BackupArgs {
		r.Fmt, r.Args = format, args
	}

	r.Level = level
	r.Message = fmt.Sprintf(format, args...)

	// write the log, then release the record
	r.logger.writeRecord(level, r)
	r.logger.releaseRecord(r)
}

// Log a message with level
func (r *Record) Log(level Level, args ...any) { r.log(level, args) }

// Logf a message with level
func (r *Record) Logf(level Level, format string, args ...any) { r.logf(level, format, args) }

// Info logs a message at level Info
func (r *Record) Info(args ...any) { r.log(InfoLevel, args) }

// Infof logs a message at level Info
func (r *Record) Infof(format string, args ...any) { r.logf(InfoLevel, format, args) }

// Trace logs a message at level Trace
func (r *Record) Trace(args ...any) { r.log(TraceLevel, args) }

// Tracef logs a message at level Trace
func (r *Record) Tracef(format string, args ...any) { r.logf(TraceLevel, format, args) }

// Error logs a message at level Error
func (r *Record) Error(args ...any) { r.log(ErrorLevel, args) }

// Errorf logs a message at level Error
func (r *Record) Errorf(format string, args ...any) { r.logf(ErrorLevel, format, args) }

// Warn logs a message at level Warn
func (r *Record) Warn(args ...any) { r.log(WarnLevel, args) }

// Warnf logs a message at level Warn
func (r *Record) Warnf(format string, args ...any) { r.logf(WarnLevel, format, args) }

// Notice logs a message at level Notice
func (r *Record) Notice(args ...any) { r.log(NoticeLevel, args) }

// Noticef logs a message at level Notice
func (r *Record) Noticef(format string, args ...any) { r.logf(NoticeLevel, format, args) }

// Debug logs a message at level Debug
func (r *Record) Debug(args ...any) { r.log(DebugLevel, args) }

// Debugf logs a message at level Debug
func (r *Record) Debugf(format string, args ...any) { r.logf(DebugLevel, format, args) }

// Print logs a message at level Print
func (r *Record) Print(args ...any) { r.log(PrintLevel, args) }

// Println logs a message at level Print. alias of Print
func (r *Record) Println(args ...any) { r.log(PrintLevel, args) }

// Printf logs a message at level Print
func (r *Record) Printf(format string, args ...any) { r.logf(PrintLevel, format, args) }

// Fatal logs a message at level Fatal
func (r *Record) Fatal(args ...any) { r.log(FatalLevel, args) }

// Fatalln logs a message at level Fatal
func (r *Record) Fatalln(args ...any) { r.log(FatalLevel, args) }

// Fatalf logs a message at level Fatal
func (r *Record) Fatalf(format string, args ...any) { r.logf(FatalLevel, format, args) }

// Panic logs a message at level Panic
func (r *Record) Panic(args ...any) { r.log(PanicLevel, args) }

// Panicln logs a message at level Panic
func (r *Record) Panicln(args ...any) { r.log(PanicLevel, args) }

// Panicf logs a message at level Panic
func (r *Record) Panicf(format string, args ...any) { r.logf(PanicLevel, format, args) }

// ---------------------------------------------------------------------------
// helper methods
// ---------------------------------------------------------------------------

// LevelName get
func (r *Record) LevelName() string { return r.levelName }

// GoString of the record
func (r *Record) GoString() string { return "slog: " + r.Message }

func (r *Record)
timestamp() string { s := strconv.FormatInt(r.Time.UnixMicro(), 10) return s[:10] + "." + s[10:] } ================================================ FILE: record_test.go ================================================ package slog_test import ( "context" "fmt" "os" "sync" "testing" "time" "github.com/gookit/goutil/byteutil" "github.com/gookit/goutil/errorx" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/goutil/timex" "github.com/gookit/slog" "github.com/gookit/slog/handler" ) func TestRecord_AddData(t *testing.T) { w := newBuffer() l := slog.NewWithConfig(func(l *slog.Logger) { l.DoNothingOnPanicFatal() l.CallerFlag = slog.CallerFlagFull }) l.SetHandlers([]slog.Handler{ handler.NewIOWriter(w, slog.AllLevels), }) r := l.Record() // - add data r.AddData(testData1).Trace("log message with data") s := w.StringReset() fmt.Print(s) assert.Contains(t, s, "slog_test.TestRecord_AddData") assert.Contains(t, s, "log message with data") r.AddData(slog.M{"key01": "val01"}).Print("log message add data2") s = w.StringReset() fmt.Print(s) assert.Contains(t, s, "log message add data2") assert.Contains(t, s, "key01:val01") assert.Eq(t, "val01", r.Value("key01")) // - add value r.AddValue("key01", "val02").Println("log message add value") s = w.StringReset() fmt.Print(s) assert.Contains(t, s, "log message add value") assert.Contains(t, s, "key01:val02") // - first add value nr := &slog.Record{} assert.Nil(t, nr.Value("key01")) nr.WithValue("key01", "val02") assert.Eq(t, "val02", nr.Value("key01")) // -with data r.CallerFlag = slog.CallerFlagFcName r.WithData(slog.M{"key1": "val1"}).Warn("warn message with data") s = w.StringReset() fmt.Print(s) assert.Contains(t, s, "TestRecord_AddData") assert.Contains(t, s, "warn message with data") assert.Contains(t, s, "{key1:val1}") } func TestRecord_AddExtra(t *testing.T) { w := newBuffer() l := slog.NewWithConfig(func(l *slog.Logger) { l.DoNothingOnPanicFatal() l.CallerFlag = slog.CallerFlagFcName }) 
l.SetHandlers([]slog.Handler{ handler.NewIOWriter(w, slog.AllLevels), }) r := l.Record() r.AddExtra(testData1).Trace("log message and add extra") s := w.StringReset() fmt.Print(s) assert.Contains(t, s, "TestRecord_AddExtra") assert.Contains(t, s, "log message and add extra") assert.Contains(t, s, "key0:val0") r.AddExtra(slog.M{"key002": "val002"}).AddExtra(slog.M{"key01": "val01"}). Trace("log message and add extra2") s = w.StringReset() fmt.Print(s) assert.Contains(t, s, "log message and add extra2") assert.Contains(t, s, "TestRecord_AddExtra") assert.Contains(t, s, "key002:val002") } func TestRecord_SetContext(t *testing.T) { w := newBuffer() l := slog.NewWithConfig(func(l *slog.Logger) { l.DoNothingOnPanicFatal() }).Config(func(l *slog.Logger) { l.CallerFlag = slog.CallerFlagPkg }) l.SetHandlers([]slog.Handler{ handler.NewIOWriter(w, slog.AllLevels), }) r := l.Record() r.SetCtx(context.Background()).Info("info message") r.WithCtx(context.Background()).Debug("debug message") s := w.StringReset() fmt.Print(s) assert.Contains(t, s, "github.com/gookit/slog_test") } func TestRecord_WithError(t *testing.T) { w := newBuffer() l := slog.NewWithConfig(func(l *slog.Logger) { l.CallerFlag = slog.CallerFlagFunc l.DoNothingOnPanicFatal() }) h := handler.NewIOWriter(w, slog.AllLevels) h.SetFormatter(slog.NewTextFormatter("ts={{timestamp}} err={{error}} msg={{message}}\n")) l.SetHandlers([]slog.Handler{h}) r := l.Record() r.WithError(errorx.Raw("error message")).Notice("test record with error") s := w.StringReset() assert.Contains(t, s, "err=error message") assert.Contains(t, s, "msg=test record with error") fmt.Print(s) } func TestRecord_WithTime(t *testing.T) { w, l := newTestLogger() ht := timex.NowHourStart() r := l.Record() r.WithTime(ht).Notice("a message with time") s := w.StringReset() assert.Contains(t, s, "a message with time") assert.Contains(t, s, timex.FormatByTpl(ht, slog.DefaultTimeFormat)) fmt.Print(s) } func TestRecord_AddFields(t *testing.T) { r := 
newLogRecord("AddFields") r.AddFields(slog.M{"f1": "hi", "env": "prod"}) assert.NotEmpty(t, r.Fields) r.AddFields(slog.M{"app": "goods"}) assert.NotEmpty(t, r.Fields) // WithFields r = r.WithFields(slog.M{"f2": "v2"}) assert.Eq(t, "v2", r.Field("f2")) // - first add field nr := slog.Record{} assert.Nil(t, nr.Field("f3")) nr.AddField("f3", "val02") assert.Eq(t, "val02", nr.Field("f3")) } func TestRecord_WithFields(t *testing.T) { w, l := newTestLogger() r := l.Record(). WithFields(slog.M{"key1": "value1", "key2": "value2"}). WithFields(slog.M{"key3": "value3"}) assert.Eq(t, "value1", r.Field("key1")) assert.Eq(t, "value2", r.Field("key2")) assert.Eq(t, "value3", r.Field("key3")) r.Info("log message with fields") s := w.StringReset() fmt.Print(s) assert.Contains(t, s, "log message with fields") } func TestRecord_SetFields(t *testing.T) { r := newLogRecord("AddFields") r.SetTime(timex.Now().Yesterday().T()) r.SetFields(slog.M{"f1": "hi", "env": "prod"}) assert.NotEmpty(t, r.Fields) assert.NotEmpty(t, r.Time) } func TestRecord_allLevel(t *testing.T) { w := newBuffer() l := slog.NewWithConfig(func(l *slog.Logger) { l.DoNothingOnPanicFatal() }) l.SetHandlers([]slog.Handler{ handler.NewIOWriter(w, slog.AllLevels), }) r := l.Record() r = r.WithContext(context.Background()) printAllLevelLogs(r, "a message use record.XX()") r.Log(slog.InfoLevel, "a message use record.XX()") r.Notice("a message use record.XX()") r.Trace("a message use record.XX()") s := w.StringReset() assert.Contains(t, s, "printAllLevelLogs") assert.Contains(t, s, "a message use record.XX()") assert.Contains(t, s, "[NOTICE]") assert.Contains(t, s, "[TRACE]") printfAllLevelLogs(r, "a message use %s()", "record.XXf") r.Logf(slog.InfoLevel, "a message use %s()", "record.XXf") r.Noticef("a message use %s()", "record.XXf") r.Tracef("a message use %s()", "record.XXf") s = w.StringReset() assert.Contains(t, s, "printfAllLevelLogs") assert.Contains(t, s, "a message use record.XXf()") assert.Contains(t, s, 
"[NOTICE]") assert.Contains(t, s, "[TRACE]") } func TestRecord_useMultiTimes(t *testing.T) { buf := byteutil.NewBuffer() l := slog.NewWithHandlers( handler.NewSimple(buf, slog.DebugLevel), handler.NewSimple(os.Stdout, slog.DebugLevel), ) r := l.Record() t.Run("simple", func(t *testing.T) { for i := 0; i < 10; i++ { r.Error("simple error log", i) time.Sleep(time.Millisecond * 100) } }) // test concurrent write t.Run("concurrent", func(t *testing.T) { wg := sync.WaitGroup{} for i := 0; i < 100; i++ { wg.Add(1) go func(i int) { r.Error("concurrent error log", i) time.Sleep(time.Millisecond * 100) wg.Done() }(i) } wg.Wait() }) } ================================================ FILE: rotatefile/README.md ================================================ # Rotate File `rotatefile` provides simple file rotation, compression and cleanup. ## Features - Rotate file by size and time - Custom filename for rotate file by size - Custom time clock for rotate - Custom file perm for create log file - Custom rotate mode: create, rename - Compress rotated file - Cleanup old files ## Install ```bash go get github.com/gookit/slog/rotatefile ``` ## Usage ### Create a file writer ```go logFile := "testdata/go_logger.log" writer, err := rotatefile.NewConfig(logFile).Create() if err != nil { panic(err) } // use writer writer.Write([]byte("log message\n")) ``` ### Use on another logger ```go package main import ( "log" "github.com/gookit/slog/rotatefile" ) func main() { logFile := "testdata/go_logger.log" writer, err := rotatefile.NewConfig(logFile).Create() if err != nil { panic(err) } log.SetOutput(writer) log.Println("log message") } ``` ### Available config options ```go // Config struct for rotate dispatcher type Config struct { // Filepath the log file path, will be rotating Filepath string `json:"filepath" yaml:"filepath"` // FilePerm for create log file. default DefaultFilePerm FilePerm os.FileMode `json:"file_perm" yaml:"file_perm"` // MaxSize file contents max size, unit is bytes. 
// If is equals zero, disable rotate file by size // // default see DefaultMaxSize MaxSize uint64 `json:"max_size" yaml:"max_size"` // RotateTime the file rotate interval time, unit is seconds. // If is equals zero, disable rotate file by time // // default see EveryHour RotateTime RotateTime `json:"rotate_time" yaml:"rotate_time"` // CloseLock use sync lock on write contents, rotating file. // // default: false CloseLock bool `json:"close_lock" yaml:"close_lock"` // BackupNum max number for keep old files. // // 0 is not limit, default is DefaultBackNum BackupNum uint `json:"backup_num" yaml:"backup_num"` // BackupTime max time for keep old files, unit is hours. // // 0 is not limit, default is DefaultBackTime BackupTime uint `json:"backup_time" yaml:"backup_time"` // Compress determines if the rotated log files should be compressed using gzip. // The default is not to perform compression. Compress bool `json:"compress" yaml:"compress"` // RenameFunc you can custom-build filename for rotate file by size. // // default see DefaultFilenameFn RenameFunc func(filePath string, rotateNum uint) string // TimeClock for rotate TimeClock Clocker } ``` ## Files clear ```go fc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) { c.AddPattern("/path/to/some*.log") c.BackupNum = 2 c.BackupTime = 12 // 12 hours }) // clear files on daemon go fc.DaemonClean(nil) // NOTE: stop daemon before exit // fc.QuitDaemon() ``` ### Configs ```go // CConfig struct for clean files type CConfig struct { // BackupNum max number for keep old files. // 0 is not limit, default is 20. BackupNum uint `json:"backup_num" yaml:"backup_num"` // BackupTime max time for keep old files, unit is TimeUnit. // // 0 is not limit, default is a week. BackupTime uint `json:"backup_time" yaml:"backup_time"` // Compress determines if the rotated log files should be compressed using gzip. // The default is not to perform compression. 
Compress bool `json:"compress" yaml:"compress"` // Patterns dir path with filename match patterns. // // eg: ["/tmp/error.log.*", "/path/to/info.log.*", "/path/to/dir/*"] Patterns []string `json:"patterns" yaml:"patterns"` // TimeClock for clean files TimeClock Clocker // TimeUnit for BackupTime. default is hours: time.Hour TimeUnit time.Duration `json:"time_unit" yaml:"time_unit"` // CheckInterval for clean files on daemon run. default is 60s. CheckInterval time.Duration `json:"check_interval" yaml:"check_interval"` // IgnoreError ignore remove error // TODO IgnoreError bool // RotateMode for rotate split files TODO // - copy+cut: copy contents then truncate file // - rename : rename file(use for like PHP-FPM app) // RotateMode RotateMode `json:"rotate_mode" yaml:"rotate_mode"` } ``` ================================================ FILE: rotatefile/cleanup.go ================================================ package rotatefile import ( "os" "sort" "time" "github.com/gookit/goutil/errorx" "github.com/gookit/goutil/fsutil" ) const defaultCheckInterval = 60 * time.Second // CConfig struct for clean files type CConfig struct { // BackupNum max number for keep old files. // // 0 is not limit, default is 20. BackupNum uint `json:"backup_num" yaml:"backup_num"` // BackupTime max time for keep old files, unit is TimeUnit. // // 0 is not limit, default is a week. BackupTime uint `json:"backup_time" yaml:"backup_time"` // Compress determines if the rotated log files should be compressed using gzip. // The default is not to perform compression. Compress bool `json:"compress" yaml:"compress"` // Patterns dir path with filename match patterns. // // eg: ["/tmp/error.log.*", "/path/to/info.log.*", "/path/to/dir/*"] Patterns []string `json:"patterns" yaml:"patterns"` // TimeClock for clean files TimeClock Clocker // TimeUnit for BackupTime. default is hours: time.Hour TimeUnit time.Duration `json:"time_unit" yaml:"time_unit"` // CheckInterval for clean files on daemon run. 
default is 60s. CheckInterval time.Duration `json:"check_interval" yaml:"check_interval"` // IgnoreError ignore remove error // TODO IgnoreError bool // RotateMode for rotate split files TODO // - copy+cut: copy contents then truncate file // - rename : rename file(use for like PHP-FPM app) // RotateMode RotateMode `json:"rotate_mode" yaml:"rotate_mode"` } // CConfigFunc for clean config type CConfigFunc func(c *CConfig) // AddDirPath for clean, will auto append * for match all files func (c *CConfig) AddDirPath(dirPaths ...string) *CConfig { for _, dirPath := range dirPaths { if !fsutil.IsDir(dirPath) { continue } c.Patterns = append(c.Patterns, dirPath+"/*") } return c } // AddPattern for clean. eg: "/tmp/error.log.*" func (c *CConfig) AddPattern(patterns ...string) *CConfig { c.Patterns = append(c.Patterns, patterns...) return c } // WithConfigFn for custom settings func (c *CConfig) WithConfigFn(fns ...CConfigFunc) *CConfig { for _, fn := range fns { if fn != nil { fn(c) } } return c } // NewCConfig instance func NewCConfig() *CConfig { return &CConfig{ BackupNum: DefaultBackNum, BackupTime: DefaultBackTime, TimeClock: DefaultTimeClockFn, TimeUnit: time.Hour, // check interval time CheckInterval: defaultCheckInterval, } } // FilesClear multi files by time. // // use for rotate and clear other program produce log files type FilesClear struct { // mu sync.Mutex cfg *CConfig // inited mark inited bool // file max backup time. equals CConfig.BackupTime * CConfig.TimeUnit backupDur time.Duration quitDaemon chan struct{} } // NewFilesClear instance func NewFilesClear(fns ...CConfigFunc) *FilesClear { cfg := NewCConfig().WithConfigFn(fns...) 
return &FilesClear{cfg: cfg}
}

// Config get
func (r *FilesClear) Config() *CConfig { return r.cfg }

// WithConfig for custom set config
func (r *FilesClear) WithConfig(cfg *CConfig) *FilesClear {
	r.cfg = cfg
	return r
}

// WithConfigFn for custom settings
func (r *FilesClear) WithConfigFn(fns ...CConfigFunc) *FilesClear {
	r.cfg.WithConfigFn(fns...)
	return r
}

//
// ---------------------------------------------------------------------------
// clean backup files
// ---------------------------------------------------------------------------
//

// StopDaemon for stop daemon clean. Panics when DaemonClean() was never started.
func (r *FilesClear) StopDaemon() {
	if r.quitDaemon == nil {
		panic("cannot quit daemon, please call DaemonClean() first")
	}
	close(r.quitDaemon)
}

// DaemonClean daemon clean old files by config
//
// NOTE: this method will block current goroutine
//
// Usage:
//
//	fc := rotatefile.NewFilesClear(nil)
//	fc.WithConfigFn(func(c *rotatefile.CConfig) {
//		c.AddDirPath("./testdata")
//	})
//
//	wg := sync.WaitGroup{}
//	wg.Add(1)
//
//	// start daemon
//	go fc.DaemonClean(func() {
//		wg.Done()
//	})
//
//	// wait for stop
//	wg.Wait()
func (r *FilesClear) DaemonClean(onStop func()) {
	if r.cfg.BackupNum == 0 && r.cfg.BackupTime == 0 {
		panic("clean: backupNum and backupTime are both 0")
	}

	r.quitDaemon = make(chan struct{})

	tk := time.NewTicker(r.cfg.CheckInterval)
	defer tk.Stop()

	for {
		select {
		case <-r.quitDaemon:
			if onStop != nil {
				onStop()
			}
			return
		case <-tk.C: // do cleaning
			// NOTE(review): this passes r.Clean()'s result unconditionally;
			// presumably printErrln is a no-op for a nil error — confirm.
			printErrln("files-clear: cleanup old files error:", r.Clean())
		}
	}
}

// prepare one-time internal init: resolve the max backup duration
// (BackupTime * TimeUnit). Guarded by the inited flag so repeated
// cleanByPattern calls only compute it once.
// (Previous comment said "Clean old files by config" — copy-paste from Clean.)
func (r *FilesClear) prepare() {
	if r.inited {
		return
	}
	r.inited = true

	// check backup time
	if r.cfg.BackupTime > 0 {
		r.backupDur = time.Duration(r.cfg.BackupTime) * r.cfg.TimeUnit
	}
}

// Clean old files by config
func (r *FilesClear) Clean() error {
	if r.cfg.BackupNum == 0 && r.cfg.BackupTime == 0 {
		return errorx.Err("clean: backupNum and backupTime are both 0")
	}

	// clear by time, can also clean by number
	for _, filePattern
:= range r.cfg.Patterns { if err := r.cleanByPattern(filePattern); err != nil { return err } } return nil } // CleanByPattern clean files by pattern func (r *FilesClear) cleanByPattern(filePattern string) (err error) { r.prepare() oldFiles := make([]fileInfo, 0, 8) cutTime := r.cfg.TimeClock.Now().Add(-r.backupDur) // find and clean expired files err = fsutil.GlobWithFunc(filePattern, func(filePath string) error { stat, err := os.Stat(filePath) if err != nil { return err } // not handle subdir TODO: support subdir if stat.IsDir() { return nil } // collect not expired if stat.ModTime().After(cutTime) { oldFiles = append(oldFiles, newFileInfo(filePath, stat)) return nil } // remove expired file return r.remove(filePath) }) // clear by backup number. backNum := int(r.cfg.BackupNum) remNum := len(oldFiles) - backNum if backNum > 0 && remNum > 0 { // sort by mod-time, oldest at first. sort.Sort(modTimeFInfos(oldFiles)) for idx := 0; idx < len(oldFiles); idx++ { if err = r.remove(oldFiles[idx].Path()); err != nil { break } remNum-- if remNum == 0 { break } } } return } func (r *FilesClear) remove(filePath string) (err error) { return os.Remove(filePath) } ================================================ FILE: rotatefile/cleanup_test.go ================================================ package rotatefile_test import ( "fmt" "os" "sync" "testing" "time" "github.com/gookit/goutil" "github.com/gookit/goutil/dump" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/goutil/timex" "github.com/gookit/slog/rotatefile" ) func TestFilesClear_Clean(t *testing.T) { // make files for clean makeNum := 5 makeWaitCleanFiles("file_clean.log", makeNum) _, err := fsutil.PutContents("testdata/subdir/some.txt", "test data") assert.NoErr(t, err) // create clear fc := rotatefile.NewFilesClear() fc.WithConfig(rotatefile.NewCConfig()) fc.WithConfigFn(func(c *rotatefile.CConfig) { c.AddDirPath("testdata", "not-exist-dir") c.BackupNum = 1 c.BackupTime = 3 
c.TimeUnit = time.Second // for test }) cfg := fc.Config() assert.Eq(t, uint(1), cfg.BackupNum) dump.P(cfg) // do clean assert.NoErr(t, fc.Clean()) files := fsutil.Glob("testdata/file_clean.log.*") dump.P(files) assert.NotEmpty(t, files) assert.Lt(t, len(files), makeNum) t.Run("error", func(t *testing.T) { fc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) { c.BackupNum = 0 c.BackupTime = 0 }) assert.Err(t, fc.Clean()) }) } func TestFilesClear_DaemonClean(t *testing.T) { t.Run("panic", func(t *testing.T) { fc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) { c.BackupNum = 0 c.BackupTime = 0 }) assert.Panics(t, func() { fc.StopDaemon() }) assert.Panics(t, func() { fc.DaemonClean(nil) }) }) fc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) { c.AddPattern("testdata/file_daemon_clean.*") c.BackupNum = 1 c.BackupTime = 3 c.TimeUnit = time.Second // for test c.CheckInterval = time.Second // for test }) cfg := fc.Config() dump.P(cfg) // make files for clean makeNum := 5 makeWaitCleanFiles("file_daemon_clean.log", makeNum) // test daemon clean wg := sync.WaitGroup{} wg.Add(1) // start daemon go fc.DaemonClean(func() { fmt.Println("daemon clean stopped, at", timex.Now().DateFormat("ymdTH:i:s.v")) wg.Done() }) // stop daemon go func() { time.Sleep(time.Millisecond * 1200) fmt.Println("stop daemon clean, at", timex.Now().DateFormat("ymdTH:i:s.v")) fc.StopDaemon() }() // wait for stop wg.Wait() files := fsutil.Glob("testdata/file_daemon_clean.log.*") dump.P(files) assert.NotEmpty(t, files) assert.Lt(t, len(files), makeNum) } func makeWaitCleanFiles(nameTpl string, makeNum int) { for i := 0; i < makeNum; i++ { fpath := fmt.Sprintf("testdata/%s.%03d", nameTpl, i) fmt.Println("make file:", fpath) _, err := fsutil.PutContents(fpath, []byte("test contents ...")) goutil.PanicErr(err) time.Sleep(time.Second) } fmt.Println("wait clean files:") err := fsutil.GlobWithFunc("./testdata/"+nameTpl+".*", func(fpath string) error { fi, err := os.Stat(fpath) 
goutil.PanicErr(err) fmt.Printf(" %s => mtime: %s\n", fpath, fi.ModTime().Format("060102T15:04:05")) return nil }) goutil.PanicErr(err) } ================================================ FILE: rotatefile/config.go ================================================ package rotatefile import ( "fmt" "os" "strconv" "time" "github.com/gookit/goutil/strutil" "github.com/gookit/goutil/timex" ) // // ---------------------------- rotate time ------------------------------- // type rotateLevel uint8 const ( levelDay rotateLevel = iota levelHour levelMin levelSec ) // RotateTime for a rotating file. unit is seconds. // // EveryDay: // - "error.log.20201223" // // EveryHour, Every30Min, EveryMinute: // - "error.log.20201223_1500" // - "error.log.20201223_1530" // - "error.log.20201223_1523" type RotateTime int // built in rotate time constants const ( EveryMonth RotateTime = 30 * timex.OneDaySec EveryDay RotateTime = timex.OneDaySec EveryHour RotateTime = timex.OneHourSec Every30Min RotateTime = 30 * timex.OneMinSec Every15Min RotateTime = 15 * timex.OneMinSec EveryMinute RotateTime = timex.OneMinSec EverySecond RotateTime = 1 // only use for tests ) // Interval get check interval time. unit is seconds. func (rt RotateTime) Interval() int64 { return int64(rt) } // FirstCheckTime for a rotated file. // - will automatically align the time from the start of each hour. func (rt RotateTime) FirstCheckTime(now time.Time) time.Time { interval := rt.Interval() switch rt.level() { case levelDay: return timex.DayEnd(now) case levelHour: // should check on H:59:59.500 return timex.HourStart(now).Add(timex.OneHour - 500*time.Millisecond) case levelMin: // eg: minutes=5 minutes := int(interval / 60) nextMin := now.Minute() + minutes // will rotate at next hour start. eg: now.Minute()=57, nextMin=62. 
if nextMin >= 60 { return timex.HourStart(now).Add(timex.OneHour) } // eg: now.Minute()=37, nextMin=42, will get nextDur=40 nextDur := time.Duration(nextMin).Round(time.Duration(minutes)) return timex.HourStart(now).Add(nextDur * time.Minute) default: // levelSec return now.Add(time.Duration(interval) * time.Second) } } // level for rotating time func (rt RotateTime) level() rotateLevel { switch { case rt >= timex.OneDaySec: return levelDay case rt >= timex.OneHourSec: return levelHour case rt >= EveryMinute: return levelMin default: return levelSec } } // TimeFormat get log file suffix format // // EveryDay: // - "error.log.20201223" // // EveryHour, Every30Min, EveryMinute: // - "error.log.20201223_1500" // - "error.log.20201223_1530" // - "error.log.20201223_1523" func (rt RotateTime) TimeFormat() (suffixFormat string) { suffixFormat = "20060102_1500" // default is levelHour switch rt.level() { case levelDay: suffixFormat = "20060102" case levelHour: suffixFormat = "20060102_1500" case levelMin: suffixFormat = "20060102_1504" case levelSec: suffixFormat = "20060102_150405" } return } // MarshalJSON implement the JSON Marshal interface [encoding/json.Marshaler] func (rt RotateTime) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf(`"%ds"`, rt.Interval())), nil } // UnmarshalJSON implement the JSON Unmarshal interface [encoding/json.Unmarshaler] func (rt *RotateTime) UnmarshalJSON(data []byte) error { s, err := strconv.Unquote(string(data)) if err != nil { return err } *rt, err = StringToRotateTime(s) return err } // String rotate type to string func (rt RotateTime) String() string { switch rt.level() { case levelDay: return fmt.Sprintf("Every %d Day", rt.Interval()/timex.OneDaySec) case levelHour: return fmt.Sprintf("Every %d Hours", rt.Interval()/timex.OneHourSec) case levelMin: return fmt.Sprintf("Every %d Minutes", rt.Interval()/timex.OneMinSec) default: // levelSec return fmt.Sprintf("Every %d Seconds", rt.Interval()) } } // StringToRotateTime parse 
and convert string to RotateTime func StringToRotateTime(s string) (RotateTime, error) { // is int value, try to parse as seconds if strutil.IsInt(s) { iVal := strutil.SafeInt(s) if iVal < 0 || iVal > timex.OneMonthSec*3 { return 0, fmt.Errorf("rotatefile: invalid rotate time: %s", s) } return RotateTime(iVal), nil } // parse time duration string. eg: "1h", "1m", "1d" rtDur, err := timex.ToDuration(s) if err != nil { return 0, err } return RotateTime(rtDur.Seconds()), nil } // // ---------------------------- RotateMode ------------------------------- // // RotateMode for a rotated file. 0: rename, 1: create type RotateMode uint8 const ( // ModeRename rotating file by rename. // // Example flow: // - always write to "error.log" // - rotating by rename it to "error.log.20201223" // - then re-create "error.log" ModeRename RotateMode = iota // ModeCreate rotating file by create a new file. // // Example flow: // - directly create a new file on each rotated time. eg: "error.log.20201223", "error.log.20201224" ModeCreate ) // String get string name func (m RotateMode) String() string { switch m { case ModeRename: return "rename" case ModeCreate: return "create" default: return "unknown" } } // MarshalJSON implement the JSON Marshal interface [encoding/json.Marshaler] func (m RotateMode) MarshalJSON() ([]byte, error) { return []byte(`"` + m.String() + `"`), nil } // UnmarshalJSON implement the JSON Unmarshal interface [encoding/json.Unmarshaler] func (m *RotateMode) UnmarshalJSON(data []byte) error { s, err := strconv.Unquote(string(data)) if err != nil { return err } *m, err = StringToRotateMode(s) return err } // StringToRotateMode convert string to RotateMode func StringToRotateMode(s string) (RotateMode, error) { switch s { case "rename": return ModeRename, nil case "create", "make": return ModeCreate, nil default: // is int value, try to parse as int if strutil.IsInt(s) { iVal := strutil.SafeInt(s) if iVal >= int(ModeRename) && iVal <= int(ModeCreate) { return 
RotateMode(iVal), nil } } return 0, fmt.Errorf("rotatefile: invalid rotate mode: %s", s) } } // // ---------------------------- Clocker ------------------------------- // // Clocker is the interface used for determine the current time type Clocker interface { Now() time.Time } // ClockFn func type ClockFn func() time.Time // Now implements the Clocker func (fn ClockFn) Now() time.Time { return fn() } // ConfigFn for setting config type ConfigFn func(c *Config) // Config struct for rotate dispatcher type Config struct { // Filepath the log file path, will be rotating. eg: "logs/error.log" Filepath string `json:"filepath" yaml:"filepath"` // FilePerm for create log file. default DefaultFilePerm FilePerm os.FileMode `json:"file_perm" yaml:"file_perm"` // RotateMode for rotate file. default ModeRename RotateMode RotateMode `json:"rotate_mode" yaml:"rotate_mode"` // MaxSize file contents max size, unit is bytes. // If is equals zero, disable rotate file by size // // default see DefaultMaxSize MaxSize uint64 `json:"max_size" yaml:"max_size"` // RotateTime the file rotating interval time, unit is seconds. // If is equals zero, disable rotate file by time // // default: EveryHour RotateTime RotateTime `json:"rotate_time" yaml:"rotate_time"` // CloseLock use sync lock on writing contents, rotating file. // // default: false CloseLock bool `json:"close_lock" yaml:"close_lock"` // BackupNum max number for keep old files. // // 0 is not limit, default is DefaultBackNum BackupNum uint `json:"backup_num" yaml:"backup_num"` // BackupTime max time for keep old files, unit is hours. // // 0 is not limit, default is DefaultBackTime BackupTime uint `json:"backup_time" yaml:"backup_time"` // CleanOnClose determines if the rotated log files should be cleaned up when close. CleanOnClose bool `json:"clean_on_close" yaml:"clean_on_close"` // Compress determines if the rotated log files should be compressed using gzip. // The default is not to perform compression. 
Compress bool `json:"compress" yaml:"compress"` // RenameFunc you can custom-build filename for rotate file by size. // // Example: // // c.RenameFunc = func(filepath string, rotateNum uint) string { // suffix := time.Now().Format("06010215") // // // eg: /tmp/error.log => /tmp/error.log.24032116_894136 // return filepath + fmt.Sprintf(".%s_%d", suffix, rotateNum) // } RenameFunc func(filePath string, rotateNum uint) string `json:"-" yaml:"-"` // TimeClock for a rotating file by time. TimeClock Clocker `json:"-" yaml:"-"` // DebugMode for debug on development. DebugMode bool `json:"debug_mode" yaml:"debug_mode"` } func (c *Config) backupDuration() time.Duration { if c.BackupTime < 1 { return 0 } return time.Duration(c.BackupTime) * time.Hour } // With more config setting func func (c *Config) With(fns ...ConfigFn) *Config { for _, fn := range fns { fn(c) } return c } // Create new Writer by config func (c *Config) Create() (*Writer, error) { return NewWriter(c) } // IsMode check rotate mode func (c *Config) IsMode(m RotateMode) bool { return c.RotateMode == m } var ( // DefaultFilePerm perm and flags for create log file DefaultFilePerm os.FileMode = 0664 // DefaultFileFlags for open log file DefaultFileFlags = os.O_CREATE | os.O_WRONLY | os.O_APPEND // DefaultTimeClockFn for create time DefaultTimeClockFn = ClockFn(func() time.Time { return time.Now() }) ) // NewDefaultConfig instance func NewDefaultConfig() *Config { return &Config{ MaxSize: DefaultMaxSize, RotateTime: EveryHour, BackupNum: DefaultBackNum, BackupTime: DefaultBackTime, // RenameFunc: DefaultFilenameFn, TimeClock: DefaultTimeClockFn, FilePerm: DefaultFilePerm, } } // NewConfig by file path, and can with custom setting func NewConfig(filePath string, fns ...ConfigFn) *Config { if len(fns) == 0 { return NewConfigWith(WithFilepath(filePath)) } return NewConfigWith(append(fns, WithFilepath(filePath))...) 
} // NewConfigWith custom func func NewConfigWith(fns ...ConfigFn) *Config { return NewDefaultConfig().With(fns...) } // EmptyConfigWith new empty config with custom func func EmptyConfigWith(fns ...ConfigFn) *Config { c := &Config{ // RenameFunc: DefaultFilenameFn, TimeClock: DefaultTimeClockFn, FilePerm: DefaultFilePerm, } return c.With(fns...) } // WithFilepath setting func WithFilepath(logfile string) ConfigFn { return func(c *Config) { c.Filepath = logfile } } // WithDebugMode setting for debug mode func WithDebugMode(c *Config) { c.DebugMode = true } // WithCompress setting for compress func WithCompress(c *Config) { c.Compress = true } // WithBackupNum setting for backup number func WithBackupNum(num uint) ConfigFn { return func(c *Config) { c.BackupNum = num } } ================================================ FILE: rotatefile/config_test.go ================================================ package rotatefile_test import ( "testing" "time" "github.com/gookit/goutil/dump" "github.com/gookit/goutil/jsonutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/goutil/timex" "github.com/gookit/goutil/x/fmtutil" "github.com/gookit/slog/rotatefile" ) func TestNewDefaultConfig(t *testing.T) { size := fmtutil.DataSize(1024 * 1024 * 10) dump.P(size) c := rotatefile.NewDefaultConfig() assert.Eq(t, rotatefile.DefaultMaxSize, c.MaxSize) } func TestNewConfig(t *testing.T) { cfg := rotatefile.NewConfig("testdata/test.log") dump.P(cfg) assert.Eq(t, rotatefile.DefaultBackNum, cfg.BackupNum) assert.Eq(t, rotatefile.DefaultBackTime, cfg.BackupTime) assert.Eq(t, rotatefile.EveryHour, cfg.RotateTime) assert.Eq(t, rotatefile.DefaultMaxSize, cfg.MaxSize) assert.Eq(t, rotatefile.ModeRename, cfg.RotateMode) cfg = rotatefile.EmptyConfigWith(func(c *rotatefile.Config) { c.Compress = true }) assert.True(t, cfg.Compress) assert.Eq(t, uint(0), cfg.BackupNum) assert.Eq(t, uint(0), cfg.BackupTime) cfg = &rotatefile.Config{} assert.Eq(t, rotatefile.ModeRename, cfg.RotateMode) err 
:= jsonutil.DecodeString(`{ "debug_mode": true, "rotate_mode": "create", "rotate_time": "1day" }`, cfg) dump.P(cfg) assert.NoErr(t, err) assert.Eq(t, rotatefile.ModeCreate, cfg.RotateMode) assert.Eq(t, "Every 1 Day", cfg.RotateTime.String()) } func TestRotateMode_cases(t *testing.T) { t.Run("String", func(t *testing.T) { assert.Eq(t, "rename", rotatefile.ModeRename.String()) assert.Eq(t, "create", rotatefile.ModeCreate.String()) assert.Eq(t, "unknown", rotatefile.RotateMode(9).String()) }) t.Run("UnmarshalJSON", func(t *testing.T) { rm := rotatefile.RotateMode(0) // UnmarshalJSON err := rm.UnmarshalJSON([]byte(`"create"`)) assert.NoErr(t, err) assert.Eq(t, rotatefile.ModeCreate, rm) rm = rotatefile.RotateMode(0) // use int err = rm.UnmarshalJSON([]byte(`"1"`)) assert.NoErr(t, err) assert.Eq(t, rotatefile.ModeCreate, rm) // error case assert.Err(t, rm.UnmarshalJSON([]byte(`create`))) }) t.Run("MarshalJSON", func(t *testing.T) { bs, err := rotatefile.ModeRename.MarshalJSON() assert.NoErr(t, err) assert.Eq(t, `"rename"`, string(bs)) bs, err = rotatefile.ModeCreate.MarshalJSON() assert.NoErr(t, err) assert.Eq(t, `"create"`, string(bs)) bs, err = rotatefile.RotateMode(35).MarshalJSON() assert.NoErr(t, err) assert.Eq(t, `"unknown"`, string(bs)) }) } func TestRotateTime_encode(t *testing.T) { rt := rotatefile.RotateTime(0) // UnmarshalJSON err := rt.UnmarshalJSON([]byte(`"1h"`)) assert.NoErr(t, err) assert.Eq(t, "Every 1 Hours", rt.String()) err = rt.UnmarshalJSON([]byte(`"3600"`)) assert.NoErr(t, err) assert.Eq(t, "Every 1 Hours", rt.String()) // error case assert.Err(t, rt.UnmarshalJSON([]byte(`a`))) // MarshalJSON bs, err := rt.MarshalJSON() assert.NoErr(t, err) assert.Eq(t, `"3600s"`, string(bs)) } func TestRotateTime_TimeFormat(t *testing.T) { now := timex.Now() rt := rotatefile.EveryDay assert.Eq(t, "20060102", rt.TimeFormat()) ft := rt.FirstCheckTime(now.T()) assert.True(t, now.DayEnd().Equal(ft)) rt = rotatefile.EveryHour assert.Eq(t, "20060102_1500", 
rt.TimeFormat()) rt = rotatefile.Every15Min assert.Eq(t, "20060102_1504", rt.TimeFormat()) ft = rt.FirstCheckTime(now.T()) assert.Gt(t, ft.Unix(), 0) rt = rotatefile.EverySecond assert.Eq(t, "20060102_150405", rt.TimeFormat()) ft = rt.FirstCheckTime(now.T()) assert.Eq(t, now.Unix()+rt.Interval(), ft.Unix()) } func TestRotateTime_String(t *testing.T) { assert.Eq(t, "Every 1 Day", rotatefile.EveryDay.String()) assert.Eq(t, "Every 1 Hours", rotatefile.EveryHour.String()) assert.Eq(t, "Every 1 Minutes", rotatefile.EveryMinute.String()) assert.Eq(t, "Every 1 Seconds", rotatefile.EverySecond.String()) assert.Eq(t, "Every 2 Hours", rotatefile.RotateTime(timex.OneHourSec*2).String()) assert.Eq(t, "Every 15 Minutes", rotatefile.RotateTime(timex.OneMinSec*15).String()) assert.Eq(t, "Every 5 Minutes", rotatefile.RotateTime(timex.OneMinSec*5).String()) assert.Eq(t, "Every 3 Seconds", rotatefile.RotateTime(3).String()) assert.Eq(t, "Every 2 Day", rotatefile.RotateTime(timex.OneDaySec*2).String()) } func TestRotateTime_FirstCheckTime_Round(t *testing.T) { // log rotate interval minutes logMin := 5 // now := timex.Now() // nowMin := now.Minute() nowMin := 37 // dur := time.Duration(now.Minute() + min) dur := time.Duration(nowMin + logMin) assert.Eq(t, time.Duration(40), dur.Round(time.Duration(logMin))) nowMin = 40 dur = time.Duration(nowMin + logMin) assert.Eq(t, time.Duration(45), dur.Round(time.Duration(logMin))) nowMin = 41 dur = time.Duration(nowMin + logMin) assert.Eq(t, time.Duration(45), dur.Round(time.Duration(logMin))) } ================================================ FILE: rotatefile/issues_test.go ================================================ package rotatefile_test import ( "testing" "time" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/mathutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog/internal" "github.com/gookit/slog/rotatefile" ) // https://github.com/gookit/slog/issues/138 // 日志按everyday自动滚动,文件名的日期对应的是前一天的日志 #138 func 
TestIssues_138(t *testing.T) { logfile := "testdata/iss138_rotate_day.log" mt := rotatefile.NewMockClock("2023-11-16 23:59:55") w, err := rotatefile.NewWriterWith(rotatefile.WithDebugMode, func(c *rotatefile.Config) { c.TimeClock = mt // c.MaxSize = 128 c.Filepath = logfile c.RotateTime = rotatefile.EveryDay }) assert.NoErr(t, err) defer w.MustClose() for i := 0; i < 15; i++ { dt := mt.Datetime() _, err = w.WriteString(dt + " [INFO] this is a log message, idx=" + mathutil.String(i) + "\n") assert.NoErr(t, err) // increase time mt.Add(time.Second * 3) // mt.Add(time.Millisecond * 300) } // Out: rotate_day.log, rotate_day.log.20231116 files := fsutil.Glob(internal.BuildGlobPattern(logfile)) assert.Len(t, files, 2) // check contents assert.True(t, fsutil.IsFile(logfile)) s := fsutil.ReadString(logfile) assert.StrContains(t, s, "2023-11-17 00:00") oldFile := internal.AddSuffix2path(logfile, "20231116") assert.True(t, fsutil.IsFile(oldFile)) s = fsutil.ReadString(oldFile) assert.StrContains(t, s, "2023-11-16 23:") } // https://github.com/gookit/slog/issues/150 // 日志轮转时间设置为分钟时,FirstCheckTime计算单位错误,导致生成预期外的多个日志文件 #150 func TestIssues_150(t *testing.T) { logfile := "testdata/iss150_rotate_min.log" mt := rotatefile.NewMockClock("2024-09-14 18:39:55") w, err := rotatefile.NewWriterWith(rotatefile.WithDebugMode, func(c *rotatefile.Config) { c.TimeClock = mt // c.MaxSize = 128 c.Filepath = logfile c.RotateTime = rotatefile.EveryMinute * 3 }) assert.NoErr(t, err) defer w.MustClose() for i := 0; i < 15; i++ { dt := mt.Datetime() _, err = w.WriteString(dt + " [INFO] this is a log message, idx=" + mathutil.String(i) + "\n") assert.NoErr(t, err) // increase time mt.Add(time.Minute * 1) } files := fsutil.Glob(internal.BuildGlobPattern(logfile)) assert.LenGt(t, files, 3) // check contents assert.True(t, fsutil.IsFile(logfile)) s := fsutil.ReadString(logfile) assert.StrContains(t, s, "2024-09-14 18:") // iss150_rotate_min.20240914_1842.log oldFile := internal.AddSuffix2path(logfile, 
"20240914_1842") assert.True(t, fsutil.IsFile(oldFile)) s = fsutil.ReadString(oldFile) assert.StrContains(t, s, "2024-09-14 18:41") } ================================================ FILE: rotatefile/rotatefile.go ================================================ // Package rotatefile provides simple file rotation, compression and cleanup. package rotatefile import ( "io" ) // RotateWriter interface type RotateWriter interface { io.WriteCloser Clean() error Flush() error Rotate() error Sync() error } const ( // OneMByte size OneMByte uint64 = 1024 * 1024 // DefaultMaxSize of a log file. default is 20M. DefaultMaxSize = 20 * OneMByte // DefaultBackNum default backup numbers for old files. DefaultBackNum uint = 20 // DefaultBackTime default backup time for old files. default keeps a week. DefaultBackTime uint = 24 * 7 ) ================================================ FILE: rotatefile/rotatefile_test.go ================================================ package rotatefile_test import ( "fmt" "log" "testing" "github.com/gookit/goutil" "github.com/gookit/goutil/fsutil" "github.com/gookit/slog/rotatefile" ) func TestMain(m *testing.M) { fmt.Println("TestMain: remove all test files in ./testdata") goutil.PanicErr(fsutil.RemoveSub("./testdata", fsutil.ExcludeNames(".keep"))) m.Run() } func ExampleNewWriter_on_other_logger() { logFile := "testdata/another_logger.log" writer, err := rotatefile.NewConfig(logFile).Create() if err != nil { panic(err) } log.SetOutput(writer) log.Println("log message") } ================================================ FILE: rotatefile/util.go ================================================ package rotatefile import ( "compress/gzip" "fmt" "io" "io/fs" "os" "time" "github.com/gookit/goutil" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/timex" ) const compressSuffix = ".gz" func printErrln(pfx string, err error) { if err != nil { _, _ = fmt.Fprintln(os.Stderr, pfx, err) } } func compressFile(srcPath, dstPath string) error { srcFile, 
err := os.OpenFile(srcPath, os.O_RDONLY, 0) if err != nil { return err } defer srcFile.Close() // create and open a gz file gzFile, err := fsutil.OpenTruncFile(dstPath) if err != nil { return err } defer gzFile.Close() srcSt, err := srcFile.Stat() if err != nil { return err } zw := gzip.NewWriter(gzFile) zw.Name = srcSt.Name() zw.ModTime = srcSt.ModTime() // do copy if _, err = io.Copy(zw, srcFile); err != nil { _ = zw.Close() return err } return zw.Close() } // TODO replace to fsutil.FileInfo type fileInfo struct { fs.FileInfo filePath string } // Path get file full path. eg: "/path/to/file.go" func (fi *fileInfo) Path() string { return fi.filePath } func newFileInfo(filePath string, fi fs.FileInfo) fileInfo { return fileInfo{filePath: filePath, FileInfo: fi} } // modTimeFInfos sorts by oldest time modified in the fileInfo. // eg: [old_220211, old_220212, old_220213] type modTimeFInfos []fileInfo // Less check func (fis modTimeFInfos) Less(i, j int) bool { return fis[j].ModTime().After(fis[i].ModTime()) } // Swap value func (fis modTimeFInfos) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } // Len get func (fis modTimeFInfos) Len() int { return len(fis) } // MockClocker mock clock for test type MockClocker struct { tt time.Time } // NewMockClock create a mock time instance from datetime string. func NewMockClock(datetime string) *MockClocker { nt := goutil.Must(timex.FromString(datetime)) return &MockClocker{tt: nt.Time} } // Now get current time. func (mt *MockClocker) Now() time.Time { return mt.tt } // Add progresses time by the given duration. func (mt *MockClocker) Add(d time.Duration) { mt.tt = mt.tt.Add(d) } // Datetime returns the current time in the format "2006-01-02 15:04:05". 
func (mt *MockClocker) Datetime() string { return mt.tt.Format("2006-01-02 15:04:05") } ================================================ FILE: rotatefile/util_test.go ================================================ package rotatefile import ( "errors" "testing" ) func TestPrintErrln(t *testing.T) { printErrln("test", nil) printErrln("test", errors.New("an error")) } ================================================ FILE: rotatefile/writer.go ================================================ package rotatefile import ( "fmt" "io/fs" "math/rand" "os" "path/filepath" "sort" "strings" "sync" "time" "github.com/gookit/goutil/errorx" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/mathutil" "github.com/gookit/goutil/strutil" "github.com/gookit/goutil/x/stdio" ) // Writer a flush, close, writer and support rotate file. // // refer https://github.com/flike/golog/blob/master/filehandler.go type Writer struct { // writer instance id, use for debug id string mu sync.RWMutex // config of the writer cfg *Config // current opened logfile file *os.File // current opened file path. NOTE it maybe not equals Config.Filepath path string // The original file dir path for the Config.Filepath fileDir string // The original name and ext information fileName, onlyName, fileExt string // logfile max backup time. equals Config.BackupTime * time.Hour backupDur time.Duration // oldFiles []string cleanCh chan struct{} stopCh chan struct{} // context use for rotating file by size written uint64 // written size rotateNum uint // rotate times number // ---- context use for rotating file by time ---- // the rotating file name suffix format. eg: "20210102", "20210102_1500" suffixFormat string checkInterval int64 // check interval seconds. nextRotatingAt time.Time // next rotating time } // NewWriter create rotate write with config and init it. 
func NewWriter(c *Config) (*Writer, error) {
	d := &Writer{cfg: c}

	if err := d.init(); err != nil {
		return nil, err
	}
	return d, nil
}

// NewWriterWith create a rotated writer with some settings.
func NewWriterWith(fns ...ConfigFn) (*Writer, error) {
	return NewWriter(NewConfigWith(fns...))
}

// init rotate dispatcher: split the configured file path into dir/name/ext,
// compute backup duration and the time-rotation schedule, then open the
// initial log file.
func (d *Writer) init() error {
	// unique instance id (pointer address), used in debug logs
	d.id = fmt.Sprintf("%p", d)
	logfile := d.cfg.Filepath

	// dirSep := filepath.Separator
	// d.fileDir = filepath.Dir(logfile)
	d.fileDir, d.fileName = filepath.Split(d.cfg.Filepath)
	d.fileExt = filepath.Ext(d.fileName)                    // eg: .log
	d.onlyName = strings.TrimSuffix(d.fileName, d.fileExt) // eg: error

	// removes the trailing separator on the dir path
	if ln := len(d.fileDir); ln > 1 && d.fileDir[ln-1] == filepath.Separator {
		d.fileDir = d.fileDir[:ln-1]
	}

	d.backupDur = d.cfg.backupDuration()
	d.suffixFormat = d.cfg.RotateTime.TimeFormat()
	d.checkInterval = d.cfg.RotateTime.Interval()

	// calc and storage next rotating time
	if d.checkInterval > 0 {
		now := d.cfg.TimeClock.Now()
		// next rotating time
		d.nextRotatingAt = d.cfg.RotateTime.FirstCheckTime(now)

		// in create mode the initial file already carries the time suffix
		if d.cfg.RotateMode == ModeCreate {
			// logfile = d.cfg.Filepath + "." + now.Format(d.suffixFormat)
			logfile = d.buildFilePath(now.Format(d.suffixFormat))
		}
	}

	// open the current file
	return d.openFile(logfile)
}

// Config gets a copy of the config (callers cannot mutate the writer's config).
func (d *Writer) Config() Config {
	return *d.cfg
}

// Flush sync data to disk. alias of Sync()
func (d *Writer) Flush() error {
	return d.file.Sync()
}

// Sync data to disk.
func (d *Writer) Sync() error {
	return d.file.Sync()
}

// Close the writer. will sync data to disk, then close the file handle.
// and it will stop the async clean backups.
// If Config.CleanOnClose is set, old backup files are cleaned first; any
// cleanup error is printed to stderr, not returned.
func (d *Writer) Close() error {
	if d.cfg.CleanOnClose {
		d.debugLog("cfg.CleanOnClose=true: start clean old files")
		printErrln("files-clear-onClose: cleanup old files error:", d.Clean())
	}
	return d.close(true)
}

// MustClose closes the writer like Close(), but any error is printed to
// stderr instead of being returned (it does not panic, despite the name).
func (d *Writer) MustClose() {
	printErrln("rotatefile: close writer -", d.Close())
}

// close syncs and closes the current file handle.
// When closeStopCh is true, also stops the async clean goroutine by closing stopCh.
func (d *Writer) close(closeStopCh bool) error {
	if err := d.file.Sync(); err != nil {
		return err
	}

	// stop the async clean backups
	if closeStopCh && d.stopCh != nil {
		d.debugLog("close stopCh for stop async clean old files")
		close(d.stopCh)
		// NOTE(review): this write is not guarded by d.mu while the clean
		// goroutine may still be selecting on the channel — confirm callers
		// serialize Close with Write/Rotate.
		d.stopCh = nil
	}
	return d.file.Close()
}

//
// ---------------------------------------------------------------------------
// write and rotate file
// ---------------------------------------------------------------------------
//

// WriteString implements the io.StringWriter
func (d *Writer) WriteString(s string) (n int, err error) {
	return d.Write([]byte(s))
}

// Write data to file. then check and do rotate file, then async clean backups
func (d *Writer) Write(p []byte) (n int, err error) {
	// do write data
	if n, err = d.doWrite(p); err != nil {
		return
	}

	// do rotate file
	err = d.doRotate()

	// async clean backup files. shouldClean(true) adds a random ~20% gate so
	// cleanup is not triggered on every write.
	if err == nil && d.shouldClean(true) {
		d.asyncClean()
	}
	return
}

// doWrite writes p to the current file and tracks the written byte count
// (used for size-based rotation). Locking is skipped when Config.CloseLock.
func (d *Writer) doWrite(p []byte) (n int, err error) {
	// if enable lock
	if !d.cfg.CloseLock {
		d.mu.Lock()
		defer d.mu.Unlock()
	}

	n, err = d.file.Write(p)
	if err == nil {
		// update size
		d.written += uint64(n)
	}
	return
}

// Rotate the file by config and async clean backups
func (d *Writer) Rotate() error {
	err := d.doRotate()

	// async clean backup files.
	if err == nil && d.shouldClean(true) {
		d.asyncClean()
	}
	return err
}

// do rotate the logfile by config. Size-based rotation is checked first,
// then time-based rotation. Locking is skipped when Config.CloseLock.
func (d *Writer) doRotate() (err error) {
	// if enable lock
	if !d.cfg.CloseLock {
		d.mu.Lock()
		defer d.mu.Unlock()
	}

	// do rotate a file by size
	if d.cfg.MaxSize > 0 && d.written >= d.cfg.MaxSize {
		err = d.rotatingBySize()
		if err != nil {
			return
		}
	}

	// do rotate a file by time. d.written > 0 avoids rotating empty files.
	if d.checkInterval > 0 && d.written > 0 {
		err = d.rotatingByTime()
	}
	return
}

// TIP: should only call on d.checkInterval > 0
//
// The backup suffix is formatted from the PREVIOUS deadline
// (d.nextRotatingAt), not from now — so the rotated file is named after the
// period it contains (see issue #138).
func (d *Writer) rotatingByTime() error {
	now := d.cfg.TimeClock.Now()
	if now.Before(d.nextRotatingAt) {
		return nil
	}

	// generate new file path.
	// eg: /tmp/error.log => /tmp/error.20220423_1600.log
	// file := d.cfg.Filepath + "." + d.nextRotatingAt.Format(d.suffixFormat)
	file := d.buildFilePath(d.nextRotatingAt.Format(d.suffixFormat))
	err := d.rotatingFile(file, false)

	// calc and storage next rotating time
	d.nextRotatingAt = d.nextRotatingAt.Add(time.Duration(d.checkInterval) * time.Second)
	return err
}

// rotatingBySize renames/creates a backup for the current file when it has
// grown past Config.MaxSize. The backup suffix mixes wall-clock digits with
// the rotation counter to keep names unique.
func (d *Writer) rotatingBySize() error {
	d.rotateNum++

	now := d.cfg.TimeClock.Now()
	// up: use now minutes + seconds as rotate number
	numStr := fmt.Sprintf("%d%d%d", now.Hour(), now.Minute(), now.Second())
	numInt := strutil.IntOr(numStr, 0) + now.Nanosecond()/1000
	rotateNum := uint(numInt) + d.rotateNum

	var bakFile string
	if d.cfg.IsMode(ModeCreate) {
		// eg: /tmp/error.log => /tmp/error.894136.log
		// eg: /tmp/error.20220423_1600.log => /tmp/error.20220423_1600_894136.log
		pathNoExt := d.path[:len(d.path)-len(d.fileExt)]
		bakFile = fmt.Sprintf("%s_%d%s", pathNoExt, rotateNum, d.fileExt)
	} else if d.cfg.RenameFunc != nil {
		// rename current to new file by custom RenameFunc
		// eg: /tmp/error.log => /tmp/error.163021_894136.log
		bakFile = d.cfg.RenameFunc(d.cfg.Filepath, rotateNum)
	} else {
		// eg: /tmp/error.log => /tmp/error.25031615_894136.log
		bakFile = d.buildFilePath(fmt.Sprintf("%s_%d", now.Format("06010215"), rotateNum))
	}

	// always rename current to a new file
	return d.rotatingFile(bakFile, true)
}

// rotatingFile closes the current log file and starts a new one.
// When rename is true (or mode is ModeRename) the current file is first
// renamed to bakFile; the writer then reopens the appropriate path and
// resets the written-bytes counter.
func (d *Writer) rotatingFile(bakFile string, rename bool) error {
	// close the current file
	if err := d.close(false); err != nil {
		return err
	}

	// record old files for clean.
	// d.oldFiles = append(d.oldFiles, bakFile)

	// rename current to a new file.
	if rename || d.cfg.RotateMode == ModeRename {
		if err := os.Rename(d.path, bakFile); err != nil {
			return err
		}
	}

	// filepath for reopening. In rename mode we always reopen the original
	// configured path; otherwise keep writing at the current path.
	logfile := d.path
	if d.cfg.RotateMode == ModeRename {
		logfile = d.cfg.Filepath
	}

	// reopen log file
	if err := d.openFile(logfile); err != nil {
		return err
	}

	// reset written
	d.written = 0
	return nil
}

//
// ---------------------------------------------------------------------------
// clean backup files
// ---------------------------------------------------------------------------
//

// check should clean old files by config. When withRand is true, an extra
// random ~20% gate is applied so per-write cleanup triggers only occasionally.
func (d *Writer) shouldClean(withRand bool) bool {
	cfgIsYes := d.cfg.BackupNum > 0 || d.cfg.BackupTime > 0
	if !withRand {
		return cfgIsYes
	}

	// 20% probability trigger clean
	return cfgIsYes && rand.Intn(100) < 20
}

// asyncClean cleans old files by config, on a background goroutine.
// NOTE: it is called AFTER Write/Rotate have released d.mu, not under lock.
func (d *Writer) asyncClean() { if !d.shouldClean(false) { return } // if already running, send a signal if d.cleanCh != nil { d.notifyClean() return } // add lock for deny concurrent clean d.mu.RLock() defer d.mu.RUnlock() // re-check d.cleanCh is not nil if d.cleanCh != nil { d.notifyClean() return } // init clean channel d.debugLog("INIT clean and stop channels for clean old files") d.cleanCh = make(chan struct{}) d.stopCh = make(chan struct{}) // start a goroutine to clean backups go func() { d.debugLog("START a goroutine consumer for clean old files") // consume the signal until stop for { select { case <-d.cleanCh: d.debugLog("receive signal - clean old files handling") printErrln("rotatefile: clean old files error:", d.doClean()) case <-d.stopCh: d.cleanCh = nil d.debugLog("STOP consumer for clean old files") return // stop clean } } }() } func (d *Writer) notifyClean() { select { case d.cleanCh <- struct{}{}: // notify clean old files d.debugLog("sent signal - start clean old files...") default: // skip on blocking d.debugLog("clean old files signal blocked, SKIP") } } // Clean old files by config func (d *Writer) Clean() (err error) { if d.cfg.BackupNum == 0 && d.cfg.BackupTime == 0 { return errorx.Err("clean: backupNum and backupTime are both 0") } // up: 单独运行清理,不需要设置 skipSeconds return d.doClean(0) } // do clean old files by config // // - skipSeconds: skip find files that are within the specified seconds func (d *Writer) doClean(skipSeconds ...int) (err error) { // oldFiles: xx.log.yy files, no gz file var oldFiles, gzFiles []fileInfo fileDir, fileName := d.fileDir, d.fileName curFileName := filepath.Base(d.path) // FIX: do not process recent changes to avoid conflicts skipSec := 30 if len(skipSeconds) > 0 { skipSec = skipSeconds[0] } limitTime := d.cfg.TimeClock.Now().Add(-time.Second * time.Duration(skipSec)) // find and clean old files d.debugLog("clean - find old files, match name:", fileName, ", in dir:", fileDir) err = fsutil.FindInDir(fileDir, 
func(fPath string, ent fs.DirEntry) error { fi, err := ent.Info() if err != nil { return err } // fix: exclude the current file if ent.Name() == curFileName { return nil } if strings.HasSuffix(ent.Name(), compressSuffix) { gzFiles = append(gzFiles, newFileInfo(fPath, fi)) } else if fi.ModTime().Before(limitTime) { oldFiles = append(oldFiles, newFileInfo(fPath, fi)) } return nil }, d.buildFilterFns(fileName)...) gzNum := len(gzFiles) oldNum := len(oldFiles) remNum := mathutil.Max(gzNum+oldNum-int(d.cfg.BackupNum), 0) d.debugLog("clean old files, gzNum:", gzNum, "oldNum:", oldNum, "remNum:", remNum) if remNum > 0 && d.cfg.BackupNum > 0 { // remove old gz files if gzNum > 0 { remNum, err = d.removeOldGzFiles(remNum, gzFiles) if err != nil { return err } } // remove old log files if remNum > 0 && oldNum > 0 { oldFiles, err = d.removeOldFiles(remNum, oldFiles) if err != nil { return err } } } if d.cfg.Compress && len(oldFiles) > 0 { d.debugLog("compress old normal files to gz files") err = d.compressFiles(oldFiles) } return } // remove old gz files func (d *Writer) removeOldGzFiles(remNum int, gzFiles []fileInfo) (rn int, err error) { gzNum := len(gzFiles) sort.Sort(modTimeFInfos(gzFiles)) // sort by mod-time d.debugLog("remove old gz files ...") for idx := 0; idx < gzNum; idx++ { d.debugLog("remove old gz file:", gzFiles[idx].filePath) if err = os.Remove(gzFiles[idx].filePath); err != nil { break } remNum-- if remNum == 0 { break } } if err != nil { return remNum, errorx.Wrap(err, "remove old gz file error") } return remNum, nil } // remove old log files func (d *Writer) removeOldFiles(remNum int, oldFiles []fileInfo) (files []fileInfo, err error) { // sort by mod-time, oldest at first. 
sort.Sort(modTimeFInfos(oldFiles)) d.debugLog("remove old normal files ...") var idx int oldNum := len(oldFiles) for idx = 0; idx < oldNum; idx++ { d.debugLog("remove old file:", oldFiles[idx].filePath) if err = os.Remove(oldFiles[idx].filePath); err != nil { break } remNum-- if remNum == 0 { break } } oldFiles = oldFiles[idx+1:] if err != nil { return nil, errorx.Wrap(err, "remove old file error") } return oldFiles, nil } // // --------------------------------------------------------------------------- // helper methods // --------------------------------------------------------------------------- // // open the current file. and set the d.file, d.path func (d *Writer) openFile(logfile string) error { file, err := fsutil.OpenFile(logfile, DefaultFileFlags, d.cfg.FilePerm) if err != nil { return err } d.path = logfile d.file = file return nil } // return eg. logs/error.20220423_1600.log func (d *Writer) buildFilePath(suffix string) string { fileName := d.onlyName + "." + suffix + d.fileExt return fmt.Sprintf("%s/%s", d.fileDir, fileName) } func (d *Writer) buildFilterFns(fileName string) []fsutil.FilterFunc { onlyName := d.onlyName filterFns := []fsutil.FilterFunc{ fsutil.OnlyFindFile, // filter by name. 
match pattern like: error.log.* eg: error.log.xx, error.log.xx.gz func(fPath string, ent fs.DirEntry) bool { // ok, _ := path.Match(fileName+".*", ent.Name()) if !strings.HasPrefix(ent.Name(), fileName) { // 自定义文件名 eg: error.log -> error.20220423_02.log return strings.HasPrefix(ent.Name(), onlyName) } return true }, } // filter by mod-time, clear expired files if d.cfg.BackupTime > 0 { cutTime := d.cfg.TimeClock.Now().Add(-d.backupDur) filterFns = append(filterFns, func(fPath string, ent fs.DirEntry) bool { fi, err := ent.Info() if err != nil { return false // skip, not handle } // collect un-expired if fi.ModTime().After(cutTime) { return true } // remove expired files d.debugLog("remove expired file:", fPath) printErrln("rotatefile: remove expired file error:", os.Remove(fPath)) return false }) } return filterFns } func (d *Writer) compressFiles(oldFiles []fileInfo) error { for _, fi := range oldFiles { err := compressFile(fi.filePath, fi.filePath+compressSuffix) if err != nil { return errorx.Wrap(err, "compress old file error") } // remove an old log file d.debugLog("compress and rm old file:", fi.filePath) if err = os.Remove(fi.filePath); err != nil { return errorx.Wrap(err, "remove file error after compress") } } return nil } // Debug print debug message on development func (d *Writer) debugLog(vs ...any) { if d.cfg.DebugMode { stdio.WriteString("[rotatefile.DEBUG] ID:" + d.id + " | " + fmt.Sprintln(vs...)) } } ================================================ FILE: rotatefile/writer_test.go ================================================ package rotatefile_test import ( "path/filepath" "testing" "time" "github.com/gookit/goutil/dump" "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/mathutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog/internal" "github.com/gookit/slog/rotatefile" ) func TestNewWriter(t *testing.T) { testFile := "testdata/test_writer.log" assert.NoErr(t, fsutil.DeleteIfExist(testFile)) w, err := 
rotatefile.NewConfig(testFile).Create()
	assert.NoErr(t, err)

	c := w.Config()
	// dump.P(c)
	assert.Eq(t, c.MaxSize, rotatefile.DefaultMaxSize)

	_, err = w.WriteString("info log message\n")
	assert.NoErr(t, err)
	assert.True(t, fsutil.IsFile(testFile))

	assert.NoErr(t, w.Sync())
	assert.NoErr(t, w.Flush())
	assert.NoErr(t, w.Close())

	w, err = rotatefile.NewWriterWith(rotatefile.WithFilepath(testFile))
	assert.NoErr(t, err)
	assert.Eq(t, w.Config().Filepath, testFile)
}

// verifies ModeCreate: writes go to a suffixed file, never to the bare logfile path.
func TestWriter_Rotate_modeCreate(t *testing.T) {
	logfile := "testdata/mode_create.log"

	c := rotatefile.NewConfig(logfile)
	c.RotateMode = rotatefile.ModeCreate

	wr, err := c.Create()
	assert.NoErr(t, err)

	_, err = wr.WriteString("[INFO] this is a log message\n")
	assert.NoErr(t, err)
	assert.False(t, fsutil.IsFile(logfile))

	ls, err := filepath.Glob("testdata/mode_create*")
	assert.NoErr(t, err)
	assert.Len(t, ls, 1)

	for i := 0; i < 20; i++ {
		_, err = wr.WriteString("[INFO] this is a log message, idx=" + mathutil.String(i) + "\n")
		assert.NoErr(t, err)
	}

	// test clean and backup
	c.BackupNum = 2
	c.MaxSize = 128
	err = wr.Rotate()
	assert.NoErr(t, err)

	_, err = wr.WriteString("hi, rotated\n")
	assert.NoErr(t, err)
}

// verifies time-based rotation: writes slowly across the 2s RotateTime window.
func TestWriter_rotateByTime(t *testing.T) {
	logfile := "testdata/rotate-by-time.log"
	c := rotatefile.NewConfig(logfile).With(func(c *rotatefile.Config) {
		c.DebugMode = true
		c.Compress = true
		c.CleanOnClose = true
		c.RotateTime = rotatefile.EverySecond * 2
	})

	w, err := c.Create()
	assert.NoErr(t, err)
	defer func() {
		_ = w.Close()
	}()

	for i := 0; i < 5; i++ {
		_, err = w.WriteString("[INFO] this is a log message, idx=" + mathutil.String(i) + "\n")
		assert.NoErr(t, err)
		time.Sleep(time.Second)
	}

	files := fsutil.Glob(internal.BuildGlobPattern(logfile))
	dump.P(files)
}

func TestWriter_Clean(t *testing.T) {
	logfile := "testdata/writer_clean.log"
	c := rotatefile.NewConfig(logfile)
	c.MaxSize = 128 // will rotate by size

	wr, err := c.Create()
	assert.NoErr(t, err)
	defer func() {
		_ = wr.Close()
	}()

	for i := 0; i < 20; i++ {
		_, err = wr.WriteString("[INFO] this is a log message, idx=" + mathutil.String(i) + "\n")
		assert.NoErr(t, err)
	}

	assert.True(t, fsutil.IsFile(logfile))
	_, err = wr.WriteString("hi\n")
	assert.NoErr(t, err)

	files := fsutil.Glob(internal.BuildGlobPattern(logfile))
	dump.P(files)

	// test clean error
	t.Run("clean error", func(t *testing.T) {
		c.BackupNum = 0
		c.BackupTime = 0
		assert.Err(t, wr.Clean())
	})

	// test clean and compress backup
	t.Run("clean and compress", func(t *testing.T) {
		c.BackupNum = 2
		c.Compress = true
		err = wr.Clean()
		assert.NoErr(t, err)

		files := fsutil.Glob(internal.BuildGlobPattern(logfile))
		assert.Lt(t, 2, len(files))
	})
}

// test writer compress
func TestWriter_Compress(t *testing.T) {
	logfile := "testdata/test_compress.log"
	c := rotatefile.NewConfig(logfile)
	c.MaxSize = 128 // will rotate by size
	c.With(rotatefile.WithDebugMode)

	wr, err := c.Create()
	assert.NoErr(t, err)

	for i := 0; i < 20; i++ {
		_, err = wr.WriteString("[INFO] this is a log message, idx=" + mathutil.String(i) + "\n")
		assert.NoErr(t, err)
	}

	assert.True(t, fsutil.IsFile(logfile))
	_, err = wr.WriteString("hi\n")
	assert.NoErr(t, err)
	wr.MustClose()

	files := fsutil.Glob(internal.BuildGlobPattern(logfile))
	assert.NotEmpty(t, files)
	dump.P(files)

	// test clean and compress backup
	t.Run("compress backup", func(t *testing.T) {
		c := rotatefile.NewConfig(logfile,
			rotatefile.WithDebugMode,
			rotatefile.WithCompress,
			rotatefile.WithBackupNum(2),
		)
		wr, err := c.Create()
		assert.NoErr(t, err)
		defer wr.MustClose()

		err = wr.Clean()
		assert.NoErr(t, err)

		files := fsutil.Glob(internal.BuildGlobPattern(logfile))
		assert.Lt(t, 2, len(files))
		dump.P(files)
	})
}

// TODO set github.com/benbjohnson/clock for mock clock
//
// constantClock is a fixed-time clock implementation for tests.
type constantClock time.Time

func (c constantClock) Now() time.Time { return time.Time(c) }
func (c constantClock) NewTicker(d time.Duration) *time.Ticker {
	return &time.Ticker{}
}

================================================
FILE: slog.go
================================================

/*
Package slog Lightweight, extensible, configurable logging library written in Go.

Source code and other details for the project are available at GitHub:

	https://github.com/gookit/slog

Quick usage:

	package main

	import (
		"github.com/gookit/slog"
	)

	func main() {
		slog.Info("info log message")
		slog.Warn("warning log message")
		slog.Infof("info log %s", "message")
		slog.Debugf("debug %s", "message")
	}

More usage please see README.
*/
package slog

import (
	"context"
	"time"

	"github.com/gookit/goutil"
)

//
// ------------------------------------------------------------
// Global std logger operate
// ------------------------------------------------------------
//

// std logger is a SugaredLogger.
// It is directly available without any additional configuration
var std = NewStdLogger()

// Std get std logger
func Std() *SugaredLogger { return std }

// Reset the std logger and reset exit handlers
func Reset() {
	ResetExitHandlers(true)
	// new std
	std = NewStdLogger()
}

// Configure the std logger
func Configure(fn func(l *SugaredLogger)) { std.Config(fn) }

// SetExitFunc to the std logger
func SetExitFunc(fn func(code int)) { std.ExitFunc = fn }

// Exit runs all exit handlers and then terminates the program using os.Exit(code)
func Exit(code int) { std.Exit(code) }

// Close logger, flush and close all handlers.
//
// IMPORTANT: please call Close() before app exit.
func Close() error { return std.Close() }

// MustClose logger, flush and close all handlers.
//
// IMPORTANT: please call Close() before app exit.
func MustClose() { goutil.PanicErr(Close()) }

// Flush log messages
func Flush() error { return std.Flush() }

// MustFlush log messages
func MustFlush() { goutil.PanicErr(Flush()) }

// FlushTimeout flush logs with timeout.
func FlushTimeout(timeout time.Duration) { std.FlushTimeout(timeout) }

// FlushDaemon run flush handle on daemon.
//
// Usage please see slog_test.ExampleFlushDaemon()
func FlushDaemon(onStops ...func()) { std.FlushDaemon(onStops...) }

// StopDaemon stop flush daemon
func StopDaemon() { std.StopDaemon() }

// SetLogLevel max level for the std logger
func SetLogLevel(l Level) { std.Level = l }

// SetLevelByName set max log level by name. eg: "info", "debug" ...
func SetLevelByName(name string) { std.Level = LevelByName(name) }

// SetFormatter to std logger
func SetFormatter(f Formatter) { std.Formatter = f }

// GetFormatter of the std logger
func GetFormatter() Formatter { return std.Formatter }

// AddHandler to the std logger
func AddHandler(h Handler) { std.AddHandler(h) }

// PushHandler to the std logger
func PushHandler(h Handler) { std.AddHandler(h) }

// AddHandlers to the std logger
func AddHandlers(hs ...Handler) { std.AddHandlers(hs...) }

// PushHandlers to the std logger
func PushHandlers(hs ...Handler) { std.PushHandlers(hs...) }

// AddProcessor to the logger
func AddProcessor(p Processor) { std.AddProcessor(p) }

// AddProcessors to the logger
func AddProcessors(ps ...Processor) { std.AddProcessors(ps...) }

// -------------------------- New sub-logger -----------------------------

// NewSub returns a new SubLogger on the std logger.
func NewSub() *SubLogger { return NewSubWith(std.Logger) }

// -------------------------- New record with log data, fields -----------------------------

// WithExtra new record with extra data
func WithExtra(ext M) *Record { return std.WithExtra(ext) }

// WithData new record with data
func WithData(data M) *Record { return std.WithData(data) }

// WithValue new record with data value
func WithValue(key string, value any) *Record { return std.WithValue(key, value) }

// WithField new record with field.
//
// **NOTE**: add field need config Formatter template fields.
func WithField(name string, value any) *Record { return std.WithField(name, value) }

// WithFields new record with fields
//
// **NOTE**: add field need config Formatter template fields.
func WithFields(fields M) *Record { return std.WithFields(fields) }

// WithContext new record with context
func WithContext(ctx context.Context) *Record { return std.WithContext(ctx) }

// region Add log messages

// -------------------------- Add log messages with level -----------------------------
//
// NOTE: args are passed to std.log/logf as a single []any slice, not spread.

// Log logs a message with level
func Log(level Level, args ...any) { std.log(level, args) }

// Print logs a message at level PrintLevel
func Print(args ...any) { std.log(PrintLevel, args) }

// Println logs a message at level PrintLevel
func Println(args ...any) { std.log(PrintLevel, args) }

// Printf logs a message at level PrintLevel
func Printf(format string, args ...any) { std.logf(PrintLevel, format, args) }

// Trace logs a message at level TraceLevel
func Trace(args ...any) { std.log(TraceLevel, args) }

// Tracef logs a message at level TraceLevel
func Tracef(format string, args ...any) { std.logf(TraceLevel, format, args) }

// TraceCtx logs a message at level TraceLevel with context
func TraceCtx(ctx context.Context, args ...any) { std.logCtx(ctx, TraceLevel, args) }

// TracefCtx logs a message at level TraceLevel with context
func TracefCtx(ctx context.Context, format string, args ...any) {
	std.logfCtx(ctx, TraceLevel, format, args)
}

// Debug logs a message at level DebugLevel
func Debug(args ...any) { std.log(DebugLevel, args) }

// Debugf logs a message at level DebugLevel
func Debugf(format string, args ...any) { std.logf(DebugLevel, format, args) }

// DebugCtx logs a message at level DebugLevel with context
func DebugCtx(ctx context.Context, args ...any) { std.logCtx(ctx, DebugLevel, args) }

// DebugfCtx logs a message at level DebugLevel with context
func DebugfCtx(ctx context.Context, format string, args ...any) {
	std.logfCtx(ctx, DebugLevel, format, args)
}

// Info logs a message at level InfoLevel
func Info(args ...any) { std.log(InfoLevel, args) }

// Infof logs a message at level InfoLevel
func Infof(format string, args ...any) { std.logf(InfoLevel, format, args) }

// InfoCtx logs a message at level InfoLevel with context
func InfoCtx(ctx context.Context, args ...any) { std.logCtx(ctx, InfoLevel, args) }

// InfofCtx logs a message at level InfoLevel with context
func InfofCtx(ctx context.Context, format string, args ...any) {
	std.logfCtx(ctx, InfoLevel, format, args)
}

// Notice logs a message at level NoticeLevel
func Notice(args ...any) { std.log(NoticeLevel, args) }

// Noticef logs a message at level NoticeLevel
func Noticef(format string, args ...any) { std.logf(NoticeLevel, format, args) }

// NoticeCtx logs a message at level NoticeLevel with context
func NoticeCtx(ctx context.Context, args ...any) { std.logCtx(ctx, NoticeLevel, args) }

// NoticefCtx logs a message at level NoticeLevel with context
func NoticefCtx(ctx context.Context, format string, args ...any) {
	std.logfCtx(ctx, NoticeLevel, format, args)
}

// Warn logs a message at level WarnLevel
func Warn(args ...any) { std.log(WarnLevel, args) }

// Warnf logs a message at level WarnLevel
func Warnf(format string, args ...any) { std.logf(WarnLevel, format, args) }

// WarnCtx logs a message at level Warn with a context
func WarnCtx(ctx context.Context, args ...any) { std.logCtx(ctx, WarnLevel, args) }

// WarnfCtx logs a message at level Warn with a context
func WarnfCtx(ctx context.Context, format string, args ...any) {
	std.logfCtx(ctx, WarnLevel, format, args)
}

// Error logs a message at level Error
func Error(args ...any) { std.log(ErrorLevel, args) }

// Errorf logs a message at level Error
func Errorf(format string, args ...any) { std.logf(ErrorLevel, format, args) }

// ErrorT logs a error type at level Error
func ErrorT(err error) {
	if err != nil {
		std.log(ErrorLevel, []any{err})
	}
}

// ErrorCtx logs a message at level Error with context
func ErrorCtx(ctx context.Context, args ...any) { std.logCtx(ctx, ErrorLevel, args) }

// ErrorfCtx logs a message at level Error with context
func ErrorfCtx(ctx context.Context, format string, args ...any) {
	std.logfCtx(ctx, ErrorLevel, format, args)
}

// EStack logs a error message and with call stack.
// func EStack(args ...any) {
//	std.WithExtra(map[string]any{"stack": goinfo.GetCallerInfo(2)}).
//		log(ErrorLevel, args)
// }

// Fatal logs a message at level Fatal
func Fatal(args ...any) { std.log(FatalLevel, args) }

// Fatalf logs a message at level Fatal
func Fatalf(format string, args ...any) { std.logf(FatalLevel, format, args) }

// FatalErr logs a message at level Fatal on err is not nil
func FatalErr(err error) {
	if err != nil {
		std.log(FatalLevel, []any{err})
	}
}

// FatalCtx logs a message at level Fatal with context
func FatalCtx(ctx context.Context, args ...any) { std.logCtx(ctx, FatalLevel, args) }

// FatalfCtx logs a message at level Fatal with context
func FatalfCtx(ctx context.Context, format string, args ...any) {
	std.logfCtx(ctx, FatalLevel, format, args)
}

// Panic logs a message at level Panic
func Panic(args ...any) { std.log(PanicLevel, args) }

// Panicf logs a message at level Panic
func Panicf(format string, args ...any) { std.logf(PanicLevel, format, args) }

// PanicErr logs a message at level Panic on err is not nil
func PanicErr(err error) {
	if err != nil {
		std.log(PanicLevel, []any{err})
	}
}

// PanicCtx logs a message at level panic with context
func PanicCtx(ctx context.Context, args ...any) { std.logCtx(ctx, PanicLevel, args) }

// PanicfCtx logs a message at level panic with context
func PanicfCtx(ctx context.Context, format string, args ...any) {
	std.logfCtx(ctx, PanicLevel, format, args)
}

================================================
FILE: slog_test.go
================================================

package slog_test

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/gookit/goutil/byteutil"
	"github.com/gookit/goutil/errorx"
	"github.com/gookit/goutil/testutil"
	"github.com/gookit/goutil/testutil/assert"
	"github.com/gookit/goutil/timex"
	"github.com/gookit/slog"
	"github.com/gookit/slog/handler"
)

var
doNothing = func(code int) {
	// do nothing
}

func TestStd(t *testing.T) {
	defer slog.Reset()
	assert.Eq(t, "stdLogger", slog.Std().Name())

	_, ok := slog.GetFormatter().(*slog.TextFormatter)
	assert.True(t, ok)

	slog.SetLogLevel(slog.WarnLevel)
	slog.SetFormatter(slog.NewJSONFormatter())
	assert.True(t, slog.Std().IsHandling(slog.WarnLevel))
	assert.True(t, slog.Std().IsHandling(slog.ErrorLevel))
	assert.False(t, slog.Std().IsHandling(slog.InfoLevel))

	_, ok = slog.GetFormatter().(*slog.JSONFormatter)
	assert.True(t, ok)

	buf := new(bytes.Buffer)
	slog.Std().ExitFunc = func(code int) {
		buf.WriteString("Exited,")
		buf.WriteString(strconv.Itoa(code))
	}

	slog.Exit(34)
	assert.Eq(t, "Exited,34", buf.String())
}

func TestTextFormatNoColor(t *testing.T) {
	defer slog.Reset()
	slog.Configure(func(l *slog.SugaredLogger) {
		f := l.Formatter.(*slog.TextFormatter)
		f.EnableColor = false
		l.DoNothingOnPanicFatal()
	})

	printLogs("print log message")
	printfLogs("print log with %s", "params")

	assert.NoErr(t, slog.Std().FlushAll())
	assert.NoErr(t, slog.Std().Close())
}

func TestFlushDaemon(t *testing.T) {
	defer slog.Reset()

	buf := byteutil.NewBuffer()
	slog.Configure(func(l *slog.SugaredLogger) {
		l.FlushInterval = timex.Millisecond * 100
		l.Output = buf
	})

	wg := sync.WaitGroup{}
	wg.Add(1)
	go slog.FlushDaemon(func() {
		fmt.Println("flush daemon stopped")
		wg.Done()
	})

	go func() {
		// mock app running
		time.Sleep(time.Second * 1)

		// stop daemon
		fmt.Println("stop flush daemon")
		slog.StopDaemon()
	}()

	slog.Info("print log message")
	wg.Wait()
	fmt.Print(buf.ResetGet())
}

func TestFlushTimeout(t *testing.T) {
	defer slog.Reset()
	slog.Info("print log message")
	slog.NewSub().KeepData(map[string]any{"key": "value"}).Warn("test message")

	slog.FlushTimeout(timex.Second * 1)
	slog.MustFlush()
}

func TestNewSugaredLogger(t *testing.T) {
	buf := byteutil.NewBuffer()
	l := slog.NewSugared(buf, slog.DebugLevel, func(sl *slog.SugaredLogger) {
		sl.SetName("test")
		sl.ReportCaller = true
		sl.CallerFlag = slog.CallerFlagFcLine
	})

	l.Debug("debug message")
	l.Info("info message")

	s := buf.ResetAndGet()
	assert.StrContains(t, s, "debug message")

	l = slog.NewStd(func(sl *slog.SugaredLogger) {
		sl.SetName("test")
		sl.ReportCaller = true
		sl.CallerFlag = slog.CallerFlagFunc
	})
	l.Info("info message1")
}

// logTest embeds SugaredLogger to verify promoted methods still log correctly.
type logTest struct {
	*slog.SugaredLogger
}

func (l logTest) testPrint() {
	l.Logger.Info("print testing")
}

func TestTextFormatWithColor(t *testing.T) {
	defer slog.Reset()
	slog.Configure(func(l *slog.SugaredLogger) {
		l.Level = slog.TraceLevel
		l.DoNothingOnPanicFatal()
	})

	printLogs("this is a simple log message")
	fmt.Println()
	slog.Std().Trace("this is a simple log message")

	lt := &logTest{slog.Std()}
	lt.testPrint()
	fmt.Println()

	slog.GetFormatter().(*slog.TextFormatter).SetTemplate(slog.NamedTemplate)
	printfLogs("print log with %s", "params")
	fmt.Println()

	tpl := "[{{datetime}}] [{{channel}}] [{{level}}] [{{func}}] {{message}} {{data}} {{extra}}\n"
	slog.GetFormatter().(*slog.TextFormatter).SetTemplate(tpl)
	printfLogs("print log with %s", "params")

	lt = &logTest{
		slog.Std(),
	}
	lt.testPrint()
}

// printLogs exercises every non-format global log function with one message.
func printLogs(msg string) {
	slog.Log(slog.TraceLevel, msg)
	slog.Print(msg)
	slog.Println(msg)
	slog.Trace(msg)
	slog.Debug(msg)
	slog.Info(msg)
	slog.Notice(msg)
	slog.Warn(msg)
	slog.Error(msg)
	slog.Fatal(msg)
	slog.FatalErr(errorx.Rawf("Fatal Err: %s", msg))
	slog.Panic(msg)
	slog.PanicErr(errorx.Rawf("Panic Err: %s", msg))
	slog.ErrorT(errors.New(msg))
	slog.ErrorT(errorx.Newf("Traced Err: %s", msg))
}

// printfLogs exercises every format-style global log function.
func printfLogs(msg string, args ...any) {
	slog.Printf(msg, args...)
	slog.Tracef(msg, args...)
	slog.Debugf(msg, args...)
	slog.Infof(msg, args...)
	slog.Noticef(msg, args...)
	slog.Warnf(msg, args...)
	slog.Errorf(msg, args...)
	slog.Panicf(msg, args...)
	slog.Fatalf(msg, args...)
}

func TestSetFormatter_jsonFormat(t *testing.T) {
	defer slog.Reset()
	slog.SetLevelByName("trace")
	slog.SetFormatter(slog.NewJSONFormatter())

	th := newTestHandler()
	th.SetFormatter(slog.NewJSONFormatter().Configure(func(f *slog.JSONFormatter) {
		f.Fields = slog.NoTimeFields
	}))
	slog.PushHandler(th)
	assert.Eq(t, 2, slog.Std().HandlersNum())

	slog.Info("info log message1")
	slog.Warn("warning log message2")
	s := th.ResetGet()
	assert.StrContains(t, s, `"level":"INFO"`)
	assert.StrContains(t, s, `info log message1`)
	assert.StrContains(t, s, `"level":"WARNING"`)
	assert.StrContains(t, s, `warning log message2`)

	// WithData
	slog.WithData(slog.M{
		"key0": 134,
		"key1": "abc",
	}).Infof("info log %s", "message")
	s = th.ResetGet()
	assert.StrContains(t, s, `"key1":"abc"`)

	// reused record
	r := slog.WithFields(slog.M{
		"category": "service",
		"IP":       "127.0.0.1",
	}).Reused()
	r.Infof("info %s", "message")
	r.Debugf("debug %s", "message")
	r.Release()
	s = th.ResetGet()
	assert.StrContains(t, s, `"category"`)
	assert.StrCount(t, s, `"127.0.0.1"`, 2)

	// reused record
	r = slog.WithField("category", "app-service").Reused()
	r.Infof("info %s", "message")
	r.Debugf("debug %s", "message")
	r.Release()
	s = th.ResetGet()
	assert.StrContains(t, s, `"category"`)
	assert.StrCount(t, s, `"app-service"`, 2)

	// sub logger
	sub := slog.NewSub().KeepField("app", "order")
	sub.Trace("trace message")
	sub.Print("print message")
	sub.Release()
	s = th.ResetGet()
	assert.StrContains(t, s, `"app":"order"`)
	assert.StrCount(t, s, `"app":"order"`, 2)

	// WithContext
	ctx := context.WithValue(context.Background(), "ctxField", "ctx1-value")
	slog.AddProcessor(slog.CtxKeysProcessor("fields", "ctxField"))
	slog.WithContext(ctx).Print("print message with ctx")
	s = th.ResetGet()
	assert.StrContains(t, s, "print message with ctx")
	assert.StrContains(t, s, "ctxField")
}

func TestAddHandler(t *testing.T) {
	defer slog.Reset()
	slog.AddHandler(handler.NewConsoleHandler(slog.AllLevels))

	h2 := handler.NewConsoleHandler(slog.AllLevels)
	h2.SetFormatter(slog.NewJSONFormatter().Configure(func(f *slog.JSONFormatter) {
		f.Aliases = slog.StringMap{
			"level":   "levelName",
			"message": "msg",
			"data":    "params",
		}
	}))
	slog.AddHandlers(h2)

	slog.Infof("info %s", "message")
}

func TestWithExtra(t *testing.T) {
	defer slog.Reset()
	th := newTestHandler()
	slog.AddHandler(th)

	slog.WithExtra(slog.M{"ext1": "val1"}).
		AddValue("key1", "val2").
		Info("info message")
	s := th.ResetGet()
	assert.StrContains(t, s, `ext1:val1`)
	assert.StrContains(t, s, `{key1:val2}`)

	slog.WithValue("key1", "val2").Info("info message")
	s = th.ResetGet()
	assert.StrContains(t, s, `{key1:val2}`)
}

func TestAddProcessor(t *testing.T) {
	defer slog.Reset()

	buf := new(bytes.Buffer)
	slog.Configure(func(logger *slog.SugaredLogger) {
		logger.Level = slog.TraceLevel
		logger.Output = buf
		logger.Formatter = slog.NewJSONFormatter()
	})

	slog.AddProcessor(slog.AddHostname())
	slog.Trace("Trace message")
	slog.Tracef("Tracef %s", "message")

	str := buf.String()
	buf.Reset()
	fmt.Println(str)
	assert.Contains(t, str, `"hostname":`)
	assert.Contains(t, str, "Trace message")
	assert.Contains(t, str, "Tracef message")

	slog.AddProcessors(slog.ProcessorFunc(func(r *slog.Record) {
		r.AddField("newField", "newValue")
	}))
	slog.Debug("Debug message")
	slog.Debugf("Debugf %s", "message")

	str = buf.String()
	buf.Reset()
	assert.Contains(t, str, `"newField":"newValue"`)
	assert.Contains(t, str, "Debug message")
	assert.Contains(t, str, "Debugf message")
}

func TestPrependExitHandler(t *testing.T) {
	defer slog.Reset()
	assert.Len(t, slog.ExitHandlers(), 0)

	buf := new(bytes.Buffer)
	slog.PrependExitHandler(func() {
		buf.WriteString("HANDLER1-")
	})
	slog.PrependExitHandler(func() {
		buf.WriteString("HANDLER2-")
	})
	assert.Len(t, slog.ExitHandlers(), 2)

	slog.SetExitFunc(func(code int) {
		buf.WriteString("Exited")
	})
	slog.Exit(23)
	// prepended handlers run in reverse registration order
	assert.Eq(t, "HANDLER2-HANDLER1-Exited", buf.String())
}

func TestRegisterExitHandler(t *testing.T) {
	defer slog.Reset()
	assert.Len(t, slog.ExitHandlers(), 0)

	buf := new(bytes.Buffer)
	slog.RegisterExitHandler(func() {
		buf.WriteString("HANDLER1-")
	})
	slog.RegisterExitHandler(func() {
		buf.WriteString("HANDLER2-")
	})
	// prepend
	slog.PrependExitHandler(func() {
		buf.WriteString("HANDLER3-")
	})
	assert.Len(t, slog.ExitHandlers(), 3)

	slog.SetExitFunc(func(code int) {
		buf.WriteString("Exited")
	})
	slog.Exit(23)
	assert.Eq(t, "HANDLER3-HANDLER1-HANDLER2-Exited", buf.String())
}

func TestExitHandlerWithError(t *testing.T) {
	defer slog.Reset()
	assert.Len(t, slog.ExitHandlers(), 0)

	slog.RegisterExitHandler(func() {
		panic("test error")
	})
	slog.SetExitFunc(func(code int) {})

	testutil.RewriteStderr()
	slog.Exit(23)
	str := testutil.RestoreStderr()
	assert.Eq(t, "slog: run exit handler(global) recovered, error: test error\n", str)
}

func TestLogger_ExitHandlerWithError(t *testing.T) {
	l := slog.NewWithConfig(func(l *slog.Logger) {
		l.ExitFunc = doNothing
	})

	assert.Len(t, l.ExitHandlers(), 0)
	l.RegisterExitHandler(func() {
		panic("test error")
	})

	testutil.RewriteStderr()
	l.Exit(23)
	str := testutil.RestoreStderr()
	assert.Eq(t, "slog: run exit handler recovered, error: test error\n", str)
}

func TestLogger_PrependExitHandler(t *testing.T) {
	l := slog.NewWithConfig(func(l *slog.Logger) {
		l.ExitFunc = doNothing
	})

	assert.Len(t, l.ExitHandlers(), 0)
	l.PrependExitHandler(func() {
		panic("test error2")
	})

	testutil.RewriteStderr()
	l.Exit(23)
	str := testutil.RestoreStderr()
	assert.Eq(t, "slog: run exit handler recovered, error: test error2\n", str)
}

func TestSugaredLogger_Close(t *testing.T) {
	h := newTestHandler()
	sl := slog.NewStd(func(sl *slog.SugaredLogger) {
		sl.PushHandler(h)
		sl.Formatter = newTestFormatter()
	})

	h.errOnClose = true
	err := sl.Close()
	assert.Err(t, err)
	assert.Err(t, sl.LastErr())
	assert.Eq(t, "close error", err.Error())
}

func TestSugaredLogger_Handle(t *testing.T) {
	buf := byteutil.NewBuffer()
	sl := slog.NewStd(func(sl *slog.SugaredLogger) {
		sl.Output = buf
		sl.Formatter = newTestFormatter(true)
	})

	// Handle error: format error
	sl.WithField("key", "value").Error("error message")
	err := sl.LastErr()
	assert.Err(t, err)
	assert.Eq(t, "format error", err.Error())
}

func TestAddWithCtx(t *testing.T) {
	h := newTestHandler()
	slog.Reset()
	slog.PushHandler(h)
	slog.Std().DoNothingOnPanicFatal()
	slog.AddProcessor(slog.CtxKeysProcessor("data", "ctx1", "ctx2"))

	ctx := context.WithValue(context.Background(), "ctx1", "ctx1-value")
	ctx = context.WithValue(ctx, "ctx2", "ctx2-value")

	t.Run("normal", func(t *testing.T) {
		slog.TraceCtx(ctx, "A message", "test")
		slog.DebugCtx(ctx, "A message", "test")
		slog.InfoCtx(ctx, "A message", "test")
		slog.NoticeCtx(ctx, "A message", "test")
		slog.WarnCtx(ctx, "A message", "test")
		slog.ErrorCtx(ctx, "A message", "test")
		slog.FatalCtx(ctx, "A message", "test")
		slog.PanicCtx(ctx, "A message", "test")

		s := h.ResetGet()
		assert.StrContains(t, s, "ctx1-value")
		assert.StrContains(t, s, "ctx2-value")
		for _, level := range slog.AllLevels {
			assert.StrContains(t, s, level.Name())
		}
	})

	t.Run("with format", func(t *testing.T) {
		slog.TracefCtx(ctx, "A message %s", "test")
		slog.DebugfCtx(ctx, "A message %s", "test")
		slog.InfofCtx(ctx, "A message %s", "test")
		slog.NoticefCtx(ctx, "A message %s", "test")
		slog.WarnfCtx(ctx, "A message %s", "test")
		slog.ErrorfCtx(ctx, "A message %s", "test")
		slog.PanicfCtx(ctx, "A message %s", "test")
		slog.FatalfCtx(ctx, "A message %s", "test")

		s := h.ResetGet()
		assert.StrContains(t, s, "ctx1-value")
		assert.StrContains(t, s, "ctx2-value")
		for _, level := range slog.AllLevels {
			assert.StrContains(t, s, level.Name())
		}
	})

	slog.Reset()
}

================================================
FILE: sugared.go
================================================

package slog

import (
	"io"
	"os"

	"github.com/gookit/color"
)

// SugaredLoggerFn func type.
type SugaredLoggerFn func(sl *SugaredLogger)

// SugaredLogger Is a fast and usable Logger, which already contains
// the default formatting and handling capabilities
type SugaredLogger struct {
	*Logger
	// Formatter log message formatter. default use TextFormatter
	Formatter Formatter
	// Output writer
	Output io.Writer
	// Level for log handling. if log record level <= Level, it will be record. default: DebugLevel
	//
	// TIP: setting the level to lower will ignore more logs.
	Level Level
}

// NewStd logger instance, alias of NewStdLogger()
func NewStd(fns ...SugaredLoggerFn) *SugaredLogger { return NewStdLogger(fns...) }

// NewStdLogger instance. Creates a DebugLevel sugared logger writing to
// os.Stdout, with caller reporting enabled and console color auto-detected.
func NewStdLogger(fns ...SugaredLoggerFn) *SugaredLogger {
	setFns := []SugaredLoggerFn{
		func(sl *SugaredLogger) {
			sl.SetName("stdLogger")
			// sl.CallerSkip += 1
			sl.ReportCaller = true
			// auto enable console color.
			// NOTE(review): the type assertion is safe here because this fn runs
			// before any user fns, while Formatter is still the default TextFormatter.
			sl.Formatter.(*TextFormatter).EnableColor = color.SupportColor()
		},
	}

	// user config fns run after the defaults above, so they may override them
	if len(fns) > 0 {
		setFns = append(setFns, fns...)
	}
	return NewSugaredLogger(os.Stdout, DebugLevel, setFns...)
}

// NewSugared create new SugaredLogger. alias of NewSugaredLogger()
func NewSugared(out io.Writer, level Level, fns ...SugaredLoggerFn) *SugaredLogger {
	return NewSugaredLogger(out, level, fns...)
}

// NewSugaredLogger create new SugaredLogger with the given output and level.
func NewSugaredLogger(output io.Writer, level Level, fns ...SugaredLoggerFn) *SugaredLogger {
	sl := &SugaredLogger{
		Level:  level,
		Output: output,
		Logger: New(),
		// default value
		Formatter: NewTextFormatter(),
	}

	// NOTICE: use self as a log handler
	sl.AddHandler(sl)
	return sl.Config(fns...)
}

// NewJSONSugared create new SugaredLogger with JSONFormatter
func NewJSONSugared(out io.Writer, level Level, fns ...SugaredLoggerFn) *SugaredLogger {
	sl := NewSugaredLogger(out, level)
	sl.Formatter = NewJSONFormatter()

	return sl.Config(fns...)
}

// Config current logger by applying the given config functions in order.
func (sl *SugaredLogger) Config(fns ...SugaredLoggerFn) *SugaredLogger {
	for _, fn := range fns {
		fn(sl)
	}
	return sl
}

// Reset the logger to its defaults: DebugLevel, os.Stdout, TextFormatter.
func (sl *SugaredLogger) Reset() {
	sl.Level = DebugLevel
	sl.Output = os.Stdout
	sl.Formatter = NewTextFormatter()
}

// IsHandling Check if the current level can be handling
func (sl *SugaredLogger) IsHandling(level Level) bool {
	return sl.Level.ShouldHandling(level)
}

// Handle log record: format it and write the bytes to the configured Output.
func (sl *SugaredLogger) Handle(record *Record) error {
	bts, err := sl.Formatter.Format(record)
	if err != nil {
		return err
	}

	_, err = sl.Output.Write(bts)
	return err
}

// Close all log handlers, will flush and close all handlers.
//
// IMPORTANT:
//
//	if enable async/buffer mode, please call the Close() before exit.
//
// NOTE(review): if multiple handlers fail to close, only the last error
// is kept in sl.err and returned.
func (sl *SugaredLogger) Close() error {
	_ = sl.Logger.VisitAll(func(handler Handler) error {
		// TIP: must exclude self, because self is a handler
		if _, ok := handler.(*SugaredLogger); !ok {
			if err := handler.Close(); err != nil {
				sl.err = err
			}
		}
		return nil
	})

	return sl.err
}

// Flush all logs. alias of the FlushAll()
func (sl *SugaredLogger) Flush() error { return sl.FlushAll() }

// FlushAll all logs. Flush errors from individual handlers are discarded.
func (sl *SugaredLogger) FlushAll() error {
	return sl.Logger.VisitAll(func(handler Handler) error {
		if _, ok := handler.(*SugaredLogger); !ok {
			_ = handler.Flush()
		}
		return nil
	})
}

================================================
FILE: util.go
================================================
package slog

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"

	"github.com/gookit/goutil/byteutil"
	"github.com/gookit/goutil/strutil"
	"github.com/valyala/bytebufferpool"
)

// const (
// 	defaultMaxCallerDepth int = 15
// 	defaultKnownSlogFrames int = 4
// )

// Stack that attempts to recover the data for all goroutines.
// func getCallStacks(callerSkip int) []byte { // return nil // } // FormatLevelName Format the level name, specify the length returned, // fill the space with less length, and truncate than the length func FormatLevelName(name string, length int) string { if len(name) < length { return fmt.Sprintf("%-"+strconv.Itoa(length)+"s", name) } return name[:length] } func buildLowerLevelName() map[Level]string { mp := make(map[Level]string, len(LevelNames)) for level, s := range LevelNames { mp[level] = strings.ToLower(s) } return mp } // getCaller retrieves the name of the first non-slog calling function func getCaller(callerSkip int) (fr runtime.Frame, ok bool) { pcs := make([]uintptr, 1) // alloc 1 times num := runtime.Callers(callerSkip, pcs) if num > 0 { fr, _ = runtime.CallersFrames(pcs).Next() ok = fr.PC != 0 } return } func formatCaller(rf *runtime.Frame, flag uint8, userFn CallerFormatFn) (cs string) { if userFn != nil { return userFn(rf) } lineNum := strconv.FormatInt(int64(rf.Line), 10) switch flag { case CallerFlagFull: return rf.Function + "," + filepath.Base(rf.File) + ":" + lineNum case CallerFlagFunc: return rf.Function case CallerFlagFcLine: return rf.Function + ":" + lineNum case CallerFlagPkg: i := strings.LastIndex(rf.Function, "/") i += strings.IndexByte(rf.Function[i+1:], '.') return rf.Function[:i+1] case CallerFlagPkgFnl: i := strings.LastIndex(rf.Function, "/") i += strings.IndexByte(rf.Function[i+1:], '.') return rf.Function[:i+1] + "," + filepath.Base(rf.File) + ":" + lineNum case CallerFlagFnlFcn: ss := strings.Split(rf.Function, ".") return filepath.Base(rf.File) + ":" + lineNum + "," + ss[len(ss)-1] case CallerFlagFnLine: return filepath.Base(rf.File) + ":" + lineNum case CallerFlagFcName: ss := strings.Split(rf.Function, ".") return ss[len(ss)-1] default: // CallerFlagFpLine return rf.File + ":" + lineNum } } var msgBufPool bytebufferpool.Pool // it like Println, will add spaces for each argument func formatArgsWithSpaces(vs []any) string { 
ln := len(vs)
	if ln == 0 {
		return ""
	}

	if ln == 1 {
		// cast is string, return it. NOT ALLOC MEMORY
		if str, ok := vs[0].(string); ok {
			return str
		}
		return strutil.SafeString(vs[0])
	}

	// buf = make([]byte, 0, ln*8)
	bb := msgBufPool.Get()
	defer msgBufPool.Put(bb)

	// TIP:
	// `float` to string - will alloc 2 times memory
	// `int <0`, `int > 100` to string - will alloc 1 times memory
	for i := range vs {
		if i > 0 { // add space
			bb.B = append(bb.B, ' ')
		}
		bb.B = byteutil.AppendAny(bb.B, vs[i])
	}

	// string(bb.B) copies, which is required because bb is returned to the pool
	return string(bb.B)
	// return byteutil.String(bb.B) // perf: Reduce one memory allocation
}

// EncodeToString data to string. Maps are rendered as "{k:v, ...}",
// everything else falls back to strutil.SafeString.
func EncodeToString(v any) string {
	if mp, ok := v.(map[string]any); ok {
		return mapToString(mp)
	}
	return strutil.SafeString(v)
}

// mapToString renders a map as "{key:value, key:value}".
// NOTE: map iteration order is random, so output order is not deterministic.
func mapToString(mp map[string]any) string {
	ln := len(mp)
	if ln == 0 {
		return "{}"
	}

	// TODO use bytebufferpool
	buf := make([]byte, 0, ln*8)
	buf = append(buf, '{')

	for k, val := range mp {
		buf = append(buf, k...)
		buf = append(buf, ':')

		str, _ := strutil.AnyToString(val, false)
		buf = append(buf, str...)
		buf = append(buf, ',', ' ')
	}

	// remove last ', '
	buf = append(buf[:len(buf)-2], '}')
	return strutil.Byte2str(buf)
}

// parseTemplateToFields splits a "{{field}}" template into a flat list of
// field names and literal text chunks (literals keep their "}}" prefix so
// the template can be reassembled).
func parseTemplateToFields(tplStr string) []string {
	ss := strings.Split(tplStr, "{{")
	vars := make([]string, 0, len(ss)*2)

	for _, s := range ss {
		if len(s) == 0 {
			continue
		}

		fieldAndOther := strings.SplitN(s, "}}", 2)
		if len(fieldAndOther) < 2 {
			// no closing "}}": treat the whole chunk as literal text
			vars = append(vars, s)
		} else {
			vars = append(vars, fieldAndOther[0], "}}"+fieldAndOther[1])
		}
	}
	return vars
}

// printStderr writes the args to os.Stderr, ignoring any write error.
func printStderr(args ...any) {
	_, _ = fmt.Fprintln(os.Stderr, args...)
}

================================================
FILE: util_test.go
================================================
package slog

import (
	"strings"
	"testing"

	"github.com/gookit/goutil/errorx"
	"github.com/gookit/goutil/testutil/assert"
	"github.com/gookit/goutil/timex"
)

// revertTemplateString re-assembles a template from the field list produced
// by parseTemplateToFields; round-tripping must reproduce the original string.
func revertTemplateString(ss []string) string {
	var sb strings.Builder
	for _, s := range ss {
		// is field: chunks starting with a lowercase letter are field names
		// and get their "{{" prefix restored (literal chunks keep the "}}")
		if s[0] >= 'a' && s[0] <= 'z' {
			sb.WriteString("{{")
			sb.WriteString(s)
			// sb.WriteString("}}")
		} else {
			sb.WriteString(s)
		}
	}
	// sb.WriteByte('\n')
	return sb.String()
}

// TestInner_parseTemplateToFields checks parse + revert round-trips the
// built-in and a custom template unchanged.
func TestInner_parseTemplateToFields(t *testing.T) {
	ss := parseTemplateToFields(NamedTemplate)
	str := revertTemplateString(ss)
	// dump.P(ss, str)
	assert.Eq(t, NamedTemplate, str)

	ss = parseTemplateToFields(DefaultTemplate)
	str = revertTemplateString(ss)
	// dump.P(ss, str)
	assert.Eq(t, DefaultTemplate, str)

	testTemplate := "[{{datetime}}] [{{level}}] {{message}} {{data}} {{extra}}"
	ss = parseTemplateToFields(testTemplate)
	str = revertTemplateString(ss)
	assert.Eq(t, testTemplate, str)
	// dump.P(ss, str)
}

// TestUtil_EncodeToString checks the map rendering path of EncodeToString.
func TestUtil_EncodeToString(t *testing.T) {
	assert.Eq(t, "{a:1}", EncodeToString(map[string]any{"a": 1}))
}

// TestUtil_formatArgsWithSpaces is a table test covering the common
// argument types joined by formatArgsWithSpaces.
func TestUtil_formatArgsWithSpaces(t *testing.T) {
	// tests for formatArgsWithSpaces
	tests := []struct {
		args []any
		want string
	}{
		{nil, ""},
		{[]any{"a", "b", "c"}, "a b c"},
		{[]any{"a", "b", "c", 1, 2, 3}, "a b c 1 2 3"},
		{[]any{"a", 1, nil}, "a 1 "},
		{[]any{12, int8(12), int16(12), int32(12), int64(12)}, "12 12 12 12 12"},
		{[]any{uint(12), uint8(12), uint16(12), uint32(12), uint64(12)}, "12 12 12 12 12"},
		{[]any{float32(12.12), 12.12}, "12.12 12.12"},
		{[]any{true, false}, "true false"},
		{[]any{[]byte("abc"), []byte("123")}, "abc 123"},
		{[]any{timex.OneHour}, "3600000000000"},
		{[]any{errorx.Raw("a error message")}, "a error message"},
		{[]any{[]int{1, 2, 3}}, "[1 2 3]"},
	}

	for _, tt := range tests {
		assert.Eq(t, tt.want, formatArgsWithSpaces(tt.args))
	}

	assert.NotEmpty(t, formatArgsWithSpaces([]any{timex.Now().T()}))
}