[
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug report\nabout: Create a report to help us improve\ntitle: ''\nlabels: ''\nassignees: inhere\n\n---\n\n**System (please complete the following information):**\n\n - OS: `linux` [e.g. linux, macOS]\n - GO Version: `1.13` [e.g. `1.13`]\n - Pkg Version: `1.1.1` [e.g. `1.1.1`]\n\n**Describe the bug**\n\nA clear and concise description of what the bug is.\n\n**To Reproduce**\n\n```go\n// go code\n```\n\n**Expected behavior**\n\nA clear and concise description of what you expected to happen.\n\n**Screenshots**\n\nIf applicable, add screenshots to help explain your problem.\n\n**Additional context**\n\nAdd any other context about the problem here.\n"
  },
  {
    "path": ".github/changelog.yml",
    "content": "title: '## Change Log'\n# style allow: simple, markdown(mkdown), ghr(gh-release)\nstyle: gh-release\n# group names\nnames: [Refactor, Fixed, Feature, Update, Other]\nrepo_url: https://github.com/gookit/slog\n\nfilters:\n  # message length should >= 12\n  - name: msg_len\n    min_len: 12\n  # message words should >= 3\n  - name: words_len\n    min_len: 3\n  - name: keyword\n    keyword: format code\n    exclude: true\n  - name: keywords\n    keywords: format code, action test\n    exclude: true\n\n# group match rules\n# not matched will use 'Other' group.\nrules:\n  - name: Refactor\n    start_withs: [refactor, break]\n    contains: ['refactor:']\n  - name: Fixed\n    start_withs: [fix]\n    contains: ['fix:']\n  - name: Feature\n    start_withs: [feat, new]\n    contains: [feature]\n  - name: Update\n    start_withs: [update, 'up:']\n    contains: []\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n- package-ecosystem: gomod\n  directory: \"/\"\n  schedule:\n    interval: daily\n  open-pull-requests-limit: 10\n\n- package-ecosystem: \"github-actions\"\n  directory: \"/\"\n  schedule:\n    # Check for updates to GitHub Actions every weekday\n    interval: \"daily\"\n"
  },
  {
    "path": ".github/revive.toml",
    "content": "ignoreGeneratedHeader = false\n# Sets the default severity to \"warning\"\n#severity = \"error\"\nseverity = \"warning\"\nconfidence = 0.8\nerrorCode = 0\nwarningCode = 0\n\n[rule.blank-imports]\n[rule.context-as-argument]\n[rule.context-keys-type]\n[rule.dot-imports]\n[rule.error-return]\n[rule.error-strings]\n[rule.error-naming]\n[rule.exported]\nseverity = \"warning\"\n[rule.if-return]\n[rule.increment-decrement]\n[rule.var-naming]\n[rule.var-declaration]\n[rule.package-comments]\n[rule.range]\n[rule.receiver-naming]\n[rule.time-naming]\n[rule.unexported-return]\n[rule.indent-error-flow]\n[rule.errorf]\n[rule.argument-limit]\narguments = [4]\n[rule.function-result-limit]\narguments = [3]\n[rule.empty-block]\n[rule.confusing-naming]\n[rule.superfluous-else]\n[rule.unused-parameter]\n[rule.unreachable-code]\n[rule.unnecessary-stmt]\n[rule.struct-tag]\n[rule.atomic]\n[rule.empty-lines]\n[rule.duplicated-imports]\n[rule.import-shadowing]\n[rule.confusing-results]\n[rule.modifies-parameter]\n[rule.redefines-builtin-id]"
  },
  {
    "path": ".github/workflows/go.yml",
    "content": "name: Unit-Tests\non:\n  pull_request:\n    paths:\n      - 'go.mod'\n      - '**.go'\n      - '**.yml'\n  push:\n    paths:\n      - 'go.mod'\n      - '**.go'\n      - '**.yml'\n\njobs:\n\n  test:\n    name: Test on go ${{ matrix.go_version }} and ${{ matrix.os }}\n    runs-on: ${{ matrix.os }}\n    strategy:\n      matrix:\n        go_version: [1.24, 1.23, 1.22, 1.21, 1.19, stable]\n        os: [ubuntu-latest, windows-latest] # , macOS-latest\n\n    steps:\n    - name: Check out code\n      uses: actions/checkout@v6\n\n    - name: Setup Go SDK\n      uses: actions/setup-go@v6\n      timeout-minutes: 3\n      with:\n        go-version: ${{ matrix.go_version }}\n\n    - name: Tidy go mod\n      run: go mod tidy\n\n    # https://github.com/actions/setup-go\n#    - name: Use Go ${{ matrix.go_version }}\n#      timeout-minutes: 3\n#      uses: actions/setup-go@v3\n#      with:\n#        go-version: ${{ matrix.go_version }}\n\n#    - name: Revive check\n#      uses: docker://morphy/revive-action:v2\n#      if: ${{ matrix.os == 'ubuntu-latest' && matrix.go_version == 'stable' }}\n#      with:\n#        config: .github/revive.toml\n#        # Exclude patterns, separated by semicolons (optional)\n#        exclude: \"./internal/...\"\n\n    - name: Run staticcheck\n      uses: reviewdog/action-staticcheck@v1\n      if: ${{ github.event_name == 'pull_request'}}\n      with:\n        github_token: ${{ secrets.github_token }}\n        # Change reviewdog reporter if you need [github-pr-check,github-check,github-pr-review].\n        reporter: github-pr-check\n        # Report all results. 
[added,diff_context,file,nofilter].\n        filter_mode: added\n        # Exit with 1 when it find at least one finding.\n        fail_on_error: true\n\n    - name: Run unit tests\n      # run: go test -v -cover ./...\n      run: go test -coverprofile=\"profile.cov\" ./...\n\n    - name: Send coverage\n      uses: shogo82148/actions-goveralls@v1\n      if: ${{ matrix.os == 'ubuntu-latest' }}\n      with:\n        path-to-profile: profile.cov\n        flag-name: Go-${{ matrix.go_version }}\n        parallel: true\n        shallow: true\n\n  # notifies that all test jobs are finished.\n  # https://github.com/shogo82148/actions-goveralls\n  finish:\n    needs: test\n    runs-on: ubuntu-latest\n    steps:\n      - uses: shogo82148/actions-goveralls@v1\n        with:\n          shallow: true\n          parallel-finished: true\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: Tag-release\n\non:\n  push:\n    tags:\n      - v*\n\njobs:\n  release:\n    name: Release new version\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n        with:\n          fetch-depth: 0\n\n      - name: Setup ENV\n        # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable\n        run: |\n          echo \"RELEASE_TAG=${GITHUB_REF:10}\" >> $GITHUB_ENV\n          echo \"RELEASE_NAME=$GITHUB_WORKFLOW\" >> $GITHUB_ENV\n\n      - name: Generate changelog\n        run: |\n          curl https://github.com/gookit/gitw/releases/latest/download/chlog-linux-amd64 -L -o /usr/local/bin/chlog\n          chmod a+x /usr/local/bin/chlog\n          chlog -c .github/changelog.yml -o changelog.md prev last \n\n      # https://github.com/softprops/action-gh-release\n      - name: Create release and upload assets\n        uses: softprops/action-gh-release@v2\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        with:\n          name: ${{ env.RELEASE_TAG }}\n          tag_name: ${{ env.RELEASE_TAG }}\n          body_path: changelog.md\n          token: ${{ secrets.GITHUB_TOKEN }}\n#          files: macos-chlog.exe\n"
  },
  {
    "path": ".gitignore",
    "content": "*.log\n*.swp\n.idea\n*.patch\n*.tmp\n\n# Go template\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, build with `go test -c`\n*.test\n*.log.*\n*~\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n.DS_Store\n*.prof\n\n# shell script\n/*.bash\n/*.sh\n/*.zsh\n/*.pid\ngo.work\nchangelog.md\ntestdata\n_example/go.sum\n.xenv.*"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2016 inhere\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."
  },
  {
    "path": "Makefile",
    "content": "# Make does not offer a recursive wildcard function, so here's one:\n# from https://github.com/jenkins-x-plugins/jx-gitops/blob/main/Makefile\nrwildcard=$(wildcard $1$2) $(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2))\n\nSHELL := /bin/bash\nNAME := slog\nBUILD_TARGET = testdata\nMAIN_SRC_FILE=cmd/main.go\nGO :=go\n\nORG := gookit\nREV := $(shell git rev-parse --short HEAD 2> /dev/null || echo 'unknown')\nORG_REPO := $(ORG)/$(NAME)\nRELEASE_ORG_REPO := $(ORG_REPO)\nROOT_PACKAGE := github.com/$(ORG_REPO)\n\nGO_VERSION := $(shell $(GO) version | sed -e 's/^[^0-9.]*\\([0-9.]*\\).*/\\1/')\nGO_DEPENDENCIES := $(call rwildcard,pkg/,*.go) $(call rwildcard,cmd/,*.go)\n\nBRANCH     := $(shell git rev-parse --abbrev-ref HEAD 2> /dev/null  || echo 'unknown')\nBUILD_DATE := $(shell date +%Y%m%d-%H:%M:%S)\nCGO_ENABLED = 0\n\nREPORTS_DIR=$(BUILD_TARGET)/reports\n\nGOTEST := $(GO) test\n\n# set dev version unless VERSION is explicitly set via environment\nVERSION ?= $(shell echo \"$$(git for-each-ref refs/tags/ --count=1 --sort=-version:refname --format='%(refname:short)' 2>/dev/null)-dev+$(REV)\" | sed 's/^v//')\n\n# Build flags for setting build-specific configuration at build time - defaults to empty\n#BUILD_TIME_CONFIG_FLAGS ?= \"\"\n\n# Full build flags used when building binaries. 
Not used for test compilation/execution.\nBUILDFLAGS :=  -ldflags \\\n  \" -X $(ROOT_PACKAGE)/pkg/cmd/version.Version=$(VERSION)\\\n\t\t-X github.com/jenkins-x-plugins/jx-gitops/pkg/cmd/version.Version=$(VERSION)\\\n\t\t-X $(ROOT_PACKAGE)/pkg/cmd/version.Revision='$(REV)'\\\n\t\t-X $(ROOT_PACKAGE)/pkg/cmd/version.Branch='$(BRANCH)'\\\n\t\t-X $(ROOT_PACKAGE)/pkg/cmd/version.BuildDate='$(BUILD_DATE)'\\\n\t\t-X $(ROOT_PACKAGE)/pkg/cmd/version.GoVersion='$(GO_VERSION)'\\\n\t\t$(BUILD_TIME_CONFIG_FLAGS)\"\n\n# Some tests expect default values for version.*, so just use the config package values there.\nTEST_BUILDFLAGS :=  -ldflags \"$(BUILD_TIME_CONFIG_FLAGS)\"\n\nifdef DEBUG\nBUILDFLAGS := -gcflags \"all=-N -l\" $(BUILDFLAGS)\nendif\n\nifdef PARALLEL_BUILDS\nBUILDFLAGS += -p $(PARALLEL_BUILDS)\nGOTEST += -p $(PARALLEL_BUILDS)\nelse\n# -p 4 seems to work well for people\nGOTEST += -p 4\nendif\n\nifdef DISABLE_TEST_CACHING\nGOTEST += -count=1\nendif\n\nTEST_PACKAGE ?= ./...\nCOVER_OUT:=$(REPORTS_DIR)/cover.out\nCOVERFLAGS=-coverprofile=$(COVER_OUT) --covermode=count --coverpkg=./...\n\n.PHONY: list\nlist: ## List all make targets\n\t@$(MAKE) -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ \"^[#.]\") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort\n\n.PHONY: help\n.DEFAULT_GOAL := help\nhelp:\n\t@echo -e \"Some useful commands for develop\\n\"\n\t@grep -h -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-20s\\033[0m %s\\n\", $$1, $$2}'\n\nfull: check ## Build and run the tests\ncheck: build test ## Build and run the tests\nget-test-deps: ## Install test dependencies\n\t$(GO) install github.com/axw/gocov/gocov@latest\n\t$(GO) install gopkg.in/matm/v1/gocov-html@latest\n\nprint-version: ## Print version\n\t@echo $(VERSION)\n\nbuild: $(GO_DEPENDENCIES) clean ## Build jx-labs binary for current OS\n\tgo mod download\n\tCGO_ENABLED=$(CGO_ENABLED) $(GO) $(BUILD_TARGET) 
$(BUILDFLAGS) -o build/$(NAME) $(MAIN_SRC_FILE)\n\nlabel: $(GO_DEPENDENCIES)\n\tCGO_ENABLED=$(CGO_ENABLED) $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/jx-label fns/label/main.go\n\nbuild-all: $(GO_DEPENDENCIES) build make-reports-dir ## Build all files - runtime, all tests etc.\n\tCGO_ENABLED=$(CGO_ENABLED) $(GOTEST) -run=nope -tags=integration -failfast -short ./... $(BUILDFLAGS)\n\ntidy-deps: ## Cleans up dependencies\n\t$(GO) mod tidy\n\t# mod tidy only takes compile dependencies into account, let's make sure we capture tooling dependencies as well\n\t@$(MAKE) install-generate-deps\n\npprof-web: ## generate pprof file and start an web-ui\n\tcd ./_example; go mod tidy; go run ./pprof\n\t#go tool pprof rux_prof_data.prof\n\tgo tool pprof -http=:8080 ./_example/rux_prof_data.prof\n\n.PHONY: make-reports-dir\nmake-reports-dir:\n\tmkdir -p $(REPORTS_DIR)\n\ntest: ## Run tests with the \"unit\" build tag\n\tKUBECONFIG=/cluster/connections/not/allowed CGO_ENABLED=$(CGO_ENABLED) $(GOTEST) --tags=unit -failfast -short ./... $(TEST_BUILDFLAGS)\n\ntest-coverage : make-reports-dir ## Run tests and coverage for all tests with the \"unit\" build tag\n\tCGO_ENABLED=$(CGO_ENABLED) $(GOTEST) --tags=unit $(COVERFLAGS) -failfast -short ./... $(TEST_BUILDFLAGS)\n\ntest-report: make-reports-dir get-test-deps test-coverage ## Create the test report\n\t@gocov convert $(COVER_OUT) | gocov report\n\ntest-report-html: make-reports-dir get-test-deps test-coverage ## Create the test report in HTML format\n\t@gocov convert $(COVER_OUT) | gocov-html > $(REPORTS_DIR)/cover.html && open $(REPORTS_DIR)/cover.html\n\ntest-bench: ## run bench test report in _example dir\n\tcd ./_example; go mod tidy; \\\n\tgo test -v -cpu=4 -run=none -bench=. 
-benchtime=10s -benchmem bench_loglibs_test.go\n\ninstall: $(GO_DEPENDENCIES) ## Install the binary\n\tGOBIN=${GOPATH}/bin $(GO) install $(BUILDFLAGS) $(MAIN_SRC_FILE)\n\nlinux: ## Build for Linux\n\tCGO_ENABLED=$(CGO_ENABLED) GOOS=linux GOARCH=amd64 $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/linux/$(NAME) $(MAIN_SRC_FILE)\n\tchmod +x build/linux/$(NAME)\n\narm: ## Build for ARM\n\tCGO_ENABLED=$(CGO_ENABLED) GOOS=linux GOARCH=arm $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/arm/$(NAME) $(MAIN_SRC_FILE)\n\tchmod +x build/arm/$(NAME)\n\nwin: ## Build for Windows\n\tCGO_ENABLED=$(CGO_ENABLED) GOOS=windows GOARCH=amd64 $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/win/$(NAME)-windows-amd64.exe $(MAIN_SRC_FILE)\n\ndarwin: ## Build for OSX\n\tCGO_ENABLED=$(CGO_ENABLED) GOOS=darwin GOARCH=amd64 $(GO) $(BUILD_TARGET) $(BUILDFLAGS) -o build/darwin/$(NAME) $(MAIN_SRC_FILE)\n\tchmod +x build/darwin/$(NAME)\n\n.PHONY: release\nrelease: clean linux test\n\nrelease-all: release linux win darwin\n\npromoter:\n\tcd promote && go build main.go\n\n.PHONY: goreleaser\ngoreleaser:\n\tstep-go-releaser --organisation=$(ORG) --revision=$(REV) --branch=$(BRANCH) --build-date=$(BUILD_DATE) --go-version=$(GO_VERSION) --root-package=$(ROOT_PACKAGE) --version=$(VERSION) --timeout 200m\n\n.PHONY: clean\nclean: ## Clean the generated artifacts\n\trm -rf build release dist\n\nget-fmt-deps: ## Install fmt dependencies\n\t$(GO) install golang.org/x/tools/cmd/goimports@latest\n\n.PHONY: fmt\nfmt: importfmt ## Format the code\n\t$(eval FORMATTED = $(shell $(GO) fmt ./...))\n\t@if [ \"$(FORMATTED)\" == \"\" ]; \\\n      \tthen \\\n      \t    echo \"All Go files properly formatted\"; \\\n      \telse \\\n      \t\techo \"Fixed formatting for: $(FORMATTED)\"; \\\n      \tfi\n\n.PHONY: importfmt\nimportfmt: get-fmt-deps\n\t@echo \"Formatting the imports...\"\n\tgoimports -w $(GO_DEPENDENCIES)\n\n.PHONY: lint\nlint: ## Lint the code\n\t./hack/gofmt.sh\n\t./hack/linter.sh\n\t./hack/generate.sh\n\n.PHONY: 
all\nall: fmt build test lint generate-refdocs\n\ninstall-refdocs:\n\t$(GO) get github.com/jenkins-x/gen-crd-api-reference-docs\n\ngenerate-refdocs: install-refdocs\n\tgen-crd-api-reference-docs -config \"hack/configdocs/config.json\" \\\n\t-template-dir hack/configdocs/templates \\\n    -api-dir \"./pkg/apis/gitops/v1alpha1\" \\\n    -out-file docs/config.md\n\ngenerate-scheduler-refdocs: install-refdocs\n\tgen-crd-api-reference-docs -config \"hack/configdocs/config.json\" \\\n\t-template-dir hack/configdocs/templates \\\n    -api-dir \"./pkg/apis/scheduler/v1alpha1\" \\\n    -out-file docs/scheduler-config.md\n\nbin/docs:\n\tgo build $(LDFLAGS) -v -o bin/docs cmd/docs/*.go\n\n.PHONY: docs\ndocs: bin/docs generate-refdocs generate-scheduler-refdocs ## update docs\n\t@echo \"Generating docs\"\n\t@./bin/docs --target=./docs/cmd\n\t@./bin/docs --target=./docs/man/man1 --kind=man\n\t@rm -f ./bin/docs"
  },
  {
    "path": "README.md",
    "content": "# slog\n\n![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/gookit/slog?style=flat-square)\n[![GoDoc](https://pkg.go.dev/badge/github.com/gookit/slog.svg)](https://pkg.go.dev/github.com/gookit/slog)\n[![Go Report Card](https://goreportcard.com/badge/github.com/gookit/slog)](https://goreportcard.com/report/github.com/gookit/slog)\n[![Unit-Tests](https://github.com/gookit/slog/workflows/Unit-Tests/badge.svg)](https://github.com/gookit/slog/actions)\n[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gookit/slog)](https://github.com/gookit/slog)\n[![Coverage Status](https://coveralls.io/repos/github/gookit/slog/badge.svg?branch=master)](https://coveralls.io/github/gookit/slog?branch=master)\n\n📑 Lightweight, structured, extensible, configurable logging library written in Golang.\n\n**Output in console:**\n\n![console-log-all-level](_example/images/console-log-all-level.png)\n\n## Features\n\n- Simple, directly available without configuration\n- Support common log level processing.\n  - eg: `trace` `debug` `info` `notice` `warn` `error` `fatal` `panic`\n- Support any extension of `Handler` `Formatter` as needed\n- Supports adding multiple `Handler` log processing at the same time, outputting logs to different places\n- Support to custom log message `Formatter`\n  - Built-in `json` `text` two log record formatting `Formatter`\n- Support to custom build log messages `Handler`\n  - The built-in `handler.Config` `handler.Builder` can easily and quickly build the desired log handler\n- Has built-in common log write handler program\n  - `console` output logs to the console, supports color output\n  - `writer` output logs to the specified `io.Writer`\n  - `file` output log to the specified file, optionally enable `buffer` to buffer writes\n  - `simple` output log to the specified file, write directly to the file without buffering\n  - `rotate_file` outputs logs to the specified file, and supports splitting files by time 
and size, and `buffer` buffered writing is enabled by default\n  - See [./handler](./handler) folder for more built-in implementations\n- Benchmark performance test please see [Benchmarks](#benchmarks)\n\n### Output logs to file\n\n- Support enabling `buffer` for log writing\n- Support splitting log files by `time` and `size`\n- Support configuration to compress log files via `gzip`\n- Support clean old log files by `BackupNum` `BackupTime`\n\n### `rotatefile` subpackage\n\n- The `rotatefile` subpackage is a stand-alone tool library with file splitting, cleaning, and compressing backups\n- `rotatefile.Writer` can also be directly wrapped and used in other logging libraries. For example: `log`, `glog`, `zap`, etc.\n- `rotatefile.FilesClear` is an independent file cleaning backup tool, which can be used in other places (such as other program log cleaning such as PHP)\n- For more usage, please see [rotatefile](rotatefile/README.md)\n\n### Use slog in GORM\n\nPlease see https://github.com/gookit/slog/issues/127#issuecomment-2827745713\n\n## [中文说明](README.zh-CN.md)\n\n中文说明请阅读 [README.zh-CN](README.zh-CN.md)\n\n## GoDoc\n\n- [Godoc for github](https://pkg.go.dev/github.com/gookit/slog?tab=doc)\n\n## Install\n\n```bash\ngo get github.com/gookit/slog\n```\n\n## Quick Start\n\n`slog` is very simple to use and can be used without any configuration\n\n```go\npackage main\n\nimport (\n\t\"github.com/gookit/slog\"\n)\n\nfunc main() {\n\tslog.Info(\"info log message\")\n\tslog.Warn(\"warning log message\")\n\tslog.Infof(\"info log %s\", \"message\")\n\tslog.Debugf(\"debug %s\", \"message\")\n}\n```\n\n**Output:**\n\n```text\n[2020/07/16 12:19:33] [application] [INFO] [main.go:7] info log message  \n[2020/07/16 12:19:33] [application] [WARNING] [main.go:8] warning log message  \n[2020/07/16 12:19:33] [application] [INFO] [main.go:9] info log message  \n[2020/07/16 12:19:33] [application] [DEBUG] [main.go:10] debug message  \n```\n\n### Console Color\n\nYou can enable color on 
output logs to console. _This is default_\n\n```go\npackage main\n\nimport (\n\t\"github.com/gookit/slog\"\n)\n\nfunc main() {\n\tslog.Configure(func(logger *slog.SugaredLogger) {\n\t\tf := logger.Formatter.(*slog.TextFormatter)\n\t\tf.EnableColor = true\n\t})\n\n\tslog.Trace(\"this is a simple log message\")\n\tslog.Debug(\"this is a simple log message\")\n\tslog.Info(\"this is a simple log message\")\n\tslog.Notice(\"this is a simple log message\")\n\tslog.Warn(\"this is a simple log message\")\n\tslog.Error(\"this is a simple log message\")\n\tslog.Fatal(\"this is a simple log message\")\n}\n```\n\n**Output:**\n\n![](_example/images/console-color-log.png)\n\n### Change log output style\n\nAbove is the `Formatter` setting that changed the default logger.\n\n> You can also create your own logger and append `ConsoleHandler` to support printing logs to the console:\n\n```go\nh := handler.NewConsoleHandler(slog.AllLevels)\nl := slog.NewWithHandlers(h)\n\nl.Trace(\"this is a simple log message\")\nl.Debug(\"this is a simple log message\")\n```\n\nChange the default logger log output style:\n\n```go\nh.Formatter().(*slog.TextFormatter).SetTemplate(slog.NamedTemplate)\n```\n\n**Output:**\n\n![](_example/images/console-color-log1.png)\n\n> Note: `slog.TextFormatter` uses a template string to format the output log, so the new field output needs to adjust the template at the same time.\n\n### Use JSON Format\n\n`slog` also has a built-in `Formatter` for JSON format. 
If not specified, the default is to use `TextFormatter` to format log records.\n\n```go\npackage main\n\nimport (\n\t\"github.com/gookit/slog\"\n)\n\nfunc main() {\n\t// use JSON formatter\n\tslog.SetFormatter(slog.NewJSONFormatter())\n\n\tslog.Info(\"info log message\")\n\tslog.Warn(\"warning log message\")\n\tslog.WithData(slog.M{\n\t\t\"key0\": 134,\n\t\t\"key1\": \"abc\",\n\t}).Infof(\"info log %s\", \"message\")\n\n\tr := slog.WithFields(slog.M{\n\t\t\"category\": \"service\",\n\t\t\"IP\": \"127.0.0.1\",\n\t})\n\tr.Infof(\"info %s\", \"message\")\n\tr.Debugf(\"debug %s\", \"message\")\n}\n```\n\n**Output:**\n\n```text\n{\"channel\":\"application\",\"data\":{},\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"INFO\",\"message\":\"info log message\"}\n{\"channel\":\"application\",\"data\":{},\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"WARNING\",\"message\":\"warning log message\"}\n{\"channel\":\"application\",\"data\":{\"key0\":134,\"key1\":\"abc\"},\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"INFO\",\"message\":\"info log message\"}\n{\"IP\":\"127.0.0.1\",\"category\":\"service\",\"channel\":\"application\",\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"INFO\",\"message\":\"info message\"}\n{\"IP\":\"127.0.0.1\",\"category\":\"service\",\"channel\":\"application\",\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"DEBUG\",\"message\":\"debug message\"}\n```\n\n## Introduction\n\n- `Logger` - log dispatcher. One logger can register multiple `Handler`, `Processor`\n- `Record` - log records, each log is a `Record` instance.\n- `Processor` - enables extended processing of log records. 
It is called before the log `Record` is processed by the `Handler`.\n  - You can use it to perform additional operations on `Record`, such as: adding fields, adding extended information, etc.\n- `Handler` - log handler, each log will be processed by `Handler.Handle()`.\n  - Here you can send logs to console, file, remote server, etc.\n- `Formatter` - logging data formatting process.\n  - Usually set in `Handler`, it can be used to format log records, convert records into text, JSON, etc., `Handler` then writes the formatted data to the specified place.\n  - `Formatter` is not required. You can do without it and handle logging directly in `Handler.Handle()`.\n\n**Simple structure of log scheduler**：\n\n```text\n          Processors\nLogger --{\n          Handlers --|- Handler0 With Formatter0\n                     |- Handler1 With Formatter1\n                     |- Handler2 (can also without Formatter)\n                     |- ... more\n```\n\n> Note: Be sure to remember to add `Handler`, `Processor` to the logger instance and log records will be processed by `Handler`.\n\n### Processor\n\n`Processor` interface:\n\n```go\n// Processor interface definition\ntype Processor interface {\n\t// Process record\n\tProcess(record *Record)\n}\n\n// ProcessorFunc definition\ntype ProcessorFunc func(record *Record)\n\n// Process record\nfunc (fn ProcessorFunc) Process(record *Record) {\n\tfn(record)\n}\n```\n\n> You can use it to perform additional operations on the Record before the log `Record` reaches the `Handler` for processing, such as: adding fields, adding extended information, etc.\n\nAdd processor to logger:\n\n```go\nslog.AddProcessor(slog.AddHostname())\n\n// or\nl := slog.New()\nl.AddProcessor(slog.AddHostname())\n```\n\nThe built-in processor `slog.AddHostname` is used here as an example, which can add a new field `hostname` on each log record.\n\n```go\nslog.AddProcessor(slog.AddHostname())\nslog.Info(\"message\")\n```\n\nOutput, including new fields 
`\"hostname\":\"InhereMac\"`：\n\n```json\n{\"channel\":\"application\",\"level\":\"INFO\",\"datetime\":\"2020/07/17 12:01:35\",\"hostname\":\"InhereMac\",\"data\":{},\"extra\":{},\"message\":\"message\"}\n```\n\n### Handler\n\n`Handler` interface:\n\n> You can customize any `Handler` you want, just implement the `slog.Handler` interface.\n\n```go\n// Handler interface definition\ntype Handler interface {\n\tio.Closer\n\tFlush() error\n\t// IsHandling Checks whether the given record will be handled by this handler.\n\tIsHandling(level Level) bool\n\t// Handle a log record.\n\t// all records may be passed to this method, and the handler should discard\n\t// those that it does not want to handle.\n\tHandle(*Record) error\n}\n```\n\n### Formatter\n\n`Formatter` interface:\n\n```go\n// Formatter interface\ntype Formatter interface {\n\tFormat(record *Record) ([]byte, error)\n}\n```\n\nFunction wrapper type：\n\n```go\n// FormatterFunc wrapper definition\ntype FormatterFunc func(r *Record) ([]byte, error)\n\n// Format a log record\nfunc (fn FormatterFunc) Format(r *Record) ([]byte, error) {\n\treturn fn(r)\n}\n```\n\n**JSON formatter**\n\n```go\ntype JSONFormatter struct {\n\t// Fields exported log fields.\n\tFields []string\n\t// Aliases for output fields. you can change export field name.\n\t// item: `\"field\" : \"output name\"`\n\t// eg: {\"message\": \"msg\"} export field will display \"msg\"\n\tAliases StringMap\n\t// PrettyPrint will indent all json logs\n\tPrettyPrint bool\n\t// TimeFormat the time format layout. 
default is time.RFC3339\n\tTimeFormat string\n}\n```\n\n**Text formatter**\n\nDefault templates:\n\n```go\nconst DefaultTemplate = \"[{{datetime}}] [{{channel}}] [{{level}}] [{{caller}}] {{message}} {{data}} {{extra}}\\n\"\nconst NamedTemplate = \"{{datetime}} channel={{channel}} level={{level}} [file={{caller}}] message={{message}} data={{data}}\\n\"\n```\n\nChange template:\n\n```go\nmyTemplate := \"[{{datetime}}] [{{level}}] {{message}}\"\n\nf := slog.NewTextFormatter()\nf.SetTemplate(myTemplate)\n```\n\n## Custom logger\n\nCustom `Processor` and `Formatter` are relatively simple, just implement a corresponding method.\n\n### Create new logger\n\n`slog.Info, slog.Warn` and other methods use the default logger and output logs to the console by default.\n\nYou can create a brand-new instance of `slog.Logger`:\n\n**Method 1**：\n\n```go\nl := slog.New()\n// add handlers ...\nh1 := handler.NewConsoleHandler(slog.AllLevels)\nl.AddHandlers(h1)\n```\n\n**Method 2**：\n\n```go\nl := slog.NewWithName(\"myLogger\")\n// add handlers ...\nh1 := handler.NewConsoleHandler(slog.AllLevels)\nl.AddHandlers(h1)\n```\n\n**Method 3**：\n\n```go\npackage main\n\nimport (\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc main() {\n\tl := slog.NewWithHandlers(handler.NewConsoleHandler(slog.AllLevels))\n\tl.Info(\"message\")\n}\n```\n\n### Create custom Handler\n\nYou only need to implement the `slog.Handler` interface to create a custom `Handler`.\n\nYou can quickly assemble your own Handler through the built-in `handler.LevelsWithFormatter` `handler.LevelWithFormatter` and other fragments of slog.\n\nExamples:\n\n> Use `handler.LevelsWithFormatter`, only need to implement `Close, Flush, Handle` methods\n\n```go\ntype MyHandler struct {\n\thandler.LevelsWithFormatter\n    Output io.Writer\n}\n\nfunc (h *MyHandler) Handle(r *slog.Record) error {\n\t// you can write log message to file or send to remote.\n}\n\nfunc (h *MyHandler) Flush() error {}\nfunc (h 
*MyHandler) Close() error {}\n```\n\nAdd `Handler` to the logger to use:\n\n```go\n// add to default logger\nslog.AddHander(&MyHandler{})\n\n// or, add to custom logger:\nl := slog.New()\nl.AddHander(&MyHandler{})\n```\n\n## Use the built-in handlers\n\n[./handler](handler) package has built-in common log handlers, which can basically meet most scenarios.\n\n```go\n// Output logs to console, allow render color.\nfunc NewConsoleHandler(levels []slog.Level) *ConsoleHandler\n// Send logs to email\nfunc NewEmailHandler(from EmailOption, toAddresses []string) *EmailHandler\n// Send logs to syslog\nfunc NewSysLogHandler(priority syslog.Priority, tag string) (*SysLogHandler, error)\n// A simple handler implementation that outputs logs to a given io.Writer\nfunc NewSimpleHandler(out io.Writer, level slog.Level) *SimpleHandler\n```\n\n**Output log to file**:\n\n```go\n// Output log to the specified file, without buffering by default\nfunc NewFileHandler(logfile string, fns ...ConfigFn) (h *SyncCloseHandler, err error)\n// Output logs to the specified file in JSON format, without buffering by default\nfunc JSONFileHandler(logfile string, fns ...ConfigFn) (*SyncCloseHandler, error)\n// Buffered output log to specified file\nfunc NewBuffFileHandler(logfile string, buffSize int, fns ...ConfigFn) (*SyncCloseHandler, error)\n```\n\n> TIP: `NewFileHandler` `JSONFileHandler` can also enable write buffering by passing in fns `handler.WithBuffSize(buffSize)`\n\n**Output log to file and rotate automatically**:\n\n```go\n// Automatic rotating according to file size\nfunc NewSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error)\n// Automatic rotating according to time\nfunc NewTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error)\n// It supports configuration to rotate according to size and time. 
\n// The default setting file size is 20M, and the default automatic splitting time is 1 hour (EveryHour).\nfunc NewRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error)\n```\n\n> TIP: By passing in `fns ...ConfigFn`, more options can be set, such as log file retention time, log write buffer size, etc. For detailed settings, see the `handler.Config` structure\n\n### Logs to file\n\nOutput log to the specified file, `buffer` buffered writing is not enabled by default. Buffering can also be enabled by passing in a parameter.\n\n```go\npackage mypkg\n\nimport (\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc main() {\n\tdefer slog.MustClose()\n\n\t// DangerLevels contains: slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel\n\th1 := handler.MustFileHandler(\"/tmp/error.log\", handler.WithLogLevels(slog.DangerLevels))\n\t// custom log format\n\t// f := h1.Formatter().(*slog.TextFormatter)\n\tf := slog.AsTextFormatter(h1.Formatter())\n\tf.SetTemplate(\"your template format\\n\")\n\n\t// NormalLevels contains: slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel\n\th2 := handler.MustFileHandler(\"/tmp/info.log\", handler.WithLogLevels(slog.NormalLevels))\n\n\t// register handlers\n\tslog.PushHandler(h1)\n\tslog.PushHandler(h2)\n\n\t// add logs\n\tslog.Info(\"info message text\")\n\tslog.Error(\"error message text\")\n}\n```\n\n> **Note**: If write buffering `buffer` is enabled, be sure to call `logger.Close()` at the end of the program to flush the contents of the buffer to the file.\n\n### Log to file with automatic rotating\n\n`slog/handler` also has a built-in output log to a specified file, and supports splitting files by time and size at the same time.\nBy default, `buffer` buffered writing is enabled\n\n```go\nfunc Example_rotateFileHandler() {\n\th1 := handler.MustRotateFile(\"/tmp/error.log\", handler.EveryHour, handler.WithLogLevels(slog.DangerLevels))\n\th2 := 
handler.MustRotateFile(\"/tmp/info.log\", handler.EveryHour, handler.WithLogLevels(slog.NormalLevels))\n\n\tslog.PushHandler(h1)\n\tslog.PushHandler(h2)\n\n\t// add logs\n\tslog.Info(\"info message\")\n\tslog.Error(\"error message\")\n}\n```\n\nExample of file name sliced by time:\n\n```text\ntime-rotate-file.log\ntime-rotate-file.log.20201229_155753\ntime-rotate-file.log.20201229_155754\n```\n\nExample of a filename cut by size, in the format `filename.log.HIS_000N`. For example:\n\n```text\nsize-rotate-file.log\nsize-rotate-file.log.122915_0001\nsize-rotate-file.log.122915_0002\n```\n\n### Use rotatefile on another logger\n\n`rotatefile.Writer` can also be used with other logging packages, such as: `log`, `glog`, etc. \n\nFor example, using `rotatefile` on golang `log`:\n\n```go\npackage main\n\nimport (\n  \"log\"\n\n  \"github.com/gookit/slog/rotatefile\"\n)\n\nfunc main() {\n\tlogFile := \"testdata/go_logger.log\"\n\twriter, err := rotatefile.NewConfig(logFile).Create()\n\tif err != nil {\n\t\tpanic(err) \n\t}\n\n\tlog.SetOutput(writer)\n\tlog.Println(\"log message\")\n}\n```\n\n### Quickly create a Handler based on config\n\nThis is config struct for create a Handler:\n\n```go\n// Config struct\ntype Config struct {\n\t// Logfile for write logs\n\tLogfile string `json:\"logfile\" yaml:\"logfile\"`\n\t// LevelMode for filter log record. default LevelModeList\n\tLevelMode uint8 `json:\"level_mode\" yaml:\"level_mode\"`\n\t// Level value. use on LevelMode = LevelModeValue\n\tLevel slog.Level `json:\"level\" yaml:\"level\"`\n\t// Levels for log record\n\tLevels []slog.Level `json:\"levels\" yaml:\"levels\"`\n\t// UseJSON for format logs\n\tUseJSON bool `json:\"use_json\" yaml:\"use_json\"`\n\t// BuffMode type name. allow: line, bite\n\tBuffMode string `json:\"buff_mode\" yaml:\"buff_mode\"`\n\t// BuffSize for enable buffer, unit is bytes. 
set 0 to disable buffer\n\tBuffSize int `json:\"buff_size\" yaml:\"buff_size\"`\n\t// RotateTime for rotate file, unit is seconds.\n\tRotateTime rotatefile.RotateTime `json:\"rotate_time\" yaml:\"rotate_time\"`\n\t// MaxSize on rotate file by size, unit is bytes.\n\tMaxSize uint64 `json:\"max_size\" yaml:\"max_size\"`\n\t// Compress determines if the rotated log files should be compressed using gzip.\n\t// The default is not to perform compression.\n\tCompress bool `json:\"compress\" yaml:\"compress\"`\n\t// BackupNum max number for keep old files.\n\t// 0 is not limit, default is 20.\n\tBackupNum uint `json:\"backup_num\" yaml:\"backup_num\"`\n\t// BackupTime max time for keep old files. unit is hours\n\t// 0 is not limit, default is a week.\n\tBackupTime uint `json:\"backup_time\" yaml:\"backup_time\"`\n\t// RenameFunc build filename for rotate file\n\tRenameFunc func(filepath string, rotateNum uint) string\n}\n```\n\n**Examples**:\n\n```go\n\ttestFile := \"testdata/error.log\"\n\n\th := handler.NewEmptyConfig(\n\t\t\thandler.WithLogfile(testFile),\n\t\t\thandler.WithBuffSize(1024*8),\n\t\t\thandler.WithRotateTimeString(\"1hour\"),\n\t\t\thandler.WithLogLevels(slog.DangerLevels),\n\t\t).\n\t\tCreateHandler()\n\n\tl := slog.NewWithHandlers(h)\n```\n\n**About BuffMode**\n\n`Config.BuffMode` The name of the BuffMode type to use. 
Allow: line, bite\n\n- `BuffModeBite`: Buffer by bytes, when the number of bytes in the buffer reaches the specified size, write the contents of the buffer to the file\n- `BuffModeLine`: Buffer by line, when the buffer size is reached, always ensure that a complete line of log content is written to the file (to avoid log content being truncated)\n\n### Use Builder to quickly create Handler\n\nUse `handler.Builder` to easily and quickly create Handler instances.\n\n```go\n\ttestFile := \"testdata/info.log\"\n\n\th := handler.NewBuilder().\n\t\tWithLogfile(testFile).\n\t\tWithLogLevels(slog.NormalLevels).\n\t\tWithBuffSize(1024*8).\n\t\tWithRotateTime(rotatefile.Every30Min).\n\t\tWithCompress(true).\n\t\tBuild()\n\n\tl := slog.NewWithHandlers(h)\n```\n\n## Extension packages\n\nPackage `bufwrite`:\n\n- `bufwrite.BufIOWriter` additionally implements `Sync(), Close()` methods by wrapping go's `bufio.Writer`, which is convenient to use\n- `bufwrite.LineWriter` refer to the implementation of `bufio.Writer` in go, which can support flushing the buffer by line, which is more useful for writing log files\n\nPackage `rotatefile`:\n\n- `rotatefile.Writer` implements automatic cutting of log files according to size and specified time, and also supports automatic cleaning of log files\n  - `handler/rotate_file` is to use it to cut the log file\n\n### Use rotatefile on other log package\n\nOf course, the rotatefile.Writer can be use on other log package, such as: `log`, `glog` and more.\n\nExamples, use rotatefile on golang `log`:\n\n```go\npackage main\n\nimport (\n  \"log\"\n\n  \"github.com/gookit/slog/rotatefile\"\n)\n\nfunc main() {\n\tlogFile := \"testdata/another_logger.log\"\n\twriter, err := rotatefile.NewConfig(logFile).Create()\n\tif err != nil {\n\t\tpanic(err) \n\t}\n\n\tlog.SetOutput(writer)\n\tlog.Println(\"log message\")\n}\n```\n\n## Testing and benchmark\n\n### Unit tests\n\nrun unit tests:\n\n```bash\ngo test ./...\n```\n\n### Benchmarks\n\nBenchmark code at 
[_example/bench_loglibs_test.go](_example/bench_loglibs_test.go)\n\n```bash\nmake test-bench\n```\n\nBenchmarks for `slog` and other log packages:\n\n> **Note**: test and record ad 2023.04.13\n\n```shell\ngoos: darwin\ngoarch: amd64\ncpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                   8381674              1429 ns/op             216 B/op          3 allocs/op\nBenchmarkZapSugarNegative\nBenchmarkZapSugarNegative-4              8655980              1383 ns/op             104 B/op          4 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              14173719               849.8 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              27456256               451.2 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkLogrusNegative-4                2550771              4784 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogNegative\n>>>> BenchmarkGookitSlogNegative-4       8798220              1375 ns/op             120 B/op          3 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                  10302483              1167 ns/op             192 B/op          1 allocs/op\nBenchmarkZapSugarPositive\nBenchmarkZapSugarPositive-4              3833311              3154 ns/op             344 B/op          7 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              14120524               846.7 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              27152686               434.9 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                2601892              4691 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\n>>>> BenchmarkGookitSlogPositive-4            8997104              1340 ns/op             120 B/op          3 allocs/op\nPASS\nok   
   command-line-arguments  167.095s\n```\n\n## Gookit packages\n\n  - [gookit/ini](https://github.com/gookit/ini) Go config management, use INI files\n  - [gookit/rux](https://github.com/gookit/rux) Simple and fast request router for golang HTTP \n  - [gookit/gcli](https://github.com/gookit/gcli) Build CLI application, tool library, running CLI commands\n  - [gookit/slog](https://github.com/gookit/slog) Lightweight, extensible, configurable logging library written in Go\n  - [gookit/color](https://github.com/gookit/color) A command-line color library with true color support, universal API methods and Windows support\n  - [gookit/event](https://github.com/gookit/event) Lightweight event manager and dispatcher implements by Go\n  - [gookit/cache](https://github.com/gookit/cache) Generic cache use and cache manager for golang. support File, Memory, Redis, Memcached.\n  - [gookit/config](https://github.com/gookit/config) Go config management. support JSON, YAML, TOML, INI, HCL, ENV and Flags\n  - [gookit/filter](https://github.com/gookit/filter) Provide filtering, sanitizing, and conversion of golang data\n  - [gookit/validate](https://github.com/gookit/validate) Use for data validation and filtering. support Map, Struct, Form data\n  - [gookit/goutil](https://github.com/gookit/goutil) Some utils for the Go: string, array/slice, map, format, cli, env, filesystem, test and more\n  - More, please see https://github.com/gookit\n\n## Acknowledgment\n\nThe projects is heavily inspired by follow packages:\n\n- https://github.com/phuslu/log\n- https://github.com/golang/glog\n- https://github.com/sirupsen/logrus\n- https://github.com/Seldaek/monolog\n- https://github.com/syyongx/llog\n- https://github.com/uber-go/zap\n- https://github.com/rs/zerolog\n- https://github.com/natefinch/lumberjack\n  \n## LICENSE\n\n[MIT](LICENSE)\n"
  },
  {
    "path": "README.zh-CN.md",
    "content": "# slog\n\n![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/gookit/slog?style=flat-square)\n[![GoDoc](https://pkg.go.dev/badge/github.com/gookit/slog.svg)](https://pkg.go.dev/github.com/gookit/slog)\n[![Go Report Card](https://goreportcard.com/badge/github.com/gookit/slog)](https://goreportcard.com/report/github.com/gookit/slog)\n[![Unit-Tests](https://github.com/gookit/slog/workflows/Unit-Tests/badge.svg)](https://github.com/gookit/slog/actions)\n[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gookit/slog)](https://github.com/gookit/slog)\n[![Coverage Status](https://coveralls.io/repos/github/gookit/slog/badge.svg?branch=master)](https://coveralls.io/github/gookit/slog?branch=master)\n\n📑 Go 实现的一个易于使用的，结构化的，易扩展、可配置的日志库。\n\n**控制台日志效果:**\n\n![console-log-all-level](_example/images/console-log-all-level.png)\n\n## 功能特色\n\n- 简单，无需配置，开箱即用\n- 支持常用的日志级别处理\n  - 如： `trace` `debug` `info` `notice` `warn` `error` `fatal` `panic`\n- 可以任意扩展自己需要的 `Handler` `Formatter` \n- 支持同时添加多个 `Handler` 日志处理，输出日志到不同的地方\n- 支持自定义构建 `Handler` 处理器\n  - 内置的 `handler.Config` `handler.Builder`,可以方便快捷的构建想要的日志处理器\n- 支持自定义 `Formatter` 格式化处理\n  - 内置了 `json` `text` 两个日志记录格式化 `Formatter`\n- 已经内置了常用的日志处理器\n  - `console` 输出日志到控制台，支持色彩输出\n  - `writer` 输出日志到指定的 `io.Writer`\n  - `file` 输出日志到指定文件，可选启用 `buffer` 缓冲写入\n  - `simple` 输出日志到指定文件，无缓冲直接写入文件\n  - `rotate_file` 输出日志到指定文件，并且同时支持按时间、按大小分割文件，默认启用 `buffer` 缓冲写入\n  - 更多内置实现请查看 [./handler](./handler) 文件夹\n- 基准性能测试请看 [Benchmarks](#benchmarks)\n\n### 输出日志到文件\n\n- 支持启用 `buffer` 缓冲日志写入\n- 支持按时间、按大小自动分割文件\n- 支持配置通过 `gzip` 压缩日志文件\n- 支持清理旧日志文件 配置: `BackupNum` `BackupTime`\n\n### `rotatefile` 子包\n\n- `rotatefile` 子包是一个拥有文件分割,清理,压缩备份的独立工具库\n- `rotatefile.Writer` 也可以直接包装使用用在其他日志库。例如：`log`、`glog`、`zap` 等等\n- `rotatefile.FilesClear` 是一个独立的文件清理备份工具, 可以用在其他地方(如 PHP等其他程序日志清理)\n- 更多使用请查看 [rotatefile](rotatefile/README.md)\n\n### GORM 中使用 slog\n\n请查看 
https://github.com/gookit/slog/issues/127#issuecomment-2827745713\n\n## [English](README.md)\n\nEnglish instructions please see [./README](README.md)\n\n## GoDoc\n\n- [Godoc for github](https://pkg.go.dev/github.com/gookit/slog?tab=doc)\n\n## 安装\n\n```bash\ngo get github.com/gookit/slog\n```\n\n## 快速开始\n\n`slog` 使用非常简单，无需任何配置即可使用。\n\n```go\npackage main\n\nimport (\n\t\"github.com/gookit/slog\"\n)\n\nfunc main() {\n\tslog.Info(\"info log message\")\n\tslog.Warn(\"warning log message\")\n\tslog.Infof(\"info log %s\", \"message\")\n\tslog.Debugf(\"debug %s\", \"message\")\n}\n```\n\n**输出预览:**\n\n```text\n[2020/07/16 12:19:33] [application] [INFO] [main.go:7] info log message  \n[2020/07/16 12:19:33] [application] [WARNING] [main.go:8] warning log message  \n[2020/07/16 12:19:33] [application] [INFO] [main.go:9] info log message  \n[2020/07/16 12:19:33] [application] [DEBUG] [main.go:10] debug message  \n```\n\n### 启用控制台颜色\n\n您可以在输出控制台日志时启用颜色输出，将会根据不同级别打印不同色彩。\n\n```go\npackage main\n\nimport (\n\t\"github.com/gookit/slog\"\n)\n\nfunc main() {\n\tslog.Configure(func(logger *slog.SugaredLogger) {\n\t\tf := logger.Formatter.(*slog.TextFormatter)\n\t\tf.EnableColor = true\n\t})\n\n\tslog.Trace(\"this is a simple log message\")\n\tslog.Debug(\"this is a simple log message\")\n\tslog.Info(\"this is a simple log message\")\n\tslog.Notice(\"this is a simple log message\")\n\tslog.Warn(\"this is a simple log message\")\n\tslog.Error(\"this is a simple log message\")\n\tslog.Fatal(\"this is a simple log message\")\n}\n```\n\n**输出预览:**\n\n![](_example/images/console-color-log.png)\n\n### 更改日志输出样式\n\n上面是更改了默认logger的 `Formatter` 设置。\n\n> 你也可以创建自己的logger，并追加 `ConsoleHandler` 来支持打印日志到控制台：\n\n```go\nh := handler.NewConsoleHandler(slog.AllLevels)\nl := slog.NewWithHandlers(h)\n\nl.Trace(\"this is a simple log message\")\nl.Debug(\"this is a simple log 
message\")\n```\n\n更改默认的logger日志输出样式:\n\n```go\nh.Formatter().(*slog.TextFormatter).SetTemplate(slog.NamedTemplate)\n```\n\n**输出预览:**\n\n![](_example/images/console-color-log1.png)\n\n> 注意：`slog.TextFormatter` 使用模板字符串来格式化输出日志，因此新增字段输出需要同时调整模板。\n\n### 使用JSON格式\n\nslog 也内置了 JSON 格式的 `Formatter`。若不特别指定，默认都是使用 `TextFormatter` 格式化日志记录。\n\n```go\npackage main\n\nimport (\n\t\"github.com/gookit/slog\"\n)\n\nfunc main() {\n\t// use JSON formatter\n\tslog.SetFormatter(slog.NewJSONFormatter())\n\n\tslog.Info(\"info log message\")\n\tslog.Warn(\"warning log message\")\n\tslog.WithData(slog.M{\n\t\t\"key0\": 134,\n\t\t\"key1\": \"abc\",\n\t}).Infof(\"info log %s\", \"message\")\n\n\tr := slog.WithFields(slog.M{\n\t\t\"category\": \"service\",\n\t\t\"IP\": \"127.0.0.1\",\n\t})\n\tr.Infof(\"info %s\", \"message\")\n\tr.Debugf(\"debug %s\", \"message\")\n}\n```\n\n**输出预览:**\n\n```text\n{\"channel\":\"application\",\"data\":{},\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"INFO\",\"message\":\"info log message\"}\n{\"channel\":\"application\",\"data\":{},\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"WARNING\",\"message\":\"warning log message\"}\n{\"channel\":\"application\",\"data\":{\"key0\":134,\"key1\":\"abc\"},\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"INFO\",\"message\":\"info log message\"}\n{\"IP\":\"127.0.0.1\",\"category\":\"service\",\"channel\":\"application\",\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"INFO\",\"message\":\"info message\"}\n{\"IP\":\"127.0.0.1\",\"category\":\"service\",\"channel\":\"application\",\"datetime\":\"2020/07/16 13:23:33\",\"extra\":{},\"level\":\"DEBUG\",\"message\":\"debug message\"}\n```\n\n## 架构说明\n\n- `Logger` - 日志调度器. 
一个logger可以注册多个 `Handler`,`Processor`\n- `Record` - 日志记录，每条日志就是一个 `Record` 实例。\n- `Processor` - 可以对日志记录进行扩展处理。它在日志 `Record` 被 `Handler` 处理之前调用。\n  - 你可以使用它对 `Record` 进行额外的操作，比如：新增字段，添加扩展信息等\n- `Handler` - 日志处理器，每条日志都会经过 `Handler.Handle()` 处理。\n  - 在这里你可以将日志发送到 控制台，文件，远程服务器等等。\n- `Formatter` - 日志记录数据格式化处理。\n  - 通常设置于 `Handler` 中，可以用于格式化日志记录，将记录转成文本，JSON等，`Handler` 再将格式化后的数据写入到指定的地方。\n  - `Formatter` 不是必须的。你可以不使用它,直接在 `Handler.Handle()` 中对日志记录进行处理。\n\n**日志调度器简易结构**：\n\n```text\n          Processors\nLogger --{\n          Handlers --|- Handler0 With Formatter0\n                     |- Handler1 With Formatter1\n                     |- Handler2 (can also without Formatter)\n                     |- ... more\n```\n\n> 注意：一定要记得将 `Handler`, `Processor` 添加注册到 logger 实例上，日志记录才会经过 `Handler` 处理。\n\n### Processor 定义\n\n`Processor` 接口定义如下:\n\n```go\n// Processor interface definition\ntype Processor interface {\n\t// Process record\n\tProcess(record *Record)\n}\n\n// ProcessorFunc definition\ntype ProcessorFunc func(record *Record)\n\n// Process record\nfunc (fn ProcessorFunc) Process(record *Record) {\n\tfn(record)\n}\n```\n\n> 你可以使用它在日志 `Record` 到达 `Handler` 处理之前，对Record进行额外的操作，比如：新增字段，添加扩展信息等\n\n添加 processor 到 logger:\n\n```go\nslog.AddProcessor(mypkg.AddHostname())\n\n// or\nl := slog.New()\nl.AddProcessor(mypkg.AddHostname())\n```\n\n这里使用内置的processor `slog.AddHostname` 作为示例，它可以在每条日志记录上添加新字段 `hostname`。\n\n```go\nslog.AddProcessor(slog.AddHostname())\nslog.Info(\"message\")\n```\n\n输出效果，包含新增字段 `\"hostname\":\"InhereMac\"`：\n\n```json\n{\"channel\":\"application\",\"level\":\"INFO\",\"datetime\":\"2020/07/17 12:01:35\",\"hostname\":\"InhereMac\",\"data\":{},\"extra\":{},\"message\":\"message\"}\n```\n\n### Handler 定义\n\n`Handler` 接口定义如下:\n\n> 你可以自定义任何想要的 `Handler`，只需要实现 `slog.Handler` 接口即可。\n\n```go\n// Handler interface definition\ntype Handler interface {\n\tio.Closer\n\tFlush() error\n\t// IsHandling Checks whether the given record will be handled by this 
handler.\n\tIsHandling(level Level) bool\n\t// Handle a log record.\n\t// all records may be passed to this method, and the handler should discard\n\t// those that it does not want to handle.\n\tHandle(*Record) error\n}\n```\n\n### Formatter 定义\n\n`Formatter` 接口定义如下:\n\n```go\n// Formatter interface\ntype Formatter interface {\n\tFormat(record *Record) ([]byte, error)\n}\n```\n\n函数包装类型：\n\n```go\n// FormatterFunc wrapper definition\ntype FormatterFunc func(r *Record) ([]byte, error)\n\n// Format a log record\nfunc (fn FormatterFunc) Format(r *Record) ([]byte, error) {\n\treturn fn(r)\n}\n```\n\n**JSON格式化Formatter**\n\n```go\ntype JSONFormatter struct {\n\t// Fields exported log fields.\n\tFields []string\n\t// Aliases for output fields. you can change export field name.\n\t// item: `\"field\" : \"output name\"`\n\t// eg: {\"message\": \"msg\"} export field will display \"msg\"\n\tAliases StringMap\n\t// PrettyPrint will indent all json logs\n\tPrettyPrint bool\n\t// TimeFormat the time format layout. 
default is time.RFC3339\n\tTimeFormat string\n}\n```\n\n**Text格式化formatter**\n\n默认模板:\n\n```go\nconst DefaultTemplate = \"[{{datetime}}] [{{channel}}] [{{level}}] [{{caller}}] {{message}} {{data}} {{extra}}\\n\"\nconst NamedTemplate = \"{{datetime}} channel={{channel}} level={{level}} [file={{caller}}] message={{message}} data={{data}}\\n\"\n```\n\n更改模板:\n\n```go\nmyTemplate := \"[{{datetime}}] [{{level}}] {{message}}\"\n\nf := slog.NewTextFormatter()\nf.SetTemplate(myTemplate)\n```\n\n## 自定义日志\n\n自定义 Processor 和 自定义 Formatter 都比较简单，实现一个对应方法即可。\n\n### 创建自定义Logger实例\n\n`slog.Info, slog.Warn` 等方法，使用的默认logger，并且默认输出日志到控制台。 \n\n你可以创建一个全新的 `slog.Logger` 实例：\n\n**方式1**：\n\n```go\nl := slog.New()\n// add handlers ...\nh1 := handler.NewConsoleHandler(slog.AllLevels)\nl.AddHandlers(h1)\n```\n\n**方式2**：\n\n```go\nl := slog.NewWithName(\"myLogger\")\n// add handlers ...\nh1 := handler.NewConsoleHandler(slog.AllLevels)\nl.AddHandlers(h1)\n```\n\n**方式3**：\n\n```go\npackage main\n\nimport (\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc main() {\n\tl := slog.NewWithHandlers(handler.NewConsoleHandler(slog.AllLevels))\n\tl.Info(\"message\")\n}\n```\n\n### 创建自定义 Handler\n\n你只需要实现 `slog.Handler` 接口即可创建自定义 `Handler`。你可以通过 slog内置的\n`handler.LevelsWithFormatter` `handler.LevelWithFormatter`等片段快速的组装自己的 Handler。\n\n示例:\n\n> 使用了 `handler.LevelsWithFormatter`， 只还需要实现 `Close, Flush, Handle` 方法即可\n\n```go\ntype MyHandler struct {\n\thandler.LevelsWithFormatter\n    Output io.Writer\n}\n\nfunc (h *MyHandler) Handle(r *slog.Record) error {\n\t// you can write log message to file or send to remote.\n}\n\nfunc (h *MyHandler) Flush() error {}\nfunc (h *MyHandler) Close() error {}\n```\n\n将 `Handler` 添加到 logger即可使用:\n\n```go\n// 添加到默认 logger\nslog.AddHander(&MyHandler{})\n\n// 或者添加到自定义 logger:\nl := slog.New()\nl.AddHander(&MyHandler{})\n```\n\n## 使用内置处理器\n\n[./handler](handler) 包已经内置了常用的日志 Handler，基本上可以满足绝大部分场景。\n\n```go\n// 输出日志到控制台\nfunc NewConsoleHandler(levels 
[]slog.Level) *ConsoleHandler\n// 发送日志到email邮箱\nfunc NewEmailHandler(from EmailOption, toAddresses []string) *EmailHandler\n// 发送日志到系统的syslog\nfunc NewSysLogHandler(priority syslog.Priority, tag string) (*SysLogHandler, error)\n// 一个简单的handler实现，输出日志到给定的 io.Writer\nfunc NewSimpleHandler(out io.Writer, level slog.Level) *SimpleHandler\n```\n\n**输出日志到文件**:\n\n```go\n// 输出日志到指定文件，默认不带缓冲\nfunc NewFileHandler(logfile string, fns ...ConfigFn) (h *SyncCloseHandler, err error)\n// 输出日志到指定文件且格式为JSON，默认不带缓冲\nfunc JSONFileHandler(logfile string, fns ...ConfigFn) (*SyncCloseHandler, error)\n// 带缓冲的输出日志到指定文件\nfunc NewBuffFileHandler(logfile string, buffSize int, fns ...ConfigFn) (*SyncCloseHandler, error)\n```\n\n> TIP: `NewFileHandler` `JSONFileHandler` 也可以通过传入 fns `handler.WithBuffSize(buffSize)` 启用写入缓冲\n\n**输出日志到文件并自动切割**:\n\n```go\n// 根据文件大小进行自动切割\nfunc NewSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error)\n// 根据时间进行自动切割\nfunc NewTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error)\n// 同时支持配置根据大小和时间进行切割, 默认设置文件大小是 20M，默认自动分割时间是 1小时(EveryHour)。\nfunc NewRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error)\n```\n\n> TIP: 通过传入 `fns ...ConfigFn` 可以设置更多选项，比如 日志文件保留时间, 日志写入缓冲大小等。 详细设置请看 `handler.Config` 结构体\n\n### 输出日志到文件\n\n输出日志到指定文件，默认不启用 `buffer` 缓冲写入。 也可以通过传入参数启用缓冲。\n\n```go\npackage mypkg\n\nimport (\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc main() {\n\tdefer slog.MustClose()\n\n\t// DangerLevels 包含： slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel\n\th1 := handler.MustFileHandler(\"/tmp/error.log\", handler.WithLogLevels(slog.DangerLevels))\n\t// 配置日志格式\n\t// f := h1.Formatter().(*slog.TextFormatter)\n\tf := slog.AsTextFormatter(h1.Formatter())\n\tf.SetTemplate(\"your template format\\n\")\n\n\t// NormalLevels 包含： slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel\n\th2 := 
handler.MustFileHandler(\"/tmp/info.log\", handler.WithLogLevels(slog.NormalLevels))\n\n\t// 注册 handler 到 logger(调度器)\n\tslog.PushHandler(h1)\n\tslog.PushHandler(h2)\n\n\t// add logs\n\tslog.Info(\"info message text\")\n\tslog.Error(\"error message text\")\n}\n```\n\n> 提示: 如果启用了写入缓冲 `buffer`，一定要在程序结束时调用 `logger.Close()/MustClose()` 刷出缓冲区的内容到文件并关闭句柄。\n\n### 带自动切割的日志处理器\n\n`slog/handler` 也内置了输出日志到指定文件，并且同时支持按时间、按大小分割文件，默认启用 `buffer` 缓冲写入\n\n```go\nfunc Example_rotateFileHandler() {\n\th1 := handler.MustRotateFile(\"/tmp/error.log\", handler.EveryHour, handler.WithLogLevels(slog.DangerLevels))\n\th2 := handler.MustRotateFile(\"/tmp/info.log\", handler.EveryHour, handler.WithLogLevels(slog.NormalLevels))\n\n\tslog.PushHandler(h1)\n\tslog.PushHandler(h2)\n\n\t// add logs\n\tslog.Info(\"info message\")\n\tslog.Error(\"error message\")\n}\n```\n\n按时间切割文件示例:\n\n```text\ntime-rotate-file.log\ntime-rotate-file.log.20201229_155753\ntime-rotate-file.log.20201229_155754\n```\n\n按大小进行切割的文件名示例, 格式 `filename.log.yMD_000N`. 
例如:\n\n```text\nsize-rotate-file.log\nsize-rotate-file.log.122915_00001\nsize-rotate-file.log.122915_00002\n```\n\n启用gzip压缩旧的日志文件:\n\n```go\n\th1 := handler.MustRotateFile(\"/tmp/error.log\", handler.EveryHour, \n\t\thandler.WithLogLevels(slog.DangerLevels),\n\t\thandler.WithCompress(true),\n\t)\n```\n\n```text\nsize-rotate-file.log.122915_00001.gz\nsize-rotate-file.log.122915_00002.gz\n```\n\n### 根据配置快速创建Handler实例\n\n```go\n// Config struct\ntype Config struct {\n\t// Logfile for write logs\n\tLogfile string `json:\"logfile\" yaml:\"logfile\"`\n\t// LevelMode 筛选日志记录的过滤级别，默认为 LevelModeList\n\tLevelMode uint8 `json:\"level_mode\" yaml:\"level_mode\"`\n\t// Level 筛选日志记录的级别值。当 LevelMode = LevelModeValue 时生效\n \tLevel slog.Level `json:\"level\" yaml:\"level\"`\n\t// Levels 日志记录的级别列表。当 LevelMode = LevelModeList 时生效\n\tLevels []slog.Level `json:\"levels\" yaml:\"levels\"`\n\t// UseJSON 是否以 JSON 格式输出日志\n\tUseJSON bool `json:\"use_json\" yaml:\"use_json\"`\n\t// BuffMode 使用的buffer缓冲模式. allow: line, bite\n\tBuffMode string `json:\"buff_mode\" yaml:\"buff_mode\"`\n\t// BuffSize 开启缓冲时的缓冲区大小，单位为字节。设置为 0 时禁用缓冲\n\tBuffSize int `json:\"buff_size\" yaml:\"buff_size\"`\n\t// RotateTime 用于按时间切割文件，单位是秒。\n\tRotateTime rotatefile.RotateTime `json:\"rotate_time\" yaml:\"rotate_time\"`\n\t// MaxSize 用于按大小旋转切割文件，单位是字节。\n\tMaxSize uint64 `json:\"max_size\" yaml:\"max_size\"`\n\t// Compress 是否对切割后的日志进行 gzip 压缩。 默认为不压缩\n\tCompress bool `json:\"compress\" yaml:\"compress\"`\n\t// BackupNum 日志清理，保留旧文件的最大数量。\n\t// 0 不限制，默认为 20。\n\tBackupNum uint `json:\"backup_num\" yaml:\"backup_num\"`\n\t// BackupTime 日志清理，保留旧文件的最长时间。单位是小时\n\t// 0 不进行清理，默认为一周。\n\tBackupTime uint `json:\"backup_time\" yaml:\"backup_time\"`\n\t// RenameFunc build filename for rotate file\n\tRenameFunc func(filepath string, rotateNum uint) string\n}\n```\n\n**Examples**:\n\n```go\n\ttestFile := \"testdata/error.log\"\n\n\th := 
handler.NewEmptyConfig(\n\t\t\thandler.WithLogfile(testFile),\n\t\t\thandler.WithBuffSize(1024*8),\n\t\t\thandler.WithRotateTimeString(\"1hour\"),\n\t\t\thandler.WithLogLevels(slog.DangerLevels),\n\t\t).\n\t\tCreateHandler()\n\n\tl := slog.NewWithHandlers(h)\n```\n\n**BuffMode说明**\n\n`Config.BuffMode` 使用的 BuffMode 类型名称。允许的值：line、bite\n\n- `BuffModeLine`：按行缓冲，到达缓冲大小时，始终保证一行完整日志内容写入文件(可以避免日志内容被截断)\n- `BuffModeBite`：按字节缓冲，当缓冲区的字节数达到指定的大小时，将缓冲区的内容写入文件\n\n### 使用Builder快速创建Handler实例\n\n使用 `handler.Builder` 可以方便快速的创建Handler实例。\n\n```go\n\ttestFile := \"testdata/info.log\"\n\n\th := handler.NewBuilder().\n\t\tWithLogfile(testFile).\n\t\tWithLogLevels(slog.NormalLevels).\n\t\tWithBuffSize(1024*8).\n\t\tWithRotateTime(rotatefile.Every30Min).\n\t\tWithCompress(true).\n\t\tBuild()\n\t\n\tl := slog.NewWithHandlers(h)\n```\n\n## 扩展工具包\n\n`bufwrite` 包:\n\n- `bufwrite.BufIOWriter` 通过包装go的 `bufio.Writer` 额外实现了 `Sync(), Close()` 方法，方便使用\n- `bufwrite.LineWriter` 参考go的 `bufio.Writer` 实现, 可以支持按行刷出缓冲，对于写日志文件更有用\n\n`rotatefile` 包:\n\n- `rotatefile.Writer` 实现对日志文件按大小和指定时间进行自动切割，同时也支持自动清理日志文件\n  - `handler/rotate_file` 即是通过使用它对日志文件进行切割处理\n\n### 在其他日志包上使用 rotatefile\n\n`rotatefile.Writer` 也可以用在其他日志包上，例如：`log`、`glog` 等等。\n\n例如，在 golang `log` 上使用 rotatefile:\n\n```go\npackage main\n\nimport (\n  \"log\"\n\n  \"github.com/gookit/slog/rotatefile\"\n)\n\nfunc main() {\n\tlogFile := \"testdata/another_logger.log\"\n\twriter, err := rotatefile.NewConfig(logFile).Create()\n\tif err != nil {\n\t\tpanic(err) \n\t}\n\n\tlog.SetOutput(writer)\n\tlog.Println(\"log message\")\n}\n```\n\n## 测试以及性能\n\n### 单元测试\n\n运行单元测试\n\n```bash\ngo test -v ./...\n```\n\n### 性能压测\n\nBenchmark code at [_example/bench_loglibs_test.go](_example/bench_loglibs_test.go)\n\n```bash\nmake test-bench\n```\n\nBenchmarks for `slog` and other log packages:\n\n> **Note**: test and record ad 2023.04.13\n\n```shell\ngoos: darwin\ngoarch: amd64\ncpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4 
                  8381674              1429 ns/op             216 B/op          3 allocs/op\nBenchmarkZapSugarNegative\nBenchmarkZapSugarNegative-4              8655980              1383 ns/op             104 B/op          4 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              14173719               849.8 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              27456256               451.2 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkLogrusNegative-4                2550771              4784 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogNegative\n>>>> BenchmarkGookitSlogNegative-4            8798220              1375 ns/op             120 B/op          3 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                  10302483              1167 ns/op             192 B/op          1 allocs/op\nBenchmarkZapSugarPositive\nBenchmarkZapSugarPositive-4              3833311              3154 ns/op             344 B/op          7 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              14120524               846.7 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              27152686               434.9 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                2601892              4691 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\n>>>> BenchmarkGookitSlogPositive-4            8997104              1340 ns/op             120 B/op          3 allocs/op\nPASS\nok      command-line-arguments  167.095s\n```\n\n## Gookit packages\n\n  - [gookit/ini](https://github.com/gookit/ini) Go config management, use INI files\n  - [gookit/rux](https://github.com/gookit/rux) Simple and fast request router for golang HTTP \n  - [gookit/gcli](https://github.com/gookit/gcli) Build CLI application, 
tool library, running CLI commands\n  - [gookit/slog](https://github.com/gookit/slog) Lightweight, extensible, configurable logging library written in Go\n  - [gookit/color](https://github.com/gookit/color) A command-line color library with true color support, universal API methods and Windows support\n  - [gookit/event](https://github.com/gookit/event) Lightweight event manager and dispatcher implements by Go\n  - [gookit/cache](https://github.com/gookit/cache) Generic cache use and cache manager for golang. support File, Memory, Redis, Memcached.\n  - [gookit/config](https://github.com/gookit/config) Go config management. support JSON, YAML, TOML, INI, HCL, ENV and Flags\n  - [gookit/filter](https://github.com/gookit/filter) Provide filtering, sanitizing, and conversion of golang data\n  - [gookit/validate](https://github.com/gookit/validate) Use for data validation and filtering. support Map, Struct, Form data\n  - [gookit/goutil](https://github.com/gookit/goutil) Some utils for the Go: string, array/slice, map, format, cli, env, filesystem, test and more\n  - More, please see https://github.com/gookit\n\n## Acknowledgment\n\n实现参考了以下项目，非常感谢它们\n\n- https://github.com/phuslu/log\n- https://github.com/golang/glog\n- https://github.com/sirupsen/logrus\n- https://github.com/Seldaek/monolog\n- https://github.com/syyongx/llog\n- https://github.com/uber-go/zap\n- https://github.com/rs/zerolog\n- https://github.com/natefinch/lumberjack\n\n## LICENSE\n\n[MIT](LICENSE)\n"
  },
  {
    "path": "_example/bench_loglibs.md",
    "content": "# Log libs benchmarks\n\nRun benchmark: `make test-bench`\n\n> **Note**: on each test will update all package to latest.\n\n## v0.5.5 - 2023.11.30\n\n```shell\ngoos: darwin\ngoarch: amd64\ncpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                  14441875               821.2 ns/op           216 B/op          3 allocs/op\nBenchmarkZapSugarNegative\nBenchmarkZapSugarNegative-4             13870006               916.1 ns/op           104 B/op          4 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              34721730               359.2 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              39690291               314.4 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkLogrusNegative-4                5605184              2161 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogNegative\nBenchmarkGookitSlogNegative-4           14375598               819.2 ns/op           256 B/op          4 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                  15237236               788.5 ns/op           192 B/op          1 allocs/op\nBenchmarkZapSugarPositive\nBenchmarkZapSugarPositive-4              6592038              1910 ns/op             344 B/op          7 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              33931623               366.1 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              38740174               309.4 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                5697038              2197 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\nBenchmarkGookitSlogPositive-4           14531062               814.6 ns/op           256 B/op          4 allocs/op\nPASS\nok      command-line-arguments  
159.849s\n```\n\n## v0.5.1 - 2023.04.13\n\n> **Note**: test and record ad 2023.04.13\n\n```shell\ngoos: darwin\ngoarch: amd64\ncpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                   8381674              1429 ns/op             216 B/op          3 allocs/op\nBenchmarkZapSugarNegative\nBenchmarkZapSugarNegative-4              8655980              1383 ns/op             104 B/op          4 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              14173719               849.8 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              27456256               451.2 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkLogrusNegative-4                2550771              4784 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogNegative\nBenchmarkGookitSlogNegative-4            8798220              1375 ns/op             120 B/op          3 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                  10302483              1167 ns/op             192 B/op          1 allocs/op\nBenchmarkZapSugarPositive\nBenchmarkZapSugarPositive-4              3833311              3154 ns/op             344 B/op          7 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              14120524               846.7 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              27152686               434.9 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                2601892              4691 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\nBenchmarkGookitSlogPositive-4            8997104              1340 ns/op             120 B/op          3 allocs/op\nPASS\nok      command-line-arguments  167.095s\n```\n\n## v0.3.5 - 2022.11.08\n\n> **Note**: test and record ad 
2022.11.08\n\n```shell\n% make test-bench\ngoos: darwin\ngoarch: amd64\ncpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                  123297997              110.4 ns/op           192 B/op          1 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              891508806               13.36 ns/op            0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              811990076               14.74 ns/op            0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkLogrusNegative-4               242633541               49.40 ns/op           16 B/op          1 allocs/op\nBenchmarkGookitSlogNegative\nBenchmarkGookitSlogNegative-4           29102253               422.8 ns/op           125 B/op          4 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                   9772791              1194 ns/op             192 B/op          1 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              13944360               856.8 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              27839614               431.2 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                2621076              4583 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\nBenchmarkGookitSlogPositive-4            8908768              1359 ns/op             149 B/op          5 allocs/op\nPASS\nok      command-line-arguments  149.379s\n```\n\n## v0.3.0\n\n> **Note**: test and record ad 2022.04.27\n\n```shell\n% make test-bench\ngoos: darwin\ngoarch: amd64\ncpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                  128133166               93.97 ns/op          192 B/op          1 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              909583207               13.41 ns/op      
      0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              784099310               15.24 ns/op            0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkLogrusNegative-4               289939296               41.60 ns/op           16 B/op          1 allocs/op\nBenchmarkGookitSlogNegative\nBenchmarkGookitSlogNegative-4           29131203               417.4 ns/op           125 B/op          4 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                   9910075              1219 ns/op             192 B/op          1 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              13966810               871.0 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              26743148               446.2 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                2658482              4481 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\nBenchmarkGookitSlogPositive-4            8349562              1441 ns/op             165 B/op          6 allocs/op\nPASS\nok      command-line-arguments  146.669s\n```\n\n### beta 2022.04.17\n\n> **Note**: test and record ad 2022.04.17\n\n```shell\n$ go test -v -cpu=4 -run=none -bench=. 
-benchtime=10s -benchmem bench_loglibs_test.go\ngoos: darwin\ngoarch: amd64\ncpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                  130808992               91.91 ns/op          192 B/op          1 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              914445844               13.19 ns/op            0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              792539167               15.32 ns/op            0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkLogrusNegative-4               289393606               40.61 ns/op           16 B/op          1 allocs/op\nBenchmarkGookitSlogNegative\nBenchmarkGookitSlogNegative-4           29522170               405.3 ns/op           125 B/op          4 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                   9113048              1283 ns/op             192 B/op          1 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              14691699               797.0 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              27634338               424.5 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                2734669              4363 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\nBenchmarkGookitSlogPositive-4            7740348              1563 ns/op             165 B/op          6 allocs/op\nPASS\nok      command-line-arguments  145.175s\n\n```\n\n## v0.2.1\n\n> **Note**: test and record ad 2022.04.17\n\n```shell\n$ go test -v -cpu=4 -run=none -bench=. 
-benchtime=10s -benchmem bench_loglibs_test.go\ngoos: darwin\ngoarch: amd64\ncpu: Intel(R) Core(TM) i7-3740QM CPU @ 2.70GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                  125500471              125.8 ns/op           192 B/op          1 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              839046109               13.71 ns/op            0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              757766400               15.56 ns/op            0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkLogrusNegative-4               253178256               47.12 ns/op           16 B/op          1 allocs/op\nBenchmarkGookitSlogNegative\nBenchmarkGookitSlogNegative-4           30091606               401.9 ns/op            45 B/op          3 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                   9761935              1216 ns/op             192 B/op          1 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              13860344               837.1 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              27666529               447.8 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                2705653              4403 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\nBenchmarkGookitSlogPositive-4            1836384              6882 ns/op             680 B/op         11 allocs/op\nPASS\nok      command-line-arguments  156.038s\n```\n\n## v0.2.0\n\n> record ad 2022.02.26\n\n```shell\n$ go test -v -cpu=4 -run=none -bench=. 
-benchtime=10s -benchmem bench_loglibs_test.go\ngoos: windows\ngoarch: amd64                               \ncpu: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                  139243226               86.39 ns/op          192 B/op          1 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              1000000000               8.302 ns/op           0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkPhusLogNegative-4              1000000000               8.989 ns/op           0 B/op          0 allocs/op\nBenchmarkLogrusNegative\nBenchmarkGookitSlogNegative-4           38300540               323.3 ns/op           221 B/op          5 allocs/op\nBenchmarkZapPositive\nBenchmarkZapPositive-4                  14453001               828.1 ns/op           192 B/op          1 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              28671724               420.9 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              45619569               261.9 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                5092164              2366 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\nBenchmarkGookitSlogPositive-4            3184557              3754 ns/op             856 B/op         13 allocs/op\nPASS\nok      command-line-arguments  135.460s\n```\n\n## v0.1.5\n\n> record ad 2022.02.26\n\n```shell\n$ go test -v -cpu=4 -run=none -bench=. 
-benchtime=10s -benchmem bench_loglibs_test.go\ngoos: windows\ngoarch: amd64\ncpu: Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz\nBenchmarkZapNegative\nBenchmarkZapNegative-4                  137676860               86.43 ns/op          192 B/op          1 allocs/op\nBenchmarkZeroLogNegative\nBenchmarkZeroLogNegative-4              1000000000               8.284 ns/op           0 B/op          0 allocs/op\nBenchmarkPhusLogNegative\nBenchmarkZapPositive-4                  14250313               831.7 ns/op           192 B/op          1 allocs/op\nBenchmarkZeroLogPositive\nBenchmarkZeroLogPositive-4              28183436               426.0 ns/op             0 B/op          0 allocs/op\nBenchmarkPhusLogPositive\nBenchmarkPhusLogPositive-4              44034984               258.7 ns/op             0 B/op          0 allocs/op\nBenchmarkLogrusPositive\nBenchmarkLogrusPositive-4                5005593              2421 ns/op             608 B/op         17 allocs/op\nBenchmarkGookitSlogPositive\nBenchmarkGookitSlogPositive-4            1714084              7029 ns/op            4480 B/op         45 allocs/op\nPASS\nok      command-line-arguments  138.199s\n```"
  },
  {
    "path": "_example/bench_loglibs_test.go",
    "content": "package main\n\nimport (\n\t\"io\"\n\tgoslog \"log/slog\"\n\t\"testing\"\n\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n\tphuslu \"github.com/phuslu/log\"\n\t\"github.com/rs/zerolog\"\n\t\"github.com/sirupsen/logrus\"\n\t\"go.uber.org/zap\"\n\t\"go.uber.org/zap/zapcore\"\n)\n\n// In _example/ dir, run:\n//\n//\tgo test -v -cpu=4 -run=none -bench=. -benchtime=10s -benchmem bench_loglibs_test.go\n//\n// code refer:\n//\n//\thttps://github.com/phuslu/log\nvar msg = \"The quick brown fox jumps over the lazy dog\"\n\nfunc BenchmarkGoSlogNegative(b *testing.B) {\n\tlogger := goslog.New(goslog.NewTextHandler(io.Discard, &goslog.HandlerOptions{\n\t\tLevel: goslog.LevelInfo,\n\t}))\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(msg, goslog.String(\"rate\", \"15\"), goslog.Int(\"low\", 16), goslog.Float64(\"high\", 123.2))\n\t}\n}\n\nfunc BenchmarkZapNegative(b *testing.B) {\n\tlogger := zap.New(zapcore.NewCore(\n\t\tzapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()),\n\t\tzapcore.AddSync(io.Discard),\n\t\tzapcore.InfoLevel,\n\t))\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(msg, zap.String(\"rate\", \"15\"), zap.Int(\"low\", 16), zap.Float32(\"high\", 123.2))\n\t}\n}\n\nfunc BenchmarkZapSugarNegative(b *testing.B) {\n\tlogger := zap.New(zapcore.NewCore(\n\t\tzapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()),\n\t\tzapcore.AddSync(io.Discard),\n\t\t// zapcore.AddSync(os.Stdout),\n\t\tzapcore.InfoLevel,\n\t)).Sugar()\n\n\t// logger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t// return\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t}\n}\n\nfunc BenchmarkZeroLogNegative(b *testing.B) {\n\tlogger := zerolog.New(io.Discard).With().Timestamp().Logger().Level(zerolog.InfoLevel)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i 
< b.N; i++ {\n\t\tlogger.Info().Str(\"rate\", \"15\").Int(\"low\", 16).Float32(\"high\", 123.2).Msg(msg)\n\t}\n}\n\nfunc BenchmarkPhusLogNegative(b *testing.B) {\n\tlogger := phuslu.Logger{Level: phuslu.InfoLevel, Writer: phuslu.IOWriter{Writer: io.Discard}}\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info().Str(\"rate\", \"15\").Int(\"low\", 16).Float32(\"high\", 123.2).Msg(msg)\n\t}\n}\n\n// \"github.com/sirupsen/logrus\"\nfunc BenchmarkLogrusNegative(b *testing.B) {\n\tlogger := logrus.New()\n\tlogger.Out = io.Discard\n\tlogger.Level = logrus.InfoLevel\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t}\n}\n\nfunc BenchmarkGookitSlogNegative(b *testing.B) {\n\tlogger := slog.NewWithHandlers(\n\t\thandler.NewIOWriter(io.Discard, []slog.Level{slog.InfoLevel}),\n\t\t// handler.NewIOWriter(os.Stdout, []slog.Level{slog.InfoLevel}),\n\t)\n\tlogger.ReportCaller = false\n\n\t// logger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t// return\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t}\n}\n\nfunc BenchmarkZapPositive(b *testing.B) {\n\tlogger := zap.New(zapcore.NewCore(\n\t\tzapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),\n\t\tzapcore.AddSync(io.Discard),\n\t\tzapcore.InfoLevel,\n\t))\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(msg, zap.String(\"rate\", \"15\"), zap.Int(\"low\", 16), zap.Float32(\"high\", 123.2))\n\t}\n}\n\nfunc BenchmarkZapSugarPositive(b *testing.B) {\n\tlogger := zap.New(zapcore.NewCore(\n\t\tzapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()),\n\t\tzapcore.AddSync(io.Discard),\n\t\tzapcore.InfoLevel,\n\t)).Sugar()\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(msg, zap.String(\"rate\", \"15\"), 
zap.Int(\"low\", 16), zap.Float32(\"high\", 123.2))\n\t}\n}\n\nfunc BenchmarkZeroLogPositive(b *testing.B) {\n\tlogger := zerolog.New(io.Discard).With().Timestamp().Logger().Level(zerolog.InfoLevel)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info().Str(\"rate\", \"15\").Int(\"low\", 16).Float32(\"high\", 123.2).Msg(msg)\n\t}\n}\n\nfunc BenchmarkPhusLogPositive(b *testing.B) {\n\tlogger := phuslu.Logger{Level: phuslu.InfoLevel, Writer: phuslu.IOWriter{Writer: io.Discard}}\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info().Str(\"rate\", \"15\").Int(\"low\", 16).Float32(\"high\", 123.2).Msg(msg)\n\t}\n}\n\nfunc BenchmarkLogrusPositive(b *testing.B) {\n\tlogger := logrus.New()\n\tlogger.Out = io.Discard\n\tlogger.Level = logrus.InfoLevel\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t}\n}\n\nfunc BenchmarkGookitSlogPositive(b *testing.B) {\n\tlogger := slog.NewWithHandlers(\n\t\thandler.NewIOWriter(io.Discard, []slog.Level{slog.InfoLevel}),\n\t)\n\tlogger.ReportCaller = false\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t}\n}\n"
  },
  {
    "path": "_example/demos/demo1.go",
    "content": "package main\n\nimport (\n\tlog \"github.com/gookit/slog\"\n)\n\nconst simplestTemplate = \"[{{datetime}}] [{{level}}] {{message}} {{data}} {{extra}}\"\n\nfunc init() {\n\tlog.GetFormatter().(*log.TextFormatter).SetTemplate(simplestTemplate)\n\tlog.SetLogLevel(log.ErrorLevel)\n\tlog.Errorf(\"Test\")\n}\n\nfunc main() {\n}\n"
  },
  {
    "path": "_example/demos/simple.go",
    "content": "package main\n\nimport \"github.com/gookit/slog\"\n\n// profile run:\n//\n// go build -gcflags '-m -l' simple.go\nfunc main() {\n\t// stackIt()\n\t// _ = stackIt2()\n\tslogTest()\n}\n\n//go:noinline\nfunc stackIt() int {\n\ty := 2\n\treturn y * 2\n}\n\n//go:noinline\nfunc stackIt2() *int {\n\ty := 2\n\tres := y * 2\n\treturn &res\n}\n\nfunc slogTest() {\n\tvar msg = \"The quick brown fox jumps over the lazy dog\"\n\n\tslog.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t// slog.WithFields(slog.M{\n\t// \t\"omg\":    true,\n\t// \t\"number\": 122,\n\t// }).Infof(\"slog %s\", \"message message\")\n}\n"
  },
  {
    "path": "_example/demos/slog_all_level.go",
    "content": "package main\n\nimport (\n\t\"errors\"\n\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\n// run: go run ./_example/demos/slog_all_level.go\nfunc main() {\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\n\tl.AddHandler(handler.NewConsoleHandler(slog.AllLevels))\n\tprintAllLevel(l, \"this is a\", \"log\", \"message\")\n}\n\nfunc printAllLevel(l *slog.Logger, args ...any) {\n\tl.Debug(args...)\n\tl.Info(args...)\n\tl.Warn(args...)\n\tl.Error(args...)\n\tl.Print(args...)\n\tl.Fatal(args...)\n\tl.Panic(args...)\n\n\tl.Trace(args...)\n\tl.Notice(args...)\n\tl.ErrorT(errors.New(\"a error object\"))\n\tl.ErrorT(errorx.New(\"error with stack info\"))\n}\n"
  },
  {
    "path": "_example/diff-with-zap-zerolog.md",
    "content": "# diff with zap, zerolog\n\n是的，zap 非常快速。\n\n但是有一点问题：\n\n- 配置起来稍显复杂\n- 没有内置切割文件处理和文件清理\n- 自定义扩展性不是很好\n\nYes, zap is very fast.\n\nBut there are a few problems:\n\n- Slightly complicated to configure\n- No built-in log file rotation or file cleanup\n- Custom extensibility is not very good"
  },
  {
    "path": "_example/go.mod",
    "content": "module slog_example\n\ngo 1.19\n\nrequire (\n\tgithub.com/golang/glog v1.2.5\n\tgithub.com/gookit/goutil v0.7.4\n\tgithub.com/gookit/slog v0.6.0\n\tgithub.com/phuslu/log v1.0.119\n\tgithub.com/rs/zerolog v1.34.0\n\tgithub.com/sirupsen/logrus v1.9.3\n\tgithub.com/syyongx/llog v0.0.0-20200222114215-e8f9f86ac0a3\n\tgo.uber.org/zap v1.27.0\n\tgopkg.in/natefinch/lumberjack.v2 v2.2.1\n)\n\nrequire (\n\tgithub.com/gookit/color v1.6.0 // indirect\n\tgithub.com/gookit/gsr v0.1.1 // indirect\n\tgithub.com/mattn/go-colorable v0.1.13 // indirect\n\tgithub.com/mattn/go-isatty v0.0.19 // indirect\n\tgithub.com/valyala/bytebufferpool v1.0.0 // indirect\n\tgithub.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect\n\tgo.uber.org/multierr v1.11.0 // indirect\n\tgolang.org/x/sync v0.11.0 // indirect\n\tgolang.org/x/sys v0.30.0 // indirect\n\tgolang.org/x/term v0.29.0 // indirect\n\tgolang.org/x/text v0.22.0 // indirect\n)\n\nreplace github.com/gookit/slog => ../\n"
  },
  {
    "path": "_example/handler/grouped.go",
    "content": "package handler\n\nimport \"github.com/gookit/slog\"\n\n/********************************************************************************\n * Grouped Handler\n ********************************************************************************/\n\n// GroupedHandler definition\ntype GroupedHandler struct {\n\thandlers []slog.Handler\n\t// Levels for log message\n\tLevels []slog.Level\n\t// IgnoreErr on handling messages\n\tIgnoreErr bool\n}\n\n// NewGroupedHandler create new GroupedHandler\nfunc NewGroupedHandler(handlers []slog.Handler) *GroupedHandler {\n\treturn &GroupedHandler{\n\t\thandlers: handlers,\n\t}\n}\n\n// IsHandling Check if the current level can be handling\nfunc (h *GroupedHandler) IsHandling(level slog.Level) bool {\n\tfor _, l := range h.Levels {\n\t\tif l == level {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// Handle log record\nfunc (h *GroupedHandler) Handle(record *slog.Record) (err error) {\n\tfor _, handler := range h.handlers {\n\t\terr = handler.Handle(record)\n\t\tif !h.IgnoreErr && err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn\n}\n\n// Close log handlers\nfunc (h *GroupedHandler) Close() error {\n\tfor _, handler := range h.handlers {\n\t\terr := handler.Close()\n\t\tif !h.IgnoreErr && err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// Flush log records\nfunc (h *GroupedHandler) Flush() error {\n\tfor _, handler := range h.handlers {\n\t\terr := handler.Flush()\n\t\tif !h.IgnoreErr && err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "_example/issue100/issue100_test.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n\t\"go.uber.org/zap\"\n\t\"go.uber.org/zap/zapcore\"\n\t\"gopkg.in/natefinch/lumberjack.v2\"\n)\n\ntype Obj struct {\n\ta int\n\tb int64\n\tc string\n\td bool\n}\n\nvar (\n\tstr1 = \"str1\"\n\tstr2 = \"str222222222222\"\n\tint1 = 1\n\tint2 = 2\n\tobj  = Obj{1, 2, \"3\", true}\n)\n\nfunc TestZapSugar(t *testing.T) {\n\tw := zapcore.AddSync(&lumberjack.Logger{\n\t\tFilename:   \"./zap-sugar.log\",\n\t\tMaxSize:    500, // megabytes\n\t\tMaxBackups: 3,\n\t\tMaxAge:     28, // days\n\t})\n\tcore := zapcore.NewCore(\n\t\tzapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),\n\t\tw,\n\t\tzap.InfoLevel,\n\t)\n\tlogger := zap.New(core)\n\n\tsugar := logger.Sugar()\n\tsugar.Info(\"message is msg\")\n\n\tcount := 100000\n\tstart := time.Now().UnixNano()\n\tfor n := count; n > 0; n-- {\n\t\tsugar.Info(\"message is msg\")\n\t}\n\tend := time.Now().UnixNano()\n\tfmt.Printf(\"\\n zap sugar no format\\n total cost %d ns\\n  avg  cost %d ns \\n count %d \\n\", end-start, (end-start)/int64(count), count)\n\n\tstart = time.Now().UnixNano()\n\tfor n := count; n > 0; n-- {\n\t\tsugar.Infof(\"message is %d %d %s %s %#v\", int1, int2, str1, str2, obj)\n\t}\n\tend = time.Now().UnixNano()\n\tfmt.Printf(\"\\n zap sugar format\\n total cost %d ns\\n  avg  cost %d ns \\n count %d \\n\", end-start, (end-start)/int64(count), count)\n\tsugar.Sync()\n}\n\nfunc TestZapLog(t *testing.T) {\n\tw := zapcore.AddSync(&lumberjack.Logger{\n\t\tFilename:   \"./zap.log\",\n\t\tMaxSize:    500, // megabytes\n\t\tMaxBackups: 3,\n\t\tMaxAge:     28, // days\n\t})\n\n\tcore := zapcore.NewCore(\n\t\tzapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),\n\t\tw,\n\t\tzap.InfoLevel,\n\t)\n\tlogger := zap.New(core)\n\n\tcount := 100000\n\tstart := time.Now().UnixNano()\n\tfor n := count; n > 0; n-- {\n\t\tlogger.Info(\"message is msg\")\n\t}\n\tend := 
time.Now().UnixNano()\n\tfmt.Printf(\"\\n zap no format\\n total cost %d ns\\n  avg  cost %d ns \\n count %d \\n\", end-start, (end-start)/int64(count), count)\n\n\tstart = time.Now().UnixNano()\n\tfor n := count; n > 0; n-- {\n\t\tlogger.Info(\"failed to fetch URL\",\n\t\t\t// Structured context as strongly typed Field values.\n\t\t\tzap.Int(\"int1\", int1),\n\t\t\tzap.Int(\"int2\", int2),\n\t\t\tzap.String(\"str\", str1),\n\t\t\tzap.String(\"str2\", str2),\n\t\t\tzap.Any(\"backoff\", obj),\n\t\t)\n\t}\n\tend = time.Now().UnixNano()\n\tfmt.Printf(\"\\n zap format\\n total cost %d ns\\n  avg  cost %d ns \\n count %d \\n\", end-start, (end-start)/int64(count), count)\n\tlogger.Sync()\n}\n\nfunc TestSlog(t *testing.T) {\n\th1, err := handler.NewEmptyConfig(\n\t\thandler.WithLogfile(\"./slog-info.log\"),    // 路径\n\t\thandler.WithRotateTime(handler.EveryHour), // 日志分割间隔\n\t\thandler.WithLogLevels(slog.AllLevels),     // 日志level\n\t\thandler.WithBuffSize(4*1024*1024),         // buffer大小\n\t\thandler.WithCompress(true),                // 是否压缩旧日志 zip\n\t\thandler.WithBackupNum(24*3),               // 保留旧日志数量\n\t\thandler.WithBuffMode(handler.BuffModeBite),\n\t\t// handler.WithRenameFunc(),                    //RenameFunc build filename for rotate file\n\t).CreateHandler()\n\tif err != nil {\n\t\tfmt.Printf(\"Create slog handler err: %#v\", err)\n\t\treturn\n\t}\n\n\tf := slog.AsTextFormatter(h1.Formatter())\n\tmyTplt := \"[{{datetime}}] [{{level}}] [{{caller}}] {{message}}\\n\"\n\tf.SetTemplate(myTplt)\n\tlogs := slog.NewWithHandlers(h1)\n\n\tcount := 100000\n\tstart := time.Now().UnixNano()\n\tfor i := 0; i < count; i++ {\n\t\tlogs.Info(\"message is msg\")\n\t}\n\tend := time.Now().UnixNano()\n\tfmt.Printf(\"\\n slog no format \\n total cost %d ns\\n  avg  cost %d ns \\n count %d \\n\", end-start, (end-start)/int64(count), count)\n\n\tstart = time.Now().UnixNano()\n\tfor n := count; n > 0; n-- {\n\t\tlogs.Infof(\"message is %d %d %s %s %#v\", int1, int2, str1, str2, 
obj)\n\t}\n\tend = time.Now().UnixNano()\n\tfmt.Printf(\"\\n slog format \\n total cost %d ns\\n  avg  cost %d ns \\n count %d \\n\", end-start, (end-start)/int64(count), count)\n\tlogs.MustClose()\n}\n"
  },
  {
    "path": "_example/issue111/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/syncs\"\n\t\"github.com/gookit/goutil/timex\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\nconst pth = \"./logs/main.log\"\n\nfunc main() {\n\tlog := slog.New()\n\n\th, err := handler.NewTimeRotateFileHandler(\n\t\tpth,\n\t\trotatefile.RotateTime(30),\n\t\thandler.WithBuffSize(0),\n\t\thandler.WithBackupNum(5),\n\t\thandler.WithCompress(true),\n\t\tfunc(c *handler.Config) {\n\t\t\tc.DebugMode = true\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.AddHandler(h)\n\n\tfmt.Println(\"Start...(can be stop by CTRL+C)\", timex.NowDate())\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(time.Second):\n\t\t\t\tlog.Info(\"Log \" + time.Now().String())\n\t\t\t}\n\t\t}\n\t}()\n\n\tsyncs.WaitCloseSignals(func(sig os.Signal) {\n\t\tfmt.Println(\"\\nGot signal:\", sig)\n\t\tfmt.Println(\"Close logger ...\")\n\t\tlog.MustClose()\n\t})\n\n\tfmt.Println(\"Exited at\", timex.NowDate())\n}\n"
  },
  {
    "path": "_example/issue137/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\ntype GLogConfig137 struct {\n\tLevel            string `yaml:\"Level\"`\n\tPattern          string `yaml:\"Pattern\"`\n\tTimeField        string `yaml:\"TimeField\"`\n\tTimeFormat       string `yaml:\"TimeFormat\"`\n\tTemplate         string `yaml:\"Template\"`\n\tRotateTimeFormat string `yaml:\"RotateTimeFormat\"`\n}\n\ntype LogRotateConfig137 struct {\n\tFilepath   string                `yaml:\"filepath\"`\n\tRotateMode rotatefile.RotateMode `yaml:\"rotate_mode\"`\n\tRotateTime rotatefile.RotateTime `yaml:\"rotate_time\"`\n\tMaxSize    uint64                `yaml:\"max_size\"`\n\tBackupNum  uint                  `yaml:\"backup_num\"`\n\tBackupTime uint                  `yaml:\"backup_time\"`\n\tCompress   bool                  `yaml:\"compress\"`\n\tTimeFormat string                `yaml:\"time_format\"`\n\tBuffSize   int                   `yaml:\"buff_size\"`\n\tBuffMode   string                `yaml:\"buff_mode\"`\n}\n\ntype LogConfig137 struct {\n\tGLogConfig     GLogConfig137      `yaml:\"GLogConfig\"`\n\tLogRotate      LogRotateConfig137 `yaml:\"LogRotate\"`\n\tErrorLogRotate LogRotateConfig137 `yaml:\"ErrorLogRotate\"`\n}\n\nfunc main() {\n\tslog.DebugMode = true\n\n\tlogConfig := LogConfig137{\n\t\tGLogConfig: GLogConfig137{\n\t\t\tLevel:            \"debug\",\n\t\t\tPattern:          \"development\",\n\t\t\tTimeField:        \"time\",\n\t\t\tTimeFormat:       \"2006-01-02 15:04:05.000\",\n\t\t\tTemplate:         \"{{datetime}}\\t{{level}}\\t{{channel}}\\t[{{caller}}]\\t{{message}}\\t{{data}}\\t{{extra}}\\n\",\n\t\t\tRotateTimeFormat: \"20060102\",\n\t\t},\n\t\tLogRotate: LogRotateConfig137{\n\t\t\tFilepath:   \"testdata/info137c2.log\",\n\t\t\tRotateMode: 0,\n\t\t\tRotateTime: 86400,\n\t\t\tMaxSize:    512,\n\t\t\tBackupNum:  3,\n\t\t\tBackupTime: 
72,\n\t\t\tCompress:   true,\n\t\t\tTimeFormat: \"20060102\",\n\t\t\tBuffSize:   512,\n\t\t\tBuffMode:   \"line\",\n\t\t},\n\t\tErrorLogRotate: LogRotateConfig137{\n\t\t\tFilepath:   \"testdata/err137c2.log\",\n\t\t\tRotateMode: 0,\n\t\t\tRotateTime: 86400,\n\t\t\tMaxSize:    512,\n\t\t\tBackupNum:  3,\n\t\t\tBackupTime: 72,\n\t\t\tCompress:   true,\n\t\t\tTimeFormat: \"20060102\",\n\t\t\tBuffSize:   512,\n\t\t\tBuffMode:   \"line\",\n\t\t},\n\t}\n\ttpl := logConfig.GLogConfig.Template\n\n\t// slog.DefaultChannelName = \"gookit\"\n\tslog.DefaultTimeFormat = logConfig.GLogConfig.TimeFormat\n\n\tslog.Configure(func(l *slog.SugaredLogger) {\n\t\tl.Level = slog.TraceLevel\n\t\tl.DoNothingOnPanicFatal()\n\t\tl.ChannelName = \"gookit\"\n\t})\n\tslog.GetFormatter().(*slog.TextFormatter).SetTemplate(tpl)\n\tslog.GetFormatter().(*slog.TextFormatter).TimeFormat = slog.DefaultTimeFormat\n\n\trotatefile.DefaultFilenameFn = func(filepath string, rotateNum uint) string {\n\t\tsuffix := time.Now().Format(logConfig.GLogConfig.RotateTimeFormat)\n\n\t\t// eg: /tmp/error.log => /tmp/error_20250302_01.log\n\t\t// 将文件名扩展名取出来, 然后在扩展名中间加入下划线+日期+下划线+序号+扩展名的形式\n\t\text := path.Ext(filepath)\n\t\tfilename := filepath[:len(filepath)-len(ext)]\n\n\t\treturn filename + fmt.Sprintf(\"_%s_%02d\", suffix, rotateNum) + ext\n\t}\n\n\th1 := handler.MustRotateFile(logConfig.ErrorLogRotate.Filepath,\n\t\tlogConfig.ErrorLogRotate.RotateTime,\n\t\t// 
handler.WithFilePerm(os.ModeAppend|os.ModePerm),\n\t\thandler.WithLevelMode(slog.LevelModeList),\n\t\thandler.WithLogLevels(slog.DangerLevels),\n\t\thandler.WithMaxSize(logConfig.ErrorLogRotate.MaxSize),\n\t\thandler.WithBackupNum(logConfig.ErrorLogRotate.BackupNum),\n\t\thandler.WithBackupTime(logConfig.ErrorLogRotate.BackupTime),\n\t\thandler.WithCompress(logConfig.ErrorLogRotate.Compress),\n\t\thandler.WithBuffSize(logConfig.ErrorLogRotate.BuffSize),\n\t\thandler.WithBuffMode(logConfig.ErrorLogRotate.BuffMode),\n\t\thandler.WithRotateMode(logConfig.ErrorLogRotate.RotateMode),\n\t)\n\th1.Formatter().(*slog.TextFormatter).SetTemplate(tpl)\n\n\th2 := handler.MustRotateFile(logConfig.LogRotate.Filepath,\n\t\tlogConfig.LogRotate.RotateTime,\n\t\t// handler.WithFilePerm(os.ModeAppend|os.ModePerm),\n\t\thandler.WithLevelMode(slog.LevelModeList),\n\t\thandler.WithLogLevels(slog.AllLevels),\n\t\thandler.WithMaxSize(logConfig.LogRotate.MaxSize),\n\t\thandler.WithBackupNum(logConfig.LogRotate.BackupNum),\n\t\thandler.WithBackupTime(logConfig.LogRotate.BackupTime),\n\t\thandler.WithCompress(logConfig.LogRotate.Compress),\n\t\thandler.WithBuffSize(logConfig.LogRotate.BuffSize),\n\t\thandler.WithBuffMode(logConfig.LogRotate.BuffMode),\n\t\thandler.WithRotateMode(logConfig.LogRotate.RotateMode),\n\t)\n\th2.Formatter().(*slog.TextFormatter).SetTemplate(tpl)\n\n\tslog.PushHandlers(h1, h2)\n\n\t// add logs\n\tfor i := 0; i < 20; i++ {\n\t\tslog.Infof(\"hi, this is a example information ... message text. log index=%d\", i)\n\t\tslog.WithValue(\"test137\", \"some value\").Warn(\"测试滚动多个文件，同时设置了清理日志文件\")\n\t}\n\n\tslog.MustClose()\n\ttime.Sleep(time.Second * 2)\n}\n"
  },
  {
    "path": "_example/pprof/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"runtime/pprof\"\n\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\n// run serve:\n//\n//\tgo run ./_examples/pprof\n//\n// see prof on cli:\n//\n//\tgo tool pprof pprof/cpu_prof_data.out\n//\n// see prof on web:\n//\n//\tgo tool pprof -http=:8080 pprof/cpu_prof_data.out\nfunc main() {\n\tlogger := slog.NewWithHandlers(\n\t\thandler.NewIOWriter(io.Discard, slog.NormalLevels),\n\t)\n\n\ttimes := 10000\n\tfmt.Println(\"start profile, run times:\", times)\n\n\tcpuProfile := \"cpu_prof_data.out\"\n\tf, err := os.Create(cpuProfile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = pprof.StartCPUProfile(f)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer pprof.StopCPUProfile()\n\n\tvar msg = \"The quick brown fox jumps over the lazy dog\"\n\tfor i := 0; i < times; i++ {\n\t\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t}\n\n\tfmt.Println(\"see prof on web:\\n  go tool pprof -http=:8080\", cpuProfile)\n}\n"
  },
  {
    "path": "_example/refer/main.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/golang/glog\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"github.com/syyongx/llog\"\n\n\t\"go.uber.org/zap\"\n\n\t\"github.com/rs/zerolog\"\n\tzlog \"github.com/rs/zerolog/log\"\n)\n\nfunc main() {\n\t// for glog\n\tflag.Parse()\n\n\t// -- log\n\tlog.Println(\"raw log message\")\n\n\t// -- glog\n\tglog.Infof(\"glog %s\", \"message message\")\n\n\t// -- llog\n\tllog.NewLogger(\"llog test\").Info(\"llog message message\")\n\n\t// -- slog\n\tslog.Debug(\"slog message message\")\n\tslog.WithFields(slog.M{\n\t\t\"omg\":    true,\n\t\t\"number\": 122,\n\t}).Infof(\"slog %s\", \"message message\")\n\n\t// -- logrus\n\tlogrus.Debug(\"logrus message message\")\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"omg\":    true,\n\t\t\"number\": 122,\n\t}).Warn(\"The group's number increased tremendously!\")\n\n\t// -- zerolog\n\tzerolog.TimeFieldFormat = zerolog.TimeFormatUnix\n\tzlog.Debug().\n\t\tStr(\"Scale\", \"833 cents\").\n\t\tFloat64(\"Interval\", 833.09).\n\t\tMsg(\"zerolog message\")\n\tzlog.Print(\"zerolog hello\")\n\n\t// slog.Infof(\"log %s\", \"message\")\n\turl := \"/path/to/some\"\n\n\t// -- zap\n\tlogger, _ := zap.NewProduction()\n\tdefer logger.Sync() // flushes buffer, if any\n\tsugar := logger.Sugar()\n\tsugar.Infow(\"failed to fetch URL\",\n\t\t// Structured context as loosely typed key-value pairs.\n\t\t\"url\", url,\n\t\t\"attempt\", 3,\n\t\t\"backoff\", time.Second,\n\t)\n\tsugar.Infof(\"zap log. Failed to fetch URL: %s\", url)\n}\n"
  },
  {
    "path": "benchmark2_test.go",
    "content": "package slog\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/dump\"\n)\n\nfunc TestLogger_newRecord_AllocTimes(_ *testing.T) {\n\tl := Std()\n\tl.Output = io.Discard\n\tdefer l.Reset()\n\n\t// output: 0 times\n\tfmt.Println(\"Alloc Times:\", int(testing.AllocsPerRun(100, func() {\n\t\t// logger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t\tr := l.newRecord()\n\t\t// do something...\n\t\tl.releaseRecord(r)\n\t})))\n}\n\nfunc Test_AllocTimes_formatArgsWithSpaces_oneElem(_ *testing.T) {\n\t// string Alloc Times: 0\n\tfmt.Println(\"string Alloc Times:\", int(testing.AllocsPerRun(10, func() {\n\t\t// logger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t\tformatArgsWithSpaces([]any{\"msg\"})\n\t})))\n\n\t// int Alloc Times: 1\n\tfmt.Println(\"int Alloc Times:\", int(testing.AllocsPerRun(10, func() {\n\t\tformatArgsWithSpaces([]any{2343})\n\t})))\n\n\t// float Alloc Times: 2\n\tfmt.Println(\"float Alloc Times:\", int(testing.AllocsPerRun(10, func() {\n\t\tformatArgsWithSpaces([]any{123.2})\n\t})))\n}\n\nfunc Test_AllocTimes_formatArgsWithSpaces_manyElem(_ *testing.T) {\n\t// Alloc Times: 1\n\t// TIP:\n\t// `float` will alloc 2 times memory\n\t// `int <0`, `int > 100` will alloc 1 times memory\n\tfmt.Println(\"Alloc Times:\", int(testing.AllocsPerRun(50, func() {\n\t\tformatArgsWithSpaces([]any{\n\t\t\t\"rate\", -23, true, 106, \"high\", 123.2,\n\t\t})\n\t})))\n}\n\nfunc Test_AllocTimes_stringsPool(_ *testing.T) {\n\tl := Std()\n\tl.Output = io.Discard\n\tl.LowerLevelName = true\n\tdefer l.Reset()\n\n\tvar ln, cp int\n\t// output: 0 times\n\tfmt.Println(\"Alloc Times:\", int(testing.AllocsPerRun(100, func() {\n\t\t// logger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\n\t\t// oldnew := stringsPool.Get().([]string)\n\t\t// defer stringsPool.Put(oldnew)\n\n\t\toldnew := make([]string, 0, len(map[string]string{\"a\": \"b\"})*2+1)\n\n\t\toldnew = append(oldnew, \"a\")\n\t\toldnew = 
append(oldnew, \"b\")\n\t\toldnew = append(oldnew, \"c\")\n\t\t// oldnew = append(oldnew, \"d\")\n\n\t\tln = len(oldnew)\n\t\tcp = cap(oldnew)\n\t})))\n\n\tdump.P(ln, cp)\n}\n\nfunc TestLogger_Info_oneElem_AllocTimes(_ *testing.T) {\n\tl := Std()\n\t// l.Output = io.Discard\n\tl.ReportCaller = false\n\tl.LowerLevelName = true\n\t// 启用 color 会导致多次(10次左右)内存分配\n\tl.Formatter.(*TextFormatter).EnableColor = false\n\n\tdefer l.Reset()\n\n\t// output: 2 times\n\tfmt.Println(\"Alloc Times:\", int(testing.AllocsPerRun(5, func() {\n\t\t// l.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, \"msg\")\n\t\tl.Info(\"msg\")\n\t})))\n}\n\nfunc TestLogger_Info_moreElem_AllocTimes(_ *testing.T) {\n\tl := NewStdLogger()\n\t// l.Output = io.Discard\n\tl.ReportCaller = false\n\tl.LowerLevelName = true\n\t// 启用 color 会导致多次(10次左右)内存分配\n\tl.Formatter.(*TextFormatter).EnableColor = false\n\n\tdefer l.Reset()\n\n\t// output: 5 times\n\tfmt.Println(\"Alloc Times:\", int(testing.AllocsPerRun(5, func() {\n\t\tl.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, \"msg\")\n\t})))\n\n\t// output: 5 times\n\tfmt.Println(\"Alloc Times:\", int(testing.AllocsPerRun(5, func() {\n\t\tl.Info(\"rate\", \"15\", \"low\", 16, \"high\")\n\t\t// l.Info(\"msg\")\n\t})))\n}\n"
  },
  {
    "path": "benchmark_test.go",
    "content": "package slog_test\n\nimport (\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/dump\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\n// go test -v -cpu=4 -run=none -bench=. -benchtime=10s -benchmem bench_test.go\n//\n// code refer:\n//\n//\thttps://github.com/phuslu/log\nvar msg = \"The quick brown fox jumps over the lazy dog\"\n\nfunc BenchmarkGookitSlogNegative(b *testing.B) {\n\tlogger := slog.NewWithHandlers(\n\t\thandler.NewIOWriter(io.Discard, []slog.Level{slog.ErrorLevel}),\n\t)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t}\n}\n\nfunc TestLogger_Info_Negative(t *testing.T) {\n\tlogger := slog.NewWithHandlers(\n\t\thandler.NewIOWriter(io.Discard, []slog.Level{slog.ErrorLevel}),\n\t)\n\n\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n}\n\nfunc BenchmarkGookitSlogPositive(b *testing.B) {\n\tlogger := slog.NewWithHandlers(\n\t\thandler.NewIOWriter(io.Discard, slog.NormalLevels),\n\t)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n\t}\n}\n\nfunc BenchmarkTextFormatter_Format(b *testing.B) {\n\tr := newLogRecord(\"TEST_LOG_MESSAGE\")\n\tf := slog.NewTextFormatter()\n\t// 1284 ns/op  456 B/op          11 allocs/op\n\t// On use DefaultTemplate\n\n\t// 304.4 ns/op   200 B/op           2 allocs/op\n\t// f.SetTemplate(\"{{datetime}} {{message}}\")\n\n\t// 271.3 ns/op  200 B/op           2 allocs/op\n\t// f.SetTemplate(\"{{datetime}}\")\n\t// f.SetTemplate(\"{{message}}\")\n\tdump.P(f.Template())\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := f.Format(r)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc TestLogger_Info_Positive(t *testing.T) {\n\tlogger := slog.NewWithHandlers(\n\t\thandler.NewIOWriter(io.Discard, 
slog.NormalLevels),\n\t)\n\n\tlogger.Info(\"rate\", \"15\", \"low\", 16, \"high\", 123.2, msg)\n}\n"
  },
  {
    "path": "bufwrite/bufio_writer.go",
    "content": "// Package bufwrite provides buffered io.Writer with sync and close methods.\npackage bufwrite\n\nimport (\n\t\"bufio\"\n\t\"io\"\n)\n\n// BufIOWriter wrap the bufio.Writer, implements the Sync() Close() methods\ntype BufIOWriter struct {\n\tbufio.Writer\n\t// backup the bufio.Writer.wr\n\twriter io.Writer\n}\n\n// NewBufIOWriterSize instance with size\nfunc NewBufIOWriterSize(w io.Writer, size int) *BufIOWriter {\n\treturn &BufIOWriter{\n\t\twriter: w,\n\t\tWriter: *bufio.NewWriterSize(w, size),\n\t}\n}\n\n// NewBufIOWriter instance\nfunc NewBufIOWriter(w io.Writer) *BufIOWriter {\n\treturn NewBufIOWriterSize(w, defaultBufSize)\n}\n\n// Close implements the io.Closer\nfunc (w *BufIOWriter) Close() error {\n\tif err := w.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\t// is closer\n\tif c, ok := w.writer.(io.Closer); ok {\n\t\treturn c.Close()\n\t}\n\treturn nil\n}\n\n// Sync implements the Syncer\nfunc (w *BufIOWriter) Sync() error {\n\treturn w.Flush()\n}\n"
  },
  {
    "path": "bufwrite/bufwrite_test.go",
    "content": "package bufwrite_test\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog/bufwrite\"\n)\n\nfunc TestNewBufIOWriter_WriteString(t *testing.T) {\n\tw := new(bytes.Buffer)\n\tbw := bufwrite.NewBufIOWriterSize(w, 12)\n\n\t_, err := bw.WriteString(\"hello, \")\n\tassert.NoErr(t, err)\n\tassert.Eq(t, 0, w.Len())\n\n\t_, err = bw.WriteString(\"worlds. oh\")\n\tassert.NoErr(t, err)\n\tassert.Eq(t, \"hello, world\", w.String()) // different the LineWriter\n\n\tassert.NoErr(t, bw.Close())\n\tassert.Eq(t, \"hello, worlds. oh\", w.String())\n}\n\ntype closeWriter struct {\n\terrOnWrite bool\n\terrOnClose bool\n\twriteNum   int\n}\n\nfunc (w *closeWriter) Close() error {\n\tif w.errOnClose {\n\t\treturn errorx.Raw(\"close error\")\n\t}\n\treturn nil\n}\n\nfunc (w *closeWriter) Write(p []byte) (n int, err error) {\n\tif w.errOnWrite {\n\t\treturn w.writeNum, errorx.Raw(\"write error\")\n\t}\n\n\tif w.writeNum > 0 {\n\t\treturn w.writeNum, nil\n\t}\n\treturn len(p), nil\n}\n\nfunc TestBufIOWriter_Close_error(t *testing.T) {\n\tbw := bufwrite.NewBufIOWriterSize(&closeWriter{errOnWrite: true}, 24)\n\t_, err := bw.WriteString(\"hi\")\n\tassert.NoErr(t, err)\n\n\t// flush write error\n\terr = bw.Close()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"write error\", err.Error())\n\n\tbw = bufwrite.NewBufIOWriterSize(&closeWriter{errOnClose: true}, 24)\n\n\t// close error\n\terr = bw.Close()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"close error\", err.Error())\n}\n\nfunc TestBufIOWriter_Sync(t *testing.T) {\n\tw := new(bytes.Buffer)\n\tbw := bufwrite.NewBufIOWriter(w)\n\n\t_, err := bw.WriteString(\"hello\")\n\tassert.NoErr(t, err)\n\tassert.Eq(t, 0, w.Len())\n\tassert.Eq(t, \"\", w.String())\n\n\tassert.NoErr(t, bw.Sync())\n\tassert.Eq(t, \"hello\", w.String())\n}\n\nfunc TestNewLineWriter(t *testing.T) {\n\tw := new(bytes.Buffer)\n\tbw := 
bufwrite.NewLineWriter(w)\n\n\tassert.True(t, bw.Size() > 0)\n\tassert.NoErr(t, bw.Flush())\n\n\t_, err := bw.WriteString(\"hello\")\n\tassert.NoErr(t, err)\n\tassert.Eq(t, \"\", w.String())\n\n\tassert.NoErr(t, bw.Sync())\n\tassert.Eq(t, \"hello\", w.String())\n\n\tbw.Reset(w)\n}\n\nfunc TestLineWriter_Write_error(t *testing.T) {\n\tw := &closeWriter{errOnWrite: true}\n\tbw := bufwrite.NewLineWriterSize(w, 6)\n\n\tt.Run(\"flush err on write\", func(t *testing.T) {\n\t\tw1 := &closeWriter{}\n\t\tbw.Reset(w1)\n\t\tn, err := bw.WriteString(\"hi\") // write ok\n\t\tassert.NoErr(t, err)\n\t\tassert.Equal(t, 2, n)\n\n\t\t// fire flush\n\t\tw1.errOnWrite = true\n\t\t_, err = bw.WriteString(\"hello, tom\")\n\t\tassert.Err(t, err)\n\t\tassert.Eq(t, \"write error\", err.Error())\n\t})\n\n\t_, err := bw.WriteString(\"hello, tom\")\n\tassert.Err(t, err)\n\tassert.Eq(t, \"write error\", err.Error())\n\n\t// get old error\n\tw.errOnWrite = false\n\n\t_, err = bw.WriteString(\"hello, wo\")\n\tassert.Err(t, err)\n\tassert.Eq(t, \"write error\", err.Error())\n\n\tbw.Reset(w)\n\t_, err = bw.WriteString(\"hello\")\n\tassert.NoErr(t, err)\n}\n\nfunc TestLineWriter_Flush_error(t *testing.T) {\n\tt.Run(\"write ok but n < b.n\", func(t *testing.T) {\n\t\tw := &closeWriter{}\n\t\tbw := bufwrite.NewLineWriterSize(w, 6)\n\t\t_, err := bw.WriteString(\"hi!\")\n\t\tassert.NoErr(t, err)\n\n\t\t// err: write n < b.n\n\t\tw.writeNum = 1\n\t\terr = bw.Flush()\n\t\tassert.Err(t, err)\n\t\tassert.Eq(t, \"short write\", err.Error())\n\t})\n\n\tt.Run(\"write err and n < b.n\", func(t *testing.T) {\n\t\tw := &closeWriter{}\n\t\tbw := bufwrite.NewLineWriterSize(w, 6)\n\t\t_, err := bw.WriteString(\"hi!\")\n\t\tassert.NoErr(t, err)\n\n\t\t// err: write n < b.n\n\t\tw.writeNum = 1\n\t\tw.errOnWrite = true\n\t\terr = bw.Flush()\n\t\tassert.Err(t, err)\n\t\tassert.Eq(t, \"write error\", err.Error())\n\t})\n\n\tw := &closeWriter{}\n\tbw := bufwrite.NewLineWriterSize(w, 6)\n\n\t_, err := 
bw.WriteString(\"hello\")\n\tassert.NoErr(t, err)\n\t// error on flush\n\tw.errOnWrite = true\n\terr = bw.Flush()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"write error\", err.Error())\n\n\t// err: write n < b.n\n\tw.writeNum = 2\n\terr = bw.Flush()\n\tassert.Err(t, err)\n\tw.writeNum = 0\n\n\t// get old error\n\tw.errOnWrite = false\n\terr = bw.Flush()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"write error\", err.Error())\n\n\tbw.Reset(w)\n\t_, err = bw.WriteString(\"hello\")\n\tassert.NoErr(t, err)\n}\n\nfunc TestLineWriter_Close_error(t *testing.T) {\n\tw := &closeWriter{}\n\tbw := bufwrite.NewLineWriterSize(w, 8)\n\n\t_, err := bw.WriteString(\"hello\")\n\tassert.NoErr(t, err)\n\n\t// error on flush\n\tw.errOnWrite = true\n\terr = bw.Close()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"write error\", err.Error())\n\n\tw = &closeWriter{errOnClose: true}\n\tbw = bufwrite.NewLineWriterSize(w, 8)\n\n\terr = bw.Close()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"close error\", err.Error())\n}\n\nfunc TestNewLineWriterSize(t *testing.T) {\n\tw := new(bytes.Buffer)\n\tbw := bufwrite.NewLineWriterSize(w, 12)\n\n\t_, err := bw.WriteString(\"hello, \")\n\tassert.NoErr(t, err)\n\tassert.Eq(t, 0, w.Len())\n\tassert.True(t, bw.Size() > 0)\n\n\t_, err = bw.WriteString(\"worlds. oh\")\n\tassert.NoErr(t, err)\n\tassert.Eq(t, \"hello, worlds. oh\", w.String()) // different the BufIOWriter\n\n\t_, err = bw.WriteString(\"...\")\n\tassert.NoErr(t, err)\n\tassert.NoErr(t, bw.Close())\n\tassert.Eq(t, \"hello, worlds. oh...\", w.String())\n\tw.Reset()\n\n\tbw = bufwrite.NewLineWriterSize(bw, 8)\n\tassert.Eq(t, 12, bw.Size())\n\n\tbw = bufwrite.NewLineWriterSize(w, -12)\n\tassert.True(t, bw.Size() > 12)\n}\n"
  },
  {
    "path": "bufwrite/line_writer.go",
    "content": "package bufwrite\n\nimport (\n\t\"io\"\n)\n\nconst (\n\tdefaultBufSize = 1024 * 8\n)\n\n// LineWriter implements buffering for an io.Writer object.\n// If an error occurs writing to a LineWriter, no more data will be\n// accepted and all subsequent writes, and Flush, will return the error.\n// After all data has been written, the client should call the\n// Flush method to guarantee all data has been forwarded to\n// the underlying io.Writer.\n//\n// from bufio.Writer.\n//\n// Change:\n//\n// always keep write full line. more difference please see Write\ntype LineWriter struct {\n\terr error\n\tbuf []byte\n\tn   int\n\twr  io.Writer\n}\n\n// NewLineWriterSize returns a new LineWriter whose buffer has at least the specified\n// size. If the argument io.Writer is already a LineWriter with large enough\n// size, it returns the underlying LineWriter.\nfunc NewLineWriterSize(w io.Writer, size int) *LineWriter {\n\t// Is it already a LineWriter?\n\tb, ok := w.(*LineWriter)\n\tif ok && len(b.buf) >= size {\n\t\treturn b\n\t}\n\tif size <= 0 {\n\t\tsize = defaultBufSize\n\t}\n\n\treturn &LineWriter{\n\t\tbuf: make([]byte, size),\n\t\twr:  w,\n\t}\n}\n\n// NewLineWriter returns a new LineWriter whose buffer has the default size.\nfunc NewLineWriter(w io.Writer) *LineWriter {\n\treturn NewLineWriterSize(w, defaultBufSize)\n}\n\n// Size returns the size of the underlying buffer in bytes.\nfunc (b *LineWriter) Size() int { return len(b.buf) }\n\n// Reset discards any unflushed buffered data, clears any error, and\n// resets b to write its output to w.\nfunc (b *LineWriter) Reset(w io.Writer) {\n\tb.n = 0\n\tb.wr = w\n\tb.err = nil\n\tb.buf = b.buf[:0]\n}\n\n// Close implements the io.Closer\nfunc (b *LineWriter) Close() error {\n\tif err := b.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\t// is closer\n\tif c, ok := b.wr.(io.Closer); ok {\n\t\treturn c.Close()\n\t}\n\treturn nil\n}\n\n// Sync implements the Syncer\nfunc (b *LineWriter) Sync() error {\n\treturn 
b.Flush()\n}\n\n// Flush writes any buffered data to the underlying io.Writer.\n//\n// TIP: please add lock before calling the method.\nfunc (b *LineWriter) Flush() error {\n\tif b.err != nil {\n\t\treturn b.err\n\t}\n\tif b.n == 0 {\n\t\treturn nil\n\t}\n\n\tn, err := b.wr.Write(b.buf[0:b.n])\n\tif n < b.n && err == nil {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err != nil {\n\t\tif n > 0 && n < b.n {\n\t\t\tcopy(b.buf[0:b.n-n], b.buf[n:b.n])\n\t\t}\n\t\tb.n -= n\n\t\tb.err = err\n\t\treturn err\n\t}\n\n\tb.n = 0\n\treturn nil\n}\n\n// Available returns how many bytes are unused in the buffer.\nfunc (b *LineWriter) Available() int { return len(b.buf) - b.n }\n\n// Buffered returns the number of bytes that have been written into the current buffer.\nfunc (b *LineWriter) Buffered() int { return b.n }\n\n// Write writes the contents of p into the buffer.\n// It returns the number of bytes written.\n// If nn < len(p), it also returns an error explaining\n// why the writing is short.\nfunc (b *LineWriter) Write(p []byte) (nn int, err error) {\n\t// NOTE: 原来的 bufio.Writer#Write 会造成 p 写了一部分到 b.wr, 还有一部分在 b.buf，\n\t// 如果现在外部工具从 b.wr 收集数据，会收集到一行无法解析的数据(例如每个p是一行json日志)\n\t// for len(p) > b.Available() && b.err == nil {\n\t// \tvar n int\n\t// \tif b.Buffered() == 0 {\n\t// \t\t// Large write, empty buffer.\n\t// \t\t// Write directly from p to avoid copy.\n\t// \t\tn, b.err = b.wr.Write(p)\n\t// \t} else {\n\t// \t\tn = copy(b.buf[b.n:], p)\n\t// \t\tb.n += n\n\t// \t\tb.Flush()\n\t// \t}\n\t// \tnn += n\n\t// \tp = p[n:]\n\t// }\n\n\t// UP: 改造一下逻辑，如果 len(p) > b.Available() 就将buf 和 p 都写入 b.wr\n\tif len(p) > b.Available() && b.err == nil {\n\t\tnn = b.Buffered()\n\t\tif nn > 0 {\n\t\t\t_ = b.Flush()\n\t\t\tif b.err != nil {\n\t\t\t\treturn nn, b.err\n\t\t\t}\n\t\t}\n\n\t\tvar n int\n\t\tn, b.err = b.wr.Write(p)\n\t\tif b.err != nil {\n\t\t\treturn nn, b.err\n\t\t}\n\n\t\tnn += n\n\t\treturn nn, nil\n\t}\n\n\tif b.err != nil {\n\t\treturn nn, b.err\n\t}\n\n\tn := 
copy(b.buf[b.n:], p)\n\tb.n += n\n\tnn += n\n\treturn nn, nil\n}\n\n// WriteString to the writer\nfunc (b *LineWriter) WriteString(s string) (int, error) {\n\treturn b.Write([]byte(s))\n}\n"
  },
  {
    "path": "common.go",
    "content": "package slog\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/envutil\"\n\t\"github.com/gookit/goutil/strutil\"\n\t\"github.com/gookit/gsr\"\n)\n\n// SLogger interface\ntype SLogger interface {\n\tgsr.Logger\n\tLog(level Level, v ...any)\n\tLogf(level Level, format string, v ...any)\n}\n\n// LoggerFn func\ntype LoggerFn func(l *Logger)\n\n//\n// log level definitions\n// region Log level\n\n// Level type\ntype Level uint32\n\n// String get level name\nfunc (l Level) String() string { return LevelName(l) }\n\n// Name get level name. eg: INFO, DEBUG ...\nfunc (l Level) Name() string { return LevelName(l) }\n\n// LowerName get lower level name. eg: info, debug ...\nfunc (l Level) LowerName() string {\n\tif n, ok := lowerLevelNames[l]; ok {\n\t\treturn n\n\t}\n\treturn \"unknown\"\n}\n\n// ShouldHandling compare level, if current level <= l, it will be record.\nfunc (l Level) ShouldHandling(curLevel Level) bool {\n\treturn curLevel <= l\n}\n\n// MarshalJSON implement the JSON Marshal interface [encoding/json.Marshaler]\nfunc (l Level) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + l.String() + `\"`), nil\n}\n\n// UnmarshalJSON implement the JSON Unmarshal interface [encoding/json.Unmarshaler]\nfunc (l *Level) UnmarshalJSON(data []byte) error {\n\ts, err := strconv.Unquote(string(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*l, err = StringToLevel(s)\n\treturn err\n}\n\n// Levels level list\ntype Levels []Level\n\n// Contains given level\nfunc (ls Levels) Contains(level Level) bool {\n\tfor _, l := range ls {\n\t\tif l == level {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// These are the different logging levels. You can set the logging level to log handler\nconst (\n\t// PanicLevel level, the highest level of severity. will call panic() if the logging level <= PanicLevel.\n\tPanicLevel Level = 100\n\t// FatalLevel level. Logs and then calls `logger.Exit(1)`. 
It will exit even if the\n\t// logging level <= FatalLevel.\n\tFatalLevel Level = 200\n\t// ErrorLevel level. Runtime errors. Used for errors that should definitely be noted.\n\t// Commonly used for hooks to send errors to an error tracking service.\n\tErrorLevel Level = 300\n\t// WarnLevel level. Non-critical entries that deserve eyes.\n\tWarnLevel Level = 400\n\t// NoticeLevel level Uncommon events\n\tNoticeLevel Level = 500\n\t// InfoLevel level. Examples: User logs in, SQL logs.\n\tInfoLevel Level = 600\n\t// DebugLevel level. Usually only enabled when debugging. Very verbose logging.\n\tDebugLevel Level = 700\n\t// TraceLevel level. Designates finer-grained informational events than the Debug.\n\tTraceLevel Level = 800\n)\n\n//\n// some common definitions\n// region common types\n\n// StringMap string map short name\ntype StringMap = map[string]string\n\n// M short name of map[string]any\ntype M map[string]any\n\n// String map to string\nfunc (m M) String() string {\n\treturn mapToString(m)\n}\n\n// ClockFn func\ntype ClockFn func() time.Time\n\n// Now implements the Clocker\nfunc (fn ClockFn) Now() time.Time {\n\treturn fn()\n}\n\n// region CallerFlagMode\n\n// CallerFlagMode Defines the Caller backtrace information mode.\ntype CallerFlagMode = uint8\n\n// NOTICE: you must set `Logger.ReportCaller=true` for reporting caller.\n// then config the Logger.CallerFlag by follow flags.\nconst (\n\t// CallerFlagFnlFcn report short func name with filename and with line.\n\t// eg: \"logger_test.go:48,TestLogger_ReportCaller\"\n\tCallerFlagFnlFcn CallerFlagMode = iota\n\t// CallerFlagFull full func name with filename and with line.\n\t// eg: \"github.com/gookit/slog_test.TestLogger_ReportCaller(),logger_test.go:48\"\n\tCallerFlagFull\n\t// CallerFlagFunc full package with func name.\n\t// eg: \"github.com/gookit/slog_test.TestLogger_ReportCaller\"\n\tCallerFlagFunc\n\t// CallerFlagFcLine full package with func name and with line.\n\t// eg: 
\"github.com/gookit/slog_test.TestLogger_ReportCaller:48\"\n\tCallerFlagFcLine\n\t// CallerFlagPkg report full package name.\n\t// eg: \"github.com/gookit/slog_test\"\n\tCallerFlagPkg\n\t// CallerFlagPkgFnl report full package name + filename + line.\n\t// eg: \"github.com/gookit/slog_test,logger_test.go:48\"\n\tCallerFlagPkgFnl\n\t// CallerFlagFpLine report full filepath with line.\n\t// eg: \"/work/go/gookit/slog/logger_test.go:48\"\n\tCallerFlagFpLine\n\t// CallerFlagFnLine report filename with line.\n\t// eg: \"logger_test.go:48\"\n\tCallerFlagFnLine\n\t// CallerFlagFcName only report func name.\n\t// eg: \"TestLogger_ReportCaller\"\n\tCallerFlagFcName\n)\n\nvar (\n\t// FieldKeyData define the key name for Record.Data\n\tFieldKeyData = \"data\"\n\t// FieldKeyTime key name\n\tFieldKeyTime = \"time\"\n\t// FieldKeyDate key name\n\tFieldKeyDate = \"date\"\n\n\t// FieldKeyDatetime key name\n\tFieldKeyDatetime = \"datetime\"\n\t// FieldKeyTimestamp key name\n\tFieldKeyTimestamp = \"timestamp\"\n\n\t// FieldKeyCaller the field key name for report caller.\n\t//\n\t// For caller style please see CallerFlagFull, CallerFlagFunc and more.\n\t//\n\t// NOTICE: you must set `Logger.ReportCaller=true` for reporting caller\n\tFieldKeyCaller = \"caller\"\n\n\t// FieldKeyLevel name\n\tFieldKeyLevel = \"level\"\n\t// FieldKeyError Define the key when adding errors using WithError.\n\tFieldKeyError = \"error\"\n\t// FieldKeyExtra key name\n\tFieldKeyExtra = \"extra\"\n\n\t// FieldKeyChannel name\n\tFieldKeyChannel = \"channel\"\n\t// FieldKeyMessage name\n\tFieldKeyMessage = \"message\"\n)\n\n// region Global variables\n\nvar (\n\t// DefaultChannelName for log record\n\tDefaultChannelName = \"application\"\n\t// DefaultTimeFormat define\n\tDefaultTimeFormat = \"2006/01/02T15:04:05.000\"\n\n\t// DebugMode enable debug mode for logger. use for local development.\n\tDebugMode = envutil.GetBool(\"OPEN_SLOG_DEBUG\", false)\n\n\t// DoNothingOnExit handle func. 
use for testing.\n\tDoNothingOnExit = func(code int) {}\n\t// DoNothingOnPanic handle func. use for testing.\n\tDoNothingOnPanic = func(v any) {}\n\n\t// DefaultPanicFn handle func\n\tDefaultPanicFn = func(v any) { panic(v) }\n\t// DefaultClockFn create func\n\tDefaultClockFn = ClockFn(func() time.Time { return time.Now() })\n)\n\nvar (\n\t// PrintLevel for use Logger.Print / Printf / Println\n\tPrintLevel = InfoLevel\n\n\t// AllLevels exposing all logging levels\n\tAllLevels = Levels{\n\t\tPanicLevel,\n\t\tFatalLevel,\n\t\tErrorLevel,\n\t\tWarnLevel,\n\t\tNoticeLevel,\n\t\tInfoLevel,\n\t\tDebugLevel,\n\t\tTraceLevel,\n\t}\n\n\t// DangerLevels define the commonly danger log levels\n\tDangerLevels = Levels{PanicLevel, FatalLevel, ErrorLevel, WarnLevel}\n\t// NormalLevels define the commonly normal log levels\n\tNormalLevels = Levels{InfoLevel, NoticeLevel, DebugLevel, TraceLevel}\n\n\t// LevelNames all level mapping name\n\tLevelNames = map[Level]string{\n\t\tPanicLevel:  \"PANIC\",\n\t\tFatalLevel:  \"FATAL\",\n\t\tErrorLevel:  \"ERROR\",\n\t\tWarnLevel:   \"WARNING\",\n\t\tNoticeLevel: \"NOTICE\",\n\t\tInfoLevel:   \"INFO\",\n\t\tDebugLevel:  \"DEBUG\",\n\t\tTraceLevel:  \"TRACE\",\n\t}\n\n\t// lower level name.\n\tlowerLevelNames = buildLowerLevelName()\n\t// empty time for reset record.\n\temptyTime = time.Time{}\n)\n\n// region Global functions\n\n// LevelName match\nfunc LevelName(l Level) string {\n\tif n, ok := LevelNames[l]; ok {\n\t\treturn n\n\t}\n\treturn \"UNKNOWN\"\n}\n\n// LevelByName convert name to level, fallback to InfoLevel if not match\nfunc LevelByName(ln string) Level {\n\tl, err := StringToLevel(ln)\n\tif err != nil {\n\t\treturn InfoLevel\n\t}\n\treturn l\n}\n\n// Name2Level convert name to level\nfunc Name2Level(s string) (Level, error) { return StringToLevel(s) }\n\n// StringToLevel parse and convert string value to Level\nfunc StringToLevel(s string) (Level, error) {\n\tswitch strings.ToLower(s) {\n\tcase \"panic\":\n\t\treturn 
PanicLevel, nil\n\tcase \"fatal\":\n\t\treturn FatalLevel, nil\n\tcase \"err\", \"error\":\n\t\treturn ErrorLevel, nil\n\tcase \"warn\", \"warning\":\n\t\treturn WarnLevel, nil\n\tcase \"note\", \"notice\":\n\t\treturn NoticeLevel, nil\n\tcase \"info\", \"\": // make the zero value useful\n\t\treturn InfoLevel, nil\n\tcase \"debug\":\n\t\treturn DebugLevel, nil\n\tcase \"trace\":\n\t\treturn TraceLevel, nil\n\t}\n\n\t// is int value, try to parse as int\n\tif strutil.IsInt(s) {\n\t\tiVal := strutil.SafeInt(s)\n\t\treturn Level(iVal), nil\n\t}\n\treturn 0, errors.New(\"slog: invalid log level name: \" + s)\n}\n\n//\n// exit handle logic\n//\n\n// global exit handler\nvar exitHandlers = make([]func(), 0)\n\nfunc runExitHandlers() {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tprintStderr(\"slog: run exit handler(global) recovered, error:\", err)\n\t\t}\n\t}()\n\n\tfor _, handler := range exitHandlers {\n\t\thandler()\n\t}\n}\n\n// ExitHandlers get all global exitHandlers\nfunc ExitHandlers() []func() {\n\treturn exitHandlers\n}\n\n// RegisterExitHandler register an exit-handler on global exitHandlers\nfunc RegisterExitHandler(handler func()) {\n\texitHandlers = append(exitHandlers, handler)\n}\n\n// PrependExitHandler prepend register an exit-handler on global exitHandlers\nfunc PrependExitHandler(handler func()) {\n\texitHandlers = append([]func(){handler}, exitHandlers...)\n}\n\n// ResetExitHandlers reset all exitHandlers\nfunc ResetExitHandlers(applyToStd bool) {\n\texitHandlers = make([]func(), 0)\n\n\tif applyToStd {\n\t\tstd.ResetExitHandlers()\n\t}\n}\n"
  },
  {
    "path": "common_test.go",
    "content": "package slog_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/byteutil\"\n\t\"github.com/gookit/goutil/dump\"\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/gsr\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nvar (\n\ttestData1 = slog.M{\"key0\": \"val0\", \"age\": 23}\n\t// testData2 = slog.M{\"key0\": \"val0\", \"age\": 23, \"sub\": slog.M{\n\t// \t\"subKey0\": 345,\n\t// }}\n)\n\nfunc TestDefine_basic(t *testing.T) {\n\tassert.NotEmpty(t, slog.NoTimeFields)\n\tassert.NotEmpty(t, slog.FieldKeyDate)\n\tassert.NotEmpty(t, slog.FieldKeyTime)\n\tassert.NotEmpty(t, slog.FieldKeyCaller)\n\tassert.NotEmpty(t, slog.FieldKeyError)\n}\n\nfunc TestM_String(t *testing.T) {\n\tm := slog.M{\n\t\t\"k0\": 12,\n\t\t\"k1\": \"abc\",\n\t\t\"k2\": true,\n\t\t\"k3\": 23.45,\n\t\t\"k4\": []int{12, 23},\n\t\t\"k5\": []string{\"ab\", \"bc\"},\n\t\t\"k6\": map[string]any{\n\t\t\t\"k6-1\": 23,\n\t\t\t\"k6-2\": \"def\",\n\t\t},\n\t}\n\n\tfmt.Println(m)\n\tdump.P(m.String(), m)\n\tassert.NotEmpty(t, m.String())\n}\n\nfunc TestLevelName_func(t *testing.T) {\n\tfor level, wantName := range slog.LevelNames {\n\t\trealName := slog.LevelName(level)\n\t\tassert.Eq(t, wantName, realName)\n\t}\n\n\tassert.Eq(t, \"UNKNOWN\", slog.LevelName(20))\n\n\t// LevelByName\n\tassert.Eq(t, slog.InfoLevel, slog.LevelByName(\"info\"))\n\tassert.Eq(t, slog.InfoLevel, slog.LevelByName(\"invalid\"))\n}\n\nfunc TestName2Level(t *testing.T) {\n\tfor wantLevel, name := range slog.LevelNames {\n\t\tlevel, err := slog.Name2Level(name)\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, wantLevel, level)\n\t}\n\n\t// special names\n\ttests := map[slog.Level]string{\n\t\tslog.WarnLevel:  \"warn\",\n\t\tslog.ErrorLevel: \"err\",\n\t\tslog.InfoLevel:  \"\",\n\t}\n\tfor wantLevel, name := range tests {\n\t\tlevel, err := slog.Name2Level(name)\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, 
wantLevel, level)\n\t}\n\n\tlevel, err := slog.Name2Level(\"unknown\")\n\tassert.Err(t, err)\n\tassert.Eq(t, slog.Level(0), level)\n\n\tlevel, err = slog.StringToLevel(\"300\")\n\tassert.NoErr(t, err)\n\tassert.Eq(t, slog.ErrorLevel, level)\n}\n\nfunc TestLevel_methods(t *testing.T) {\n\tt.Run(\"ShouldHandling\", func(t *testing.T) {\n\t\tassert.True(t, slog.InfoLevel.ShouldHandling(slog.ErrorLevel))\n\t\tassert.False(t, slog.InfoLevel.ShouldHandling(slog.TraceLevel))\n\n\t\tassert.True(t, slog.DebugLevel.ShouldHandling(slog.InfoLevel))\n\t\tassert.False(t, slog.DebugLevel.ShouldHandling(slog.TraceLevel))\n\t})\n\n\tt.Run(\"Name\", func(t *testing.T) {\n\t\tassert.Eq(t, \"INFO\", slog.InfoLevel.Name())\n\t\tassert.Eq(t, \"INFO\", slog.InfoLevel.String())\n\t\tassert.Eq(t, \"info\", slog.InfoLevel.LowerName())\n\t\tassert.Eq(t, \"unknown\", slog.Level(330).LowerName())\n\t})\n\n\tt.Run(\"encoding\", func(t *testing.T) {\n\t\t// MarshalJSON\n\t\tbs, err := slog.InfoLevel.MarshalJSON()\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, `\"INFO\"`, string(bs))\n\n\t\t// UnmarshalJSON\n\t\tlevel := slog.Level(0)\n\t\tassert.Eq(t, \"UNKNOWN\", level.Name())\n\t\terr = level.UnmarshalJSON([]byte(`\"warn\"`))\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, \"WARNING\", level.Name())\n\n\t\tassert.Err(t, level.UnmarshalJSON([]byte(`a`)))\n\t})\n}\n\nfunc TestLevels_Contains(t *testing.T) {\n\tassert.True(t, slog.DangerLevels.Contains(slog.ErrorLevel))\n\tassert.False(t, slog.DangerLevels.Contains(slog.InfoLevel))\n\tassert.True(t, slog.NormalLevels.Contains(slog.InfoLevel))\n\tassert.False(t, slog.NormalLevels.Contains(slog.PanicLevel))\n}\n\nfunc newLogRecord(msg string) *slog.Record {\n\tr := &slog.Record{\n\t\tChannel: slog.DefaultChannelName,\n\t\tLevel:   slog.InfoLevel,\n\t\tMessage: msg,\n\t\tTime:    slog.DefaultClockFn.Now(),\n\t\tData: map[string]any{\n\t\t\t\"data_key0\": \"value\",\n\t\t\t\"username\":  \"inhere\",\n\t\t},\n\t\tExtra: map[string]any{\n\t\t\t\"source\":     
\"linux\",\n\t\t\t\"extra_key0\": \"hello\",\n\t\t},\n\t\t// Caller: goinfo.GetCallerInfo(),\n\t}\n\n\tr.Init(true)\n\treturn r\n}\n\ntype closedBuffer struct {\n\tbytes.Buffer\n}\n\nfunc newBuffer() *closedBuffer {\n\treturn &closedBuffer{}\n}\n\nfunc (w *closedBuffer) Close() error {\n\treturn nil\n}\n\nfunc (w *closedBuffer) StringReset() string {\n\ts := w.Buffer.String()\n\tw.Reset()\n\treturn s\n}\n\n//\n// region test handler\n//\n\ntype testHandler struct {\n\tslog.FormatterWrapper\n\tbyteutil.Buffer\n\terrOnHandle bool\n\terrOnClose  bool\n\terrOnFlush  bool\n\t// hooks\n\tbeforeFormat func(r *slog.Record)\n\tbeforeWrite  func(r *slog.Record)\n\tcallOnFlush func()\n\t// NOTE: 如果设置为true，默认会让 error,fatal 等信息提前被reset丢弃掉.\n\t// see Logger.writeRecord()\n\tresetOnFlush bool\n}\n\n// built in test, will collect logs to buffer\nfunc newTestHandler() *testHandler {\n\treturn &testHandler{}\n}\n\nfunc (h *testHandler) IsHandling(_ slog.Level) bool {\n\treturn true\n}\n\nfunc (h *testHandler) Close() error {\n\tif h.errOnClose {\n\t\treturn errorx.Raw(\"close error\")\n\t}\n\n\th.Reset()\n\treturn nil\n}\n\nfunc (h *testHandler) Flush() error {\n\tif h.errOnFlush {\n\t\treturn errorx.Raw(\"flush error\")\n\t}\n\tif h.callOnFlush != nil {\n\t\th.callOnFlush()\n\t}\n\n\tif h.resetOnFlush {\n\t\th.Reset()\n\t}\n\treturn nil\n}\n\nfunc (h *testHandler) Handle(r *slog.Record) error {\n\tif h.errOnHandle {\n\t\treturn errorx.Raw(\"handle error\")\n\t}\n\n\tif h.beforeFormat != nil {\n\t\th.beforeFormat(r)\n\t}\n\n\tbs, err := h.Format(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif h.beforeWrite != nil {\n\t\th.beforeWrite(r)\n\t}\n\th.Write(bs)\n\treturn nil\n}\n\n//\n// region test formatter\n//\n\ntype testFormatter struct {\n\terrOnFormat bool\n}\n\nfunc newTestFormatter(errOnFormat ...bool) *testFormatter {\n\treturn &testFormatter{\n\t\terrOnFormat: len(errOnFormat) > 0 && errOnFormat[0],\n\t}\n}\n\nfunc (f testFormatter) Format(r *slog.Record) ([]byte, error) 
{\n\tif f.errOnFormat {\n\t\treturn nil, errorx.Raw(\"format error\")\n\t}\n\treturn []byte(r.Message), nil\n}\n\n//\n// region test logger\n//\n\nfunc newLogger() *slog.Logger {\n\treturn slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.ReportCaller = true\n\t\tl.DoNothingOnPanicFatal()\n\t})\n}\n\n// newTestLogger create a logger for test, will write logs to buffer\nfunc newTestLogger() (*closedBuffer, *slog.Logger) {\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.DoNothingOnPanicFatal()\n\t\tl.CallerFlag = slog.CallerFlagFull\n\t})\n\tw := newBuffer()\n\th := handler.NewIOWriter(w, slog.AllLevels)\n\t// fmt.Print(\"Template:\", h.TextFormatter().Template())\n\tl.SetHandlers([]slog.Handler{h})\n\treturn w, l\n}\n\nfunc printAllLevelLogs(l gsr.Logger, args ...any) {\n\tl.Debug(args...)\n\tl.Info(args...)\n\tl.Warn(args...)\n\tl.Error(args...)\n\tl.Print(args...)\n\tl.Println(args...)\n\tl.Fatal(args...)\n\tl.Fatalln(args...)\n\tl.Panic(args...)\n\tl.Panicln(args...)\n\n\tsl, ok := l.(*slog.Logger)\n\tif ok {\n\t\tsl.Trace(args...)\n\t\tsl.Notice(args...)\n\t\tsl.ErrorT(errorx.Raw(\"a error object\"))\n\t\tsl.ErrorT(errorx.New(\"error with stack info\"))\n\t}\n}\n\nfunc printfAllLevelLogs(l gsr.Logger, tpl string, args ...any) {\n\tl.Printf(tpl, args...)\n\tl.Debugf(tpl, args...)\n\tl.Infof(tpl, args...)\n\tl.Warnf(tpl, args...)\n\tl.Errorf(tpl, args...)\n\tl.Panicf(tpl, args...)\n\tl.Fatalf(tpl, args...)\n\n\tif sl, ok := l.(*slog.Logger); ok {\n\t\tsl.Noticef(tpl, args...)\n\t\tsl.Tracef(tpl, args...)\n\t}\n}\n"
  },
  {
    "path": "example_test.go",
    "content": "package slog_test\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc Example_quickStart() {\n\tslog.Info(\"info log message\")\n\tslog.Warn(\"warning log message\")\n\tslog.Infof(\"info log %s\", \"message\")\n\tslog.Debugf(\"debug %s\", \"message\")\n}\n\nfunc Example_configSlog() {\n\tslog.Configure(func(logger *slog.SugaredLogger) {\n\t\tf := logger.Formatter.(*slog.TextFormatter)\n\t\tf.EnableColor = true\n\t})\n\n\tslog.Trace(\"this is a simple log message\")\n\tslog.Debug(\"this is a simple log message\")\n\tslog.Info(\"this is a simple log message\")\n\tslog.Notice(\"this is a simple log message\")\n\tslog.Warn(\"this is a simple log message\")\n\tslog.Error(\"this is a simple log message\")\n\tslog.Fatal(\"this is a simple log message\")\n}\n\nfunc Example_useJSONFormat() {\n\t// use JSON formatter\n\tslog.SetFormatter(slog.NewJSONFormatter())\n\n\tslog.Info(\"info log message\")\n\tslog.Warn(\"warning log message\")\n\tslog.WithData(slog.M{\n\t\t\"key0\": 134,\n\t\t\"key1\": \"abc\",\n\t}).Infof(\"info log %s\", \"message\")\n\n\tr := slog.WithFields(slog.M{\n\t\t\"category\": \"service\",\n\t\t\"IP\":       \"127.0.0.1\",\n\t})\n\tr.Infof(\"info %s\", \"message\")\n\tr.Debugf(\"debug %s\", \"message\")\n}\n\nfunc ExampleNew() {\n\tmylog := slog.New()\n\tlevels := slog.AllLevels\n\n\tmylog.AddHandler(handler.MustFileHandler(\"app.log\", handler.WithLogLevels(levels)))\n\n\tmylog.Info(\"info log message\")\n\tmylog.Warn(\"warning log message\")\n\tmylog.Infof(\"info log %s\", \"message\")\n}\n\nfunc ExampleFlushDaemon() {\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\tgo slog.FlushDaemon(func() {\n\t\tfmt.Println(\"flush daemon stopped\")\n\t\tslog.MustClose()\n\t\twg.Done()\n\t})\n\n\tgo func() {\n\t\t// mock app running\n\t\ttime.Sleep(time.Second * 2)\n\n\t\t// stop daemon\n\t\tfmt.Println(\"stop flush daemon\")\n\t\tslog.StopDaemon()\n\t}()\n\n\t// wait for 
stop\n\twg.Wait()\n}\n"
  },
  {
    "path": "formatter.go",
    "content": "package slog\n\nimport \"runtime\"\n\n//\n// Formatter interface\n//\n\n// Formatter interface\ntype Formatter interface {\n\t// Format you can format record and write result to record.Buffer\n\tFormat(record *Record) ([]byte, error)\n}\n\n// FormatterFunc wrapper definition\ntype FormatterFunc func(r *Record) ([]byte, error)\n\n// Format a log record\nfunc (fn FormatterFunc) Format(r *Record) ([]byte, error) {\n\treturn fn(r)\n}\n\n// Formattable interface\ntype Formattable interface {\n\t// Formatter get the log formatter\n\tFormatter() Formatter\n\t// SetFormatter set the log formatter\n\tSetFormatter(Formatter)\n}\n\n// FormattableTrait alias of FormatterWrapper\ntype FormattableTrait = FormatterWrapper\n\n// FormatterWrapper use for format log record.\n//\n// Default will use the TextFormatter\ntype FormatterWrapper struct {\n\t// if not set, default uses the TextFormatter\n\tformatter Formatter\n}\n\n// Formatter get formatter. if not set, will return TextFormatter\nfunc (f *FormatterWrapper) Formatter() Formatter {\n\tif f.formatter == nil {\n\t\tf.formatter = NewTextFormatter()\n\t}\n\treturn f.formatter\n}\n\n// SetFormatter to handler\nfunc (f *FormatterWrapper) SetFormatter(formatter Formatter) {\n\tf.formatter = formatter\n}\n\n// Format log record to bytes\nfunc (f *FormatterWrapper) Format(record *Record) ([]byte, error) {\n\treturn f.Formatter().Format(record)\n}\n\n// CallerFormatFn caller format func\ntype CallerFormatFn func(rf *runtime.Frame) (cs string)\n\n// AsTextFormatter util func\nfunc AsTextFormatter(f Formatter) *TextFormatter {\n\tif tf, ok := f.(*TextFormatter); ok {\n\t\treturn tf\n\t}\n\tpanic(\"slog: cannot cast input as *TextFormatter\")\n}\n\n// AsJSONFormatter util func\nfunc AsJSONFormatter(f Formatter) *JSONFormatter {\n\tif jf, ok := f.(*JSONFormatter); ok {\n\t\treturn jf\n\t}\n\tpanic(\"slog: cannot cast input as *JSONFormatter\")\n}\n"
  },
  {
    "path": "formatter_json.go",
    "content": "package slog\n\nimport (\n\t\"encoding/json\"\n\n\t\"github.com/valyala/bytebufferpool\"\n)\n\nvar (\n\t// DefaultFields default log export fields for json formatter.\n\tDefaultFields = []string{\n\t\tFieldKeyDatetime,\n\t\tFieldKeyChannel,\n\t\tFieldKeyLevel,\n\t\tFieldKeyCaller,\n\t\tFieldKeyMessage,\n\t\tFieldKeyData,\n\t\tFieldKeyExtra,\n\t}\n\n\t// NoTimeFields log export fields without time\n\tNoTimeFields = []string{\n\t\tFieldKeyChannel,\n\t\tFieldKeyLevel,\n\t\tFieldKeyMessage,\n\t\tFieldKeyData,\n\t\tFieldKeyExtra,\n\t}\n)\n\n// JSONFormatter definition\ntype JSONFormatter struct {\n\t// Fields set exported common log fields. default is DefaultFields\n\tFields []string\n\t// Aliases for output fields. you can change the export field name.\n\t//\n\t// - item: `\"field\" : \"output name\"`\n\t//\n\t// eg: {\"message\": \"msg\"} export field will display \"msg\"\n\tAliases StringMap\n\n\t// PrettyPrint will indent all JSON logs\n\tPrettyPrint bool\n\t// TimeFormat the time format layout. default is DefaultTimeFormat\n\tTimeFormat string\n\t// CallerFormatFunc the caller format layout. 
default is defined by CallerFlag\n\tCallerFormatFunc CallerFormatFn\n}\n\n// NewJSONFormatter create new JSONFormatter\nfunc NewJSONFormatter(fn ...func(f *JSONFormatter)) *JSONFormatter {\n\tf := &JSONFormatter{\n\t\t// Aliases: make(StringMap, 0),\n\t\tFields:     DefaultFields,\n\t\tTimeFormat: DefaultTimeFormat,\n\t}\n\n\tif len(fn) > 0 {\n\t\tfn[0](f)\n\t}\n\treturn f\n}\n\n// Configure current formatter\nfunc (f *JSONFormatter) Configure(fn func(*JSONFormatter)) *JSONFormatter {\n\tfn(f)\n\treturn f\n}\n\n// AddField for export\nfunc (f *JSONFormatter) AddField(name string) *JSONFormatter {\n\tf.Fields = append(f.Fields, name)\n\treturn f\n}\n\nvar jsonPool bytebufferpool.Pool\n\n// Format a log record to JSON bytes\nfunc (f *JSONFormatter) Format(r *Record) ([]byte, error) {\n\tlogData := make(M, len(f.Fields))\n\n\t// TODO perf: use buf write build JSON string.\n\tfor _, field := range f.Fields {\n\t\toutName, ok := f.Aliases[field]\n\t\tif !ok {\n\t\t\toutName = field\n\t\t}\n\n\t\tswitch {\n\t\tcase field == FieldKeyDatetime:\n\t\t\tlogData[outName] = r.Time.Format(f.TimeFormat)\n\t\tcase field == FieldKeyTimestamp:\n\t\t\tlogData[outName] = r.timestamp()\n\t\tcase field == FieldKeyCaller && r.Caller != nil:\n\t\t\tlogData[outName] = formatCaller(r.Caller, r.CallerFlag, f.CallerFormatFunc)\n\t\tcase field == FieldKeyLevel:\n\t\t\tlogData[outName] = r.LevelName()\n\t\tcase field == FieldKeyChannel:\n\t\t\tlogData[outName] = r.Channel\n\t\tcase field == FieldKeyMessage:\n\t\t\tlogData[outName] = r.Message\n\t\tcase field == FieldKeyData:\n\t\t\tlogData[outName] = r.Data\n\t\tcase field == FieldKeyExtra:\n\t\t\tlogData[outName] = r.Extra\n\t\t\t// default:\n\t\t\t// \tlogData[outName] = r.Fields[field]\n\t\t}\n\t}\n\n\t// exported custom record fields\n\tfor field, value := range r.Fields {\n\t\tfieldKey := field\n\t\tif _, has := logData[field]; has {\n\t\t\tfieldKey = \"fields.\" + field\n\t\t}\n\t\tlogData[fieldKey] = value\n\t}\n\n\t// 
sort.Interface()\n\tbuf := jsonPool.Get()\n\t// buf.Reset()\n\tdefer jsonPool.Put(buf)\n\t// buf := r.NewBuffer()\n\t// buf.Reset()\n\t// buf.Grow(256)\n\n\tencoder := json.NewEncoder(buf)\n\tif f.PrettyPrint {\n\t\tencoder.SetIndent(\"\", \"  \")\n\t}\n\n\t// has been added newline in Encode().\n\terr := encoder.Encode(logData)\n\treturn buf.Bytes(), err\n}\n"
  },
  {
    "path": "formatter_test.go",
    "content": "package slog_test\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/byteutil\"\n\t\"github.com/gookit/goutil/dump\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc TestFormattableTrait_Formatter(t *testing.T) {\n\tft := &slog.FormattableTrait{}\n\ttf := slog.AsTextFormatter(ft.Formatter())\n\tassert.NotNil(t, tf)\n\tassert.Panics(t, func() {\n\t\tslog.AsJSONFormatter(ft.Formatter())\n\t})\n\n\tft.SetFormatter(slog.NewJSONFormatter())\n\tjf := slog.AsJSONFormatter(ft.Formatter())\n\tassert.NotNil(t, jf)\n\tassert.Panics(t, func() {\n\t\tslog.AsTextFormatter(ft.Formatter())\n\t})\n}\n\nfunc TestFormattable_Format(t *testing.T) {\n\tr := newLogRecord(\"TEST_LOG_MESSAGE format\")\n\tf := &slog.FormattableTrait{}\n\tassert.Eq(t, \"slog: TEST_LOG_MESSAGE format\", r.GoString())\n\n\tbts, err := f.Format(r)\n\tassert.NoErr(t, err)\n\n\tstr := string(bts)\n\tassert.Contains(t, str, \"TEST_LOG_MESSAGE format\")\n\n\tfn := slog.FormatterFunc(func(r *slog.Record) ([]byte, error) {\n\t\treturn []byte(r.Message), nil\n\t})\n\n\tbts, err = fn.Format(r)\n\tassert.NoErr(t, err)\n\n\tstr = string(bts)\n\tassert.Contains(t, str, \"TEST_LOG_MESSAGE format\")\n}\n\nfunc TestNewTextFormatter(t *testing.T) {\n\tf := slog.NewTextFormatter()\n\n\tdump.Println(f.Fields())\n\tassert.Contains(t, f.Fields(), \"datetime\")\n\tassert.Len(t, f.Fields(), strings.Count(slog.DefaultTemplate, \"{{\"))\n\n\tf.SetTemplate(slog.NamedTemplate)\n\tdump.Println(f.Fields())\n\tassert.Contains(t, f.Fields(), \"datetime\")\n\tassert.Len(t, f.Fields(), strings.Count(slog.NamedTemplate, \"{{\"))\n\n\tf.WithEnableColor(true)\n\tassert.True(t, f.EnableColor)\n\n\tf1 := slog.NewTextFormatter()\n\tf1.Configure(func(f *slog.TextFormatter) {\n\t\tf.FullDisplay = true\n\t})\n\tassert.True(t, f1.FullDisplay)\n\n\tt.Run(\"CallerFormatFunc\", func(t *testing.T) {\n\t\tbuf := 
byteutil.NewBuffer()\n\t\th := handler.IOWriterWithMaxLevel(buf, slog.DebugLevel)\n\t\th.SetFormatter(slog.TextFormatterWith(func(f *slog.TextFormatter) {\n\t\t\tf.CallerFormatFunc = func(rf *runtime.Frame) string {\n\t\t\t\treturn \"custom_caller\"\n\t\t\t}\n\t\t}))\n\n\t\tl := slog.NewWithHandlers(h)\n\t\tl.Debug(\"test message\")\n\t\tassert.Contains(t, buf.String(), \"custom_caller\")\n\t})\n\n}\n\nfunc TestTextFormatter_Format(t *testing.T) {\n\tr := newLogRecord(\"TEST_LOG_MESSAGE\")\n\tf := slog.NewTextFormatter()\n\n\tbs, err := f.Format(r)\n\tlogTxt := string(bs)\n\tfmt.Println(f.Template(), logTxt)\n\n\tassert.NoErr(t, err)\n\tassert.NotEmpty(t, logTxt)\n\tassert.NotContains(t, logTxt, \"{{\")\n\tassert.NotContains(t, logTxt, \"}}\")\n}\n\nfunc TestTextFormatter_ColorRenderFunc(t *testing.T) {\n\tf := slog.NewTextFormatter()\n\tf.WithEnableColor(true)\n\tf.ColorRenderFunc = func(field, s string, l slog.Level) string {\n\t\treturn fmt.Sprintf(\"NO-%s-NO\", s)\n\t}\n\n\tr := newLogRecord(\"TEST_LOG_MESSAGE\")\n\tbts, err := f.Format(r)\n\tassert.NoErr(t, err)\n\tstr := string(bts)\n\tassert.StrContains(t, str, \"[NO-info-NO]\")\n\tassert.StrContains(t, str, \"NO-TEST_LOG_MESSAGE-NO\")\n}\n\nfunc TestTextFormatter_LimitLevelNameLen(t *testing.T) {\n\tf := slog.TextFormatterWith(slog.LimitLevelNameLen(4))\n\n\th := handler.ConsoleWithMaxLevel(slog.TraceLevel)\n\th.SetFormatter(f)\n\n\tth := newTestHandler()\n\tth.resetOnFlush = false\n\tth.SetFormatter(f)\n\n\tl := slog.NewWithHandlers(h, th)\n\tl.DoNothingOnPanicFatal()\n\n\tfor _, level := range slog.AllLevels {\n\t\tl.Logf(level, \"a %s test message\", level.String())\n\t}\n\tassert.NoErr(t, l.LastErr())\n\n\tstr := th.ResetAndGet()\n\tassert.StrContains(t, str, \"[PANI]\")\n\tassert.StrContains(t, str, \"[FATA]\")\n\tassert.StrContains(t, str, \"[ERRO]\")\n\tassert.StrContains(t, str, \"[TRAC]\")\n}\n\nfunc TestTextFormatter_LimitLevelNameLen2(t *testing.T) {\n\t// set to max length.\n\tf := 
slog.TextFormatterWith(slog.LimitLevelNameLen(7))\n\n\th := handler.ConsoleWithMaxLevel(slog.TraceLevel)\n\th.SetFormatter(f)\n\n\tth := newTestHandler()\n\tth.resetOnFlush = false\n\tth.SetFormatter(f)\n\n\tl := slog.NewWithHandlers(h, th)\n\tl.DoNothingOnPanicFatal()\n\n\tfor _, level := range slog.AllLevels {\n\t\tl.Logf(level, \"a %s test message\", level.String())\n\t}\n\tassert.NoErr(t, l.LastErr())\n\n\tstr := th.ResetAndGet()\n\tassert.StrContains(t, str, \"[PANIC  ]\")\n\tassert.StrContains(t, str, \"[FATAL  ]\")\n\tassert.StrContains(t, str, \"[ERROR  ]\")\n\tassert.StrContains(t, str, \"[WARNING]\")\n}\n\nfunc TestNewJSONFormatter(t *testing.T) {\n\tf := slog.NewJSONFormatter()\n\tf.AddField(slog.FieldKeyTimestamp)\n\n\th := handler.ConsoleWithLevels(slog.AllLevels)\n\th.SetFormatter(f)\n\n\tl := slog.NewWithHandlers(h)\n\n\tfields := slog.M{\n\t\t\"field1\":  123,\n\t\t\"field2\":  \"abc\",\n\t\t\"message\": \"field name is same of message\", // will be as fields.message\n\t}\n\n\tl.WithFields(fields).Info(\"info\", \"message\")\n\n\tt.Run(\"CallerFormatFunc\", func(t *testing.T) {\n\t\th.SetFormatter(slog.NewJSONFormatter(func(f *slog.JSONFormatter) {\n\t\t\tf.CallerFormatFunc = func(rf *runtime.Frame) string {\n\t\t\t\treturn rf.Function\n\t\t\t}\n\t\t}))\n\t\tl.WithFields(fields).Info(\"info\", \"message\")\n\t})\n\n\t// PrettyPrint=true\n\tt.Run(\"PrettyPrint\", func(t *testing.T) {\n\t\tl = slog.New()\n\t\th = handler.ConsoleWithMaxLevel(slog.DebugLevel)\n\t\tf = slog.NewJSONFormatter(func(f *slog.JSONFormatter) {\n\t\t\tf.Aliases = slog.StringMap{\n\t\t\t\t\"level\": \"levelName\",\n\t\t\t}\n\t\t\tf.PrettyPrint = true\n\t\t})\n\n\t\th.SetFormatter(f)\n\n\t\tl.AddHandler(h)\n\t\tl.WithFields(fields).\n\t\t\tSetData(slog.M{\"key1\": \"val1\"}).\n\t\t\tSetExtra(slog.M{\"ext1\": \"val1\"}).\n\t\t\tInfo(\"info message and PrettyPrint is TRUE\")\n\n\t})\n}\n"
  },
  {
    "path": "formatter_text.go",
    "content": "package slog\n\nimport (\n\t\"github.com/gookit/color\"\n\t\"github.com/gookit/goutil/arrutil\"\n\t\"github.com/valyala/bytebufferpool\"\n)\n\n// there are built in text log template\nconst (\n\tDefaultTemplate = \"[{{datetime}}] [{{channel}}] [{{level}}] [{{caller}}] {{message}} {{data}} {{extra}}\\n\"\n\tNamedTemplate   = \"{{datetime}} channel={{channel}} level={{level}} [file={{caller}}] message={{message}} data={{data}}\\n\"\n)\n\n// ColorTheme for format log to console\nvar ColorTheme = map[Level]color.Color{\n\tPanicLevel:  color.FgRed,\n\tFatalLevel:  color.FgRed,\n\tErrorLevel:  color.FgMagenta,\n\tWarnLevel:   color.FgYellow,\n\tNoticeLevel: color.OpBold,\n\tInfoLevel:   color.FgGreen,\n\tDebugLevel:  color.FgCyan,\n\t// TraceLevel:  color.FgLightGreen,\n}\n\n// TextFormatter definition\ntype TextFormatter struct {\n\t// template text template for render output log messages\n\ttemplate string\n\t// fields list, parsed from template string.\n\t//\n\t// NOTE: contains no-field items in the list. eg: [\"level\", \"}}\"}\n\tfields []string\n\n\t// TimeFormat the time format layout. default is DefaultTimeFormat\n\tTimeFormat string\n\t// Enable color on print log to terminal\n\tEnableColor bool\n\t// ColorTheme setting on render color on terminal\n\tColorTheme map[Level]color.Color\n\t// FullDisplay Whether to display when record.Data, record.Extra, etc. are empty\n\tFullDisplay bool\n\t// EncodeFunc data encode for Record.Data, Record.Extra, etc.\n\t//\n\t// Default is encode by EncodeToString()\n\tEncodeFunc func(v any) string\n\t// CallerFormatFunc the caller format layout. 
default is defined by CallerFlag\n\tCallerFormatFunc CallerFormatFn\n\t// LevelFormatFunc custom the level name format.\n\tLevelFormatFunc func(s string) string\n\t// ColorRenderFunc custom color render func.\n\t//\n\t// - `s`: level name or message\n\tColorRenderFunc func(filed, s string, l Level) string\n\n\t// TODO BeforeFunc call it before format, update fields or other\n\t// BeforeFunc func(r *Record)\n}\n\n// TextFormatterFn definition\ntype TextFormatterFn func(*TextFormatter)\n\n// NewTextFormatter create new TextFormatter\nfunc NewTextFormatter(template ...string) *TextFormatter {\n\tvar fmtTpl string\n\tif len(template) > 0 {\n\t\tfmtTpl = template[0]\n\t} else {\n\t\tfmtTpl = DefaultTemplate\n\t}\n\n\tf := &TextFormatter{\n\t\t// default options\n\t\tColorTheme: ColorTheme,\n\t\tTimeFormat: DefaultTimeFormat,\n\t\t// EnableColor: color.SupportColor(),\n\t\t// EncodeFunc: func(v any) string {\n\t\t// \treturn fmt.Sprint(v)\n\t\t// },\n\t\tEncodeFunc: EncodeToString,\n\t}\n\tf.SetTemplate(fmtTpl)\n\n\treturn f\n}\n\n// TextFormatterWith create new TextFormatter with options\nfunc TextFormatterWith(fns ...TextFormatterFn) *TextFormatter {\n\treturn NewTextFormatter().WithOptions(fns...)\n}\n\n// LimitLevelNameLen limit the length of the level name\nfunc LimitLevelNameLen(length int) TextFormatterFn {\n\treturn func(f *TextFormatter) {\n\t\tf.LevelFormatFunc = func(s string) string {\n\t\t\treturn FormatLevelName(s, length)\n\t\t}\n\t}\n}\n\n// Configure the formatter\nfunc (f *TextFormatter) Configure(fn TextFormatterFn) *TextFormatter {\n\treturn f.WithOptions(fn)\n}\n\n// WithOptions func on the formatter\nfunc (f *TextFormatter) WithOptions(fns ...TextFormatterFn) *TextFormatter {\n\tfor _, fn := range fns {\n\t\tfn(f)\n\t}\n\treturn f\n}\n\n// SetTemplate set the log format template and update field-map\nfunc (f *TextFormatter) SetTemplate(fmtTpl string) {\n\tf.template = fmtTpl\n\tf.fields = parseTemplateToFields(fmtTpl)\n}\n\n// Template get\nfunc (f 
*TextFormatter) Template() string {\n\treturn f.template\n}\n\n// WithEnableColor enable color on print log to terminal\nfunc (f *TextFormatter) WithEnableColor(enable bool) *TextFormatter {\n\tf.EnableColor = enable\n\treturn f\n}\n\n// Fields get an export field list\nfunc (f *TextFormatter) Fields() []string {\n\tss := make([]string, 0, len(f.fields)/2)\n\tfor _, s := range f.fields {\n\t\tif s[0] >= 'a' && s[0] <= 'z' {\n\t\t\tss = append(ss, s)\n\t\t}\n\t}\n\treturn ss\n}\n\nvar textPool bytebufferpool.Pool\n\n// Format a log record\n//\n//goland:noinspection GoUnhandledErrorResult\nfunc (f *TextFormatter) Format(r *Record) ([]byte, error) {\n\tf.beforeFormat()\n\tbuf := textPool.Get()\n\tdefer textPool.Put(buf)\n\n\t// record formatted custom fields\n\tvar formattedFields []string\n\n\tfor _, field := range f.fields {\n\t\t// is not field name. eg: \"}}] \"\n\t\tif field[0] < 'a' || field[0] > 'z' {\n\t\t\t// remove left \"}}\"\n\t\t\tif len(field) > 1 && field[0:2] == \"}}\" {\n\t\t\t\tbuf.WriteString(field[2:])\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(field)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch {\n\t\tcase field == FieldKeyDatetime:\n\t\t\tbuf.B = r.Time.AppendFormat(buf.B, f.TimeFormat)\n\t\tcase field == FieldKeyTimestamp:\n\t\t\tbuf.WriteString(r.timestamp())\n\t\tcase field == FieldKeyCaller && r.Caller != nil:\n\t\t\tbuf.WriteString(formatCaller(r.Caller, r.CallerFlag, f.CallerFormatFunc))\n\t\tcase field == FieldKeyLevel:\n\t\t\tbuf.WriteString(f.renderColorText(field, r.LevelName(), r.Level))\n\t\tcase field == FieldKeyChannel:\n\t\t\tbuf.WriteString(r.Channel)\n\t\tcase field == FieldKeyMessage:\n\t\t\tbuf.WriteString(f.renderColorText(field, r.Message, r.Level))\n\t\tcase field == FieldKeyData:\n\t\t\tif f.FullDisplay || len(r.Data) > 0 {\n\t\t\t\tbuf.WriteString(f.EncodeFunc(r.Data))\n\t\t\t}\n\t\tcase field == FieldKeyExtra:\n\t\t\tif f.FullDisplay || len(r.Extra) > 0 
{\n\t\t\t\tbuf.WriteString(f.EncodeFunc(r.Extra))\n\t\t\t}\n\t\tdefault:\n\t\t\tif _, ok := r.Fields[field]; ok {\n\t\t\t\tformattedFields = append(formattedFields, field)\n\t\t\t\tbuf.WriteString(f.EncodeFunc(r.Fields[field]))\n\t\t\t} else {\n\t\t\t\tbuf.WriteString(field)\n\t\t\t}\n\t\t}\n\t}\n\n\t// UP: check not configured fields in template.\n\tif fLen := len(r.Fields); fLen > 0 && fLen != len(formattedFields) {\n\t\tunformattedFields := make(map[string]any)\n\t\tfor k, v := range r.Fields {\n\t\t\tif !arrutil.StringsContains(formattedFields, k) {\n\t\t\t\tunformattedFields[k] = v\n\t\t\t}\n\t\t}\n\t\tbuf.WriteString(\"UN-CONFIGURED FIELDS: \")\n\t\tbuf.WriteString(f.EncodeFunc(unformattedFields))\n\t\tbuf.WriteByte('\\n')\n\t}\n\n\t// return buf.Bytes(), nil\n\treturn buf.B, nil\n}\n\nfunc (f *TextFormatter) beforeFormat() {\n\t// if f.BeforeFunc == nil {}\n\tif f.EncodeFunc == nil {\n\t\tf.EncodeFunc = EncodeToString\n\t}\n\tif f.ColorTheme == nil {\n\t\tf.ColorTheme = ColorTheme\n\t}\n}\n\nfunc (f *TextFormatter) renderColorText(field, s string, l Level) string {\n\t// custom level name format\n\tif f.LevelFormatFunc != nil && field == FieldKeyLevel {\n\t\ts = f.LevelFormatFunc(s)\n\t}\n\n\tif !f.EnableColor {\n\t\treturn s\n\t}\n\n\t// custom color render func\n\tif f.ColorRenderFunc != nil {\n\t\treturn f.ColorRenderFunc(field, s, l)\n\t}\n\n\t// output colored logs for console output\n\tif theme, ok := f.ColorTheme[l]; ok {\n\t\treturn theme.Render(s)\n\t}\n\treturn s\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/gookit/slog\n\ngo 1.19\n\nrequire (\n\tgithub.com/gookit/color v1.6.0\n\tgithub.com/gookit/goutil v0.7.4\n\tgithub.com/gookit/gsr v0.1.1\n\tgithub.com/valyala/bytebufferpool v1.0.0\n)\n\nrequire (\n\tgithub.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect\n\tgolang.org/x/sync v0.11.0 // indirect\n\tgolang.org/x/sys v0.30.0 // indirect\n\tgolang.org/x/term v0.29.0 // indirect\n\tgolang.org/x/text v0.22.0 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/gookit/assert v0.1.1 h1:lh3GcawXe/p+cU7ESTZ5Ui3Sm/x8JWpIis4/1aF0mY0=\ngithub.com/gookit/color v1.6.0 h1:JjJXBTk1ETNyqyilJhkTXJYYigHG24TM9Xa2M1xAhRA=\ngithub.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs=\ngithub.com/gookit/goutil v0.7.4 h1:OWgUngToNz+bPlX5aP+EMG31DraEU63uvKMwwT3vseM=\ngithub.com/gookit/goutil v0.7.4/go.mod h1:vJS9HXctYTCLtCsZot5L5xF+O1oR17cDYO9R0HxBmnU=\ngithub.com/gookit/gsr v0.1.1 h1:TaHD3M7qa6lcAf9D2J4mGNg+QjgDtD1bw7uctF8RXOM=\ngithub.com/gookit/gsr v0.1.1/go.mod h1:7wv4Y4WCnil8+DlDYHBjidzrEzfHhXEoFjEA0pPPWpI=\ngithub.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=\ngithub.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=\ngithub.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=\ngithub.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=\ngolang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=\ngolang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=\ngolang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=\ngolang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=\ngolang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=\ngolang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=\ngolang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=\ngolang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=\n"
  },
  {
    "path": "handler/README.md",
    "content": "# Handlers\n\nPackage handler provide useful common log handlers. eg: file, console, multi_file, rotate_file, stream, syslog, email\n\n```text\nhandler -> buffered -> rotated -> writer(os.File)\n```\n\n## Built-in handlers\n\n- `handler.ConsoleHandler` Console handler \n- `handler.FileHandler` File handler\n- `handler.StreamHandler` Stream handler\n- `handler.SyslogHandler` Syslog handler\n- `handler.EmailHandler` Email handler\n- `handler.FlushCloseHandler` Flush and close handler\n\n## Go Docs\n\nDocs generated by: `go doc ./handler`\n\n### Handler Functions\n\n```go\nfunc LineBuffOsFile(f *os.File, bufSize int, levels []slog.Level) slog.Handler\nfunc LineBuffWriter(w io.Writer, bufSize int, levels []slog.Level) slog.Handler\nfunc LineBufferedFile(logfile string, bufSize int, levels []slog.Level) (slog.Handler, error)\n\ntype ConsoleHandler = IOWriterHandler\n    func ConsoleWithLevels(levels []slog.Level) *ConsoleHandler\n    func ConsoleWithMaxLevel(level slog.Level) *ConsoleHandler\n    func NewConsole(levels []slog.Level) *ConsoleHandler\n    func NewConsoleHandler(levels []slog.Level) *ConsoleHandler\n    func NewConsoleWithLF(lf slog.LevelFormattable) *ConsoleHandler\ntype EmailHandler struct{ ... }\n    func NewEmailHandler(from EmailOption, toAddresses []string) *EmailHandler\ntype EmailOption struct{ ... }\n\ntype FlushCloseHandler struct{ ... 
}\n    func FlushCloserWithLevels(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler\n    func FlushCloserWithMaxLevel(out FlushCloseWriter, maxLevel slog.Level) *FlushCloseHandler\n    func NewBuffered(w io.WriteCloser, bufSize int, levels ...slog.Level) *FlushCloseHandler\n    func NewBufferedHandler(w io.WriteCloser, bufSize int, levels ...slog.Level) *FlushCloseHandler\n    func NewFlushCloseHandler(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler\n    func NewFlushCloser(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler\n    func NewFlushCloserWithLF(out FlushCloseWriter, lf slog.LevelFormattable) *FlushCloseHandler\n\ntype IOWriterHandler struct{ ... }\n    func IOWriterWithLevels(out io.Writer, levels []slog.Level) *IOWriterHandler\n    func IOWriterWithMaxLevel(out io.Writer, maxLevel slog.Level) *IOWriterHandler\n    func NewIOWriter(out io.Writer, levels []slog.Level) *IOWriterHandler\n    func NewIOWriterHandler(out io.Writer, levels []slog.Level) *IOWriterHandler\n    func NewIOWriterWithLF(out io.Writer, lf slog.LevelFormattable) *IOWriterHandler\n    func NewSimpleHandler(out io.Writer, maxLevel slog.Level) *IOWriterHandler\n    func SimpleWithLevels(out io.Writer, levels []slog.Level) *IOWriterHandler\n\n\ntype SimpleHandler = IOWriterHandler\n    func NewHandler(out io.Writer, maxLevel slog.Level) *SimpleHandler\n    func NewSimple(out io.Writer, maxLevel slog.Level) *SimpleHandler\n\ntype SyncCloseHandler struct{ ... 
}\n    func JSONFileHandler(logfile string, fns ...ConfigFn) (*SyncCloseHandler, error)\n    func MustFileHandler(logfile string, fns ...ConfigFn) *SyncCloseHandler\n    func MustRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) *SyncCloseHandler\n    func MustSimpleFile(filepath string, maxLv ...slog.Level) *SyncCloseHandler\n    func MustSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) *SyncCloseHandler\n    func MustTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) *SyncCloseHandler\n    func NewBuffFileHandler(logfile string, buffSize int, fns ...ConfigFn) (*SyncCloseHandler, error)\n    func NewFileHandler(logfile string, fns ...ConfigFn) (h *SyncCloseHandler, err error)\n    func NewRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error)\n    func NewRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error)\n    func NewSimpleFile(filepath string, maxLv ...slog.Level) (*SyncCloseHandler, error)\n    func NewSimpleFileHandler(filePath string, maxLv ...slog.Level) (*SyncCloseHandler, error)\n    func NewSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error)\n    func NewSizeRotateFileHandler(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error)\n    func NewSyncCloseHandler(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler\n    func NewSyncCloser(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler\n    func NewSyncCloserWithLF(out SyncCloseWriter, lf slog.LevelFormattable) *SyncCloseHandler\n    func NewTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error)\n    func NewTimeRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error)\n    func SyncCloserWithLevels(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler\n    func SyncCloserWithMaxLevel(out 
SyncCloseWriter, maxLevel slog.Level) *SyncCloseHandler\n\ntype SysLogHandler struct{ ... }\n    func NewSysLogHandler(priority syslog.Priority, tag string) (*SysLogHandler, error)\n\ntype WriteCloserHandler struct{ ... }\n    func NewWriteCloser(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler\n    func NewWriteCloserHandler(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler\n    func NewWriteCloserWithLF(out io.WriteCloser, lf slog.LevelFormattable) *WriteCloserHandler\n    func WriteCloserWithLevels(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler\n    func WriteCloserWithMaxLevel(out io.WriteCloser, maxLevel slog.Level) *WriteCloserHandler\n```\n\n\n### Config Functions\n\n```go\ntype Builder struct{ ... }\n    func NewBuilder() *Builder\ntype Config struct{ ... }\n    func NewConfig(fns ...ConfigFn) *Config\n    func NewEmptyConfig(fns ...ConfigFn) *Config\ntype ConfigFn func(c *Config)\n    func WithBackupNum(n uint) ConfigFn\n    func WithBackupTime(bt uint) ConfigFn\n    func WithBuffMode(buffMode string) ConfigFn\n    func WithBuffSize(buffSize int) ConfigFn\n    func WithCompress(compress bool) ConfigFn\n    func WithFilePerm(filePerm fs.FileMode) ConfigFn\n    func WithLevelMode(mode slog.LevelMode) ConfigFn\n    func WithLevelName(name string) ConfigFn\n    func WithLevelNames(names []string) ConfigFn\n    func WithLevelNamesString(names string) ConfigFn\n    func WithLogLevel(level slog.Level) ConfigFn\n    func WithLogLevels(levels slog.Levels) ConfigFn\n    func WithLogfile(logfile string) ConfigFn\n    func WithMaxLevelName(name string) ConfigFn\n    func WithMaxSize(maxSize uint64) ConfigFn\n    func WithRotateMode(m rotatefile.RotateMode) ConfigFn\n    func WithRotateTime(rt rotatefile.RotateTime) ConfigFn\n    func WithRotateTimeString(rt string) ConfigFn\n    func WithTimeClock(clock rotatefile.Clocker) ConfigFn\n    func WithUseJSON(useJSON bool) ConfigFn\n```\n"
  },
  {
    "path": "handler/buffer.go",
    "content": "package handler\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/bufwrite\"\n)\n\n// NewBuffered create new BufferedHandler\nfunc NewBuffered(w io.WriteCloser, bufSize int, levels ...slog.Level) *FlushCloseHandler {\n\treturn NewBufferedHandler(w, bufSize, levels...)\n}\n\n// NewBufferedHandler create new BufferedHandler\nfunc NewBufferedHandler(w io.WriteCloser, bufSize int, levels ...slog.Level) *FlushCloseHandler {\n\tif len(levels) == 0 {\n\t\tlevels = slog.AllLevels\n\t}\n\n\tout := bufwrite.NewBufIOWriterSize(w, bufSize)\n\treturn FlushCloserWithLevels(out, levels)\n}\n\n// LineBufferedFile handler\nfunc LineBufferedFile(logfile string, bufSize int, levels []slog.Level) (slog.Handler, error) {\n\tcfg := NewConfig(\n\t\tWithLogfile(logfile),\n\t\tWithBuffSize(bufSize),\n\t\tWithLogLevels(levels),\n\t\tWithBuffMode(BuffModeLine),\n\t)\n\n\tout, err := cfg.CreateWriter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn SyncCloserWithLevels(out, levels), nil\n}\n\n// LineBuffOsFile handler\nfunc LineBuffOsFile(f *os.File, bufSize int, levels []slog.Level) slog.Handler {\n\tif f == nil {\n\t\tpanic(\"slog: the os file cannot be nil\")\n\t}\n\n\tout := bufwrite.NewLineWriterSize(f, bufSize)\n\treturn SyncCloserWithLevels(out, levels)\n}\n\n// LineBuffWriter handler\nfunc LineBuffWriter(w io.Writer, bufSize int, levels []slog.Level) slog.Handler {\n\tif w == nil {\n\t\tpanic(\"slog: the io writer cannot be nil\")\n\t}\n\n\tout := bufwrite.NewLineWriterSize(w, bufSize)\n\treturn IOWriterWithLevels(out, levels)\n}\n\n//\n// --------- wrap a handler with buffer ---------\n//\n\n// FormatWriterHandler interface\ntype FormatWriterHandler interface {\n\tslog.Handler\n\t// Formatter record formatter\n\tFormatter() slog.Formatter\n\t// Writer the output writer\n\tWriter() io.Writer\n}\n"
  },
  {
    "path": "handler/buffer_test.go",
    "content": "package handler_test\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc TestNewBufferedHandler(t *testing.T) {\n\tlogfile := \"./testdata/buffer-os-file.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\n\tfile, err := handler.QuickOpenFile(logfile)\n\tassert.NoErr(t, err)\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tbh := handler.NewBuffered(file, 128)\n\n\t// new logger\n\tl := slog.NewWithHandlers(bh)\n\tl.Info(\"buffered info message\")\n\n\tbts, err := os.ReadFile(logfile)\n\tassert.NoErr(t, err)\n\tassert.Empty(t, bts)\n\n\tl.Warn(\"buffered warn message\")\n\tbts, err = os.ReadFile(logfile)\n\tassert.NoErr(t, err)\n\n\tstr := string(bts)\n\tassert.Contains(t, str, \"[INFO]\")\n\n\terr = l.FlushAll()\n\tassert.NoErr(t, err)\n}\n\nfunc TestLineBufferedFile(t *testing.T) {\n\tlogfile := \"./testdata/line-buff-file.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\n\th, err := handler.LineBufferedFile(logfile, 12, slog.AllLevels)\n\tassert.NoErr(t, err)\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tr := newLogRecord(\"Test LineBufferedFile\")\n\terr = h.Handle(r)\n\tassert.NoErr(t, err)\n\n\tbts, err := os.ReadFile(logfile)\n\tassert.NoErr(t, err)\n\n\tstr := string(bts)\n\tassert.Contains(t, str, \"[INFO]\")\n\tassert.Contains(t, str, \"Test LineBufferedFile\")\n}\n\nfunc TestLineBuffOsFile(t *testing.T) {\n\tlogfile := \"./testdata/line-buff-os-file.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\n\tfile, err := fsutil.QuickOpenFile(logfile)\n\tassert.NoErr(t, err)\n\n\th := handler.LineBuffOsFile(file, 12, slog.AllLevels)\n\tassert.NoErr(t, err)\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tr := newLogRecord(\"Test LineBuffOsFile\")\n\terr = h.Handle(r)\n\tassert.NoErr(t, err)\n\n\tbts, err := os.ReadFile(logfile)\n\tassert.NoErr(t, err)\n\n\tstr := 
string(bts)\n\tassert.Contains(t, str, \"[INFO]\")\n\tassert.Contains(t, str, \"Test LineBuffOsFile\")\n\n\tassert.Panics(t, func() {\n\t\thandler.LineBuffOsFile(nil, 12, slog.AllLevels)\n\t})\n}\n\nfunc TestLineBuffWriter(t *testing.T) {\n\tlogfile := \"./testdata/line-buff-writer.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\n\tfile, err := fsutil.QuickOpenFile(logfile)\n\tassert.NoErr(t, err)\n\n\th := handler.LineBuffWriter(file, 12, slog.AllLevels)\n\tassert.NoErr(t, err)\n\tassert.True(t, fsutil.IsFile(logfile))\n\tassert.Panics(t, func() {\n\t\thandler.LineBuffWriter(nil, 12, slog.AllLevels)\n\t})\n\n\tr := newLogRecord(\"Test LineBuffWriter\")\n\terr = h.Handle(r)\n\tassert.NoErr(t, err)\n\n\tbts, err := os.ReadFile(logfile)\n\tassert.NoErr(t, err)\n\n\tstr := string(bts)\n\tassert.Contains(t, str, \"[INFO]\")\n\tassert.Contains(t, str, \"Test LineBuffWriter\")\n\n\tassert.Panics(t, func() {\n\t\thandler.LineBuffOsFile(nil, 12, slog.AllLevels)\n\t})\n}\n"
  },
  {
    "path": "handler/builder.go",
    "content": "package handler\n\nimport (\n\t\"io\"\n\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\n//\n// ---------------------------------------------------------------------------\n// handler builder\n// ---------------------------------------------------------------------------\n//\n\n// Builder struct for create handler\ntype Builder struct {\n\t*Config\n\tOutput io.Writer\n}\n\n// NewBuilder create\nfunc NewBuilder() *Builder {\n\treturn &Builder{\n\t\tConfig: NewEmptyConfig(),\n\t}\n}\n\n// WithOutput to the builder\nfunc (b *Builder) WithOutput(w io.Writer) *Builder {\n\tb.Output = w\n\treturn b\n}\n\n// With some config fn\n//\n// Deprecated: please use WithConfigFn()\nfunc (b *Builder) With(fns ...ConfigFn) *Builder {\n\treturn b.WithConfigFn(fns...)\n}\n\n// WithConfigFn some config fn\nfunc (b *Builder) WithConfigFn(fns ...ConfigFn) *Builder {\n\tb.Config.With(fns...)\n\treturn b\n}\n\n// WithLogfile setting\nfunc (b *Builder) WithLogfile(logfile string) *Builder {\n\tb.Logfile = logfile\n\treturn b\n}\n\n// WithLevelMode setting\nfunc (b *Builder) WithLevelMode(mode slog.LevelMode) *Builder {\n\tb.LevelMode = mode\n\treturn b\n}\n\n// WithLogLevel setting max log level\nfunc (b *Builder) WithLogLevel(level slog.Level) *Builder {\n\tb.Level = level\n\tb.LevelMode = slog.LevelModeMax\n\treturn b\n}\n\n// WithLogLevels setting\nfunc (b *Builder) WithLogLevels(levels []slog.Level) *Builder {\n\tb.Levels = levels\n\tb.LevelMode = slog.LevelModeList\n\treturn b\n}\n\n// WithBuffMode setting\nfunc (b *Builder) WithBuffMode(bufMode string) *Builder {\n\tb.BuffMode = bufMode\n\treturn b\n}\n\n// WithBuffSize setting\nfunc (b *Builder) WithBuffSize(bufSize int) *Builder {\n\tb.BuffSize = bufSize\n\treturn b\n}\n\n// WithMaxSize setting\nfunc (b *Builder) WithMaxSize(maxSize uint64) *Builder {\n\tb.MaxSize = maxSize\n\treturn b\n}\n\n// WithRotateTime setting\nfunc (b *Builder) WithRotateTime(rt rotatefile.RotateTime) *Builder 
{\n\tb.RotateTime = rt\n\treturn b\n}\n\n// WithCompress setting\nfunc (b *Builder) WithCompress(compress bool) *Builder {\n\tb.Compress = compress\n\treturn b\n}\n\n// WithUseJSON setting\nfunc (b *Builder) WithUseJSON(useJSON bool) *Builder {\n\tb.UseJSON = useJSON\n\treturn b\n}\n\n// Build slog handler.\nfunc (b *Builder) Build() slog.FormattableHandler {\n\tif b.Output != nil {\n\t\treturn b.buildFromWriter(b.Output)\n\t}\n\n\tif b.Logfile != \"\" {\n\t\tw, err := b.CreateWriter()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn b.buildFromWriter(w)\n\t}\n\n\tpanic(\"slog: missing information for build slog handler\")\n}\n\n// Build slog handler.\nfunc (b *Builder) buildFromWriter(w io.Writer) (h slog.FormattableHandler) {\n\tdefer b.reset()\n\tbufSize := b.BuffSize\n\tlf := b.newLevelFormattable()\n\n\tif scw, ok := w.(SyncCloseWriter); ok {\n\t\tif bufSize > 0 {\n\t\t\tscw = b.wrapBuffer(scw)\n\t\t}\n\n\t\th = NewSyncCloserWithLF(scw, lf)\n\t} else if fcw, ok := w.(FlushCloseWriter); ok {\n\t\tif bufSize > 0 {\n\t\t\tfcw = b.wrapBuffer(fcw)\n\t\t}\n\n\t\th = NewFlushCloserWithLF(fcw, lf)\n\t} else if wc, ok := w.(io.WriteCloser); ok {\n\t\tif bufSize > 0 {\n\t\t\twc = b.wrapBuffer(wc)\n\t\t}\n\n\t\th = NewWriteCloserWithLF(wc, lf)\n\t} else {\n\t\tif bufSize > 0 {\n\t\t\tw = b.wrapBuffer(w)\n\t\t}\n\n\t\th = NewIOWriterWithLF(w, lf)\n\t}\n\n\t// use json format.\n\tif b.UseJSON {\n\t\th.SetFormatter(slog.NewJSONFormatter())\n\t}\n\treturn\n}\n\n// rest builder.\nfunc (b *Builder) reset() {\n\tb.Output = nil\n\tb.Config = NewEmptyConfig()\n}\n"
  },
  {
    "path": "handler/config.go",
    "content": "package handler\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"io/fs\"\n\t\"strings\"\n\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/bufwrite\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\n// the buff mode constants\nconst (\n\tBuffModeLine = \"line\"\n\tBuffModeBite = \"bite\"\n)\n\nconst (\n\t// LevelModeList use level list for limit record write\n\tLevelModeList = slog.LevelModeList\n\t// LevelModeValue use max level limit log record write\n\tLevelModeValue = slog.LevelModeMax\n)\n\n// ConfigFn for config some settings\ntype ConfigFn func(c *Config)\n\n// Config struct\ntype Config struct {\n\t// Logfile for writing logs\n\tLogfile string `json:\"logfile\" yaml:\"logfile\"`\n\n\t// FilePerm for create log file. default rotatefile.DefaultFilePerm\n\tFilePerm fs.FileMode `json:\"file_perm\" yaml:\"file_perm\"`\n\n\t// LevelMode for limit log records. default LevelModeList\n\tLevelMode slog.LevelMode `json:\"level_mode\" yaml:\"level_mode\"`\n\n\t// Level max value. valid on LevelMode = LevelModeValue\n\t//\n\t// eg: set Level=slog.LevelError, it will only write messages on level <= error.\n\tLevel slog.Level `json:\"level\" yaml:\"level\"`\n\n\t// Levels list for writing. valid on LevelMode = LevelModeList\n\tLevels []slog.Level `json:\"levels\" yaml:\"levels\"`\n\n\t// UseJSON for format logs\n\tUseJSON bool `json:\"use_json\" yaml:\"use_json\"`\n\n\t// BuffMode type name. allow: line, bite\n\t//\n\t// Recommend use BuffModeLine(it's default)\n\tBuffMode string `json:\"buff_mode\" yaml:\"buff_mode\"`\n\n\t// BuffSize for enable buffer, unit is bytes. set 0 to disable buffer\n\tBuffSize int `json:\"buff_size\" yaml:\"buff_size\"`\n\n\t// RotateTime for a rotating file, unit is seconds.\n\tRotateTime rotatefile.RotateTime `json:\"rotate_time\" yaml:\"rotate_time\"`\n\n\t// RotateMode for a rotating file by time. 
default rotatefile.ModeRename\n\tRotateMode rotatefile.RotateMode `json:\"rotate_mode\" yaml:\"rotate_mode\"`\n\n\t// TimeClock for a rotating file by time.\n\tTimeClock rotatefile.Clocker `json:\"-\" yaml:\"-\"`\n\n\t// MaxSize for a rotating file by size, unit is bytes.\n\tMaxSize uint64 `json:\"max_size\" yaml:\"max_size\"`\n\n\t// Compress determines if the rotated log files should be compressed using gzip.\n\t// The default is not to perform compression.\n\tCompress bool `json:\"compress\" yaml:\"compress\"`\n\n\t// BackupNum max number for keep old files.\n\t//\n\t// 0 is not limit, default is 20.\n\tBackupNum uint `json:\"backup_num\" yaml:\"backup_num\"`\n\n\t// BackupTime max time for keep old files, unit is hours.\n\t//\n\t// 0 is not limit, default is a week.\n\tBackupTime uint `json:\"backup_time\" yaml:\"backup_time\"`\n\n\t// RenameFunc build filename for rotate file\n\tRenameFunc func(filepath string, rotateNum uint) string\n\n\t// CleanOnClose determines if the rotated log files should be cleaned up when close.\n\tCleanOnClose bool `json:\"clean_on_close\" yaml:\"clean_on_close\"`\n\n\t// DebugMode for debug on development.\n\tDebugMode bool\n}\n\n// NewEmptyConfig new config instance\nfunc NewEmptyConfig(fns ...ConfigFn) *Config {\n\tc := &Config{Levels: slog.AllLevels}\n\treturn c.WithConfigFn(fns...)\n}\n\n// NewConfig new config instance with some default settings.\nfunc NewConfig(fns ...ConfigFn) *Config {\n\tc := &Config{\n\t\tLevels:   slog.AllLevels,\n\t\tBuffMode: BuffModeLine,\n\t\tBuffSize: DefaultBufferSize,\n\t\t// rotate file settings\n\t\tMaxSize:    rotatefile.DefaultMaxSize,\n\t\tRotateTime: rotatefile.EveryHour,\n\t\t// old files clean settings\n\t\tBackupNum:  rotatefile.DefaultBackNum,\n\t\tBackupTime: rotatefile.DefaultBackTime,\n\t\tDebugMode:  slog.DebugMode,\n\t}\n\n\treturn c.WithConfigFn(fns...)\n}\n\n// FromJSON load config from json string\nfunc (c *Config) FromJSON(bts []byte) error { return json.Unmarshal(bts, c) 
}\n\n// With more config settings func\nfunc (c *Config) With(fns ...ConfigFn) *Config { return c.WithConfigFn(fns...) }\n\n// WithConfigFn more config settings func\nfunc (c *Config) WithConfigFn(fns ...ConfigFn) *Config {\n\tfor _, fn := range fns {\n\t\tfn(c)\n\t}\n\treturn c\n}\n\nfunc (c *Config) newLevelFormattable() slog.LevelFormattable {\n\tif c.LevelMode == LevelModeValue {\n\t\treturn slog.NewLvFormatter(c.Level)\n\t}\n\treturn slog.NewLvsFormatter(c.Levels)\n}\n\n// CreateHandler quick create a handler by config\nfunc (c *Config) CreateHandler() (*SyncCloseHandler, error) {\n\toutput, err := c.CreateWriter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th := &SyncCloseHandler{\n\t\tOutput: output,\n\t\t// with log level and formatter\n\t\tLevelFormattable: c.newLevelFormattable(),\n\t}\n\n\tif c.UseJSON {\n\t\th.SetFormatter(slog.NewJSONFormatter())\n\t}\n\treturn h, nil\n}\n\n// RotateWriter build rotate writer by config\nfunc (c *Config) RotateWriter() (output SyncCloseWriter, err error) {\n\tif c.MaxSize == 0 && c.RotateTime == 0 {\n\t\treturn nil, errorx.E(\"slog: cannot create rotate writer, MaxSize and RotateTime both is 0\")\n\t}\n\n\treturn c.CreateWriter()\n}\n\n// CreateWriter build writer by config\nfunc (c *Config) CreateWriter() (output SyncCloseWriter, err error) {\n\tif c.Logfile == \"\" {\n\t\treturn nil, errorx.Raw(\"slog: logfile cannot be empty for create writer\")\n\t}\n\tif c.FilePerm == 0 {\n\t\tc.FilePerm = rotatefile.DefaultFilePerm\n\t}\n\n\t// create a rotated writer by config.\n\tif c.MaxSize > 0 || c.RotateTime > 0 {\n\t\trc := rotatefile.EmptyConfigWith()\n\n\t\t// has locked on logger.write()\n\t\trc.CloseLock = true\n\t\trc.Filepath = c.Logfile\n\t\trc.FilePerm = c.FilePerm\n\t\trc.DebugMode = c.DebugMode\n\n\t\t// copy settings\n\t\trc.MaxSize = c.MaxSize\n\t\trc.RotateTime = c.RotateTime\n\t\trc.RotateMode = c.RotateMode\n\t\trc.BackupNum = c.BackupNum\n\t\trc.BackupTime = c.BackupTime\n\t\trc.Compress = 
c.Compress\n\t\trc.CleanOnClose = c.CleanOnClose\n\n\t\tif c.RenameFunc != nil {\n\t\t\trc.RenameFunc = c.RenameFunc\n\t\t}\n\t\tif c.TimeClock != nil {\n\t\t\trc.TimeClock = c.TimeClock\n\t\t}\n\n\t\toutput, err = rc.Create()\n\t} else {\n\t\t// create a file writer\n\t\toutput, err = fsutil.OpenAppendFile(c.Logfile, c.FilePerm)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// wrap buffer\n\tif c.BuffSize > 0 {\n\t\toutput = c.wrapBuffer(output)\n\t}\n\treturn\n}\n\ntype flushSyncCloseWriter interface {\n\tFlushCloseWriter\n\tSync() error\n}\n\n// wrap buffer for the writer\nfunc (c *Config) wrapBuffer(w io.Writer) (bw flushSyncCloseWriter) {\n\tif c.BuffMode == BuffModeLine {\n\t\tbw = bufwrite.NewLineWriterSize(w, c.BuffSize)\n\t} else {\n\t\tbw = bufwrite.NewBufIOWriterSize(w, c.BuffSize)\n\t}\n\treturn bw\n}\n\n//\n// ---------------------------------------------------------------------------\n// global config func\n// ---------------------------------------------------------------------------\n//\n\n// WithLogfile setting\nfunc WithLogfile(logfile string) ConfigFn {\n\treturn func(c *Config) { c.Logfile = logfile }\n}\n\n// WithFilePerm setting\nfunc WithFilePerm(filePerm fs.FileMode) ConfigFn {\n\treturn func(c *Config) { c.FilePerm = filePerm }\n}\n\n// WithLevelMode setting\nfunc WithLevelMode(lm slog.LevelMode) ConfigFn {\n\treturn func(c *Config) { c.LevelMode = lm }\n}\n\n// WithLevelModeString setting\nfunc WithLevelModeString(s string) ConfigFn {\n\treturn func(c *Config) { c.LevelMode = slog.SafeToLevelMode(s) }\n}\n\n// WithLogLevel setting max log level\nfunc WithLogLevel(level slog.Level) ConfigFn {\n\treturn func(c *Config) {\n\t\tc.Level = level\n\t\tc.LevelMode = LevelModeValue\n\t}\n}\n\n// WithLevelName setting max level by name\nfunc WithLevelName(name string) ConfigFn { return WithLogLevel(slog.LevelByName(name)) }\n\n// WithMaxLevelName setting max level by name\nfunc WithMaxLevelName(name string) ConfigFn { return 
WithLogLevel(slog.LevelByName(name)) }\n\n// WithLogLevels setting\nfunc WithLogLevels(levels slog.Levels) ConfigFn {\n\treturn func(c *Config) {\n\t\tc.Levels = levels\n\t\tc.LevelMode = LevelModeList\n\t}\n}\n\n// WithLevelNamesString setting multi levels by level names string, multi names split by comma.\nfunc WithLevelNamesString(names string) ConfigFn {\n\treturn WithLevelNames(strings.Split(names, \",\"))\n}\n\n// WithLevelNames set multi levels by level names.\nfunc WithLevelNames(names []string) ConfigFn {\n\tlevels := make([]slog.Level, 0, len(names))\n\tfor _, name := range names {\n\t\tlevels = append(levels, slog.LevelByName(name))\n\t}\n\treturn WithLogLevels(levels)\n}\n\n// WithRotateTime setting the rotated time\nfunc WithRotateTime(rt rotatefile.RotateTime) ConfigFn {\n\treturn func(c *Config) { c.RotateTime = rt }\n}\n\n// WithRotateTimeString setting the rotated time by string.\n//\n// eg: \"1hour\", \"24h\", \"1day\", \"7d\", \"1m\", \"30s\"\nfunc WithRotateTimeString(s string) ConfigFn {\n\treturn func(c *Config) {\n\t\trt, err := rotatefile.StringToRotateTime(s)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tc.RotateTime = rt\n\t}\n}\n\n// WithRotateMode setting rotating mode rotatefile.RotateMode\nfunc WithRotateMode(m rotatefile.RotateMode) ConfigFn {\n\treturn func(c *Config) { c.RotateMode = m }\n}\n\n// WithRotateModeString setting rotatefile.RotateMode by string.\nfunc WithRotateModeString(s string) ConfigFn {\n\treturn func(c *Config) {\n\t\tm, err := rotatefile.StringToRotateMode(s)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tc.RotateMode = m\n\t}\n}\n\n// WithTimeClock setting\nfunc WithTimeClock(clock rotatefile.Clocker) ConfigFn {\n\treturn func(c *Config) { c.TimeClock = clock }\n}\n\n// WithBackupNum setting\nfunc WithBackupNum(n uint) ConfigFn {\n\treturn func(c *Config) { c.BackupNum = n }\n}\n\n// WithBackupTime setting backup time\nfunc WithBackupTime(bt uint) ConfigFn {\n\treturn func(c *Config) { c.BackupTime = bt 
}\n}\n\n// WithBuffMode setting buffer mode\nfunc WithBuffMode(buffMode string) ConfigFn {\n\treturn func(c *Config) { c.BuffMode = buffMode }\n}\n\n// WithBuffSize setting buffer size, unit is bytes.\nfunc WithBuffSize(buffSize int) ConfigFn {\n\treturn func(c *Config) { c.BuffSize = buffSize }\n}\n\n// WithMaxSize setting max size for a rotated file\nfunc WithMaxSize(maxSize uint64) ConfigFn {\n\treturn func(c *Config) { c.MaxSize = maxSize }\n}\n\n// WithCompress setting compress\nfunc WithCompress(compress bool) ConfigFn {\n\treturn func(c *Config) { c.Compress = compress }\n}\n\n// WithUseJSON setting uses JSON format\nfunc WithUseJSON(useJSON bool) ConfigFn {\n\treturn func(c *Config) { c.UseJSON = useJSON }\n}\n\n// WithDebugMode setting for debug mode\nfunc WithDebugMode(c *Config) { c.DebugMode = true }\n"
  },
  {
    "path": "handler/config_test.go",
    "content": "package handler_test\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/dump\"\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/x/fmtutil\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\nfunc TestNewConfig(t *testing.T) {\n\tc := handler.NewConfig(\n\t\thandler.WithCompress(true),\n\t\thandler.WithLevelMode(handler.LevelModeValue),\n\t\thandler.WithBackupNum(20),\n\t\thandler.WithBackupTime(1800),\n\t\thandler.WithRotateMode(rotatefile.ModeCreate),\n\t\tfunc(c *handler.Config) {\n\t\t\tc.BackupTime = 23\n\t\t\tc.RenameFunc = func(fpath string, num uint) string {\n\t\t\t\treturn fpath + \".bak\"\n\t\t\t}\n\t\t},\n\t).\n\t\tWith(handler.WithBuffSize(129)).\n\t\tWithConfigFn(handler.WithLogLevel(slog.ErrorLevel))\n\n\tassert.True(t, c.Compress)\n\tassert.Eq(t, 129, c.BuffSize)\n\tassert.Eq(t, handler.LevelModeValue, c.LevelMode)\n\tassert.Eq(t, slog.ErrorLevel, c.Level)\n\tassert.Eq(t, rotatefile.ModeCreate, c.RotateMode)\n\n\tc.With(handler.WithLevelModeString(\"max\"))\n\tassert.Eq(t, slog.LevelModeMax, c.LevelMode)\n\n\tc.WithConfigFn(handler.WithLevelNames([]string{\"info\", \"debug\"}))\n\tassert.Eq(t, []slog.Level{slog.InfoLevel, slog.DebugLevel}, c.Levels)\n}\n\nfunc TestConfig_fromJSON(t *testing.T) {\n\tc := &handler.Config{}\n\tassert.Eq(t, slog.LevelModeList, c.LevelMode)\n\tassert.Eq(t, rotatefile.ModeRename, c.RotateMode)\n\n\tassert.NoErr(t, c.FromJSON([]byte(`{\n\t\t\"logfile\": \"testdata/config_test.log\",\n\t\t\"level\": \"debug\",\n\t\t\"level_mode\": \"max\",\n\t\t\"levels\": [\"info\", \"debug\"],\n\t\t\"buff_mode\": \"line\",\n\t\t\"buff_size\": 128,\n\t\t\"backup_num\": 3,\n\t\t\"backup_time\": 3600,\n\t\t\"rotate_mode\": \"create\",\n\t\t\"rotate_time\": \"1day\"\n\t}`)))\n\tc.With(handler.WithDebugMode)\n\tdump.P(c)\n\n\tassert.Eq(t, 
slog.LevelModeMax, c.LevelMode)\n\tassert.Eq(t, rotatefile.ModeCreate, c.RotateMode)\n\tassert.Eq(t, \"Every 1 Day\", c.RotateTime.String())\n}\n\nfunc TestWithLevelNamesString(t *testing.T) {\n\tc := handler.NewConfig(handler.WithLevelNamesString(\"info,error\"))\n\tassert.Eq(t, []slog.Level{slog.InfoLevel, slog.ErrorLevel}, c.Levels)\n}\n\nfunc TestWithMaxLevelName(t *testing.T) {\n\tc := handler.NewConfig(handler.WithMaxLevelName(\"error\"))\n\tassert.Eq(t, slog.ErrorLevel, c.Level)\n\tassert.Eq(t, handler.LevelModeValue, c.LevelMode)\n\n\tc1 := handler.NewConfig(handler.WithLevelName(\"warn\"))\n\tassert.Eq(t, slog.WarnLevel, c1.Level)\n\tassert.Eq(t, handler.LevelModeValue, c1.LevelMode)\n}\n\nfunc TestWithRotateMode(t *testing.T) {\n\tc := handler.Config{}\n\n\tc.With(handler.WithRotateModeString(\"rename\"))\n\tassert.Eq(t, rotatefile.ModeRename, c.RotateMode)\n\n\tassert.PanicsErrMsg(t, func() {\n\t\tc.With(handler.WithRotateModeString(\"invalid\"))\n\t}, \"rotatefile: invalid rotate mode: invalid\")\n\n}\n\nfunc TestWithRotateTimeString(t *testing.T) {\n\ttests := []struct {\n\t\tinput    string\n\t\texpected rotatefile.RotateTime\n\t\tpanics   bool\n\t}{\n\t\t{\"1hours\", rotatefile.RotateTime(3600), false},\n\t\t{\"24h\", rotatefile.RotateTime(86400), false},\n\t\t{\"1day\", rotatefile.RotateTime(86400), false},\n\t\t{\"7d\", rotatefile.RotateTime(604800), false},\n\t\t{\"1m\", rotatefile.RotateTime(60), false},\n\t\t{\"30s\", rotatefile.RotateTime(30), false},\n\t\t{\"invalid\", 0, true},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.input, func(t *testing.T) {\n\t\t\tc := &handler.Config{}\n\t\t\tif tt.panics {\n\t\t\t\tassert.Panics(t, func() {\n\t\t\t\t\thandler.WithRotateTimeString(tt.input)(c)\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tassert.NotPanics(t, func() {\n\t\t\t\t\thandler.WithRotateTimeString(tt.input)(c)\n\t\t\t\t})\n\t\t\t\tassert.Eq(t, tt.expected, c.RotateTime)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNewBuilder(t *testing.T) 
{\n\ttestFile := \"testdata/builder.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(testFile))\n\n\tb := handler.NewBuilder().\n\t\tWithLogfile(testFile).\n\t\tWithLogLevels(slog.AllLevels).\n\t\tWithBuffSize(128).\n\t\tWithBuffMode(handler.BuffModeBite).\n\t\tWithMaxSize(fmtutil.OneMByte * 3).\n\t\tWithRotateTime(rotatefile.Every30Min).\n\t\tWithCompress(true).\n\t\tWith(func(c *handler.Config) {\n\t\t\tc.BackupNum = 3\n\t\t})\n\n\tassert.Eq(t, uint(3), b.BackupNum)\n\tassert.Eq(t, handler.BuffModeBite, b.BuffMode)\n\tassert.Eq(t, rotatefile.Every30Min, b.RotateTime)\n\n\th := b.Build()\n\tassert.NotNil(t, h)\n\tassert.NoErr(t, h.Close())\n\n\tb1 := handler.NewBuilder().\n\t\tWithOutput(new(bytes.Buffer)).\n\t\tWithUseJSON(true).\n\t\tWithLogLevel(slog.ErrorLevel).\n\t\tWithLevelMode(handler.LevelModeValue)\n\tassert.Eq(t, handler.LevelModeValue, b1.LevelMode)\n\tassert.Eq(t, slog.ErrorLevel, b1.Level)\n\n\th2 := b1.Build()\n\tassert.NotNil(t, h2)\n\n\tassert.Panics(t, func() {\n\t\thandler.NewBuilder().Build()\n\t})\n}\n\ntype simpleWriter struct {\n\terrOnWrite bool\n}\n\nfunc (w *simpleWriter) Write(p []byte) (n int, err error) {\n\tif w.errOnWrite {\n\t\treturn 0, errorx.Raw(\"write error\")\n\t}\n\treturn len(p), nil\n}\n\ntype closeWriter struct {\n\terrOnWrite bool\n\terrOnClose bool\n}\n\nfunc (w *closeWriter) Close() error {\n\tif w.errOnClose {\n\t\treturn errorx.Raw(\"close error\")\n\t}\n\treturn nil\n}\n\nfunc (w *closeWriter) Write(p []byte) (n int, err error) {\n\tif w.errOnWrite {\n\t\treturn 0, errorx.Raw(\"write error\")\n\t}\n\treturn len(p), nil\n}\n\ntype flushCloseWriter struct {\n\tcloseWriter\n\terrOnFlush bool\n}\n\n// Flush implement stdio.Flusher\nfunc (w *flushCloseWriter) Flush() error {\n\tif w.errOnFlush {\n\t\treturn errorx.Raw(\"flush error\")\n\t}\n\treturn nil\n}\n\ntype syncCloseWriter struct {\n\tcloseWriter\n\terrOnSync bool\n}\n\n// Sync implement stdio.Syncer\nfunc (w *syncCloseWriter) Sync() error {\n\tif w.errOnSync 
{\n\t\treturn errorx.Raw(\"sync error\")\n\t}\n\treturn nil\n}\n\nfunc TestNewBuilder_buildFromWriter(t *testing.T) {\n\tt.Run(\"FlushCloseWriter\", func(t *testing.T) {\n\t\tout := &flushCloseWriter{}\n\t\tout.errOnFlush = true\n\t\th := handler.NewBuilder().\n\t\t\tWithOutput(out).\n\t\t\tWithConfigFn(func(c *handler.Config) {\n\t\t\t\tc.RenameFunc = func(fpath string, num uint) string {\n\t\t\t\t\treturn fpath + \".bak\"\n\t\t\t\t}\n\t\t\t}).\n\t\t\tBuild()\n\t\tassert.Err(t, h.Flush())\n\n\t\t// wrap buffer\n\t\th = handler.NewBuilder().\n\t\t\tWithOutput(out).\n\t\t\tWithBuffSize(128).\n\t\t\tBuild()\n\t\tassert.NoErr(t, h.Close())\n\t\tassert.NoErr(t, h.Flush())\n\t})\n\n\tt.Run(\"CloseWriter\", func(t *testing.T) {\n\t\th := handler.NewBuilder().\n\t\t\tWithOutput(&closeWriter{errOnClose: true}).\n\t\t\tWithBuffSize(128).\n\t\t\tBuild()\n\t\tassert.NotNil(t, h)\n\t\tassert.Err(t, h.Close())\n\t})\n\n\tt.Run(\"SimpleWriter\", func(t *testing.T) {\n\t\th := handler.NewBuilder().\n\t\t\tWithOutput(&simpleWriter{errOnWrite: true}).\n\t\t\tWithBuffSize(128).\n\t\t\tBuild()\n\t\tassert.NotNil(t, h)\n\t\tassert.NoErr(t, h.Close())\n\t})\n}\n"
  },
  {
    "path": "handler/console.go",
    "content": "package handler\n\nimport (\n\t\"os\"\n\n\t\"github.com/gookit/color\"\n\t\"github.com/gookit/slog\"\n)\n\n/********************************************************************************\n * console log handler\n ********************************************************************************/\n\n// ConsoleHandler definition\ntype ConsoleHandler = IOWriterHandler\n\n// NewConsoleWithLF create new ConsoleHandler and with custom slog.LevelFormattable\nfunc NewConsoleWithLF(lf slog.LevelFormattable) *ConsoleHandler {\n\th := NewIOWriterWithLF(os.Stdout, lf)\n\n\t// default use text formatter\n\tf := slog.NewTextFormatter()\n\t// default enable color on console\n\tf.WithEnableColor(color.SupportColor())\n\n\th.SetFormatter(f)\n\treturn h\n}\n\n//\n// ------------- Use max log level -------------\n//\n\n// ConsoleWithMaxLevel create new ConsoleHandler and with max log level\nfunc ConsoleWithMaxLevel(level slog.Level) *ConsoleHandler {\n\treturn NewConsoleWithLF(slog.NewLvFormatter(level))\n}\n\n//\n// ------------- Use multi log levels -------------\n//\n\n// NewConsole create new ConsoleHandler, alias of NewConsoleHandler\nfunc NewConsole(levels []slog.Level) *ConsoleHandler {\n\treturn NewConsoleHandler(levels)\n}\n\n// ConsoleWithLevels create new ConsoleHandler and with limited log levels\nfunc ConsoleWithLevels(levels []slog.Level) *ConsoleHandler {\n\treturn NewConsoleHandler(levels)\n}\n\n// NewConsoleHandler create new ConsoleHandler with limited log levels\nfunc NewConsoleHandler(levels []slog.Level) *ConsoleHandler {\n\treturn NewConsoleWithLF(slog.NewLvsFormatter(levels))\n}\n"
  },
  {
    "path": "handler/console_test.go",
    "content": "package handler_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc TestConsoleWithMaxLevel(t *testing.T) {\n\tl := slog.NewWithHandlers(handler.ConsoleWithMaxLevel(slog.InfoLevel))\n\tl.DoNothingOnPanicFatal()\n\n\tfor _, level := range slog.AllLevels {\n\t\tl.Log(level, \"a test message\")\n\t}\n\tassert.NoErr(t, l.LastErr())\n}\n"
  },
  {
    "path": "handler/email.go",
    "content": "package handler\n\nimport (\n\t\"net/smtp\"\n\t\"strconv\"\n\n\t\"github.com/gookit/slog\"\n)\n\n// EmailOption struct\ntype EmailOption struct {\n\tSMTPHost string `json:\"smtp_host\"` // eg \"smtp.gmail.com\"\n\tSMTPPort int    `json:\"smtp_port\"` // eg 587\n\tFromAddr string `json:\"from_addr\"` // eg \"yourEmail@gmail.com\"\n\tPassword string `json:\"password\"`\n}\n\n// EmailHandler struct\ntype EmailHandler struct {\n\tNopFlushClose\n\tslog.LevelWithFormatter\n\t// From the sender email information\n\tFrom EmailOption\n\t// ToAddresses email list\n\tToAddresses []string\n}\n\n// NewEmailHandler instance\nfunc NewEmailHandler(from EmailOption, toAddresses []string) *EmailHandler {\n\th := &EmailHandler{\n\t\tFrom: from,\n\t\t// to receivers\n\t\tToAddresses: toAddresses,\n\t}\n\n\t// init default log level\n\th.Level = slog.InfoLevel\n\treturn h\n}\n\n// Handle a log record\nfunc (h *EmailHandler) Handle(r *slog.Record) error {\n\tmsgBytes, err := h.Format(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar auth = smtp.PlainAuth(\"\", h.From.FromAddr, h.From.Password, h.From.SMTPHost)\n\taddr := h.From.SMTPHost + \":\" + strconv.Itoa(h.From.SMTPPort)\n\n\treturn smtp.SendMail(addr, auth, h.From.FromAddr, h.ToAddresses, msgBytes)\n}\n"
  },
  {
    "path": "handler/example_test.go",
    "content": "package handler_test\n\nimport (\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc Example_fileHandler() {\n\twithLevels := handler.WithLogLevels(slog.Levels{slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel})\n\th1 := handler.MustFileHandler(\"/tmp/error.log\", withLevels)\n\n\twithLevels = handler.WithLogLevels(slog.Levels{slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel})\n\th2 := handler.MustFileHandler(\"/tmp/info.log\", withLevels)\n\n\tslog.PushHandler(h1)\n\tslog.PushHandler(h2)\n\n\t// add logs\n\tslog.Info(\"info message\")\n\tslog.Error(\"error message\")\n}\n\nfunc Example_rotateFileHandler() {\n\th1 := handler.MustRotateFile(\"/tmp/error.log\", handler.EveryHour, handler.WithLogLevels(slog.DangerLevels))\n\th2 := handler.MustRotateFile(\"/tmp/info.log\", handler.EveryHour, handler.WithLogLevels(slog.NormalLevels))\n\n\tslog.PushHandler(h1)\n\tslog.PushHandler(h2)\n\n\t// add logs\n\tslog.Info(\"info message\")\n\tslog.Error(\"error message\")\n}\n"
  },
  {
    "path": "handler/file.go",
    "content": "package handler\n\nimport (\n\t\"github.com/gookit/goutil/x/basefn\"\n\t\"github.com/gookit/slog\"\n)\n\n// JSONFileHandler create new FileHandler with JSON formatter\nfunc JSONFileHandler(logfile string, fns ...ConfigFn) (*SyncCloseHandler, error) {\n\treturn NewFileHandler(logfile, append(fns, WithUseJSON(true))...)\n}\n\n// NewBuffFileHandler create file handler with buff size\nfunc NewBuffFileHandler(logfile string, buffSize int, fns ...ConfigFn) (*SyncCloseHandler, error) {\n\treturn NewFileHandler(logfile, append(fns, WithBuffSize(buffSize))...)\n}\n\n// MustFileHandler create file handler\nfunc MustFileHandler(logfile string, fns ...ConfigFn) *SyncCloseHandler {\n\treturn basefn.Must(NewFileHandler(logfile, fns...))\n}\n\n// NewFileHandler create new FileHandler\nfunc NewFileHandler(logfile string, fns ...ConfigFn) (h *SyncCloseHandler, err error) {\n\treturn NewEmptyConfig(fns...).With(WithLogfile(logfile)).CreateHandler()\n}\n\n//\n// ------------- simple file handler -------------\n//\n\n// MustSimpleFile new instance\nfunc MustSimpleFile(filepath string, maxLv ...slog.Level) *SyncCloseHandler {\n\treturn basefn.Must(NewSimpleFileHandler(filepath, maxLv...))\n}\n\n// NewSimpleFile new instance\nfunc NewSimpleFile(filepath string, maxLv ...slog.Level) (*SyncCloseHandler, error) {\n\treturn NewSimpleFileHandler(filepath, maxLv...)\n}\n\n// NewSimpleFileHandler instance, default log level is InfoLevel\n//\n// Usage:\n//\n//\th, err := NewSimpleFileHandler(\"/tmp/error.log\")\n//\n// Custom formatter:\n//\n//\th.SetFormatter(slog.NewJSONFormatter())\n//\tslog.PushHandler(h)\n//\tslog.Info(\"log message\")\nfunc NewSimpleFileHandler(filePath string, maxLv ...slog.Level) (*SyncCloseHandler, error) {\n\tfile, err := QuickOpenFile(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th := SyncCloserWithMaxLevel(file, basefn.FirstOr(maxLv, slog.InfoLevel))\n\treturn h, nil\n}\n"
  },
  {
    "path": "handler/file_test.go",
    "content": "package handler_test\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\n// const testSubFile = \"./testdata/subdir/app.log\"\n\nfunc TestNewFileHandler(t *testing.T) {\n\ttestFile := \"testdata/file.log\"\n\th, err := handler.NewFileHandler(testFile, handler.WithFilePerm(0644))\n\tassert.NoErr(t, err)\n\n\tl := slog.NewWithHandlers(h)\n\tl.DoNothingOnPanicFatal()\n\tl.Info(\"info message\")\n\tl.Warn(\"warn message\")\n\tlogAllLevel(l, \"file handler message\")\n\n\tassert.True(t, fsutil.IsFile(testFile))\n\n\tstr, err := fsutil.ReadStringOrErr(testFile)\n\tassert.NoErr(t, err)\n\n\tassert.Contains(t, str, \"[INFO]\")\n\tassert.Contains(t, str, \"info message\")\n\tassert.Contains(t, str, \"[WARNING]\")\n\tassert.Contains(t, str, \"warn message\")\n\n\t// assert.NoErr(t, os.Remove(testFile))\n}\n\nfunc TestMustFileHandler(t *testing.T) {\n\ttestFile := \"testdata/file-must.log\"\n\n\th := handler.MustFileHandler(testFile)\n\tassert.NotEmpty(t, h.Writer())\n\n\tr := newLogRecord(\"test file must handler\")\n\n\terr := h.Handle(r)\n\tassert.NoErr(t, err)\n\tassert.NoErr(t, h.Close())\n\n\tbts := fsutil.MustReadFile(testFile)\n\tstr := string(bts)\n\n\tassert.Contains(t, str, `INFO`)\n\tassert.Contains(t, str, `test file must handler`)\n}\n\nfunc TestNewFileHandler_basic(t *testing.T) {\n\ttestFile := \"testdata/file-basic.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(testFile))\n\n\th, err := handler.NewFileHandler(testFile)\n\tassert.NoErr(t, err)\n\tassert.NotEmpty(t, h.Writer())\n\n\tr := newLogRecord(\"test file handler\")\n\n\terr = h.Handle(r)\n\tassert.NoErr(t, err)\n\tassert.NoErr(t, h.Close())\n\n\tbts := fsutil.MustReadFile(testFile)\n\tstr := string(bts)\n\n\tassert.Contains(t, str, `INFO`)\n\tassert.Contains(t, str, `test file handler`)\n}\n\nfunc TestNewBuffFileHandler(t *testing.T) 
{\n\ttestFile := \"testdata/file-buff.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(testFile))\n\n\th, err := handler.NewBuffFileHandler(testFile, 56)\n\tassert.NoErr(t, err)\n\tassert.NotEmpty(t, h.Writer())\n\n\tr := newLogRecord(\"test file buff handler\")\n\n\terr = h.Handle(r)\n\tassert.NoErr(t, err)\n\tassert.NoErr(t, h.Close())\n\n\tbts := fsutil.MustReadFile(testFile)\n\tstr := string(bts)\n\n\tassert.Contains(t, str, `INFO`)\n\tassert.Contains(t, str, `test file buff handler`)\n}\n\nfunc TestJSONFileHandler(t *testing.T) {\n\ttestFile := \"testdata/file-json.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(testFile))\n\n\th, err := handler.JSONFileHandler(testFile)\n\tassert.NoErr(t, err)\n\n\tr := newLogRecord(\"test json file handler\")\n\terr = h.Handle(r)\n\tassert.NoErr(t, err)\n\n\terr = h.Close()\n\tassert.NoErr(t, err)\n\n\tbts := fsutil.MustReadFile(testFile)\n\tstr := string(bts)\n\n\tassert.Contains(t, str, `\"level\":\"INFO\"`)\n\tassert.Contains(t, str, `\"message\":\"test json file handler\"`)\n}\n\nfunc TestSimpleFile(t *testing.T) {\n\tlogfile := \"./testdata/must-simple-file.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\th := handler.MustSimpleFile(logfile)\n\tassert.True(t, h.IsHandling(slog.InfoLevel))\n\n\t// NewSimpleFile\n\tlogfile = \"./testdata/test-simple-file.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\th2, err := handler.NewSimpleFile(logfile)\n\tassert.NoErr(t, err)\n\tassert.True(t, h2.IsHandling(slog.InfoLevel))\n}\n\nfunc TestNewSimpleFileHandler(t *testing.T) {\n\tlogfile := \"./testdata/simple-file.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\tassert.False(t, fsutil.IsFile(logfile))\n\n\th, err := handler.NewSimpleFileHandler(logfile)\n\tassert.NoErr(t, err)\n\n\tl := slog.NewWithHandlers(h)\n\tl.Info(\"info message\")\n\tl.Warn(\"warn message\")\n\n\tassert.True(t, fsutil.IsFile(logfile))\n\t// assert.NoErr(t, os.Remove(logfile))\n\tbts, err := 
os.ReadFile(logfile)\n\tassert.NoErr(t, err)\n\n\tstr := string(bts)\n\tassert.Contains(t, str, \"[INFO]\")\n\tassert.Contains(t, str, slog.WarnLevel.Name())\n}\n"
  },
  {
    "path": "handler/handler.go",
    "content": "// Package handler provide useful common log handlers.\n//\n// eg: file, console, multi_file, rotate_file, stream, syslog, email\npackage handler\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/slog\"\n)\n\n// DefaultBufferSize sizes the buffer associated with each log file. It's large\n// so that log records can accumulate without the logging thread blocking\n// on disk I/O. The flushDaemon will block instead.\nvar DefaultBufferSize = 8 * 1024\n\nvar (\n\t// DefaultFilePerm perm and flags for create log file\n\tDefaultFilePerm os.FileMode = 0664\n\t// DefaultFileFlags for create/open file\n\tDefaultFileFlags = os.O_CREATE | os.O_WRONLY | os.O_APPEND\n)\n\n// FlushWriter is the interface satisfied by logging destinations.\ntype FlushWriter interface {\n\tFlush() error\n\t// Writer the output writer\n\tio.Writer\n}\n\n// FlushCloseWriter is the interface satisfied by logging destinations.\ntype FlushCloseWriter interface {\n\tFlush() error\n\t// WriteCloser the output writer\n\tio.WriteCloser\n}\n\n// SyncCloseWriter is the interface satisfied by logging destinations.\n// such as os.File\ntype SyncCloseWriter interface {\n\tSync() error\n\t// WriteCloser the output writer\n\tio.WriteCloser\n}\n\n/********************************************************************************\n * Common parts for handler\n ********************************************************************************/\n\n// LevelWithFormatter struct definition\n//\n// - support set log formatter\n// - only support set one log level\n//\n// Deprecated: please use slog.LevelWithFormatter instead.\ntype LevelWithFormatter = slog.LevelWithFormatter\n\n// LevelsWithFormatter struct definition\n//\n// - support set log formatter\n// - support setting multi log levels\n//\n// Deprecated: please use slog.LevelsWithFormatter instead.\ntype LevelsWithFormatter = slog.LevelsWithFormatter\n\n// NopFlushClose no operation.\n//\n// 
provide empty Flush(), Close() methods, useful for tests.\ntype NopFlushClose struct{}\n\n// Flush logs to disk\nfunc (h *NopFlushClose) Flush() error {\n\treturn nil\n}\n\n// Close handler\nfunc (h *NopFlushClose) Close() error {\n\treturn nil\n}\n\n// LockWrapper struct\ntype LockWrapper struct {\n\tsync.Mutex\n\tdisable bool\n}\n\n// Lock it\nfunc (lw *LockWrapper) Lock() {\n\tif !lw.disable {\n\t\tlw.Mutex.Lock()\n\t}\n}\n\n// Unlock it\nfunc (lw *LockWrapper) Unlock() {\n\tif !lw.disable {\n\t\tlw.Mutex.Unlock()\n\t}\n}\n\n// EnableLock enable lock\nfunc (lw *LockWrapper) EnableLock(enable bool) {\n\tlw.disable = !enable\n}\n\n// LockEnabled status\nfunc (lw *LockWrapper) LockEnabled() bool {\n\treturn !lw.disable\n}\n\n// QuickOpenFile like os.OpenFile\nfunc QuickOpenFile(filepath string) (*os.File, error) {\n\treturn fsutil.OpenFile(filepath, DefaultFileFlags, DefaultFilePerm)\n}\n"
  },
  {
    "path": "handler/handler_test.go",
    "content": "package handler_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil\"\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nvar (\n\tsampleData = slog.M{\n\t\t\"name\":  \"inhere\",\n\t\t\"age\":   100,\n\t\t\"skill\": \"go,php,java\",\n\t}\n)\n\nfunc TestMain(m *testing.M) {\n\tfmt.Println(\"TestMain: remove all test files in ./testdata\")\n\tgoutil.PanicErr(fsutil.RemoveSub(\"./testdata\", fsutil.ExcludeNames(\".keep\")))\n\tm.Run()\n}\n\nfunc TestConfig_CreateWriter(t *testing.T) {\n\tcfg := handler.NewEmptyConfig()\n\n\tw, err := cfg.CreateWriter()\n\tassert.Nil(t, w)\n\tassert.Err(t, err)\n\n\th, err := cfg.CreateHandler()\n\tassert.Nil(t, h)\n\tassert.Err(t, err)\n\n\tlogfile := \"./testdata/file-by-config.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\n\tcfg.With(\n\t\thandler.WithBuffMode(handler.BuffModeBite),\n\t\thandler.WithLogLevels(slog.NormalLevels),\n\t\thandler.WithLogfile(logfile),\n\t)\n\n\tw, err = cfg.CreateWriter()\n\tassert.NoErr(t, err)\n\n\t_, err = w.Write([]byte(\"hello, config\"))\n\tassert.NoErr(t, err)\n\n\tbts := fsutil.MustReadFile(logfile)\n\tstr := string(bts)\n\n\tassert.Eq(t, str, \"hello, config\")\n\tassert.NoErr(t, w.Sync())\n\tassert.NoErr(t, w.Close())\n}\n\nfunc TestConfig_RotateWriter(t *testing.T) {\n\tcfg := handler.NewEmptyConfig()\n\n\tw, err := cfg.RotateWriter()\n\tassert.Nil(t, w)\n\tassert.Err(t, err)\n}\n\nfunc TestConsoleHandlerWithColor(t *testing.T) {\n\tl := slog.NewWithHandlers(handler.ConsoleWithLevels(slog.AllLevels))\n\tl.DoNothingOnPanicFatal()\n\tl.Configure(func(l *slog.Logger) {\n\t\tl.ReportCaller = true\n\t})\n\n\tlogAllLevel(l, \"this is a simple log message\")\n\t// logfAllLevel()\n}\n\nfunc TestConsoleHandlerNoColor(t *testing.T) {\n\th := handler.NewConsole(slog.AllLevels)\n\t// no 
color\n\th.TextFormatter().EnableColor = false\n\n\tl := slog.NewWithHandlers(h)\n\tl.DoNothingOnPanicFatal()\n\tl.ReportCaller = true\n\n\tlogAllLevel(l, \"this is a simple log message\")\n}\n\nfunc TestNewEmailHandler(t *testing.T) {\n\tfrom := handler.EmailOption{\n\t\tSMTPHost: \"smtp.gmail.com\",\n\t\tSMTPPort: 587,\n\t\tFromAddr: \"someone@gmail.com\",\n\t}\n\n\th := handler.NewEmailHandler(from, []string{\n\t\t\"another@gmail.com\",\n\t})\n\n\tassert.Eq(t, slog.InfoLevel, h.Level)\n\n\t// handle error\n\th.SetFormatter(newTestFormatter(true))\n\tassert.Err(t, h.Handle(newLogRecord(\"test email handler\")))\n}\n\nfunc TestLevelWithFormatter(t *testing.T) {\n\tlf := handler.LevelWithFormatter{Level: slog.InfoLevel}\n\n\tassert.True(t, lf.IsHandling(slog.ErrorLevel))\n\tassert.True(t, lf.IsHandling(slog.InfoLevel))\n\tassert.False(t, lf.IsHandling(slog.DebugLevel))\n}\n\nfunc TestLevelsWithFormatter(t *testing.T) {\n\tlsf := handler.LevelsWithFormatter{Levels: slog.NormalLevels}\n\n\tassert.False(t, lsf.IsHandling(slog.ErrorLevel))\n\tassert.True(t, lsf.IsHandling(slog.InfoLevel))\n\tassert.True(t, lsf.IsHandling(slog.DebugLevel))\n}\n\nfunc TestNopFlushClose_Flush(t *testing.T) {\n\tnfc := handler.NopFlushClose{}\n\n\tassert.NoErr(t, nfc.Flush())\n\tassert.NoErr(t, nfc.Close())\n}\n\nfunc TestLockWrapper_Lock(t *testing.T) {\n\tlw := &handler.LockWrapper{}\n\tassert.True(t, lw.LockEnabled())\n\n\tlw.EnableLock(true)\n\tassert.True(t, lw.LockEnabled())\n\n\ta := 1\n\tlw.Lock()\n\ta++\n\tlw.Unlock()\n\tassert.Eq(t, 2, a)\n}\n\nfunc logAllLevel(log slog.SLogger, msg string) {\n\tfor _, level := range slog.AllLevels {\n\t\tlog.Log(level, msg)\n\t}\n}\n\nfunc newLogRecord(msg string) *slog.Record {\n\tr := &slog.Record{\n\t\tChannel: \"handler_test\",\n\t\tLevel:   slog.InfoLevel,\n\t\tMessage: msg,\n\t\tTime:    slog.DefaultClockFn.Now(),\n\t\tData:    sampleData,\n\t\tExtra: map[string]any{\n\t\t\t\"source\":     \"linux\",\n\t\t\t\"extra_key0\": 
\"hello\",\n\t\t\t\"sub\":        slog.M{\"sub_key1\": \"val0\"},\n\t\t},\n\t}\n\n\tr.Init(false)\n\treturn r\n}\n\ntype testHandler struct {\n\terrOnHandle bool\n\terrOnFlush  bool\n\terrOnClose  bool\n}\n\nfunc newTestHandler() *testHandler {\n\treturn &testHandler{}\n}\n\n// func (h testHandler) Reset() {\n// \th.errOnHandle = false\n// \th.errOnFlush = false\n// \th.errOnClose = false\n// }\n\nfunc (h testHandler) IsHandling(_ slog.Level) bool {\n\treturn true\n}\n\nfunc (h testHandler) Close() error {\n\tif h.errOnClose {\n\t\treturn errorx.Raw(\"close error\")\n\t}\n\treturn nil\n}\n\nfunc (h testHandler) Flush() error {\n\tif h.errOnFlush {\n\t\treturn errorx.Raw(\"flush error\")\n\t}\n\treturn nil\n}\n\nfunc (h testHandler) Handle(_ *slog.Record) error {\n\tif h.errOnHandle {\n\t\treturn errorx.Raw(\"handle error\")\n\t}\n\treturn nil\n}\n\ntype testFormatter struct {\n\terrOnFormat bool\n}\n\nfunc newTestFormatter(errOnFormat ...bool) *testFormatter {\n\treturn &testFormatter{\n\t\terrOnFormat: len(errOnFormat) > 0 && errOnFormat[0],\n\t}\n}\n\nfunc (f testFormatter) Format(r *slog.Record) ([]byte, error) {\n\tif f.errOnFormat {\n\t\treturn nil, errorx.Raw(\"format error\")\n\t}\n\treturn []byte(r.Message), nil\n}\n"
  },
  {
    "path": "handler/rotatefile.go",
    "content": "package handler\n\nimport (\n\t\"github.com/gookit/goutil/x/basefn\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\n// NewRotateFileHandler instance. It supports splitting log files by time and size\nfunc NewRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) {\n\tcfg := NewConfig(fns...).With(WithLogfile(logfile), WithRotateTime(rt))\n\n\twriter, err := cfg.RotateWriter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th := NewSyncCloseHandler(writer, cfg.Levels)\n\treturn h, nil\n}\n\n// MustRotateFile handler instance, will panic on create error\nfunc MustRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) *SyncCloseHandler {\n\treturn basefn.Must(NewRotateFileHandler(logfile, rt, fns...))\n}\n\n// NewRotateFile instance. alias of NewRotateFileHandler()\nfunc NewRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) {\n\treturn NewRotateFileHandler(logfile, rt, fns...)\n}\n\n//\n// ---------------------------------------------------------------------------\n// rotate file by size\n// ---------------------------------------------------------------------------\n//\n\n// MustSizeRotateFile instance\nfunc MustSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) *SyncCloseHandler {\n\treturn basefn.Must(NewSizeRotateFileHandler(logfile, maxSize, fns...))\n}\n\n// NewSizeRotateFile instance\nfunc NewSizeRotateFile(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error) {\n\treturn NewSizeRotateFileHandler(logfile, maxSize, fns...)\n}\n\n// NewSizeRotateFileHandler instance, default close rotate by time.\nfunc NewSizeRotateFileHandler(logfile string, maxSize int, fns ...ConfigFn) (*SyncCloseHandler, error) {\n\t// close rotate by time.\n\tfns = append(fns, WithMaxSize(uint64(maxSize)))\n\treturn NewRotateFileHandler(logfile, 0, fns...)\n}\n\n//\n// 
---------------------------------------------------------------------------\n// rotate log file by time\n// ---------------------------------------------------------------------------\n//\n\n// RotateTime rotate log file by time.\n//\n// EveryDay:\n//   - \"error.log.20201223\"\n//\n// EveryHour, Every30Minutes, EveryMinute:\n//   - \"error.log.20201223_1500\"\n//   - \"error.log.20201223_1530\"\n//   - \"error.log.20201223_1523\"\n//\n// Deprecated: please use rotatefile.RotateTime\ntype RotateTime = rotatefile.RotateTime\n\n// Deprecated: Please use define constants on pkg rotatefile. e.g. rotatefile.EveryDay\nconst (\n\tEveryDay  = rotatefile.EveryDay\n\tEveryHour = rotatefile.EveryHour\n\n\tEvery30Minutes = rotatefile.Every30Min\n\tEvery15Minutes = rotatefile.Every15Min\n\n\tEveryMinute = rotatefile.EveryMinute\n\tEverySecond = rotatefile.EverySecond // only use for tests\n)\n\n// MustTimeRotateFile instance\nfunc MustTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) *SyncCloseHandler {\n\treturn basefn.Must(NewTimeRotateFileHandler(logfile, rt, fns...))\n}\n\n// NewTimeRotateFile instance\nfunc NewTimeRotateFile(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) {\n\treturn NewTimeRotateFileHandler(logfile, rt, fns...)\n}\n\n// NewTimeRotateFileHandler instance, default close rotate by size\nfunc NewTimeRotateFileHandler(logfile string, rt rotatefile.RotateTime, fns ...ConfigFn) (*SyncCloseHandler, error) {\n\t// default close rotate by size: WithMaxSize(0)\n\treturn NewRotateFileHandler(logfile, rt, append(fns, WithMaxSize(0))...)\n}\n"
  },
  {
    "path": "handler/rotatefile_test.go",
    "content": "package handler_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/timex\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n\t\"github.com/gookit/slog/internal\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\nfunc TestNewRotateFileHandler(t *testing.T) {\n\t// by size\n\tlogfile := \"./testdata/both-rotate-bysize.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\n\th, err := handler.NewRotateFile(logfile, handler.EveryMinute, handler.WithMaxSize(128))\n\tassert.NoErr(t, err)\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tl := slog.NewWithHandlers(h)\n\tl.ReportCaller = true\n\n\tfor i := 0; i < 3; i++ {\n\t\tl.Info(\"info\", \"message\", i)\n\t\tl.Warn(\"warn message\", i)\n\t}\n\tl.MustClose()\n\n\t// by time\n\tlogfile = \"./testdata/both-rotate-bytime.log\"\n\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\n\th = handler.MustRotateFile(logfile, handler.EverySecond)\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tl = slog.NewWithHandlers(h)\n\n\tfor i := 0; i < 3; i++ {\n\t\tl.Info(\"info\", \"message\", i)\n\t\tl.Warn(\"warn message\", i)\n\t\tfmt.Println(\"second \", i+1)\n\t\ttime.Sleep(time.Second * 1)\n\t}\n\tl.Error(\"error message\")\n\n\tassert.NoErr(t, l.FlushAll())\n}\n\nfunc TestNewSizeRotateFileHandler(t *testing.T) {\n\tt.Run(\"NewSizeRotateFile\", func(t *testing.T) {\n\t\tlogfile := \"./testdata/size-rotate-file.log\"\n\t\tassert.NoErr(t, fsutil.DeleteIfFileExist(logfile))\n\n\t\th, err := handler.NewSizeRotateFile(logfile, 468, handler.WithBuffSize(256))\n\t\tassert.NoErr(t, err)\n\t\tassert.True(t, fsutil.IsFile(logfile))\n\n\t\tl := slog.NewWithHandlers(h)\n\t\tl.ReportCaller = true\n\t\tl.CallerFlag = slog.CallerFlagFull\n\n\t\tfor i := 0; i < 4; i++ {\n\t\t\tl.Info(\"this is a info\", \"message, index=\", i)\n\t\t\tl.Warn(\"this is a warning message, index=\", 
i)\n\t\t}\n\n\t\tassert.NoErr(t, l.Close())\n\t\tcheckLogFileContents(t, logfile)\n\t})\n\n\tt.Run(\"MustSizeRotateFile\", func(t *testing.T) {\n\t\tlogfile := \"./testdata/must-size-rotate-file.log\"\n\t\th := handler.MustSizeRotateFile(logfile, 128, handler.WithBuffSize(128))\n\t\th.SetFormatter(slog.NewJSONFormatter())\n\t\terr := h.Handle(newLogRecord(\"this is a info message\"))\n\t\tassert.NoErr(t, err)\n\n\t\tfiles := fsutil.Glob(internal.BuildGlobPattern(logfile))\n\t\tassert.Len(t, files, 2)\n\t})\n}\n\nfunc TestNewTimeRotateFileHandler_EveryDay(t *testing.T) {\n\tlogfile := \"./testdata/time-rotate_EveryDay.log\"\n\tnewFile := internal.AddSuffix2path(logfile, \"20221116\")\n\n\tclock := rotatefile.NewMockClock(\"2022-11-16 23:59:57\")\n\toptions := []handler.ConfigFn{\n\t\thandler.WithBuffSize(128),\n\t\thandler.WithTimeClock(clock),\n\t}\n\n\th := handler.MustTimeRotateFile(logfile, handler.EveryDay, options...)\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tl := slog.NewWithHandlers(h)\n\tl.ReportCaller = true\n\tl.TimeClock = clock.Now\n\n\tfor i := 0; i < 6; i++ {\n\t\tl.WithData(sampleData).Info(\"the th:\", i, \"info message\")\n\t\tl.Warnf(\"the th:%d warning message text\", i)\n\t\tfmt.Println(\"log number \", (i+1)*2)\n\t\tclock.Add(time.Second * 1)\n\t}\n\n\tl.MustClose()\n\tcheckLogFileContents(t, logfile)\n\tcheckLogFileContents(t, newFile)\n}\n\nfunc TestNewTimeRotateFileHandler_EveryHour(t *testing.T) {\n\tclock := rotatefile.NewMockClock(\"2022-04-28 20:59:58\")\n\tlogfile := \"./testdata/time-rotate_EveryHour.log\"\n\tnewFile := internal.AddSuffix2path(logfile, timex.DateFormat(clock.Now(), \"Ymd_H00\"))\n\n\toptions := []handler.ConfigFn{\n\t\thandler.WithTimeClock(clock),\n\t\thandler.WithBuffSize(0),\n\t}\n\th, err := handler.NewTimeRotateFile(logfile, rotatefile.EveryHour, options...)\n\n\tassert.NoErr(t, err)\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tl := slog.NewWithHandlers(h)\n\tl.ReportCaller = true\n\tl.TimeClock = 
clock.Now\n\n\tfor i := 0; i < 6; i++ {\n\t\tl.WithData(sampleData).Info(\"the th:\", i, \"info message\")\n\t\tl.Warnf(\"the th:%d warning message text\", i)\n\t\tfmt.Println(\"log number \", (i+1)*2)\n\t\tclock.Add(time.Second * 1)\n\t}\n\tl.MustClose()\n\n\tcheckLogFileContents(t, logfile)\n\tcheckLogFileContents(t, newFile)\n}\n\nfunc TestNewTimeRotateFileHandler_someSeconds(t *testing.T) {\n\tlogfile := \"./testdata/time-rotate-Seconds.log\"\n\tassert.NoErr(t, fsutil.DeleteIfExist(logfile))\n\th, err := handler.NewTimeRotateFileHandler(logfile, handler.EverySecond)\n\n\tassert.NoErr(t, err)\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tl := slog.NewWithHandlers(h)\n\tl.ReportCaller = true\n\n\tfor i := 0; i < 3; i++ {\n\t\tl.Info(\"info\", \"message\", i)\n\t\tl.Warn(\"warning message\", i)\n\t\tfmt.Println(\"second \", i+1)\n\t\ttime.Sleep(time.Second * 1)\n\t}\n\tl.MustClose()\n\t// assert.NoErr(t, os.Remove(fpath))\n}\n\nfunc checkLogFileContents(t *testing.T, logfile string) {\n\tassert.True(t, fsutil.IsFile(logfile))\n\n\tbts, err := os.ReadFile(logfile)\n\tassert.NoErr(t, err)\n\n\tstr := string(bts)\n\tassert.Contains(t, str, \"[INFO]\")\n\tassert.Contains(t, str, \"info message\")\n\tassert.Contains(t, str, \"[WARNING]\")\n\tassert.Contains(t, str, \"warning message\")\n}\n"
  },
  {
    "path": "handler/syslog.go",
    "content": "//go:build !windows && !plan9\n\npackage handler\n\nimport (\n\t\"log/syslog\"\n\n\t\"github.com/gookit/slog\"\n)\n\n// SysLogOpt for syslog handler\ntype SysLogOpt struct {\n\t// Tag syslog tag\n\tTag string\n\t// Priority syslog priority\n\tPriority syslog.Priority\n\t// Network syslog network\n\tNetwork string\n\t// Raddr syslog address\n\tRaddr string\n}\n\n// SysLogHandler struct\ntype SysLogHandler struct {\n\tslog.LevelWithFormatter\n\twriter *syslog.Writer\n}\n\n// NewSysLogHandler instance\nfunc NewSysLogHandler(priority syslog.Priority, tag string) (*SysLogHandler, error) {\n\treturn NewSysLog(&SysLogOpt{\n\t\tPriority: priority,\n\t\tTag:      tag,\n\t})\n}\n\n// NewSysLog handler instance with all custom options.\nfunc NewSysLog(opt *SysLogOpt) (*SysLogHandler, error) {\n\tslWriter, err := syslog.Dial(opt.Network, opt.Raddr, opt.Priority, opt.Tag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th := &SysLogHandler{\n\t\twriter: slWriter,\n\t}\n\n\t// init default log level\n\th.Level = slog.InfoLevel\n\treturn h, nil\n}\n\n// Handle a log record\nfunc (h *SysLogHandler) Handle(record *slog.Record) error {\n\tbts, err := h.Formatter().Format(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts := string(bts)\n\n\t// write log by level\n\tswitch record.Level {\n\tcase slog.DebugLevel, slog.TraceLevel:\n\t\treturn h.writer.Debug(s)\n\tcase slog.NoticeLevel:\n\t\treturn h.writer.Notice(s)\n\tcase slog.WarnLevel:\n\t\treturn h.writer.Warning(s)\n\tcase slog.ErrorLevel:\n\t\treturn h.writer.Err(s)\n\tcase slog.FatalLevel:\n\t\treturn h.writer.Crit(s)\n\tcase slog.PanicLevel:\n\t\treturn h.writer.Emerg(s)\n\tdefault: // as info level\n\t\treturn h.writer.Info(s)\n\t}\n}\n\n// Close handler\nfunc (h *SysLogHandler) Close() error {\n\treturn h.writer.Close()\n}\n\n// Flush handler\nfunc (h *SysLogHandler) Flush() error {\n\treturn nil\n}\n"
  },
  {
    "path": "handler/syslog_test.go",
    "content": "//go:build !windows && !plan9\n\npackage handler_test\n\nimport (\n\t\"log/syslog\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc TestNewSysLogHandler(t *testing.T) {\n\th, err := handler.NewSysLogHandler(syslog.LOG_INFO, \"slog\")\n\tassert.NoErr(t, err)\n\n\terr = h.Handle(newLogRecord(\"test syslog handler\"))\n\tassert.NoErr(t, err)\n\n\tassert.NoErr(t, h.Flush())\n\tassert.NoErr(t, h.Close())\n}\n"
  },
  {
    "path": "handler/write_close_flusher.go",
    "content": "package handler\n\nimport (\n\t\"github.com/gookit/slog\"\n)\n\n// FlushCloseHandler definition\ntype FlushCloseHandler struct {\n\tslog.LevelFormattable\n\tOutput FlushCloseWriter\n}\n\n// NewFlushCloserWithLF create new FlushCloseHandler, with custom slog.LevelFormattable\nfunc NewFlushCloserWithLF(out FlushCloseWriter, lf slog.LevelFormattable) *FlushCloseHandler {\n\treturn &FlushCloseHandler{\n\t\tOutput: out,\n\t\t// init formatter and level handle\n\t\tLevelFormattable: lf,\n\t}\n}\n\n//\n// ------------- Use max log level -------------\n//\n\n// FlushCloserWithMaxLevel create new FlushCloseHandler, with max log level\nfunc FlushCloserWithMaxLevel(out FlushCloseWriter, maxLevel slog.Level) *FlushCloseHandler {\n\treturn NewFlushCloserWithLF(out, slog.NewLvFormatter(maxLevel))\n}\n\n//\n// ------------- Use multi log levels -------------\n//\n\n// NewFlushCloser create new FlushCloseHandler, alias of NewFlushCloseHandler()\nfunc NewFlushCloser(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler {\n\treturn NewFlushCloseHandler(out, levels)\n}\n\n// FlushCloserWithLevels create new FlushCloseHandler, alias of NewFlushCloseHandler()\nfunc FlushCloserWithLevels(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler {\n\treturn NewFlushCloseHandler(out, levels)\n}\n\n// NewFlushCloseHandler create new FlushCloseHandler\n//\n// Usage:\n//\n//\tbuf := new(byteutil.Buffer)\n//\th := handler.NewFlushCloseHandler(&buf, slog.AllLevels)\n//\n//\tf, err := os.OpenFile(\"my.log\", ...)\n//\th := handler.NewFlushCloseHandler(f, slog.AllLevels)\nfunc NewFlushCloseHandler(out FlushCloseWriter, levels []slog.Level) *FlushCloseHandler {\n\treturn NewFlushCloserWithLF(out, slog.NewLvsFormatter(levels))\n}\n\n// Close the handler\nfunc (h *FlushCloseHandler) Close() error {\n\tif err := h.Flush(); err != nil {\n\t\treturn err\n\t}\n\treturn h.Output.Close()\n}\n\n// Flush the handler\nfunc (h *FlushCloseHandler) Flush() error {\n\treturn 
h.Output.Flush()\n}\n\n// Handle log record\nfunc (h *FlushCloseHandler) Handle(record *slog.Record) error {\n\tbts, err := h.Formatter().Format(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = h.Output.Write(bts)\n\treturn err\n}\n"
  },
  {
    "path": "handler/write_close_syncer.go",
    "content": "package handler\n\nimport (\n\t\"io\"\n\n\t\"github.com/gookit/slog\"\n)\n\n// SyncCloseHandler definition\ntype SyncCloseHandler struct {\n\tslog.LevelFormattable\n\tOutput SyncCloseWriter\n}\n\n// NewSyncCloserWithLF create new SyncCloseHandler, with custom slog.LevelFormattable\nfunc NewSyncCloserWithLF(out SyncCloseWriter, lf slog.LevelFormattable) *SyncCloseHandler {\n\treturn &SyncCloseHandler{\n\t\tOutput: out,\n\t\t// init formatter and level handle\n\t\tLevelFormattable: lf,\n\t}\n}\n\n//\n// ------------- Use max log level -------------\n//\n\n// SyncCloserWithMaxLevel create new SyncCloseHandler, with max log level\nfunc SyncCloserWithMaxLevel(out SyncCloseWriter, maxLevel slog.Level) *SyncCloseHandler {\n\treturn NewSyncCloserWithLF(out, slog.NewLvFormatter(maxLevel))\n}\n\n//\n// ------------- Use multi log levels -------------\n//\n\n// NewSyncCloser create new SyncCloseHandler, alias of NewSyncCloseHandler()\nfunc NewSyncCloser(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler {\n\treturn NewSyncCloseHandler(out, levels)\n}\n\n// SyncCloserWithLevels create new SyncCloseHandler, alias of NewSyncCloseHandler()\nfunc SyncCloserWithLevels(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler {\n\treturn NewSyncCloseHandler(out, levels)\n}\n\n// NewSyncCloseHandler create new SyncCloseHandler with limited log levels\n//\n// Usage:\n//\n//\tf, err := os.OpenFile(\"my.log\", ...)\n//\th := handler.NewSyncCloseHandler(f, slog.AllLevels)\nfunc NewSyncCloseHandler(out SyncCloseWriter, levels []slog.Level) *SyncCloseHandler {\n\treturn NewSyncCloserWithLF(out, slog.NewLvsFormatter(levels))\n}\n\n// Close the handler\nfunc (h *SyncCloseHandler) Close() error {\n\tif err := h.Flush(); err != nil {\n\t\treturn err\n\t}\n\treturn h.Output.Close()\n}\n\n// Flush the handler\nfunc (h *SyncCloseHandler) Flush() error {\n\treturn h.Output.Sync()\n}\n\n// Writer of the handler\nfunc (h *SyncCloseHandler) Writer() io.Writer {\n\treturn 
h.Output\n}\n\n// Handle log record\nfunc (h *SyncCloseHandler) Handle(record *slog.Record) error {\n\tbts, err := h.Formatter().Format(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = h.Output.Write(bts)\n\treturn err\n}\n"
  },
  {
    "path": "handler/write_closer.go",
    "content": "package handler\n\nimport (\n\t\"io\"\n\n\t\"github.com/gookit/slog\"\n)\n\n// WriteCloserHandler definition\ntype WriteCloserHandler struct {\n\tslog.LevelFormattable\n\tOutput io.WriteCloser\n}\n\n// NewWriteCloserWithLF create new WriteCloserHandler and with custom slog.LevelFormattable\nfunc NewWriteCloserWithLF(out io.WriteCloser, lf slog.LevelFormattable) *WriteCloserHandler {\n\treturn &WriteCloserHandler{\n\t\tOutput: out,\n\t\t// init formatter and level handle\n\t\tLevelFormattable: lf,\n\t}\n}\n\n// WriteCloserWithMaxLevel create new WriteCloserHandler and with max log level\nfunc WriteCloserWithMaxLevel(out io.WriteCloser, maxLevel slog.Level) *WriteCloserHandler {\n\treturn NewWriteCloserWithLF(out, slog.NewLvFormatter(maxLevel))\n}\n\n//\n// ------------- Use multi log levels -------------\n//\n\n// WriteCloserWithLevels create a new instance and with limited log levels\nfunc WriteCloserWithLevels(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler {\n\t// h := &WriteCloserHandler{Output: out}\n\t// h.LimitLevels(levels)\n\treturn NewWriteCloserHandler(out, levels)\n}\n\n// NewWriteCloser create a new instance\nfunc NewWriteCloser(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler {\n\treturn NewWriteCloserHandler(out, levels)\n}\n\n// NewWriteCloserHandler create new WriteCloserHandler\n//\n// Usage:\n//\n//\tbuf := new(bytes.Buffer)\n//\th := handler.NewIOWriteCloserHandler(&buf, slog.AllLevels)\n//\n//\tf, err := os.OpenFile(\"my.log\", ...)\n//\th := handler.NewIOWriteCloserHandler(f, slog.AllLevels)\nfunc NewWriteCloserHandler(out io.WriteCloser, levels []slog.Level) *WriteCloserHandler {\n\treturn NewWriteCloserWithLF(out, slog.NewLvsFormatter(levels))\n}\n\n// Close the handler\nfunc (h *WriteCloserHandler) Close() error {\n\treturn h.Output.Close()\n}\n\n// Flush the handler\nfunc (h *WriteCloserHandler) Flush() error {\n\treturn nil\n}\n\n// Handle log record\nfunc (h *WriteCloserHandler) Handle(record 
*slog.Record) error {\n\tbts, err := h.Formatter().Format(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = h.Output.Write(bts)\n\treturn err\n}\n"
  },
  {
    "path": "handler/writer.go",
    "content": "package handler\n\nimport (\n\t\"io\"\n\n\t\"github.com/gookit/slog\"\n)\n\n// IOWriterHandler definition\ntype IOWriterHandler struct {\n\tNopFlushClose\n\tslog.LevelFormattable\n\tOutput io.Writer\n}\n\n// TextFormatter get the formatter\nfunc (h *IOWriterHandler) TextFormatter() *slog.TextFormatter {\n\treturn h.Formatter().(*slog.TextFormatter)\n}\n\n// Handle log record\nfunc (h *IOWriterHandler) Handle(record *slog.Record) error {\n\tbts, err := h.Formatter().Format(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = h.Output.Write(bts)\n\treturn err\n}\n\n// NewIOWriterWithLF create new IOWriterHandler, with custom slog.LevelFormattable\nfunc NewIOWriterWithLF(out io.Writer, lf slog.LevelFormattable) *IOWriterHandler {\n\treturn &IOWriterHandler{\n\t\tOutput: out,\n\t\t// init formatter and level handle\n\t\tLevelFormattable: lf,\n\t}\n}\n\n//\n// ------------- Use max log level -------------\n//\n\n// IOWriterWithMaxLevel create new IOWriterHandler, with max log level\n//\n// Usage:\n//\n//\t\tbuf := new(bytes.Buffer)\n//\t\th := handler.IOWriterWithMaxLevel(buf, slog.InfoLevel)\n//\t slog.AddHandler(h)\n//\t\tslog.Info(\"info message\")\nfunc IOWriterWithMaxLevel(out io.Writer, maxLevel slog.Level) *IOWriterHandler {\n\treturn NewIOWriterWithLF(out, slog.NewLvFormatter(maxLevel))\n}\n\n//\n// ------------- Use multi log levels -------------\n//\n\n// NewIOWriter create a new instance and with limited log levels\nfunc NewIOWriter(out io.Writer, levels []slog.Level) *IOWriterHandler {\n\treturn NewIOWriterHandler(out, levels)\n}\n\n// IOWriterWithLevels create a new instance and with limited log levels\nfunc IOWriterWithLevels(out io.Writer, levels []slog.Level) *IOWriterHandler {\n\treturn NewIOWriterHandler(out, levels)\n}\n\n// NewIOWriterHandler create new IOWriterHandler\n//\n// Usage:\n//\n//\tbuf := new(bytes.Buffer)\n//\th := handler.NewIOWriterHandler(&buf, slog.AllLevels)\n//\n//\tf, err := os.OpenFile(\"my.log\", 
...)\n//\th := handler.NewIOWriterHandler(f, slog.AllLevels)\nfunc NewIOWriterHandler(out io.Writer, levels []slog.Level) *IOWriterHandler {\n\treturn NewIOWriterWithLF(out, slog.NewLvsFormatter(levels))\n}\n\n// SimpleHandler definition. alias of IOWriterHandler\ntype SimpleHandler = IOWriterHandler\n\n// NewHandler create a new instance\nfunc NewHandler(out io.Writer, maxLevel slog.Level) *SimpleHandler {\n\treturn NewSimpleHandler(out, maxLevel)\n}\n\n// NewSimple create a new instance\nfunc NewSimple(out io.Writer, maxLevel slog.Level) *SimpleHandler {\n\treturn NewSimpleHandler(out, maxLevel)\n}\n\n// SimpleWithLevels create new simple handler, with log levels\nfunc SimpleWithLevels(out io.Writer, levels []slog.Level) *IOWriterHandler {\n\treturn NewIOWriterHandler(out, levels)\n}\n\n// NewSimpleHandler create new SimpleHandler\n//\n// Usage:\n//\n//\tbuf := new(bytes.Buffer)\n//\th := handler.NewSimpleHandler(&buf, slog.InfoLevel)\n//\n//\tf, err := os.OpenFile(\"my.log\", ...)\n//\th := handler.NewSimpleHandler(f, slog.InfoLevel)\nfunc NewSimpleHandler(out io.Writer, maxLevel slog.Level) *IOWriterHandler {\n\treturn IOWriterWithMaxLevel(out, maxLevel)\n}\n"
  },
  {
    "path": "handler/writer_test.go",
    "content": "package handler_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/x/fakeobj\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc TestNewIOWriter(t *testing.T) {\n\tw := new(bytes.Buffer)\n\th := handler.NewIOWriter(w, slog.NormalLevels)\n\n\tassert.True(t, h.IsHandling(slog.NoticeLevel))\n\n\tr := newLogRecord(\"test io.writer handler\")\n\tassert.NoErr(t, h.Handle(r))\n\tassert.NoErr(t, h.Flush())\n\n\tstr := w.String()\n\tassert.Contains(t, str, \"test io.writer handler\")\n\n\tassert.NoErr(t, h.Close())\n}\n\nfunc TestNewSyncCloser(t *testing.T) {\n\tlogfile := \"./testdata/sync_closer.log\"\n\n\tf, err := handler.QuickOpenFile(logfile)\n\tassert.NoErr(t, err)\n\n\th := handler.NewSyncCloser(f, slog.DangerLevels)\n\n\tassert.True(t, h.IsHandling(slog.WarnLevel))\n\tassert.False(t, h.IsHandling(slog.InfoLevel))\n\n\tr := newLogRecord(\"test sync closer handler\")\n\tr.Level = slog.ErrorLevel\n\n\terr = h.Handle(r)\n\tassert.NoErr(t, err)\n\tassert.NoErr(t, h.Flush())\n\n\tstr := fsutil.ReadString(logfile)\n\tassert.Contains(t, str, \"test sync closer handler\")\n\tassert.NoErr(t, h.Close())\n\n\tt.Run(\"err on sync\", func(t *testing.T) {\n\t\tw := &syncCloseWriter{}\n\t\tw.errOnSync = true\n\t\th = handler.SyncCloserWithLevels(w, slog.NormalLevels)\n\n\t\tassert.Err(t, h.Flush())\n\t\tassert.Err(t, h.Close())\n\t})\n\n\t// test handle error\n\th.SetFormatter(newTestFormatter(true))\n\tassert.Err(t, h.Handle(r))\n}\n\nfunc TestNewWriteCloser(t *testing.T) {\n\tw := fakeobj.NewWriter()\n\th := handler.NewWriteCloser(w, slog.NormalLevels)\n\n\tassert.True(t, h.IsHandling(slog.NoticeLevel))\n\n\tr := newLogRecord(\"test writeCloser handler\")\n\tassert.NoErr(t, h.Handle(r))\n\tassert.NoErr(t, h.Flush())\n\n\tstr := w.String()\n\tassert.Contains(t, str, \"test writeCloser handler\")\n\tassert.NoErr(t, 
h.Close())\n\n\tt.Run(\"use max level\", func(t *testing.T) {\n\t\th = handler.WriteCloserWithMaxLevel(w, slog.WarnLevel)\n\t\tr = newLogRecord(\"test max level\")\n\t\tassert.False(t, h.IsHandling(r.Level))\n\n\t\tr.Level = slog.ErrorLevel\n\t\tassert.True(t, h.IsHandling(r.Level))\n\t})\n\n\t// test handle error\n\tt.Run(\"handle error\", func(t *testing.T) {\n\t\th = handler.WriteCloserWithLevels(w, slog.NormalLevels)\n\t\th.SetFormatter(newTestFormatter(true))\n\t\tassert.Err(t, h.Handle(r))\n\t})\n}\n\nfunc TestNewFlushCloser(t *testing.T) {\n\tw := fakeobj.NewWriter()\n\th := handler.NewFlushCloser(w, slog.AllLevels)\n\tw.WriteString(\"before flush\\n\")\n\n\tr := newLogRecord(\"TestNewFlushCloser\")\n\tassert.NoErr(t, h.Handle(r))\n\n\tstr := w.ResetGet()\n\tassert.Contains(t, str, \"TestNewFlushCloser\")\n\n\tassert.NoErr(t, h.Flush())\n\tassert.NoErr(t, h.Close())\n\n\tt.Run(\"ErrOnFlush\", func(t *testing.T) {\n\t\tw.ErrOnFlush = true\n\t\tassert.Err(t, h.Flush())\n\t\tassert.Err(t, h.Close())\n\t})\n\n\tt.Run(\"With max level\", func(t *testing.T) {\n\t\th = handler.FlushCloserWithMaxLevel(w, slog.WarnLevel)\n\t\tr = newLogRecord(\"test max level\")\n\t\tassert.False(t, h.IsHandling(r.Level))\n\t\tassert.Empty(t, w.String())\n\n\t\tr.Level = slog.ErrorLevel\n\t\tassert.True(t, h.IsHandling(r.Level))\n\t\tassert.NoErr(t, h.Handle(r))\n\t\tassert.NotEmpty(t, w.String())\n\t})\n\n\t// test handle error\n\th = handler.FlushCloserWithMaxLevel(w, slog.WarnLevel)\n\th.SetFormatter(newTestFormatter(true))\n\tassert.Err(t, h.Handle(r))\n}\n\nfunc TestNewSimpleHandler(t *testing.T) {\n\tbuf := fakeobj.NewWriter()\n\n\th := handler.NewSimple(buf, slog.InfoLevel)\n\tr := newLogRecord(\"test simple handler\")\n\tassert.NoErr(t, h.Handle(r))\n\n\ts := buf.String()\n\tbuf.Reset()\n\tfmt.Print(s)\n\tassert.Contains(t, s, \"test simple handler\")\n\n\tassert.NoErr(t, h.Flush())\n\tassert.NoErr(t, h.Close())\n\n\th = handler.NewHandler(buf, slog.InfoLevel)\n\tr = 
newLogRecord(\"test simple handler2\")\n\tassert.NoErr(t, h.Handle(r))\n\n\ts = buf.ResetGet()\n\tfmt.Print(s)\n\tassert.Contains(t, s, \"test simple handler2\")\n\n\tassert.NoErr(t, h.Flush())\n\tassert.NoErr(t, h.Close())\n\n\th = handler.SimpleWithLevels(buf, slog.NormalLevels)\n\tr = newLogRecord(\"test simple handler with levels\")\n\tassert.NoErr(t, h.Handle(r))\n\n\ts = buf.ResetGet()\n\tfmt.Print(s)\n\tassert.Contains(t, s, \"test simple handler with levels\")\n\n\t// handle error\n\th.SetFormatter(newTestFormatter(true))\n\tassert.Err(t, h.Handle(r))\n}\n"
  },
  {
    "path": "handler.go",
    "content": "package slog\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\n\t\"github.com/gookit/goutil/strutil\"\n)\n\n//\n// Handler interface\n//\n\n// Handler interface definition\ntype Handler interface {\n\t// Closer Close handler.\n\t// You should first call Flush() on close logic.\n\t// Refer the FileHandler.Close() handle\n\tio.Closer\n\t// Flush and sync logs to disk file.\n\tFlush() error\n\t// IsHandling Checks whether the given record will be handled by this handler.\n\tIsHandling(level Level) bool\n\t// Handle a log record.\n\t//\n\t// All records may be passed to this method, and the handler should discard\n\t// those that it does not want to handle.\n\tHandle(*Record) error\n}\n\n// LevelFormattable support limit log levels and provide formatter\ntype LevelFormattable interface {\n\tFormattable\n\tIsHandling(level Level) bool\n}\n\n// FormattableHandler interface\ntype FormattableHandler interface {\n\tHandler\n\tFormattable\n}\n\n/********************************************************************************\n * Common parts for handler\n ********************************************************************************/\n\n// LevelWithFormatter struct definition\n//\n// - support set log formatter\n// - only support set max log level\ntype LevelWithFormatter struct {\n\tFormattableTrait\n\t// Level max for logging messages. 
if current level <= Level will log messages\n\tLevel Level\n}\n\n// NewLvFormatter create new LevelWithFormatter instance\nfunc NewLvFormatter(maxLv Level) *LevelWithFormatter {\n\treturn &LevelWithFormatter{Level: maxLv}\n}\n\n// SetMaxLevel set max level for logging messages\nfunc (h *LevelWithFormatter) SetMaxLevel(maxLv Level) {\n\th.Level = maxLv\n}\n\n// IsHandling Check if the current level can be handling\nfunc (h *LevelWithFormatter) IsHandling(level Level) bool {\n\treturn h.Level.ShouldHandling(level)\n}\n\n// LevelsWithFormatter struct definition\n//\n// - support set log formatter\n// - support setting multi log levels\ntype LevelsWithFormatter struct {\n\tFormattableTrait\n\t// Levels for logging messages\n\tLevels []Level\n}\n\n// NewLvsFormatter create new instance\nfunc NewLvsFormatter(levels []Level) *LevelsWithFormatter {\n\treturn &LevelsWithFormatter{Levels: levels}\n}\n\n// SetLimitLevels set limit levels for log message\nfunc (h *LevelsWithFormatter) SetLimitLevels(levels []Level) {\n\th.Levels = levels\n}\n\n// IsHandling Check if the current level can be handling\nfunc (h *LevelsWithFormatter) IsHandling(level Level) bool {\n\tfor _, l := range h.Levels {\n\t\tif l == level {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// LevelMode define level mode for logging\ntype LevelMode uint8\n\n// MarshalJSON implement the JSON Marshal interface [encoding/json.Marshaler]\nfunc (m LevelMode) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + m.String() + `\"`), nil\n}\n\n// UnmarshalJSON implement the JSON Unmarshal interface [encoding/json.Unmarshaler]\nfunc (m *LevelMode) UnmarshalJSON(data []byte) error {\n\ts, err := strconv.Unquote(string(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*m, err = StringToLevelMode(s)\n\treturn err\n}\n\n// String return string value\nfunc (m LevelMode) String() string {\n\tswitch m {\n\tcase LevelModeList:\n\t\treturn \"list\"\n\tcase LevelModeMax:\n\t\treturn \"max\"\n\tdefault:\n\t\treturn 
\"unknown\"\n\t}\n}\n\nconst (\n\t// LevelModeList use level list for limit record write\n\tLevelModeList LevelMode = iota\n\t// LevelModeMax use max level limit log record write\n\tLevelModeMax\n)\n\n// SafeToLevelMode parse string value to LevelMode, fail return LevelModeList\nfunc SafeToLevelMode(s string) LevelMode {\n\tlm, err := StringToLevelMode(s)\n\tif err != nil {\n\t\treturn LevelModeList\n\t}\n\treturn lm\n}\n\n// StringToLevelMode parse string value to LevelMode\nfunc StringToLevelMode(s string) (LevelMode, error) {\n\tswitch s {\n\tcase \"\", \"list\", \"list_level\", \"level_list\":\n\t\treturn LevelModeList, nil\n\tcase \"max\", \"max_level\", \"level_max\":\n\t\treturn LevelModeMax, nil\n\tdefault:\n\t\t// is int value, try to parse as int\n\t\tif strutil.IsInt(s) {\n\t\t\tiVal := strutil.SafeInt(s)\n\t\t\tif iVal >= 0 && iVal <= int(LevelModeMax) {\n\t\t\t\treturn LevelMode(iVal), nil\n\t\t\t}\n\t\t}\n\t\treturn 0, fmt.Errorf(\"slog: invalid level mode: %s\", s)\n\t}\n}\n\n// LevelHandling struct definition\ntype LevelHandling struct {\n\t// level check mode. default is LevelModeList\n\tlvMode LevelMode\n\t// max level for a log message. 
if the current level <= Level will log a message\n\tmaxLevel Level\n\t// levels limit for log message\n\tlevels []Level\n}\n\n// SetMaxLevel set max level for a log message\nfunc (h *LevelHandling) SetMaxLevel(maxLv Level) {\n\th.lvMode = LevelModeMax\n\th.maxLevel = maxLv\n}\n\n// SetLimitLevels set limit levels for log message\nfunc (h *LevelHandling) SetLimitLevels(levels []Level) {\n\th.lvMode = LevelModeList\n\th.levels = levels\n}\n\n// IsHandling Check if the current level can be handling\nfunc (h *LevelHandling) IsHandling(level Level) bool {\n\tif h.lvMode == LevelModeMax {\n\t\treturn h.maxLevel.ShouldHandling(level)\n\t}\n\n\tfor _, l := range h.levels {\n\t\tif l == level {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// LevelFormatting wrap level handling and log formatter\ntype LevelFormatting struct {\n\tLevelHandling\n\tFormatterWrapper\n}\n\n// NewMaxLevelFormatting create new instance with max level\nfunc NewMaxLevelFormatting(maxLevel Level) *LevelFormatting {\n\tlf := &LevelFormatting{}\n\tlf.SetMaxLevel(maxLevel)\n\treturn lf\n}\n\n// NewLevelsFormatting create new instance with levels\nfunc NewLevelsFormatting(levels []Level) *LevelFormatting {\n\tlf := &LevelFormatting{}\n\tlf.SetLimitLevels(levels)\n\treturn lf\n}\n"
  },
  {
    "path": "handler_test.go",
    "content": "package slog_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog\"\n)\n\nfunc TestSafeToLevelMode(t *testing.T) {\n\tassert.Eq(t, slog.LevelModeList, slog.SafeToLevelMode(\"list\"))\n\tassert.Eq(t, slog.LevelModeList, slog.SafeToLevelMode(\"0\"))\n\tassert.Eq(t, slog.LevelModeMax, slog.SafeToLevelMode(\"1\"))\n\tassert.Eq(t, slog.LevelModeList, slog.SafeToLevelMode(\"unknown\"))\n\n\tmode := slog.SafeToLevelMode(\"max\")\n\tassert.Eq(t, slog.LevelModeMax, mode)\n\n\t// MarshalJSON\n\tbs, err := mode.MarshalJSON()\n\tassert.Nil(t, err)\n\tassert.Eq(t, `\"max\"`, string(bs))\n\n\t// UnmarshalJSON\n\tmode = slog.LevelMode(0)\n\terr = mode.UnmarshalJSON([]byte(`\"max\"`))\n\tassert.Nil(t, err)\n\tassert.Eq(t, slog.LevelModeMax, mode)\n\n\tassert.Err(t, mode.UnmarshalJSON([]byte(\"ab\")))\n}\n\nfunc TestNewLvFormatter(t *testing.T) {\n\tlf := slog.NewLvFormatter(slog.InfoLevel)\n\n\tassert.True(t, lf.IsHandling(slog.ErrorLevel))\n\tassert.True(t, lf.IsHandling(slog.InfoLevel))\n\tassert.False(t, lf.IsHandling(slog.DebugLevel))\n\n\tlf.SetMaxLevel(slog.DebugLevel)\n\tassert.True(t, lf.IsHandling(slog.DebugLevel))\n}\n\nfunc TestNewLvsFormatter(t *testing.T) {\n\tlf := slog.NewLvsFormatter([]slog.Level{slog.InfoLevel, slog.ErrorLevel})\n\tassert.True(t, lf.IsHandling(slog.InfoLevel))\n\tassert.False(t, lf.IsHandling(slog.DebugLevel))\n\n\tlf.SetLimitLevels([]slog.Level{slog.InfoLevel, slog.ErrorLevel, slog.DebugLevel})\n\tassert.True(t, lf.IsHandling(slog.DebugLevel))\n}\n\nfunc TestLevelFormatting(t *testing.T) {\n\tlf := slog.NewMaxLevelFormatting(slog.InfoLevel)\n\n\tassert.True(t, lf.IsHandling(slog.InfoLevel))\n\tassert.False(t, lf.IsHandling(slog.TraceLevel))\n\n\t// use levels\n\tlf = slog.NewLevelsFormatting([]slog.Level{slog.InfoLevel, slog.ErrorLevel})\n\n\tassert.True(t, lf.IsHandling(slog.InfoLevel))\n\tassert.True(t, lf.IsHandling(slog.ErrorLevel))\n\tassert.False(t, 
lf.IsHandling(slog.TraceLevel))\n\n\t// test level mode\n\tassert.Eq(t, \"list\", slog.LevelModeList.String())\n\tassert.Eq(t, \"max\", slog.LevelModeMax.String())\n\tassert.Eq(t, \"unknown\", slog.LevelMode(9).String())\n}\n"
  },
  {
    "path": "internal/util.go",
    "content": "package internal\n\nimport \"path/filepath\"\n\n// AddSuffix2path adds a suffix to the file path.\n//\n// eg: \"/path/to/error.log\" => \"/path/to/error.{suffix}.log\"\nfunc AddSuffix2path(filePath, suffix string) string {\n\text := filepath.Ext(filePath)\n\treturn filePath[:len(filePath)-len(ext)] + \".\" + suffix + ext\n}\n\n// BuildGlobPattern builds a glob pattern for the given logfile. NOTE: use for testing only.\nfunc BuildGlobPattern(logfile string) string {\n\treturn logfile[:len(logfile)-4] + \"*\"\n}\n"
  },
  {
    "path": "issues_test.go",
    "content": "package slog_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/byteutil\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/timex\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\n// https://github.com/gookit/slog/issues/27\nfunc TestIssues_27(t *testing.T) {\n\tdefer slog.Reset()\n\n\tcount := 0\n\tfor {\n\t\tif count >= 6 {\n\t\t\tbreak\n\t\t}\n\t\tslog.Infof(\"info log %d\", count)\n\t\ttime.Sleep(time.Second)\n\t\tcount++\n\t}\n}\n\n// https://github.com/gookit/slog/issues/31\nfunc TestIssues_31(t *testing.T) {\n\tdefer slog.Reset()\n\tdefer slog.MustClose()\n\n\t// slog.DangerLevels equals slog.Levels{slog.PanicLevel, slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel}\n\th1 := handler.MustFileHandler(\"testdata/error_issue31.log\", handler.WithLogLevels(slog.DangerLevels))\n\n\tinfoLevels := slog.Levels{slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel}\n\th2 := handler.MustFileHandler(\"testdata/info_issue31.log\", handler.WithLogLevels(infoLevels))\n\n\tslog.PushHandler(h1)\n\tslog.PushHandlers(h2)\n\n\t// add logs\n\tslog.Info(\"info message text\")\n\tslog.Error(\"error message text\")\n}\n\n// https://github.com/gookit/slog/issues/52\nfunc TestIssues_52(t *testing.T) {\n\ttestTemplate := \"[{{datetime}}] [{{level}}] {{message}} {{data}} {{extra}}\"\n\tslog.SetLogLevel(slog.ErrorLevel)\n\tslog.GetFormatter().(*slog.TextFormatter).SetTemplate(testTemplate)\n\n\tslog.Error(\"Error message\")\n\tslog.Reset()\n\n\tfmt.Println()\n\t// dump.P(slog.GetFormatter())\n}\n\n// https://github.com/gookit/slog/issues/75\nfunc TestIssues_75(t *testing.T) {\n\tslog.Error(\"Error message 1\")\n\n\t// set max level\n\tslog.SetLogLevel(slog.Level(0))\n\t// slog.SetLogLevel(slog.PanicLevel)\n\tslog.Error(\"Error message 2\")\n\tslog.Reset()\n\t// 
dump.P(slog.GetFormatter())\n}\n\n// https://github.com/gookit/slog/issues/105\nfunc TestIssues_105(t *testing.T) {\n\tt.Run(\"simple write\", func(t *testing.T) {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tslog.Error(\"simple error log\", i)\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t}\n\t})\n\n\t// test concurrent write\n\tt.Run(\"concurrent write\", func(t *testing.T) {\n\t\twg := sync.WaitGroup{}\n\t\tfor i := 0; i < 100; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(i int) {\n\t\t\t\tslog.Error(\"concurrent error log\", i)\n\t\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t\t\twg.Done()\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\t})\n}\n\n// https://github.com/gookit/slog/issues/108\nfunc TestIssues_108(t *testing.T) {\n\tbuf1 := byteutil.NewBuffer()\n\troot := slog.NewWithName(\"root\", func(l *slog.Logger) {\n\t\tl.ChannelName = l.Name()\n\t\tl.AddHandler(handler.NewSimple(buf1, slog.InfoLevel))\n\t})\n\troot.Info(\"root info message\")\n\troot.Warn(\"root warn message\")\n\n\tstr := buf1.ResetGet()\n\tfmt.Println(str)\n\tassert.StrContains(t, str, \"[root] [INFO\")\n\tassert.StrContains(t, str, \"[root] [WARN\")\n\n\tbuf2 := byteutil.NewBuffer()\n\tprobe := slog.NewWithName(\"probe\", func(l *slog.Logger) {\n\t\tl.ChannelName = l.Name()\n\t\tl.AddHandler(handler.NewSimple(buf2, slog.InfoLevel))\n\t})\n\tprobe.Info(\"probe info message\")\n\tprobe.Warn(\"probe warn message\")\n\n\tstr = buf2.ResetGet()\n\tfmt.Println(str)\n\tassert.StrContains(t, str, \"[probe] [INFO\")\n\tassert.StrContains(t, str, \"[probe] [WARN\")\n}\n\n// https://github.com/gookit/slog/issues/121\n// 当我配置按日期的方式来滚动日志时，当大于 1 天时只能按 1 天来滚动日志。\nfunc TestIssues_121(t *testing.T) {\n\tseconds := timex.OneDaySec * 7 // 7天\n\tlogFile := \"testdata/issue121_7day.log\"\n\n\tclock := rotatefile.NewMockClock(\"2024-03-25 08:04:02\")\n\tfh, err := 
handler.NewTimeRotateFileHandler(\n\t\tlogFile,\n\t\trotatefile.RotateTime(seconds),\n\t\thandler.WithLogLevels(slog.NormalLevels),\n\t\thandler.WithBuffSize(128),\n\t\thandler.WithBackupNum(20),\n\t\thandler.WithTimeClock(clock),\n\t\thandler.WithDebugMode, // debug mode\n\t\t// handler.WithCompress(log.compress),\n\t\t// handler.WithFilePerm(log.filePerm),\n\t)\n\tassert.NoError(t, err)\n\n\t// create logger with handler and clock.\n\tl := slog.NewWithHandlers(fh).Config(func(sl *slog.Logger) {\n\t\tsl.TimeClock = clock.Now\n\t})\n\n\t// add logs\n\tfor i := 0; i < 50; i++ {\n\t\tl.Infof(\"hi, this is an example information ... message text. log index=%d\", i)\n\t\tclock.Add(24 * timex.Hour)\n\t}\n\n\tl.MustClose()\n}\n\n// https://github.com/gookit/slog/issues/137\n// 按日期滚动 如果当天时间节点的日志文件已存在 不会append 会直接替换 #137\nfunc TestIssues_137(t *testing.T) {\n\tlogFile := \"testdata/issue137_case1.log\"\n\tfsutil.MustSave(logFile, \"hello, this is a log file content\\n\")\n\n\tl := slog.NewWithHandlers(handler.MustFileHandler(logFile))\n\n\t// add logs\n\tfor i := 0; i < 5; i++ {\n\t\tl.Infof(\"hi, this is an example information ... message text. 
log index=%d\", i)\n\t}\n\n\tl.MustClose()\n\t// read file content\n\tcontent := fsutil.ReadString(logFile)\n\tassert.StrContains(t, content, \"this is a log file content\")\n\tassert.StrContains(t, content, \"log index=4\")\n}\n\n// https://github.com/gookit/slog/issues/139\n// 自定义模板报 invalid memory address or nil pointer dereference #139\nfunc TestIssues_139(t *testing.T) {\n\tmyTemplate := \"[{{datetime}}] [{{requestid}}] [{{level}}] {{message}}\\n\"\n\ttextFormatter := &slog.TextFormatter{TimeFormat: \"2006-01-02 15:04:05.000\"}\n\ttextFormatter.SetTemplate(myTemplate)\n\t// use func create\n\t// textFormatter := slog.NewTextFormatter(myTemplate).Configure(func(f *slog.TextFormatter) {\n\t// \tf.TimeFormat = \"2006-01-02 15:04:05.000\"\n\t// })\n\th1 := handler.NewConsoleHandler(slog.AllLevels)\n\th1.SetFormatter(textFormatter)\n\n\tL := slog.New()\n\tL.AddHandlers(h1)\n\t// add processor <====\n\t// L.AddProcessor(slog.ProcessorFunc(func(r *slog.Record) {\n\t// \tr.Fields[\"requestid\"] = r.Ctx.Value(\"requestid\")\n\t// }))\n\tL.AddProcessor(slog.AppendCtxKeys(\"requestid\"))\n\n\tctx := context.WithValue(context.Background(), \"requestid\", \"111111\")\n\tL.WithCtx(ctx).Info(\"test\")\n}\n\n// https://github.com/gookit/slog/issues/144\n// slog: failed to handle log, error: write ./logs/info.log: file already closed #144\nfunc TestIssues_144(t *testing.T) {\n\tdefer slog.MustClose()\n\tslog.Reset()\n\n\t// DangerLevels 包含： slog.PanicLevel, slog.ErrorLevel, slog.WarnLevel\n\th1 := handler.MustRotateFile(\"./testdata/logs/error_is144.log\", rotatefile.EveryDay,\n\t\thandler.WithLogLevels(slog.DangerLevels),\n\t\thandler.WithCompress(true),\n\t)\n\n\t// NormalLevels 包含： slog.InfoLevel, slog.NoticeLevel, slog.DebugLevel, slog.TraceLevel\n\th2 := handler.MustFileHandler(\"./testdata/logs/info_is144.log\", handler.WithLogLevels(slog.NormalLevels))\n\n\t// 注册 handler 到 logger(调度器)\n\tslog.PushHandlers(h1, h2)\n\n\t// add logs\n\tslog.Info(\"info message 
text\")\n\tslog.Error(\"error message text\")\n}\n\n// https://github.com/gookit/slog/issues/161 自定义level、caller的宽度\nfunc TestIssues_161(t *testing.T) {\n\t// 这样是全局影响的 - 不推荐\n\t// slog.LevelNames[slog.WarnLevel] = \"WARNI\"\n\t// slog.LevelNames[slog.InfoLevel] = \"INFO \"\n\t// slog.LevelNames[slog.NoticeLevel] = \"NOTIC\"\n\n\tl := slog.New()\n\tl.DoNothingOnPanicFatal()\n\n\th := handler.ConsoleWithMaxLevel(slog.TraceLevel)\n\t// 通过 SetFormatter 设置格式化 LevelNameLen=5\n\th.SetFormatter(slog.TextFormatterWith(slog.LimitLevelNameLen(5)))\n\tl.AddHandler(h)\n\n\tfor _, level := range slog.AllLevels {\n\t\tl.Logf(level, \"a %s test message\", level.String())\n\t}\n\tassert.NoErr(t, l.LastErr())\n}\n\n// https://github.com/gookit/slog/issues/163\nfunc TestIssues_163(t *testing.T) {\n\th, e := handler.NewRotateFile(\"testdata/app_iss163.log\", rotatefile.EveryDay)\n\tassert.NoError(t, e)\n\n\tl := slog.NewWithHandlers(h)\n\tdefer l.MustClose()\n\n\tl.Debugf(\"error %+v\", e)\n\tl.Infof(\"2222\")\n\t// TODO assert.FileExists(\"testdata/app_iss163.log\")\n}\n"
  },
  {
    "path": "logger.go",
    "content": "package slog\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil\"\n)\n\n// Logger log dispatcher definition.\n//\n// The logger implements the `github.com/gookit/gsr.Logger`\ntype Logger struct {\n\tname string\n\t// lock for writing logs\n\tmu sync.Mutex\n\t// logger latest error\n\terr error\n\t// mark logger is closed\n\tclosed bool\n\n\t// log handlers for logger\n\thandlers   []Handler\n\tprocessors []Processor\n\n\t// reusable empty record\n\trecordPool sync.Pool\n\t// handlers on exit.\n\texitHandlers []func()\n\tquitDaemon   chan struct{}\n\n\t//\n\t// logger options\n\t//\n\n\t// ChannelName log channel name, default is DefaultChannelName\n\tChannelName string\n\t// FlushInterval flush interval time. default is defaultFlushInterval=30s\n\tFlushInterval time.Duration\n\t// LowerLevelName use lower level name\n\tLowerLevelName bool\n\t// ReportCaller on writing log record\n\tReportCaller bool\n\tCallerSkip   int\n\t// CallerFlag used to set caller traceback information in different modes\n\tCallerFlag CallerFlagMode\n\t// BackupArgs backup log input args to Record.Args\n\tBackupArgs bool\n\t// GlobalFields global fields. will be added to all log records\n\t//\n\t// NOTE: add field need config Formatter template fields.\n\tGlobalFields map[string]any\n\t// TimeClock custom time clock, timezone\n\tTimeClock ClockFn\n\t// custom exit, panic handler.\n\tExitFunc  func(code int)\n\tPanicFunc func(v any)\n}\n\n// New create a new logger\nfunc New(fns ...LoggerFn) *Logger { return NewWithName(\"logger\", fns...) }\n\n// NewWithHandlers create a new logger with handlers\nfunc NewWithHandlers(hs ...Handler) *Logger {\n\tlogger := NewWithName(\"logger\")\n\tlogger.AddHandlers(hs...)\n\treturn logger\n}\n\n// NewWithConfig create a new logger with config func\nfunc NewWithConfig(fns ...LoggerFn) *Logger { return NewWithName(\"logger\", fns...) 
}\n\n// NewWithName create a new logger with name\nfunc NewWithName(name string, fns ...LoggerFn) *Logger {\n\tlogger := &Logger{\n\t\tname: name,\n\t\t// exit handle\n\t\t// ExitFunc:  os.Exit,\n\t\tPanicFunc:    DefaultPanicFn,\n\t\texitHandlers: []func(){},\n\t\t// options\n\t\tChannelName:  DefaultChannelName,\n\t\tReportCaller: true,\n\t\tCallerSkip:   6,\n\t\tTimeClock:    DefaultClockFn,\n\t\t// flush interval time\n\t\tFlushInterval: defaultFlushInterval,\n\t}\n\n\tlogger.recordPool.New = func() any {\n\t\treturn newRecord(logger)\n\t}\n\treturn logger.Config(fns...)\n}\n\n// NewRecord get new logger record\nfunc (l *Logger) newRecord() *Record {\n\tr := l.recordPool.Get().(*Record)\n\tr.reuse = false\n\tr.freed = false\n\tr.Fields = l.GlobalFields\n\treturn r\n}\n\nfunc (l *Logger) releaseRecord(r *Record) {\n\t// must reset for each record\n\tr.Time = emptyTime\n\tr.Message = \"\"\n\tr.Caller = nil\n\tr.Fmt = \"\"\n\tr.Args = nil\n\n\t// reuse=true: will not be released\n\tif r.reuse || r.freed {\n\t\treturn\n\t}\n\n\t// reset ctx data\n\tr.Ctx = nil\n\tr.Extra = nil\n\tr.Data = map[string]any{}\n\tr.Fields = map[string]any{}\n\t// reset flags\n\tr.inited = false\n\tr.reuse = false\n\tr.freed = true\n\n\tr.CallerSkip = l.CallerSkip\n\tl.recordPool.Put(r)\n}\n\n//\n// ---------------------------------------------------------------------------\n// region Configure logger\n// ---------------------------------------------------------------------------\n//\n\n// Config current logger\nfunc (l *Logger) Config(fns ...LoggerFn) *Logger {\n\tfor _, fn := range fns {\n\t\tfn(l)\n\t}\n\treturn l\n}\n\n// Configure current logger. 
alias of Config()\nfunc (l *Logger) Configure(fn LoggerFn) *Logger { return l.Config(fn) }\n\n// RegisterExitHandler register an exit-handler on global exitHandlers\nfunc (l *Logger) RegisterExitHandler(handler func()) {\n\tl.exitHandlers = append(l.exitHandlers, handler)\n}\n\n// PrependExitHandler prepend register an exit-handler on global exitHandlers\nfunc (l *Logger) PrependExitHandler(handler func()) {\n\tl.exitHandlers = append([]func(){handler}, l.exitHandlers...)\n}\n\n// ResetExitHandlers reset logger exitHandlers\nfunc (l *Logger) ResetExitHandlers() { l.exitHandlers = make([]func(), 0) }\n\n// ExitHandlers get all exitHandlers of the logger\nfunc (l *Logger) ExitHandlers() []func() { return l.exitHandlers }\n\n// SetName for logger\nfunc (l *Logger) SetName(name string) { l.name = name }\n\n// Name of the logger\nfunc (l *Logger) Name() string { return l.name }\n\n//\n// ---------------------------------------------------------------------------\n// region Management logger\n// ---------------------------------------------------------------------------\n//\n\nconst defaultFlushInterval = 30 * time.Second\n\n// FlushDaemon run flush handle on daemon\n//\n// Usage, please refer to the FlushDaemon() on package.\nfunc (l *Logger) FlushDaemon(onStops ...func()) {\n\tl.quitDaemon = make(chan struct{})\n\tif l.FlushInterval <= 0 {\n\t\tl.FlushInterval = defaultFlushInterval\n\t}\n\n\t// create a ticker\n\ttk := time.NewTicker(l.FlushInterval)\n\tdefer tk.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-tk.C:\n\t\t\tif err := l.lockAndFlushAll(); err != nil {\n\t\t\t\tprintStderr(\"slog.FlushDaemon: daemon flush logs error: \", err)\n\t\t\t}\n\t\tcase <-l.quitDaemon:\n\t\t\tfor _, fn := range onStops {\n\t\t\t\tfn()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// StopDaemon stop flush daemon\nfunc (l *Logger) StopDaemon() {\n\tif l.quitDaemon == nil {\n\t\tpanic(\"cannot quit daemon, please call FlushDaemon() first\")\n\t}\n\tclose(l.quitDaemon)\n}\n\n// FlushTimeout 
flush logs on limit time.\n//\n// refer from glog package\nfunc (l *Logger) FlushTimeout(timeout time.Duration) {\n\tdone := make(chan bool, 1)\n\tgo func() {\n\t\tif err := l.lockAndFlushAll(); err != nil {\n\t\t\tprintStderr(\"slog.FlushTimeout: flush logs error: \", err)\n\t\t}\n\t\tdone <- true\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(timeout):\n\t\tprintStderr(\"slog.FlushTimeout: flush took longer than timeout:\", timeout)\n\t}\n}\n\n// Sync flushes buffered logs (if any). alias of the Flush()\nfunc (l *Logger) Sync() error { return Flush() }\n\n// Flush flushes all the logs and attempts to \"sync\" their data to disk.\n// l.mu is held.\nfunc (l *Logger) Flush() error { return l.lockAndFlushAll() }\n\n// MustFlush flush logs. will panic on error\nfunc (l *Logger) MustFlush() {\n\tgoutil.PanicErr(l.lockAndFlushAll())\n}\n\n// FlushAll flushes all the logs and attempts to \"sync\" their data to disk.\n//\n// alias of the Flush()\nfunc (l *Logger) FlushAll() error { return l.lockAndFlushAll() }\n\n// lockAndFlushAll is like flushAll but locks l.mu first.\nfunc (l *Logger) lockAndFlushAll() error {\n\tl.mu.Lock()\n\tl.flushAll()\n\tl.mu.Unlock()\n\n\treturn l.err\n}\n\n// flush all without lock\nfunc (l *Logger) flushAll() {\n\t// flush from fatal down, in case there's trouble flushing.\n\t_ = l.VisitAll(func(handler Handler) error {\n\t\tif err := handler.Flush(); err != nil {\n\t\t\tl.err = err\n\t\t\tprintStderr(\"slog: call handler.Flush() error:\", err)\n\t\t}\n\t\treturn nil\n\t})\n}\n\n// MustClose close logger. 
will panic on error\nfunc (l *Logger) MustClose() { goutil.PanicErr(l.Close()) }\n\n// Close the logger, will flush all logs and close all handlers\n//\n// IMPORTANT:\n//\n//\tif enable async/buffer mode, please call the Close() before exit.\nfunc (l *Logger) Close() error {\n\tif l.closed {\n\t\treturn nil\n\t}\n\n\t_ = l.VisitAll(func(handler Handler) error {\n\t\tif err := handler.Close(); err != nil {\n\t\t\tl.err = err\n\t\t\tprintStderr(\"slog: call handler.Close() error:\", err)\n\t\t}\n\t\treturn nil\n\t})\n\n\tl.closed = true\n\treturn l.err\n}\n\n// VisitAll logger handlers\nfunc (l *Logger) VisitAll(fn func(handler Handler) error) error {\n\tfor _, handler := range l.handlers {\n\t\t// TIP: you can return nil for ignore error\n\t\tif err := fn(handler); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// Reset the logger. will reset: handlers, processors, closed=false\nfunc (l *Logger) Reset() {\n\tl.closed = false\n\tl.ResetHandlers()\n\tl.ResetProcessors()\n}\n\n// ResetProcessors for the logger\nfunc (l *Logger) ResetProcessors() { l.processors = make([]Processor, 0) }\n\n// ResetHandlers for the logger\nfunc (l *Logger) ResetHandlers() { l.handlers = make([]Handler, 0) }\n\n// Exit logger handle\nfunc (l *Logger) Exit(code int) {\n\tl.runExitHandlers()\n\n\t// global exit handlers\n\trunExitHandlers()\n\n\tif l.ExitFunc != nil {\n\t\tl.ExitFunc(code)\n\t}\n}\n\nfunc (l *Logger) runExitHandlers() {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tprintStderr(\"slog: run exit handler recovered, error:\", err)\n\t\t}\n\t}()\n\n\tfor _, handler := range l.exitHandlers {\n\t\thandler()\n\t}\n}\n\n// DoNothingOnPanicFatal do nothing on panic or fatal level. 
TIP: useful on testing.\nfunc (l *Logger) DoNothingOnPanicFatal() {\n\tl.PanicFunc = DoNothingOnPanic\n\tl.ExitFunc = DoNothingOnExit\n}\n\n// HandlersNum returns the number of handlers\nfunc (l *Logger) HandlersNum() int { return len(l.handlers) }\n\n// LastErr get, will clear it after read.\nfunc (l *Logger) LastErr() error {\n\terr := l.err\n\tl.err = nil\n\treturn err\n}\n\n//\n// ---------------------------------------------------------------------------\n// region Register handlers, processors\n// ---------------------------------------------------------------------------\n//\n\n// AddHandler to the logger\nfunc (l *Logger) AddHandler(h Handler) { l.PushHandlers(h) }\n\n// AddHandlers to the logger\nfunc (l *Logger) AddHandlers(hs ...Handler) { l.PushHandlers(hs...) }\n\n// PushHandler to the l. alias of AddHandler()\nfunc (l *Logger) PushHandler(h Handler) { l.PushHandlers(h) }\n\n// PushHandlers to the logger\nfunc (l *Logger) PushHandlers(hs ...Handler) {\n\tif len(hs) > 0 {\n\t\tl.handlers = append(l.handlers, hs...)\n\t}\n}\n\n// SetHandlers for the logger\nfunc (l *Logger) SetHandlers(hs []Handler) { l.handlers = hs }\n\n// AddProcessor to the logger\nfunc (l *Logger) AddProcessor(p Processor) { l.processors = append(l.processors, p) }\n\n// PushProcessor to the logger, alias of AddProcessor()\nfunc (l *Logger) PushProcessor(p Processor) { l.processors = append(l.processors, p) }\n\n// AddProcessors to the logger. alias of AddProcessor()\nfunc (l *Logger) AddProcessors(ps ...Processor) { l.processors = append(l.processors, ps...) 
}\n\n// SetProcessors for the logger\nfunc (l *Logger) SetProcessors(ps []Processor) { l.processors = ps }\n\n// -------------------------- New sub-logger -----------------------------\n\n// NewSub return a new sub logger on the logger, can keep fields/data/ctx for sub logger.\n//\n// Usage:\n//\n//\tsl := logger.NewSub().KeepCtx(custom ctx).\n//\t\tKeepFields(slog.M{\"ip\": ...}).\n//\t\tKeepData(slog.M{\"username\": ...})\n//\tdefer sl.Release()\n//\n//\tsl.Info(\"some message\")\n//\tsl.Warn(\"some message\")\nfunc (l *Logger) NewSub() *SubLogger { return NewSubWith(l) }\n\n//\n// ---------------------------------------------------------------------------\n// region New record with logger\n// ---------------------------------------------------------------------------\n//\n\n// Record return a new record with logger, will release after writing log.\nfunc (l *Logger) Record() *Record { return l.newRecord() }\n\n// Reused return a new record with logger, but it can be reused.\n// if you want to release the record, please call the Record.Release() after write log.\n//\n// Usage:\n//\n//\tr := logger.Reused()\n//\tdefer r.Release()\n//\n//\t// can write log multiple times\n//\tr.Info(\"some message1\")\n//\tr.Warn(\"some message1\")\nfunc (l *Logger) Reused() *Record { return l.newRecord().Reused() }\n\n// WithField new record with field\n//\n// TIP: add field need config Formatter template fields.\nfunc (l *Logger) WithField(name string, value any) *Record {\n\tr := l.newRecord()\n\t// defer l.releaseRecord(r)\n\treturn r.WithField(name, value)\n}\n\n// WithFields new record with fields\n//\n// TIP: add field need config Formatter template fields.\nfunc (l *Logger) WithFields(fields M) *Record {\n\tr := l.newRecord()\n\t// defer l.releaseRecord(r)\n\treturn r.WithFields(fields)\n}\n\n// WithData new record with data\nfunc (l *Logger) WithData(data M) *Record {\n\treturn l.newRecord().WithData(data)\n}\n\n// WithValue new record with data value\nfunc (l *Logger) 
WithValue(key string, value any) *Record {\n\treturn l.newRecord().AddValue(key, value)\n}\n\n// WithExtra new record with extra data\nfunc (l *Logger) WithExtra(ext M) *Record {\n\treturn l.newRecord().SetExtra(ext)\n}\n\n// WithTime new record with time.Time\nfunc (l *Logger) WithTime(t time.Time) *Record {\n\tr := l.newRecord()\n\t// defer l.releaseRecord(r)\n\treturn r.WithTime(t)\n}\n\n// WithCtx new record with context.Context\nfunc (l *Logger) WithCtx(ctx context.Context) *Record { return l.WithContext(ctx) }\n\n// WithContext new record with context.Context\nfunc (l *Logger) WithContext(ctx context.Context) *Record {\n\tr := l.newRecord()\n\t// defer l.releaseRecord(r)\n\treturn r.WithContext(ctx)\n}\n\n//\n// ---------------------------------------------------------------------------\n// region Add log message\n// ---------------------------------------------------------------------------\n//\n\nfunc (l *Logger) log(level Level, args []any) {\n\tr := l.newRecord()\n\tr.CallerSkip++\n\tr.log(level, args)\n}\n\n// Logf a format message with level\nfunc (l *Logger) logf(level Level, format string, args []any) {\n\tr := l.newRecord()\n\tr.CallerSkip++\n\tr.logf(level, format, args)\n}\n\n// logCtx a context message with level\nfunc (l *Logger) logCtx(ctx context.Context, level Level, args []any) {\n\tr := l.newRecord()\n\tr.Ctx = ctx\n\tr.CallerSkip++\n\tr.log(level, args)\n}\n\n// logfCtx a format message with level,  context\nfunc (l *Logger) logfCtx(ctx context.Context, level Level, format string, args []any) {\n\tr := l.newRecord()\n\tr.Ctx = ctx\n\tr.CallerSkip++\n\tr.logf(level, format, args)\n}\n\n// Log a message with level\nfunc (l *Logger) Log(level Level, args ...any) { l.log(level, args) }\n\n// Logf a format message with level\nfunc (l *Logger) Logf(level Level, format string, args ...any) { l.logf(level, format, args) }\n\n// Print logs a message at level PrintLevel\nfunc (l *Logger) Print(args ...any) { l.log(PrintLevel, args) }\n\n// Println 
logs a message at level PrintLevel\nfunc (l *Logger) Println(args ...any) { l.log(PrintLevel, args) }\n\n// Printf logs a message at level PrintLevel\nfunc (l *Logger) Printf(format string, args ...any) { l.logf(PrintLevel, format, args) }\n\n// Trace logs a message at level trace\nfunc (l *Logger) Trace(args ...any) { l.log(TraceLevel, args) }\n\n// Tracef logs a message at level trace\nfunc (l *Logger) Tracef(format string, args ...any) { l.logf(TraceLevel, format, args) }\n\n// TraceCtx logs a message at level trace with context\nfunc (l *Logger) TraceCtx(ctx context.Context, args ...any) { l.logCtx(ctx, TraceLevel, args) }\n\n// TracefCtx logs a message at level trace with context\nfunc (l *Logger) TracefCtx(ctx context.Context, format string, args ...any) {\n\tl.logfCtx(ctx, TraceLevel, format, args)\n}\n\n// Debug logs a message at level debug\nfunc (l *Logger) Debug(args ...any) { l.log(DebugLevel, args) }\n\n// Debugf logs a message at level debug\nfunc (l *Logger) Debugf(format string, args ...any) { l.logf(DebugLevel, format, args) }\n\n// DebugCtx logs a message at level debug with context\nfunc (l *Logger) DebugCtx(ctx context.Context, args ...any) { l.logCtx(ctx, DebugLevel, args) }\n\n// DebugfCtx logs a message at level debug with context\nfunc (l *Logger) DebugfCtx(ctx context.Context, format string, args ...any) {\n\tl.logfCtx(ctx, DebugLevel, format, args)\n}\n\n// Info logs a message at level Info\nfunc (l *Logger) Info(args ...any) { l.log(InfoLevel, args) }\n\n// Infof logs a message at level Info\nfunc (l *Logger) Infof(format string, args ...any) { l.logf(InfoLevel, format, args) }\n\n// InfoCtx logs a message at level Info with context\nfunc (l *Logger) InfoCtx(ctx context.Context, args ...any) { l.logCtx(ctx, InfoLevel, args) }\n\n// InfofCtx logs a message at level Info with context\nfunc (l *Logger) InfofCtx(ctx context.Context, format string, args ...any) {\n\tl.logfCtx(ctx, InfoLevel, format, args)\n}\n\n// Notice logs a message at 
level notice\nfunc (l *Logger) Notice(args ...any) { l.log(NoticeLevel, args) }\n\n// Noticef logs a message at level notice\nfunc (l *Logger) Noticef(format string, args ...any) { l.logf(NoticeLevel, format, args) }\n\n// NoticeCtx logs a message at level notice with context\nfunc (l *Logger) NoticeCtx(ctx context.Context, args ...any) { l.logCtx(ctx, NoticeLevel, args) }\n\n// NoticefCtx logs a message at level notice with context\nfunc (l *Logger) NoticefCtx(ctx context.Context, format string, args ...any) {\n\tl.logfCtx(ctx, NoticeLevel, format, args)\n}\n\n// Warn logs a message at level Warn\nfunc (l *Logger) Warn(args ...any) { l.log(WarnLevel, args) }\n\n// Warnf logs a message at level Warn\nfunc (l *Logger) Warnf(format string, args ...any) { l.logf(WarnLevel, format, args) }\n\n// WarnCtx logs a message at level Warn with context\nfunc (l *Logger) WarnCtx(ctx context.Context, args ...any) { l.logCtx(ctx, WarnLevel, args) }\n\n// WarnfCtx logs a message at level Warn with context\nfunc (l *Logger) WarnfCtx(ctx context.Context, format string, args ...any) {\n\tl.logfCtx(ctx, WarnLevel, format, args)\n}\n\n// Warning logs a message at level Warn, alias of Logger.Warn()\nfunc (l *Logger) Warning(args ...any) { l.log(WarnLevel, args) }\n\n// Error logs a message at level error\nfunc (l *Logger) Error(args ...any) { l.log(ErrorLevel, args) }\n\n// Errorf logs a message at level error\nfunc (l *Logger) Errorf(format string, args ...any) { l.logf(ErrorLevel, format, args) }\n\n// ErrorT logs an error type at level error\nfunc (l *Logger) ErrorT(err error) {\n\tif err != nil {\n\t\tl.log(ErrorLevel, []any{err})\n\t}\n}\n\n// ErrorCtx logs a message at level error with context\nfunc (l *Logger) ErrorCtx(ctx context.Context, args ...any) { l.logCtx(ctx, ErrorLevel, args) }\n\n// ErrorfCtx logs a message at level error with context\nfunc (l *Logger) ErrorfCtx(ctx context.Context, format string, args ...any) {\n\tl.logfCtx(ctx, ErrorLevel, format, args)\n}\n\n// 
Stack logs a error message and with call stack. TODO\n// func EStack(args ...any) { std.log(ErrorLevel, args) }\n\n// Fatal logs a message at level fatal\nfunc (l *Logger) Fatal(args ...any) { l.log(FatalLevel, args) }\n\n// Fatalf logs a message at level fatal\nfunc (l *Logger) Fatalf(format string, args ...any) { l.logf(FatalLevel, format, args) }\n\n// Fatalln logs a message at level fatal\nfunc (l *Logger) Fatalln(args ...any) { l.log(FatalLevel, args) }\n\n// FatalCtx logs a message at level panic with context\nfunc (l *Logger) FatalCtx(ctx context.Context, args ...any) { l.logCtx(ctx, FatalLevel, args) }\n\n// FatalfCtx logs a message at level panic with context\nfunc (l *Logger) FatalfCtx(ctx context.Context, format string, args ...any) {\n\tl.logfCtx(ctx, FatalLevel, format, args)\n}\n\n// Panic logs a message at level panic\nfunc (l *Logger) Panic(args ...any) { l.log(PanicLevel, args) }\n\n// Panicf logs a message at level panic\nfunc (l *Logger) Panicf(format string, args ...any) { l.logf(PanicLevel, format, args) }\n\n// Panicln logs a message at level panic\nfunc (l *Logger) Panicln(args ...any) { l.log(PanicLevel, args) }\n\n// PanicCtx logs a message at level panic with context\nfunc (l *Logger) PanicCtx(ctx context.Context, args ...any) { l.logCtx(ctx, PanicLevel, args) }\n\n// PanicfCtx logs a message at level panic with context\nfunc (l *Logger) PanicfCtx(ctx context.Context, format string, args ...any) {\n\tl.logfCtx(ctx, PanicLevel, format, args)\n}\n"
  },
  {
    "path": "logger_sub.go",
    "content": "package slog\n\nimport \"context\"\n\n// SubLogger is a sub-logger, It can be used to keep a certain amount of contextual information and log multiple times.\n// 可以用于保持一定的上下文信息多次记录日志。例如在循环中使用，或者作为方法参数传入。\n//\n// Usage:\n//\n//\tsl := slog.NewSub().KeepCtx(custom ctx).\n//\t\tKeepFields(slog.M{\"ip\": ...}).\n//\t\tKeepData(slog.M{\"username\": ...})\n//\tdefer sl.Release()\n//\n//\tsl.Info(\"some message\")\ntype SubLogger struct {\n\tl *Logger // parent logger\n\n\t// Ctx keep context for all log records\n\tCtx context.Context\n\t// Fields keep custom fields data for all log records\n\tFields M\n\t// Data keep data for all log records\n\tData M\n\t// Extra data. will keep for all log records\n\tExtra M\n}\n\n// NewSubWith returns a new SubLogger with parent logger.\nfunc NewSubWith(l *Logger) *SubLogger { return &SubLogger{l: l} }\n\n// KeepCtx keep context for all log records\nfunc (sub *SubLogger) KeepCtx(ctx context.Context) *SubLogger {\n\tsub.Ctx = ctx\n\treturn sub\n}\n\n// KeepFields keep custom fields data for all log records\nfunc (sub *SubLogger) KeepFields(fields M) *SubLogger {\n\tsub.Fields = fields\n\treturn sub\n}\n\n// KeepField keep custom field for all log records\nfunc (sub *SubLogger) KeepField(field string, value any) *SubLogger {\n\tif sub.Fields == nil {\n\t\tsub.Fields = make(M)\n\t}\n\n\tsub.Fields[field] = value\n\treturn sub\n}\n\n// KeepData keep data for all log records\nfunc (sub *SubLogger) KeepData(data M) *SubLogger {\n\tsub.Data = data\n\treturn sub\n}\n\n// KeepExtra keep extra data for all log records\nfunc (sub *SubLogger) KeepExtra(extra M) *SubLogger {\n\tsub.Extra = extra\n\treturn sub\n}\n\n// Release releases the SubLogger.\nfunc (sub *SubLogger) Release() {\n\tsub.l = nil\n\tsub.Ctx = nil\n\tsub.Fields = nil\n\tsub.Data = nil\n\tsub.Extra = nil\n}\n\nfunc (sub *SubLogger) withKeepCtx() *Record {\n\tr := sub.l.WithContext(sub.Ctx)\n\tr.Data = sub.Data\n\tr.Extra = sub.Extra\n\tr.Fields = 
sub.Fields\n\treturn r\n}\n\n//\n// ---------------------------------------------------------------------------\n// Add log message with level\n// ---------------------------------------------------------------------------\n//\n\n// Print logs a message at PrintLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Print(args ...any) { sub.withKeepCtx().Print(args...) }\n\n// Printf logs a message at PrintLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Printf(format string, args ...any) { sub.withKeepCtx().Printf(format, args...) }\n\n// Trace logs a message at TraceLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Trace(args ...any) { sub.withKeepCtx().Trace(args...) }\n\n// Tracef logs a formatted message at TraceLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Tracef(format string, args ...any) {\n\tsub.withKeepCtx().Tracef(format, args...)\n}\n\n// Debug logs a message at DebugLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Debug(args ...any) { sub.withKeepCtx().Debug(args...) }\n\n// Debugf logs a formatted message at DebugLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Debugf(format string, args ...any) {\n\tsub.withKeepCtx().Debugf(format, args...)\n}\n\n// Info logs a message at InfoLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Info(args ...any) { sub.withKeepCtx().Info(args...) }\n\n// Infof logs a formatted message at InfoLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Infof(format string, args ...any) {\n\tsub.withKeepCtx().Infof(format, args...)\n}\n\n// Notice logs a message at NoticeLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Notice(args ...any) { sub.withKeepCtx().Notice(args...) }\n\n// Noticef logs a formatted message at NoticeLevel. 
will with sub logger's context, fields and data\nfunc (sub *SubLogger) Noticef(format string, args ...any) {\n\tsub.withKeepCtx().Noticef(format, args...)\n}\n\n// Warn logs a message at WarnLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Warn(args ...any) { sub.withKeepCtx().Warn(args...) }\n\n// Warnf logs a formatted message at WarnLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Warnf(format string, args ...any) {\n\tsub.withKeepCtx().Warnf(format, args...)\n}\n\n// Error logs a message at ErrorLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Error(args ...any) { sub.withKeepCtx().Error(args...) }\n\n// Errorf logs a formatted message at ErrorLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Errorf(format string, args ...any) {\n\tsub.withKeepCtx().Errorf(format, args...)\n}\n\n// Fatal logs a message at FatalLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Fatal(args ...any) { sub.withKeepCtx().Fatal(args...) }\n\n// Fatalf logs a formatted message at FatalLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Fatalf(format string, args ...any) {\n\tsub.withKeepCtx().Fatalf(format, args...)\n}\n\n// Panic logs a message at PanicLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Panic(args ...any) { sub.withKeepCtx().Panic(args...) }\n\n// Panicf logs a formatted message at PanicLevel. will with sub logger's context, fields and data\nfunc (sub *SubLogger) Panicf(format string, args ...any) {\n\tsub.withKeepCtx().Panicf(format, args...)\n}\n"
  },
  {
    "path": "logger_test.go",
    "content": "package slog_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/dump\"\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/timex\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc TestLoggerBasic(t *testing.T) {\n\tl := slog.New()\n\tl.SetName(\"testName\")\n\tassert.Eq(t, \"testName\", l.Name())\n\n\tl = slog.NewWithName(\"testName\")\n\tassert.Eq(t, \"testName\", l.Name())\n}\n\nfunc TestLogger_PushHandler(t *testing.T) {\n\tl := slog.New().Configure(func(l *slog.Logger) {\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\n\tw1 := new(bytes.Buffer)\n\th1 := handler.NewIOWriterHandler(w1, slog.DangerLevels)\n\tl.PushHandler(h1)\n\n\tw2 := new(bytes.Buffer)\n\th2 := handler.NewIOWriterHandler(w2, slog.NormalLevels)\n\tl.PushHandlers(h2)\n\n\tl.Warning(slog.WarnLevel, \"message\")\n\tl.Logf(slog.TraceLevel, \"%s message\", slog.TraceLevel)\n\n\tassert.Contains(t, w1.String(), \"WARNING message\")\n\tassert.Contains(t, w2.String(), \"TRACE message\")\n\tassert.Contains(t, w2.String(), \"TestLogger_PushHandler\")\n\n\tassert.NoErr(t, l.Sync())\n\tassert.NoErr(t, l.Flush())\n\tl.MustFlush()\n\n\tassert.NoErr(t, l.Close())\n\tl.MustClose()\n\tl.Reset()\n}\n\nfunc TestLogger_ReportCaller(t *testing.T) {\n\tl := slog.NewWithConfig(func(logger *slog.Logger) {\n\t\tlogger.ReportCaller = true\n\t\tlogger.CallerFlag = slog.CallerFlagFnLine\n\t})\n\n\tvar buf bytes.Buffer\n\th := handler.NewIOWriterHandler(&buf, slog.AllLevels)\n\th.SetFormatter(slog.NewJSONFormatter(func(f *slog.JSONFormatter) {\n\t\tf.Fields = append(f.Fields, slog.FieldKeyCaller)\n\t}))\n\n\tl.AddHandler(h)\n\tl.Info(\"message\")\n\n\tstr := buf.String()\n\tassert.Contains(t, str, `\"caller\":\"logger_test.go`)\n}\n\nfunc TestLogger_Log(t *testing.T) {\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.ReportCaller = 
true\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\n\tl.AddHandler(handler.NewConsoleHandler(slog.AllLevels))\n\tl.Log(slog.InfoLevel, \"a\", slog.InfoLevel, \"message\")\n\n\tl.WithField(\"newKey\", \"value\").Fatalln(\"a fatal message\")\n\tl.WithTime(timex.NowHourStart()).Panicln(\"a panic message\")\n}\n\nfunc TestLogger_WithContext(t *testing.T) {\n\tvar buf bytes.Buffer\n\th := handler.NewIOWriterHandler(&buf, slog.AllLevels)\n\n\tl := newLogger()\n\tl.AddHandlers(h)\n\n\tctx := context.Background()\n\n\tr := l.WithCtx(ctx)\n\tr.Info(\"with context\")\n\n\tstr := buf.String()\n\tassert.Contains(t, str, `with context`)\n}\n\nfunc TestLogger_panic(t *testing.T) {\n\th := newTestHandler()\n\th.errOnFlush = true\n\n\tl := slog.NewWithHandlers(h)\n\n\tassert.Panics(t, func() {\n\t\tl.MustFlush()\n\t})\n\n\terr := l.LastErr()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"flush error\", err.Error())\n\n\th.errOnClose = true\n\tassert.Panics(t, func() {\n\t\tl.MustClose()\n\t})\n\n\terr = l.LastErr()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"close error\", err.Error())\n}\n\nfunc TestLogger_error(t *testing.T) {\n\th := newTestHandler()\n\tl := slog.NewWithHandlers(h)\n\n\terr := l.VisitAll(func(h slog.Handler) error {\n\t\treturn errorx.Raw(\"visit error\")\n\t})\n\tassert.Err(t, err)\n\tassert.Eq(t, \"visit error\", err.Error())\n\n\th.errOnClose = true\n\terr = l.Close()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"close error\", err.Error())\n}\n\nfunc TestLogger_panicLevel(t *testing.T) {\n\tw := new(bytes.Buffer)\n\tl := slog.NewWithHandlers(handler.NewIOWriter(w, slog.AllLevels))\n\n\t// assert.PanicsWithValue(t, \"slog: panic message\", func() {\n\tassert.Panics(t, func() {\n\t\tl.Panicln(\"panicln message\")\n\t})\n\tassert.Contains(t, w.String(), \"[PANIC]\")\n\tassert.Contains(t, w.String(), \"panicln message\")\n\n\tw.Reset()\n\tassert.Panics(t, func() {\n\t\tl.Panicf(\"panicf message\")\n\t})\n\tassert.Contains(t, w.String(), \"panicf 
message\")\n\n\tw.Reset()\n\tassert.Panics(t, func() {\n\t\tl.Panic(\"panic message\")\n\t})\n\tassert.Contains(t, w.String(), \"panic message\")\n\n\tassert.NoErr(t, l.FlushAll())\n}\n\nfunc TestLogger_log_allLevel(t *testing.T) {\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.ReportCaller = true\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\n\tl.AddHandler(handler.NewConsoleHandler(slog.AllLevels))\n\tprintAllLevelLogs(l, \"this a\", \"log\", \"message\")\n}\n\nfunc TestLogger_logf_allLevel(t *testing.T) {\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.ReportCaller = true\n\t\tl.CallerFlag = slog.CallerFlagFpLine\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\n\tl.AddHandler(handler.NewConsoleHandler(slog.AllLevels))\n\tprintfAllLevelLogs(l, \"this a log %s\", \"message\")\n}\n\nfunc TestLogger_write_error(t *testing.T) {\n\th := newTestHandler()\n\th.errOnHandle = true\n\n\tl := slog.NewWithHandlers(h)\n\tl.Info(\"a message\")\n\n\terr := l.LastErr()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"handle error\", err.Error())\n}\n\nfunc TestLogger_AddWithCtx(t *testing.T) {\n\th := newTestHandler()\n\n\tl := slog.NewWithHandlers(h)\n\tl.DoNothingOnPanicFatal()\n\tl.AddProcessor(slog.CtxKeysProcessor(\"data\", \"ctx1\", \"ctx2\"))\n\n\tctx := context.WithValue(context.Background(), \"ctx1\", \"ctx1-value\")\n\tctx = context.WithValue(ctx, \"ctx2\", \"ctx2-value\")\n\n\tt.Run(\"normal\", func(t *testing.T) {\n\t\tl.TraceCtx(ctx, \"A message\", \"test\")\n\t\tl.DebugCtx(ctx, \"A message\", \"test\")\n\t\tl.InfoCtx(ctx, \"A message\", \"test\")\n\t\tl.NoticeCtx(ctx, \"A message\", \"test\")\n\t\tl.WarnCtx(ctx, \"A message\", \"test\")\n\t\tl.ErrorCtx(ctx, \"A message\", \"test\")\n\t\tl.FatalCtx(ctx, \"A message\", \"test\")\n\t\tl.PanicCtx(ctx, \"A message\", \"test\")\n\n\t\ts := h.ResetGet()\n\t\tassert.StrContains(t, s, \"ctx1-value\")\n\t\tassert.StrContains(t, s, \"ctx2-value\")\n\t\tfor _, level := range slog.AllLevels {\n\t\t\tassert.StrContains(t, s, 
level.Name())\n\t\t}\n\t})\n\n\tt.Run(\"with format\", func(t *testing.T) {\n\t\tl.TracefCtx(ctx, \"A message %s\", \"test\")\n\t\tl.DebugfCtx(ctx, \"A message %s\", \"test\")\n\t\tl.InfofCtx(ctx, \"A message %s\", \"test\")\n\t\tl.NoticefCtx(ctx, \"A message %s\", \"test\")\n\t\tl.WarnfCtx(ctx, \"A message %s\", \"test\")\n\t\tl.ErrorfCtx(ctx, \"A message %s\", \"test\")\n\t\tl.PanicfCtx(ctx, \"A message %s\", \"test\")\n\t\tl.FatalfCtx(ctx, \"A message %s\", \"test\")\n\n\t\ts := h.ResetGet()\n\t\tassert.StrContains(t, s, \"ctx1-value\")\n\t\tassert.StrContains(t, s, \"ctx2-value\")\n\t\tfor _, level := range slog.AllLevels {\n\t\t\tassert.StrContains(t, s, level.Name())\n\t\t}\n\t})\n}\n\nfunc TestLogger_GlobalFields(t *testing.T) {\n\tbuf, l := newTestLogger()\n\tl.Config(func(l *slog.Logger) {\n\t\tl.GlobalFields = slog.M{\n\t\t\t\"global1\": \"test-app\",\n\t\t\t\"global2\": \"test-value2\",\n\t\t}\n\t})\n\n\tl.Info(\"A message\")\n\ts := buf.StringReset()\n\tfmt.Print(s)\n\tassert.StrContains(t, s, \"global1\")\n\tassert.StrContains(t, s, \"global2\")\n}\n\nfunc TestLogger_option_BackupArgs(t *testing.T) {\n\tl := slog.New(func(l *slog.Logger) {\n\t\tl.BackupArgs = true\n\t\tl.CallerFlag = slog.CallerFlagPkgFnl\n\t})\n\n\tvar rFmt string\n\tvar rArgs []any\n\n\th := newTestHandler()\n\th.beforeFormat = func(r *slog.Record) {\n\t\trFmt = r.Fmt\n\t\trArgs = r.Args\n\t}\n\tl.AddHandler(h)\n\n\tl.Info(\"str message1\")\n\tassert.NotEmpty(t, rArgs)\n\n\trFmt = \"\"\n\trArgs = nil\n\tl.Infof(\"fmt %s\", \"message2\")\n\tassert.NotEmpty(t, rFmt)\n\tassert.NotEmpty(t, rArgs)\n\n\tl.WithField(\"key\", \"value\").Info(\"field message3\")\n\n\ts := h.ResetGet()\n\tfmt.Println(s)\n\tassert.StrContains(t, s, \"str message1\")\n\tassert.StrContains(t, s, \"fmt message2\")\n\tassert.StrContains(t, s, \"field message3\")\n\tassert.StrContains(t, s, \"UN-CONFIGURED FIELDS: {key:value}\")\n}\n\nfunc TestLogger_FlushTimeout(t *testing.T) {\n\th := newTestHandler()\n\tl := 
slog.NewWithHandlers(h)\n\n\t// test flush error\n\th.errOnFlush = true\n\tl.FlushTimeout(time.Millisecond * 2)\n\n\t// test flush timeout\n\th.errOnFlush = false\n\th.callOnFlush = func() {\n\t\ttime.Sleep(time.Millisecond * 25)\n\t}\n\tl.FlushTimeout(time.Millisecond * 20)\n\n\tassert.Panics(t, func() {\n\t\tl.StopDaemon()\n\t})\n}\n\nfunc TestLogger_rewrite_record(t *testing.T) {\n\th := newTestHandler()\n\tl := slog.NewWithHandlers(h)\n\n\tt.Run(\"Record rewrite\", func(t *testing.T) {\n\t\tr := l.Record()\n\t\tr.Info(\"a message1\")\n\t\tfmt.Printf(\"%+v\\n\", r)\n\n\t\ttime.Sleep(time.Millisecond * 2)\n\t\tr.Warn(\"a message2\")\n\t\tfmt.Printf(\"%+v\\n\", r)\n\n\t\ttime.Sleep(time.Millisecond * 2)\n\t\tr.Warn(\"a message3\")\n\t\tfmt.Printf(\"%+v\\n\", r)\n\n\t\tr.Release()\n\t\tdump.P(h.ResetGet())\n\t})\n\n\tt.Run(\"Reused rewrite\", func(t *testing.T) {\n\t\tr := l.Reused()\n\t\tr.Info(\"A message1\")\n\t\tfmt.Printf(\"%+v\\n\", r)\n\n\t\ttime.Sleep(time.Millisecond * 2)\n\t\tr.Warn(\"A message2\")\n\t\tfmt.Printf(\"%+v\\n\", r)\n\n\t\tr.Release()\n\t\tdump.P(h.ResetGet())\n\t})\n}\n\nfunc TestLogger_Sub(t *testing.T) {\n\th := newTestHandler()\n\n\tl := slog.NewWithHandlers(h)\n\tl.DoNothingOnPanicFatal()\n\tl.AddProcessor(slog.CtxKeysProcessor(\"extra\", \"ctx1\"))\n\n\tsub := l.NewSub().\n\t\tKeepData(slog.M{\"data1\": \"data1-value\"}).\n\t\tKeepExtra(slog.M{\"ext1\": \"ext1-value\"}).\n\t\tKeepFields(slog.M{\"field1\": \"field1-value\"}).\n\t\tKeepCtx(context.WithValue(context.Background(), \"ctx1\", \"ctx1-value\"))\n\n\tassert.ContainsKey(t, sub.Data, \"data1\")\n\tassert.ContainsKey(t, sub.Extra, \"ext1\")\n\tassert.ContainsKey(t, sub.Fields, \"field1\")\n\tassert.Eq(t, \"ctx1-value\", sub.Ctx.Value(\"ctx1\"))\n\n\tt.Run(\"normal\", func(t *testing.T) {\n\t\tsub.Print(\"A message\", \"test\")\n\t\tsub.Trace(\"A message\", \"test\")\n\t\tsub.Debug(\"A message\", \"test\")\n\t\tsub.Info(\"A message\", \"test\")\n\t\tsub.Notice(\"A message\", 
\"test\")\n\t\tsub.Warn(\"A message\", \"test\")\n\t\tsub.Error(\"A message\", \"test\")\n\t\tsub.Fatal(\"A message\", \"test\")\n\t\tsub.Panic(\"A message\", \"test\")\n\n\t\ts := h.ResetGet()\n\t\tassert.StrContains(t, s, \"ctx1-value\")\n\t\tassert.StrContains(t, s, \"ext1-value\")\n\t\tfor _, level := range slog.AllLevels {\n\t\t\tassert.StrContains(t, s, level.Name())\n\t\t}\n\t})\n\n\tt.Run(\"formated\", func(t *testing.T) {\n\t\tsub.Printf(\"A message %s\", \"test\")\n\t\tsub.Tracef(\"A message %s\", \"test\")\n\t\tsub.Debugf(\"A message %s\", \"test\")\n\t\tsub.Infof(\"A message %s\", \"test\")\n\t\tsub.Noticef(\"A message %s\", \"test\")\n\t\tsub.Warnf(\"A message %s\", \"test\")\n\t\tsub.Errorf(\"A message %s\", \"test\")\n\t\tsub.Panicf(\"A message %s\", \"test\")\n\t\tsub.Fatalf(\"A message %s\", \"test\")\n\n\t\ts := h.ResetGet()\n\t\tassert.StrContains(t, s, \"ctx1-value\")\n\t\tassert.StrContains(t, s, \"ext1-value\")\n\t\tfor _, level := range slog.AllLevels {\n\t\t\tassert.StrContains(t, s, level.Name())\n\t\t}\n\t})\n\n\t// Release\n\tsub.Release()\n\tassert.Nil(t, sub.Ctx)\n\tassert.Nil(t, sub.Data)\n\tassert.Nil(t, sub.Extra)\n\tassert.Nil(t, sub.Fields)\n}"
  },
  {
    "path": "logger_write.go",
    "content": "package slog\n\n//\n// ---------------------------------------------------------------------------\n// Do write log message\n// ---------------------------------------------------------------------------\n//\n\n// func (r *Record) logWrite(level Level) {\n// Will reduce memory allocation once\n// r.Message = strutil.Byte2str(message)\n\n// var buf *bytes.Buffer\n// buf = bufferPool.Get().(*bytes.Buffer)\n// defer bufferPool.Put(buf)\n// r.Buffer = buf\n\n// TODO release on here ??\n// defer r.logger.releaseRecord(r)\n// r.logger.writeRecord(level, r)\n// r.Buffer = nil\n// }\n\n// Init something for record(eg: time, level name).\nfunc (r *Record) Init(lowerLevelName bool) {\n\tr.inited = true\n\n\t// use lower level name\n\tif lowerLevelName {\n\t\tr.levelName = r.Level.LowerName()\n\t} else {\n\t\tr.levelName = r.Level.Name()\n\t}\n\n\t// init log time\n\tif r.Time.IsZero() {\n\t\tr.Time = r.logger.TimeClock.Now()\n\t}\n\n\t// r.microSecond = r.Time.Nanosecond() / 1000\n}\n\n// Init something for record.\nfunc (r *Record) beforeHandle(l *Logger) {\n\t// log caller. 
will alloc 3 times\n\tif l.ReportCaller {\n\t\tcaller, ok := getCaller(r.CallerSkip)\n\t\tif ok {\n\t\t\tr.Caller = &caller\n\t\t}\n\t}\n\n\t// processing log record\n\tfor i := range l.processors {\n\t\tl.processors[i].Process(r)\n\t}\n}\n\n// do write record to handlers, will add lock.\nfunc (l *Logger) writeRecord(level Level, r *Record) {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\t// reset init flag, useful for repeat use Record\n\tr.inited = false\n\n\tfor _, handler := range l.handlers {\n\t\tif handler.IsHandling(level) {\n\t\t\t// init record, call processors\n\t\t\tif !r.inited {\n\t\t\t\tr.Init(l.LowerLevelName)\n\t\t\t\tr.beforeHandle(l)\n\t\t\t}\n\n\t\t\t// do write a log message by handler\n\t\t\tif err := handler.Handle(r); err != nil {\n\t\t\t\tl.err = err\n\t\t\t\tprintStderr(\"slog: failed to handle log, error:\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// ---- after write log ----\n\tr.Time = emptyTime\n\n\t// flush logs on level <= error level.\n\tif level <= ErrorLevel {\n\t\tl.flushAll() // has been in lock\n\t}\n\n\tif level <= PanicLevel {\n\t\tl.PanicFunc(r)\n\t} else if level <= FatalLevel {\n\t\tl.Exit(1)\n\t}\n}\n"
  },
  {
    "path": "processor.go",
    "content": "package slog\n\nimport (\n\t\"crypto/md5\"\n\t\"encoding/hex\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com/gookit/goutil/strutil\"\n)\n\n//\n// Processor interface\n//\n\n// Processor interface definition\ntype Processor interface {\n\t// Process record\n\tProcess(record *Record)\n}\n\n// ProcessorFunc wrapper definition\ntype ProcessorFunc func(record *Record)\n\n// Process record\nfunc (fn ProcessorFunc) Process(record *Record) {\n\tfn(record)\n}\n\n// ProcessableHandler interface\ntype ProcessableHandler interface {\n\t// AddProcessor add a processor\n\tAddProcessor(Processor)\n\t// ProcessRecord handle a record\n\tProcessRecord(record *Record)\n}\n\n// Processable definition\ntype Processable struct {\n\tprocessors []Processor\n}\n\n// AddProcessor to the handler\nfunc (p *Processable) AddProcessor(processor Processor) {\n\tp.processors = append(p.processors, processor)\n}\n\n// ProcessRecord process record\nfunc (p *Processable) ProcessRecord(r *Record) {\n\t// processing log record\n\tfor _, processor := range p.processors {\n\t\tprocessor.Process(r)\n\t}\n}\n\n//\n// there are some built-in processors\n//\n\n// AddHostname to record\nfunc AddHostname() Processor {\n\thostname, _ := os.Hostname()\n\treturn ProcessorFunc(func(record *Record) {\n\t\trecord.AddField(\"hostname\", hostname)\n\t})\n}\n\n// AddUniqueID to record\nfunc AddUniqueID(fieldName string) Processor {\n\ths := md5.New()\n\n\treturn ProcessorFunc(func(record *Record) {\n\t\trb, _ := strutil.RandomBytes(32)\n\t\ths.Write(rb)\n\t\trandomID := hex.EncodeToString(hs.Sum(nil))\n\t\ths.Reset()\n\n\t\trecord.AddField(fieldName, randomID)\n\t})\n}\n\n// MemoryUsage get memory usage.\nvar MemoryUsage ProcessorFunc = func(record *Record) {\n\tstat := new(runtime.MemStats)\n\truntime.ReadMemStats(stat)\n\trecord.SetExtraValue(\"memoryUsage\", stat.Alloc)\n}\n\n// AppendCtxKeys append context keys to Record.Fields\nfunc AppendCtxKeys(keys ...string) Processor {\n\treturn 
ProcessorFunc(func(record *Record) {\n\t\tif record.Ctx == nil {\n\t\t\treturn\n\t\t}\n\n\t\tfor _, key := range keys {\n\t\t\tif val := record.Ctx.Value(key); val != nil {\n\t\t\t\trecord.AddField(key, val)\n\t\t\t}\n\t\t}\n\t})\n}\n\n// CtxKeysProcessor append context keys to Record.Data, Record.Fields, Record.Extra\n//  - dist: \"data\" | \"fields\" | \"extra\"\nfunc CtxKeysProcessor(dist string, keys ...string) Processor {\n\treturn ProcessorFunc(func(r *Record) {\n\t\tif r.Ctx == nil {\n\t\t\treturn\n\t\t}\n\n\t\tkvMap := map[string]any{}\n\t\tfor _, key := range keys {\n\t\t\tif val := r.Ctx.Value(key); val != nil {\n\t\t\t\tkvMap[key] = val\n\t\t\t}\n\t\t}\n\n\t\tswitch dist {\n\t\tcase \"field\", \"fields\":\n\t\t\tr.AddFields(kvMap)\n\t\tcase \"ext\", \"extra\":\n\t\t\tr.AddExtra(kvMap)\n\t\tdefault:\n\t\t\tr.AddData(kvMap)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "processor_test.go",
    "content": "package slog_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/byteutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog\"\n)\n\nfunc TestLogger_AddProcessor(t *testing.T) {\n\tbuf := new(byteutil.Buffer)\n\n\tl := slog.NewJSONSugared(buf, slog.InfoLevel)\n\tl.AddProcessor(slog.AddHostname())\n\tl.Info(\"message\")\n\n\thostname, _ := os.Hostname()\n\n\t// {\"channel\":\"application\",\"data\":{},\"datetime\":\"2020/07/17 12:01:35\",\"extra\":{},\"hostname\":\"InhereMac\",\"level\":\"INFO\",\"message\":\"message\"}\n\tstr := buf.String()\n\tbuf.Reset()\n\tassert.Contains(t, str, `\"level\":\"INFO\"`)\n\tassert.Contains(t, str, `\"message\":\"message\"`)\n\tassert.Contains(t, str, fmt.Sprintf(`\"hostname\":\"%s\"`, hostname))\n\n\tl.ResetProcessors()\n\tl.PushProcessor(slog.MemoryUsage)\n\tl.Info(\"message2\")\n\n\t// {\"channel\":\"application\",\"data\":{},\"datetime\":\"2020/07/16 16:40:18\",\"extra\":{\"memoryUsage\":326072},\"level\":\"INFO\",\"message\":\"message2\"}\n\tstr = buf.String()\n\tbuf.Reset()\n\tassert.Contains(t, str, `\"message\":\"message2\"`)\n\tassert.Contains(t, str, `\"memoryUsage\":`)\n\n\tl.ResetProcessors()\n\tl.SetProcessors([]slog.Processor{slog.AddUniqueID(\"requestId\")})\n\tl.Info(\"message3\")\n\tstr = buf.String()\n\tbuf.Reset()\n\tassert.Contains(t, str, `\"message\":\"message3\"`)\n\tassert.Contains(t, str, `\"requestId\":`)\n\tfmt.Print(str)\n\n\tl.ResetProcessors()\n\tl.AddProcessors(slog.AppendCtxKeys(\"traceId\", \"userId\"))\n\tl.Info(\"message4\")\n\tstr = buf.ResetAndGet()\n\tfmt.Print(str)\n\tassert.Contains(t, str, `\"message\":\"message4\"`)\n\tassert.NotContains(t, str, `\"traceId\"`)\n\n\tctx := context.WithValue(context.Background(), \"traceId\", \"traceId123abc456\")\n\tl.WithCtx(ctx).Info(\"message5\")\n\tstr = buf.ResetAndGet()\n\tfmt.Print(str)\n\tassert.Contains(t, str, `\"message\":\"message5\"`)\n\tassert.Contains(t, 
str, `\"traceId\":\"traceId123abc456\"`)\n}\n\nfunc TestCtxKeysProcessor(t *testing.T) {\n\t// CtxKeysProcessor\n\ttr := newLogRecord(\"test message\")\n\ttr.Extra = map[string]any{} // reset\n\tprocFn := slog.CtxKeysProcessor(\"ext\", \"traceId\")\n\tprocFn.Process(tr)\n\tassert.Empty(t, tr.Extra)\n\n\tctx := context.WithValue(context.Background(), \"traceId\", \"traceId123abc456\")\n\ttr = tr.WithCtx(ctx)\n\tprocFn.Process(tr)\n\tassert.Equal(t, \"traceId123abc456\", tr.Extra[\"traceId\"])\n}\n\nfunc TestProcessable_AddProcessor(t *testing.T) {\n\tps := &slog.Processable{}\n\tps.AddProcessor(slog.MemoryUsage)\n\n\tr := newLogRecord(\"error message\")\n\tps.ProcessRecord(r)\n\n\tassert.NotEmpty(t, r.Extra)\n\tassert.Contains(t, r.Extra, \"memoryUsage\")\n}\n"
  },
  {
    "path": "record.go",
    "content": "package slog\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/strutil\"\n)\n\n// Record a log record definition\ntype Record struct {\n\tlogger *Logger\n\t// reuse flag. for reuse a Record, will not be released on after writing.\n\t// release the record need call Release() method.\n\t//\n\t// Reuse field: Ctx, Data, Fields, Extra\n\t//\n\t// NOTE, if you reuse a record, you must call Reused() method.\n\treuse bool\n\t// Mark whether the current record is released to the pool. TODO\n\tfreed bool\n\t// inited flag for record\n\tinited bool\n\n\t// Time for record log, if is empty will use now.\n\t//\n\t// TIP: Will be emptied after each use (write)\n\tTime time.Time\n\t// Level log level for record\n\tLevel Level\n\t// level name cache from Level\n\tlevelName string\n\n\t// Channel log channel name. eg: \"order\", \"goods\", \"user\"\n\tChannel string\n\tMessage string\n\n\t// Ctx context.Context\n\tCtx context.Context\n\n\t// Fields custom fields data.\n\t// Contains all the fields set by the user.\n\tFields M\n\t// Data log context data\n\tData M\n\t// Extra log extra data\n\tExtra M\n\n\t// Caller information\n\tCaller *runtime.Frame\n\t// CallerFlag value. default is equals to Logger.CallerFlag\n\tCallerFlag uint8\n\t// CallerSkip value. default is equals to Logger.CallerSkip\n\tCallerSkip int\n\t// EnableStack enable stack info, default is false. TODO\n\tEnableStack bool\n\n\t// Buffer Can use Buffer on formatter\n\t// Buffer *bytes.Buffer\n\n\t// log input args backups, from log() and logf(). 
its dont use in formatter.\n\tFmt  string\n\tArgs []any\n}\n\nfunc newRecord(logger *Logger) *Record {\n\treturn &Record{\n\t\tlogger:  logger,\n\t\tChannel: strutil.OrElse(logger.ChannelName, DefaultChannelName),\n\t\t// with some options\n\t\tCallerFlag: logger.CallerFlag,\n\t\tCallerSkip: logger.CallerSkip,\n\t\t// init map data field\n\t\t// Data:   make(M, 2),\n\t\t// Extra:  make(M, 0),\n\t\t// Fields: make(M, 0),\n\t}\n}\n\n// Reused set record is reused, will not be released on after writing.\n// so, MUST call Release() method after use completed.\nfunc (r *Record) Reused() *Record {\n\tr.reuse = true\n\treturn r\n}\n\n// Release manual release record to pool\nfunc (r *Record) Release() {\n\tif r.reuse {\n\t\tr.reuse = false\n\t\tr.logger.releaseRecord(r)\n\t}\n}\n\n//\n// ---------------------------------------------------------------------------\n// Copy record with something\n// ---------------------------------------------------------------------------\n//\n\n// WithTime set the record time\nfunc (r *Record) WithTime(t time.Time) *Record {\n\tnr := r.Copy()\n\tnr.Time = t\n\treturn nr\n}\n\n// WithCtx on record\nfunc (r *Record) WithCtx(ctx context.Context) *Record { return r.WithContext(ctx) }\n\n// WithContext on record\nfunc (r *Record) WithContext(ctx context.Context) *Record {\n\tnr := r.Copy()\n\tnr.Ctx = ctx\n\treturn nr\n}\n\n// WithError on record\nfunc (r *Record) WithError(err error) *Record {\n\treturn r.WithFields(M{FieldKeyError: err})\n}\n\n// WithData on record\nfunc (r *Record) WithData(data M) *Record {\n\tnr := r.Copy()\n\tnr.Data = data\n\treturn nr\n}\n\n// WithField with a new field to record\n//\n// Note: add field need config Formatter template fields.\nfunc (r *Record) WithField(name string, val any) *Record {\n\treturn r.WithFields(M{name: val})\n}\n\n// WithFields with new fields to record\n//\n// Note: add field need config Formatter template fields.\nfunc (r *Record) WithFields(fields M) *Record {\n\tnr := r.Copy()\n\tif 
nr.Fields == nil {\n\t\tnr.Fields = make(M, len(fields))\n\t}\n\n\tfor k, v := range fields {\n\t\tnr.Fields[k] = v\n\t}\n\treturn nr\n}\n\n// Copy new record from old record\nfunc (r *Record) Copy() *Record {\n\tdataCopy := make(M, len(r.Data))\n\tfor k, v := range r.Data {\n\t\tdataCopy[k] = v\n\t}\n\n\tfieldsCopy := make(M, len(r.Fields))\n\tfor k, v := range r.Fields {\n\t\tfieldsCopy[k] = v\n\t}\n\n\textraCopy := make(M, len(r.Extra))\n\tfor k, v := range r.Extra {\n\t\textraCopy[k] = v\n\t}\n\n\treturn &Record{\n\t\t// reuse: true, // copy record is reused\n\t\tlogger:  r.logger,\n\t\tChannel: r.Channel,\n\t\t// Time:       r.Time,\n\t\tLevel:      r.Level,\n\t\tlevelName:  r.levelName,\n\t\tCallerFlag: r.CallerFlag,\n\t\tCallerSkip: r.CallerSkip,\n\t\tMessage:    r.Message,\n\t\tData:       dataCopy,\n\t\tExtra:      extraCopy,\n\t\tFields:     fieldsCopy,\n\t}\n}\n\n//\n// ---------------------------------------------------------------------------\n// Direct set value to record\n// ---------------------------------------------------------------------------\n//\n\n// SetCtx on record\nfunc (r *Record) SetCtx(ctx context.Context) *Record { return r.SetContext(ctx) }\n\n// SetContext on record\nfunc (r *Record) SetContext(ctx context.Context) *Record {\n\tr.Ctx = ctx\n\treturn r\n}\n\n// SetData on record\nfunc (r *Record) SetData(data M) *Record {\n\tr.Data = data\n\treturn r\n}\n\n// AddData on record\nfunc (r *Record) AddData(data M) *Record {\n\tif r.Data == nil {\n\t\tr.Data = data\n\t\treturn r\n\t}\n\n\tfor k, v := range data {\n\t\tr.Data[k] = v\n\t}\n\treturn r\n}\n\n// WithValue add Data value to record. 
alias of AddValue\nfunc (r *Record) WithValue(key string, value any) *Record {\n\treturn r.AddValue(key, value)\n}\n\n// AddValue add Data value to record\nfunc (r *Record) AddValue(key string, value any) *Record {\n\tif r.Data == nil {\n\t\tr.Data = make(M, 8)\n\t}\n\n\tr.Data[key] = value\n\treturn r\n}\n\n// Value get Data value from record\nfunc (r *Record) Value(key string) any {\n\tif r.Data == nil {\n\t\treturn nil\n\t}\n\treturn r.Data[key]\n}\n\n// SetExtra information on record\nfunc (r *Record) SetExtra(data M) *Record {\n\tr.Extra = data\n\treturn r\n}\n\n// AddExtra information on record\nfunc (r *Record) AddExtra(data M) *Record {\n\tif r.Extra == nil {\n\t\tr.Extra = data\n\t\treturn r\n\t}\n\n\tfor k, v := range data {\n\t\tr.Extra[k] = v\n\t}\n\treturn r\n}\n\n// SetExtraValue on record\nfunc (r *Record) SetExtraValue(k string, v any) {\n\tif r.Extra == nil {\n\t\tr.Extra = make(M, 8)\n\t}\n\tr.Extra[k] = v\n}\n\n// SetTime on record\nfunc (r *Record) SetTime(t time.Time) *Record {\n\tr.Time = t\n\treturn r\n}\n\n// AddField add new field to the record\nfunc (r *Record) AddField(name string, val any) *Record {\n\tif r.Fields == nil {\n\t\tr.Fields = make(M, 8)\n\t}\n\n\tr.Fields[name] = val\n\treturn r\n}\n\n// AddFields add new fields to the record\nfunc (r *Record) AddFields(fields M) *Record {\n\tif r.Fields == nil {\n\t\tr.Fields = fields\n\t\treturn r\n\t}\n\n\tfor n, v := range fields {\n\t\tr.Fields[n] = v\n\t}\n\treturn r\n}\n\n// SetFields to the record\nfunc (r *Record) SetFields(fields M) *Record {\n\tr.Fields = fields\n\treturn r\n}\n\n// Field value gets from record\nfunc (r *Record) Field(key string) any {\n\tif r.Fields == nil {\n\t\treturn nil\n\t}\n\treturn r.Fields[key]\n}\n\n//\n// ---------------------------------------------------------------------------\n// Add log message with builder\n// TODO r.Build(InfoLevel).Str().Int().Float().Msg()\n// ---------------------------------------------------------------------------\n//\n\n// 
Object data on record TODO optimize performance\n// func (r *Record) Obj(obj fmt.Stringer) *Record {\n// \tr.Data = ctx\n// \treturn r\n// }\n\n// Object data on record TODO optimize performance\n// func (r *Record) Any(v any) *Record {\n// \tr.Data = ctx\n// \treturn r\n// }\n\n// func (r *Record) Str(message string) {\n// \tr.logWrite(level, []byte(message))\n// }\n\n// func (r *Record) Int(val int) {\n// \tr.logWrite(level, []byte(message))\n// }\n\n//\n// ---------------------------------------------------------------------------\n// Add log message with level\n// ---------------------------------------------------------------------------\n//\n\nfunc (r *Record) log(level Level, args []any) {\n\tr.Level = level\n\tif r.logger.BackupArgs {\n\t\tr.Args = args\n\t}\n\n\t// r.Message = strutil.Byte2str(formatArgsWithSpaces(args)) // will reduce memory allocation once\n\tr.Message = formatArgsWithSpaces(args)\n\t// do write log, then release record\n\tr.logger.writeRecord(level, r)\n\tr.logger.releaseRecord(r)\n}\n\nfunc (r *Record) logf(level Level, format string, args []any) {\n\tif r.logger.BackupArgs {\n\t\tr.Fmt, r.Args = format, args\n\t}\n\n\tr.Level = level\n\tr.Message = fmt.Sprintf(format, args...)\n\t// do write log, then release record\n\tr.logger.writeRecord(level, r)\n\tr.logger.releaseRecord(r)\n}\n\n// Log a message with level\nfunc (r *Record) Log(level Level, args ...any) { r.log(level, args) }\n\n// Logf a message with level\nfunc (r *Record) Logf(level Level, format string, args ...any) {\n\tr.logf(level, format, args)\n}\n\n// Info logs a message at level Info\nfunc (r *Record) Info(args ...any) { r.log(InfoLevel, args) }\n\n// Infof logs a message at level Info\nfunc (r *Record) Infof(format string, args ...any) {\n\tr.logf(InfoLevel, format, args)\n}\n\n// Trace logs a message at level Trace\nfunc (r *Record) Trace(args ...any) { r.log(TraceLevel, args) }\n\n// Tracef logs a message at level Trace\nfunc (r *Record) Tracef(format string, args 
...any) {\n\tr.logf(TraceLevel, format, args)\n}\n\n// Error logs a message at level Error\nfunc (r *Record) Error(args ...any) { r.log(ErrorLevel, args) }\n\n// Errorf logs a message at level Error\nfunc (r *Record) Errorf(format string, args ...any) {\n\tr.logf(ErrorLevel, format, args)\n}\n\n// Warn logs a message at level Warn\nfunc (r *Record) Warn(args ...any) { r.log(WarnLevel, args) }\n\n// Warnf logs a message at level Warn\nfunc (r *Record) Warnf(format string, args ...any) {\n\tr.logf(WarnLevel, format, args)\n}\n\n// Notice logs a message at level Notice\nfunc (r *Record) Notice(args ...any) { r.log(NoticeLevel, args) }\n\n// Noticef logs a message at level Notice\nfunc (r *Record) Noticef(format string, args ...any) {\n\tr.logf(NoticeLevel, format, args)\n}\n\n// Debug logs a message at level Debug\nfunc (r *Record) Debug(args ...any) { r.log(DebugLevel, args) }\n\n// Debugf logs a message at level Debug\nfunc (r *Record) Debugf(format string, args ...any) {\n\tr.logf(DebugLevel, format, args)\n}\n\n// Print logs a message at level Print\nfunc (r *Record) Print(args ...any) { r.log(PrintLevel, args) }\n\n// Println logs a message at level Print. 
alias of Print\nfunc (r *Record) Println(args ...any) { r.log(PrintLevel, args) }\n\n// Printf logs a message at level Print\nfunc (r *Record) Printf(format string, args ...any) {\n\tr.logf(PrintLevel, format, args)\n}\n\n// Fatal logs a message at level Fatal\nfunc (r *Record) Fatal(args ...any) { r.log(FatalLevel, args) }\n\n// Fatalln logs a message at level Fatal\nfunc (r *Record) Fatalln(args ...any) { r.log(FatalLevel, args) }\n\n// Fatalf logs a message at level Fatal\nfunc (r *Record) Fatalf(format string, args ...any) {\n\tr.logf(FatalLevel, format, args)\n}\n\n// Panic logs a message at level Panic\nfunc (r *Record) Panic(args ...any) { r.log(PanicLevel, args) }\n\n// Panicln logs a message at level Panic\nfunc (r *Record) Panicln(args ...any) { r.log(PanicLevel, args) }\n\n// Panicf logs a message at level Panic\nfunc (r *Record) Panicf(format string, args ...any) {\n\tr.logf(PanicLevel, format, args)\n}\n\n// ---------------------------------------------------------------------------\n// helper methods\n// ---------------------------------------------------------------------------\n\n// LevelName get\nfunc (r *Record) LevelName() string { return r.levelName }\n\n// GoString of the record\nfunc (r *Record) GoString() string {\n\treturn \"slog: \" + r.Message\n}\n\nfunc (r *Record) timestamp() string {\n\ts := strconv.FormatInt(r.Time.UnixMicro(), 10)\n\treturn s[:10] + \".\" + s[10:]\n}\n"
  },
  {
    "path": "record_test.go",
    "content": "package slog_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/byteutil\"\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/timex\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nfunc TestRecord_AddData(t *testing.T) {\n\tw := newBuffer()\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.DoNothingOnPanicFatal()\n\t\tl.CallerFlag = slog.CallerFlagFull\n\t})\n\tl.SetHandlers([]slog.Handler{\n\t\thandler.NewIOWriter(w, slog.AllLevels),\n\t})\n\n\tr := l.Record()\n\n\t// - add data\n\tr.AddData(testData1).Trace(\"log message with data\")\n\ts := w.StringReset()\n\tfmt.Print(s)\n\n\tassert.Contains(t, s, \"slog_test.TestRecord_AddData\")\n\tassert.Contains(t, s, \"log message with data\")\n\n\tr.AddData(slog.M{\"key01\": \"val01\"}).Print(\"log message add data2\")\n\ts = w.StringReset()\n\tfmt.Print(s)\n\tassert.Contains(t, s, \"log message add data2\")\n\tassert.Contains(t, s, \"key01:val01\")\n\tassert.Eq(t, \"val01\", r.Value(\"key01\"))\n\n\t// - add value\n\tr.AddValue(\"key01\", \"val02\").Println(\"log message add value\")\n\ts = w.StringReset()\n\tfmt.Print(s)\n\tassert.Contains(t, s, \"log message add value\")\n\tassert.Contains(t, s, \"key01:val02\")\n\t// - first add value\n\tnr := &slog.Record{}\n\tassert.Nil(t, nr.Value(\"key01\"))\n\tnr.WithValue(\"key01\", \"val02\")\n\tassert.Eq(t, \"val02\", nr.Value(\"key01\"))\n\n\t// -with data\n\tr.CallerFlag = slog.CallerFlagFcName\n\tr.WithData(slog.M{\"key1\": \"val1\"}).Warn(\"warn message with data\")\n\ts = w.StringReset()\n\tfmt.Print(s)\n\n\tassert.Contains(t, s, \"TestRecord_AddData\")\n\tassert.Contains(t, s, \"warn message with data\")\n\tassert.Contains(t, s, \"{key1:val1}\")\n}\n\nfunc TestRecord_AddExtra(t *testing.T) {\n\tw := newBuffer()\n\tl := slog.NewWithConfig(func(l *slog.Logger) 
{\n\t\tl.DoNothingOnPanicFatal()\n\t\tl.CallerFlag = slog.CallerFlagFcName\n\t})\n\tl.SetHandlers([]slog.Handler{\n\t\thandler.NewIOWriter(w, slog.AllLevels),\n\t})\n\n\tr := l.Record()\n\n\tr.AddExtra(testData1).Trace(\"log message and add extra\")\n\ts := w.StringReset()\n\tfmt.Print(s)\n\tassert.Contains(t, s, \"TestRecord_AddExtra\")\n\tassert.Contains(t, s, \"log message and add extra\")\n\tassert.Contains(t, s, \"key0:val0\")\n\n\tr.AddExtra(slog.M{\"key002\": \"val002\"}).AddExtra(slog.M{\"key01\": \"val01\"}).\n\t\tTrace(\"log message and add extra2\")\n\ts = w.StringReset()\n\tfmt.Print(s)\n\tassert.Contains(t, s, \"log message and add extra2\")\n\tassert.Contains(t, s, \"TestRecord_AddExtra\")\n\tassert.Contains(t, s, \"key002:val002\")\n}\n\nfunc TestRecord_SetContext(t *testing.T) {\n\tw := newBuffer()\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.DoNothingOnPanicFatal()\n\t}).Config(func(l *slog.Logger) {\n\t\tl.CallerFlag = slog.CallerFlagPkg\n\t})\n\tl.SetHandlers([]slog.Handler{\n\t\thandler.NewIOWriter(w, slog.AllLevels),\n\t})\n\n\tr := l.Record()\n\tr.SetCtx(context.Background()).Info(\"info message\")\n\tr.WithCtx(context.Background()).Debug(\"debug message\")\n\n\ts := w.StringReset()\n\tfmt.Print(s)\n\tassert.Contains(t, s, \"github.com/gookit/slog_test\")\n}\n\nfunc TestRecord_WithError(t *testing.T) {\n\tw := newBuffer()\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.CallerFlag = slog.CallerFlagFunc\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\th := handler.NewIOWriter(w, slog.AllLevels)\n\th.SetFormatter(slog.NewTextFormatter(\"ts={{timestamp}} err={{error}} msg={{message}}\\n\"))\n\tl.SetHandlers([]slog.Handler{h})\n\n\tr := l.Record()\n\tr.WithError(errorx.Raw(\"error message\")).Notice(\"test record with error\")\n\n\ts := w.StringReset()\n\tassert.Contains(t, s, \"err=error message\")\n\tassert.Contains(t, s, \"msg=test record with error\")\n\tfmt.Print(s)\n}\n\nfunc TestRecord_WithTime(t *testing.T) {\n\tw, l := 
newTestLogger()\n\tht := timex.NowHourStart()\n\n\tr := l.Record()\n\tr.WithTime(ht).Notice(\"a message with time\")\n\ts := w.StringReset()\n\n\tassert.Contains(t, s, \"a message with time\")\n\tassert.Contains(t, s, timex.FormatByTpl(ht, slog.DefaultTimeFormat))\n\tfmt.Print(s)\n}\n\nfunc TestRecord_AddFields(t *testing.T) {\n\tr := newLogRecord(\"AddFields\")\n\n\tr.AddFields(slog.M{\"f1\": \"hi\", \"env\": \"prod\"})\n\tassert.NotEmpty(t, r.Fields)\n\n\tr.AddFields(slog.M{\"app\": \"goods\"})\n\tassert.NotEmpty(t, r.Fields)\n\n\t// WithFields\n\tr = r.WithFields(slog.M{\"f2\": \"v2\"})\n\tassert.Eq(t, \"v2\", r.Field(\"f2\"))\n\n\t// - first add field\n\tnr := slog.Record{}\n\tassert.Nil(t, nr.Field(\"f3\"))\n\tnr.AddField(\"f3\", \"val02\")\n\tassert.Eq(t, \"val02\", nr.Field(\"f3\"))\n}\n\nfunc TestRecord_WithFields(t *testing.T) {\n\tw, l := newTestLogger()\n\tr := l.Record().\n\t\tWithFields(slog.M{\"key1\": \"value1\", \"key2\": \"value2\"}).\n\t\tWithFields(slog.M{\"key3\": \"value3\"})\n\tassert.Eq(t, \"value1\", r.Field(\"key1\"))\n\tassert.Eq(t, \"value2\", r.Field(\"key2\"))\n\tassert.Eq(t, \"value3\", r.Field(\"key3\"))\n\n\tr.Info(\"log message with fields\")\n\ts := w.StringReset()\n\tfmt.Print(s)\n\n\tassert.Contains(t, s, \"log message with fields\")\n}\n\nfunc TestRecord_SetFields(t *testing.T) {\n\tr := newLogRecord(\"AddFields\")\n\n\tr.SetTime(timex.Now().Yesterday().T())\n\tr.SetFields(slog.M{\"f1\": \"hi\", \"env\": \"prod\"})\n\tassert.NotEmpty(t, r.Fields)\n\tassert.NotEmpty(t, r.Time)\n}\n\nfunc TestRecord_allLevel(t *testing.T) {\n\tw := newBuffer()\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\tl.SetHandlers([]slog.Handler{\n\t\thandler.NewIOWriter(w, slog.AllLevels),\n\t})\n\n\tr := l.Record()\n\tr = r.WithContext(context.Background())\n\tprintAllLevelLogs(r, \"a message use record.XX()\")\n\tr.Log(slog.InfoLevel, \"a message use record.XX()\")\n\tr.Notice(\"a message use 
record.XX()\")\n\tr.Trace(\"a message use record.XX()\")\n\n\ts := w.StringReset()\n\tassert.Contains(t, s, \"printAllLevelLogs\")\n\tassert.Contains(t, s, \"a message use record.XX()\")\n\tassert.Contains(t, s, \"[NOTICE]\")\n\tassert.Contains(t, s, \"[TRACE]\")\n\n\tprintfAllLevelLogs(r, \"a message use %s()\", \"record.XXf\")\n\tr.Logf(slog.InfoLevel, \"a message use %s()\", \"record.XXf\")\n\tr.Noticef(\"a message use %s()\", \"record.XXf\")\n\tr.Tracef(\"a message use %s()\", \"record.XXf\")\n\n\ts = w.StringReset()\n\tassert.Contains(t, s, \"printfAllLevelLogs\")\n\tassert.Contains(t, s, \"a message use record.XXf()\")\n\tassert.Contains(t, s, \"[NOTICE]\")\n\tassert.Contains(t, s, \"[TRACE]\")\n}\n\nfunc TestRecord_useMultiTimes(t *testing.T) {\n\tbuf := byteutil.NewBuffer()\n\tl := slog.NewWithHandlers(\n\t\thandler.NewSimple(buf, slog.DebugLevel),\n\t\thandler.NewSimple(os.Stdout, slog.DebugLevel),\n\t)\n\n\tr := l.Record()\n\tt.Run(\"simple\", func(t *testing.T) {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tr.Error(\"simple error log\", i)\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t}\n\t})\n\n\t// test concurrent write\n\tt.Run(\"concurrent\", func(t *testing.T) {\n\t\twg := sync.WaitGroup{}\n\t\tfor i := 0; i < 100; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(i int) {\n\t\t\t\tr.Error(\"concurrent error log\", i)\n\t\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t\t\twg.Done()\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\t})\n}\n"
  },
  {
    "path": "rotatefile/README.md",
    "content": "# Rotate File\n\n `rotatefile` provides simple file rotation, compression and cleanup.\n\n## Features\n\n- Rotate file by size and time\n  - Custom filename for rotate file by size\n  - Custom time clock for rotate\n  - Custom file perm for create log file\n  - Custom rotate mode: create, rename\n- Compress rotated file\n- Cleanup old files\n\n## Install\n\n```bash\ngo get github.com/gookit/slog/rotatefile\n```\n\n## Usage\n\n### Create a file writer\n\n```go\nlogFile := \"testdata/go_logger.log\"\nwriter, err := rotatefile.NewConfig(logFile).Create()\nif err != nil {\n    panic(err)\n}\n\n// use writer\nwriter.Write([]byte(\"log message\\n\"))\n```\n\n### Use on another logger\n\n```go\npackage main\n\nimport (\n  \"log\"\n\n  \"github.com/gookit/slog/rotatefile\"\n)\n\nfunc main() {\n\tlogFile := \"testdata/go_logger.log\"\n\twriter, err := rotatefile.NewConfig(logFile).Create()\n\tif err != nil {\n\t\tpanic(err) \n\t}\n\n\tlog.SetOutput(writer)\n\tlog.Println(\"log message\")\n}\n```\n\n### Available config options\n\n```go\n// Config struct for rotate dispatcher\ntype Config struct {\n    // Filepath the log file path, will be rotating\n    Filepath string `json:\"filepath\" yaml:\"filepath\"`\n    \n    // FilePerm for create log file. 
default DefaultFilePerm\n    FilePerm os.FileMode `json:\"file_perm\" yaml:\"file_perm\"`\n    \n    // MaxSize file contents max size, unit is bytes.\n    // If is equals zero, disable rotate file by size\n    //\n    // default see DefaultMaxSize\n    MaxSize uint64 `json:\"max_size\" yaml:\"max_size\"`\n    \n    // RotateTime the file rotate interval time, unit is seconds.\n    // If is equals zero, disable rotate file by time\n    //\n    // default see EveryHour\n    RotateTime RotateTime `json:\"rotate_time\" yaml:\"rotate_time\"`\n    \n    // CloseLock use sync lock on write contents, rotating file.\n    //\n    // default: false\n    CloseLock bool `json:\"close_lock\" yaml:\"close_lock\"`\n    \n    // BackupNum max number for keep old files.\n    //\n    // 0 is not limit, default is DefaultBackNum\n    BackupNum uint `json:\"backup_num\" yaml:\"backup_num\"`\n    \n    // BackupTime max time for keep old files, unit is hours.\n    //\n    // 0 is not limit, default is DefaultBackTime\n    BackupTime uint `json:\"backup_time\" yaml:\"backup_time\"`\n    \n    // Compress determines if the rotated log files should be compressed using gzip.\n    // The default is not to perform compression.\n    Compress bool `json:\"compress\" yaml:\"compress\"`\n    \n    // RenameFunc you can custom-build filename for rotate file by size.\n    //\n    // default see DefaultFilenameFn\n    RenameFunc func(filePath string, rotateNum uint) string\n    \n    // TimeClock for rotate\n    TimeClock Clocker\n}\n```\n\n## Files clear\n\n```go\n\tfc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) {\n\t\tc.AddPattern(\"/path/to/some*.log\")\n\t\tc.BackupNum = 2\n\t\tc.BackupTime = 12 // 12 hours\n\t})\n\t\n\t// clear files on daemon\n\tgo fc.DaemonClean(nil)\n\t\n\t// NOTE: stop daemon before exit\n\t// fc.QuitDaemon()\n```\n\n### Configs\n\n```go\n\n// CConfig struct for clean files\ntype CConfig struct {\n\t// BackupNum max number for keep old files.\n\t// 0 is not 
limit, default is 20.\n\tBackupNum uint `json:\"backup_num\" yaml:\"backup_num\"`\n\n\t// BackupTime max time for keep old files, unit is TimeUnit.\n\t//\n\t// 0 is not limit, default is a week.\n\tBackupTime uint `json:\"backup_time\" yaml:\"backup_time\"`\n\n\t// Compress determines if the rotated log files should be compressed using gzip.\n\t// The default is not to perform compression.\n\tCompress bool `json:\"compress\" yaml:\"compress\"`\n\n\t// Patterns dir path with filename match patterns.\n\t//\n\t// eg: [\"/tmp/error.log.*\", \"/path/to/info.log.*\", \"/path/to/dir/*\"]\n\tPatterns []string `json:\"patterns\" yaml:\"patterns\"`\n\n\t// TimeClock for clean files\n\tTimeClock Clocker\n\n\t// TimeUnit for BackupTime. default is hours: time.Hour\n\tTimeUnit time.Duration `json:\"time_unit\" yaml:\"time_unit\"`\n\n\t// CheckInterval for clean files on daemon run. default is 60s.\n\tCheckInterval time.Duration `json:\"check_interval\" yaml:\"check_interval\"`\n\n\t// IgnoreError ignore remove error\n\t// TODO IgnoreError bool\n\n\t// RotateMode for rotate split files TODO\n\t//  - copy+cut: copy contents then truncate file\n\t//\t- rename : rename file(use for like PHP-FPM app)\n\t// RotateMode RotateMode `json:\"rotate_mode\" yaml:\"rotate_mode\"`\n}\n```"
  },
  {
    "path": "rotatefile/cleanup.go",
    "content": "package rotatefile\n\nimport (\n\t\"os\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/fsutil\"\n)\n\nconst defaultCheckInterval = 60 * time.Second\n\n// CConfig struct for clean files\ntype CConfig struct {\n\t// BackupNum max number for keep old files.\n\t//\n\t// 0 is not limit, default is 20.\n\tBackupNum uint `json:\"backup_num\" yaml:\"backup_num\"`\n\n\t// BackupTime max time for keep old files, unit is TimeUnit.\n\t//\n\t// 0 is not limit, default is a week.\n\tBackupTime uint `json:\"backup_time\" yaml:\"backup_time\"`\n\n\t// Compress determines if the rotated log files should be compressed using gzip.\n\t// The default is not to perform compression.\n\tCompress bool `json:\"compress\" yaml:\"compress\"`\n\n\t// Patterns dir path with filename match patterns.\n\t//\n\t// eg: [\"/tmp/error.log.*\", \"/path/to/info.log.*\", \"/path/to/dir/*\"]\n\tPatterns []string `json:\"patterns\" yaml:\"patterns\"`\n\n\t// TimeClock for clean files\n\tTimeClock Clocker\n\n\t// TimeUnit for BackupTime. default is hours: time.Hour\n\tTimeUnit time.Duration `json:\"time_unit\" yaml:\"time_unit\"`\n\n\t// CheckInterval for clean files on daemon run. 
default is 60s.\n\tCheckInterval time.Duration `json:\"check_interval\" yaml:\"check_interval\"`\n\n\t// IgnoreError ignore remove error\n\t// TODO IgnoreError bool\n\n\t// RotateMode for rotate split files TODO\n\t//  - copy+cut: copy contents then truncate file\n\t//\t- rename : rename file(use for like PHP-FPM app)\n\t// RotateMode RotateMode `json:\"rotate_mode\" yaml:\"rotate_mode\"`\n}\n\n// CConfigFunc for clean config\ntype CConfigFunc func(c *CConfig)\n\n// AddDirPath for clean, will auto append * for match all files\nfunc (c *CConfig) AddDirPath(dirPaths ...string) *CConfig {\n\tfor _, dirPath := range dirPaths {\n\t\tif !fsutil.IsDir(dirPath) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Patterns = append(c.Patterns, dirPath+\"/*\")\n\t}\n\treturn c\n}\n\n// AddPattern for clean. eg: \"/tmp/error.log.*\"\nfunc (c *CConfig) AddPattern(patterns ...string) *CConfig {\n\tc.Patterns = append(c.Patterns, patterns...)\n\treturn c\n}\n\n// WithConfigFn for custom settings\nfunc (c *CConfig) WithConfigFn(fns ...CConfigFunc) *CConfig {\n\tfor _, fn := range fns {\n\t\tif fn != nil {\n\t\t\tfn(c)\n\t\t}\n\t}\n\treturn c\n}\n\n// NewCConfig instance\nfunc NewCConfig() *CConfig {\n\treturn &CConfig{\n\t\tBackupNum:  DefaultBackNum,\n\t\tBackupTime: DefaultBackTime,\n\t\tTimeClock:  DefaultTimeClockFn,\n\t\tTimeUnit:   time.Hour,\n\t\t// check interval time\n\t\tCheckInterval: defaultCheckInterval,\n\t}\n}\n\n// FilesClear multi files by time.\n//\n// use for rotate and clear other program produce log files\ntype FilesClear struct {\n\t// mu sync.Mutex\n\tcfg *CConfig\n\t// inited mark\n\tinited bool\n\n\t// file max backup time. 
equals CConfig.BackupTime * CConfig.TimeUnit\n\tbackupDur  time.Duration\n\tquitDaemon chan struct{}\n}\n\n// NewFilesClear instance\nfunc NewFilesClear(fns ...CConfigFunc) *FilesClear {\n\tcfg := NewCConfig().WithConfigFn(fns...)\n\treturn &FilesClear{cfg: cfg}\n}\n\n// Config get\nfunc (r *FilesClear) Config() *CConfig {\n\treturn r.cfg\n}\n\n// WithConfig for custom set config\nfunc (r *FilesClear) WithConfig(cfg *CConfig) *FilesClear {\n\tr.cfg = cfg\n\treturn r\n}\n\n// WithConfigFn for custom settings\nfunc (r *FilesClear) WithConfigFn(fns ...CConfigFunc) *FilesClear {\n\tr.cfg.WithConfigFn(fns...)\n\treturn r\n}\n\n//\n// ---------------------------------------------------------------------------\n// clean backup files\n// ---------------------------------------------------------------------------\n//\n\n// StopDaemon for stop daemon clean\nfunc (r *FilesClear) StopDaemon() {\n\tif r.quitDaemon == nil {\n\t\tpanic(\"cannot quit daemon, please call DaemonClean() first\")\n\t}\n\tclose(r.quitDaemon)\n}\n\n// DaemonClean daemon clean old files by config\n//\n// NOTE: this method will block current goroutine\n//\n// Usage:\n//\n//\tfc := rotatefile.NewFilesClear(nil)\n//\tfc.WithConfigFn(func(c *rotatefile.CConfig) {\n//\t\tc.AddDirPath(\"./testdata\")\n//\t})\n//\n//\twg := sync.WaitGroup{}\n//\twg.Add(1)\n//\n//\t// start daemon\n//\tgo fc.DaemonClean(func() {\n//\t\twg.Done()\n//\t})\n//\n//\t// wait for stop\n//\twg.Wait()\nfunc (r *FilesClear) DaemonClean(onStop func()) {\n\tif r.cfg.BackupNum == 0 && r.cfg.BackupTime == 0 {\n\t\tpanic(\"clean: backupNum and backupTime are both 0\")\n\t}\n\n\tr.quitDaemon = make(chan struct{})\n\ttk := time.NewTicker(r.cfg.CheckInterval)\n\tdefer tk.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-r.quitDaemon:\n\t\t\tif onStop != nil {\n\t\t\t\tonStop()\n\t\t\t}\n\t\t\treturn\n\t\tcase <-tk.C: // do cleaning\n\t\t\tprintErrln(\"files-clear: cleanup old files error:\", r.Clean())\n\t\t}\n\t}\n}\n\n// Clean old files by 
config\nfunc (r *FilesClear) prepare() {\n\tif r.inited {\n\t\treturn\n\t}\n\tr.inited = true\n\n\t// check backup time\n\tif r.cfg.BackupTime > 0 {\n\t\tr.backupDur = time.Duration(r.cfg.BackupTime) * r.cfg.TimeUnit\n\t}\n}\n\n// Clean old files by config\nfunc (r *FilesClear) Clean() error {\n\tif r.cfg.BackupNum == 0 && r.cfg.BackupTime == 0 {\n\t\treturn errorx.Err(\"clean: backupNum and backupTime are both 0\")\n\t}\n\n\t// clear by time, can also clean by number\n\tfor _, filePattern := range r.cfg.Patterns {\n\t\tif err := r.cleanByPattern(filePattern); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// cleanByPattern clean files by pattern\nfunc (r *FilesClear) cleanByPattern(filePattern string) (err error) {\n\tr.prepare()\n\n\toldFiles := make([]fileInfo, 0, 8)\n\tcutTime := r.cfg.TimeClock.Now().Add(-r.backupDur)\n\n\t// find and clean expired files\n\terr = fsutil.GlobWithFunc(filePattern, func(filePath string) error {\n\t\tstat, err := os.Stat(filePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// not handle subdir TODO: support subdir\n\t\tif stat.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\t// collect not expired\n\t\tif stat.ModTime().After(cutTime) {\n\t\t\toldFiles = append(oldFiles, newFileInfo(filePath, stat))\n\t\t\treturn nil\n\t\t}\n\n\t\t// remove expired file\n\t\treturn r.remove(filePath)\n\t})\n\n\t// clear by backup number.\n\tbackNum := int(r.cfg.BackupNum)\n\tremNum := len(oldFiles) - backNum\n\n\tif backNum > 0 && remNum > 0 {\n\t\t// sort by mod-time, oldest at first.\n\t\tsort.Sort(modTimeFInfos(oldFiles))\n\n\t\tfor idx := 0; idx < len(oldFiles); idx++ {\n\t\t\tif err = r.remove(oldFiles[idx].Path()); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tremNum--\n\t\t\tif remNum == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (r *FilesClear) remove(filePath string) (err error) {\n\treturn os.Remove(filePath)\n}\n"
  },
  {
    "path": "rotatefile/cleanup_test.go",
    "content": "package rotatefile_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil\"\n\t\"github.com/gookit/goutil/dump\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/timex\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\nfunc TestFilesClear_Clean(t *testing.T) {\n\t// make files for clean\n\tmakeNum := 5\n\tmakeWaitCleanFiles(\"file_clean.log\", makeNum)\n\t_, err := fsutil.PutContents(\"testdata/subdir/some.txt\", \"test data\")\n\tassert.NoErr(t, err)\n\n\t// create clear\n\tfc := rotatefile.NewFilesClear()\n\tfc.WithConfig(rotatefile.NewCConfig())\n\tfc.WithConfigFn(func(c *rotatefile.CConfig) {\n\t\tc.AddDirPath(\"testdata\", \"not-exist-dir\")\n\t\tc.BackupNum = 1\n\t\tc.BackupTime = 3\n\t\tc.TimeUnit = time.Second // for test\n\t})\n\n\tcfg := fc.Config()\n\tassert.Eq(t, uint(1), cfg.BackupNum)\n\tdump.P(cfg)\n\n\t// do clean\n\tassert.NoErr(t, fc.Clean())\n\n\tfiles := fsutil.Glob(\"testdata/file_clean.log.*\")\n\tdump.P(files)\n\tassert.NotEmpty(t, files)\n\tassert.Lt(t, len(files), makeNum)\n\n\tt.Run(\"error\", func(t *testing.T) {\n\t\tfc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) {\n\t\t\tc.BackupNum = 0\n\t\t\tc.BackupTime = 0\n\t\t})\n\t\tassert.Err(t, fc.Clean())\n\t})\n}\n\nfunc TestFilesClear_DaemonClean(t *testing.T) {\n\tt.Run(\"panic\", func(t *testing.T) {\n\t\tfc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) {\n\t\t\tc.BackupNum = 0\n\t\t\tc.BackupTime = 0\n\t\t})\n\t\tassert.Panics(t, func() {\n\t\t\tfc.StopDaemon()\n\t\t})\n\t\tassert.Panics(t, func() {\n\t\t\tfc.DaemonClean(nil)\n\t\t})\n\t})\n\n\tfc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) {\n\t\tc.AddPattern(\"testdata/file_daemon_clean.*\")\n\t\tc.BackupNum = 1\n\t\tc.BackupTime = 3\n\t\tc.TimeUnit = time.Second      // for test\n\t\tc.CheckInterval = time.Second // for test\n\t})\n\n\tcfg := 
fc.Config()\n\tdump.P(cfg)\n\n\t// make files for clean\n\tmakeNum := 5\n\tmakeWaitCleanFiles(\"file_daemon_clean.log\", makeNum)\n\n\t// test daemon clean\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\t// start daemon\n\tgo fc.DaemonClean(func() {\n\t\tfmt.Println(\"daemon clean stopped, at\", timex.Now().DateFormat(\"ymdTH:i:s.v\"))\n\t\twg.Done()\n\t})\n\n\t// stop daemon\n\tgo func() {\n\t\ttime.Sleep(time.Millisecond * 1200)\n\t\tfmt.Println(\"stop daemon clean, at\", timex.Now().DateFormat(\"ymdTH:i:s.v\"))\n\t\tfc.StopDaemon()\n\t}()\n\n\t// wait for stop\n\twg.Wait()\n\n\tfiles := fsutil.Glob(\"testdata/file_daemon_clean.log.*\")\n\tdump.P(files)\n\tassert.NotEmpty(t, files)\n\tassert.Lt(t, len(files), makeNum)\n}\n\nfunc makeWaitCleanFiles(nameTpl string, makeNum int) {\n\tfor i := 0; i < makeNum; i++ {\n\t\tfpath := fmt.Sprintf(\"testdata/%s.%03d\", nameTpl, i)\n\t\tfmt.Println(\"make file:\", fpath)\n\t\t_, err := fsutil.PutContents(fpath, []byte(\"test contents ...\"))\n\t\tgoutil.PanicErr(err)\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tfmt.Println(\"wait clean files:\")\n\terr := fsutil.GlobWithFunc(\"./testdata/\"+nameTpl+\".*\", func(fpath string) error {\n\t\tfi, err := os.Stat(fpath)\n\t\tgoutil.PanicErr(err)\n\n\t\tfmt.Printf(\"  %s => mtime: %s\\n\", fpath, fi.ModTime().Format(\"060102T15:04:05\"))\n\t\treturn nil\n\t})\n\tgoutil.PanicErr(err)\n}\n"
  },
  {
    "path": "rotatefile/config.go",
    "content": "package rotatefile\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/strutil\"\n\t\"github.com/gookit/goutil/timex\"\n)\n\n//\n// ---------------------------- rotate time -------------------------------\n//\n\ntype rotateLevel uint8\n\nconst (\n\tlevelDay rotateLevel = iota\n\tlevelHour\n\tlevelMin\n\tlevelSec\n)\n\n// RotateTime for a rotating file. unit is seconds.\n//\n// EveryDay:\n//   - \"error.log.20201223\"\n//\n// EveryHour, Every30Min, EveryMinute:\n//   - \"error.log.20201223_1500\"\n//   - \"error.log.20201223_1530\"\n//   - \"error.log.20201223_1523\"\ntype RotateTime int\n\n// built in rotate time constants\nconst (\n\tEveryMonth  RotateTime = 30 * timex.OneDaySec\n\tEveryDay    RotateTime = timex.OneDaySec\n\tEveryHour   RotateTime = timex.OneHourSec\n\tEvery30Min  RotateTime = 30 * timex.OneMinSec\n\tEvery15Min  RotateTime = 15 * timex.OneMinSec\n\tEveryMinute RotateTime = timex.OneMinSec\n\tEverySecond RotateTime = 1 // only use for tests\n)\n\n// Interval get check interval time. unit is seconds.\nfunc (rt RotateTime) Interval() int64 {\n\treturn int64(rt)\n}\n\n// FirstCheckTime for a rotated file.\n// - will automatically align the time from the start of each hour.\nfunc (rt RotateTime) FirstCheckTime(now time.Time) time.Time {\n\tinterval := rt.Interval()\n\n\tswitch rt.level() {\n\tcase levelDay:\n\t\treturn timex.DayEnd(now)\n\tcase levelHour:\n\t\t// should check on H:59:59.500\n\t\treturn timex.HourStart(now).Add(timex.OneHour - 500*time.Millisecond)\n\tcase levelMin:\n\t\t// eg: minutes=5\n\t\tminutes := int(interval / 60)\n\t\tnextMin := now.Minute() + minutes\n\n\t\t// will rotate at next hour start. 
eg: now.Minute()=57, nextMin=62.\n\t\tif nextMin >= 60 {\n\t\t\treturn timex.HourStart(now).Add(timex.OneHour)\n\t\t}\n\n\t\t// eg: now.Minute()=37, nextMin=42, will get nextDur=40\n\t\tnextDur := time.Duration(nextMin).Round(time.Duration(minutes))\n\t\treturn timex.HourStart(now).Add(nextDur * time.Minute)\n\tdefault: // levelSec\n\t\treturn now.Add(time.Duration(interval) * time.Second)\n\t}\n}\n\n// level for rotating time\nfunc (rt RotateTime) level() rotateLevel {\n\tswitch {\n\tcase rt >= timex.OneDaySec:\n\t\treturn levelDay\n\tcase rt >= timex.OneHourSec:\n\t\treturn levelHour\n\tcase rt >= EveryMinute:\n\t\treturn levelMin\n\tdefault:\n\t\treturn levelSec\n\t}\n}\n\n// TimeFormat get log file suffix format\n//\n// EveryDay:\n//   - \"error.log.20201223\"\n//\n// EveryHour, Every30Min, EveryMinute:\n//   - \"error.log.20201223_1500\"\n//   - \"error.log.20201223_1530\"\n//   - \"error.log.20201223_1523\"\nfunc (rt RotateTime) TimeFormat() (suffixFormat string) {\n\tsuffixFormat = \"20060102_1500\" // default is levelHour\n\tswitch rt.level() {\n\tcase levelDay:\n\t\tsuffixFormat = \"20060102\"\n\tcase levelHour:\n\t\tsuffixFormat = \"20060102_1500\"\n\tcase levelMin:\n\t\tsuffixFormat = \"20060102_1504\"\n\tcase levelSec:\n\t\tsuffixFormat = \"20060102_150405\"\n\t}\n\treturn\n}\n\n// MarshalJSON implement the JSON Marshal interface [encoding/json.Marshaler]\nfunc (rt RotateTime) MarshalJSON() ([]byte, error) {\n\treturn []byte(fmt.Sprintf(`\"%ds\"`, rt.Interval())), nil\n}\n\n// UnmarshalJSON implement the JSON Unmarshal interface [encoding/json.Unmarshaler]\nfunc (rt *RotateTime) UnmarshalJSON(data []byte) error {\n\ts, err := strconv.Unquote(string(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*rt, err = StringToRotateTime(s)\n\treturn err\n}\n\n// String rotate type to string\nfunc (rt RotateTime) String() string {\n\tswitch rt.level() {\n\tcase levelDay:\n\t\treturn fmt.Sprintf(\"Every %d Day\", rt.Interval()/timex.OneDaySec)\n\tcase 
levelHour:\n\t\treturn fmt.Sprintf(\"Every %d Hours\", rt.Interval()/timex.OneHourSec)\n\tcase levelMin:\n\t\treturn fmt.Sprintf(\"Every %d Minutes\", rt.Interval()/timex.OneMinSec)\n\tdefault: // levelSec\n\t\treturn fmt.Sprintf(\"Every %d Seconds\", rt.Interval())\n\t}\n}\n\n// StringToRotateTime parse and convert string to RotateTime\nfunc StringToRotateTime(s string) (RotateTime, error) {\n\t// is int value, try to parse as seconds\n\tif strutil.IsInt(s) {\n\t\tiVal := strutil.SafeInt(s)\n\t\tif iVal < 0 || iVal > timex.OneMonthSec*3 {\n\t\t\treturn 0, fmt.Errorf(\"rotatefile: invalid rotate time: %s\", s)\n\t\t}\n\t\treturn RotateTime(iVal), nil\n\t}\n\n\t// parse time duration string. eg: \"1h\", \"1m\", \"1d\"\n\trtDur, err := timex.ToDuration(s)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn RotateTime(rtDur.Seconds()), nil\n}\n\n//\n// ---------------------------- RotateMode -------------------------------\n//\n\n// RotateMode for a rotated file. 0: rename, 1: create\ntype RotateMode uint8\n\nconst (\n\t// ModeRename rotating file by rename.\n\t//\n\t// Example flow:\n\t//  - always write to \"error.log\"\n\t//  - rotating by rename it to \"error.log.20201223\"\n\t//  - then re-create \"error.log\"\n\tModeRename RotateMode = iota\n\n\t// ModeCreate rotating file by create a new file.\n\t//\n\t// Example flow:\n\t//  - directly create a new file on each rotated time. 
eg: \"error.log.20201223\", \"error.log.20201224\"\n\tModeCreate\n)\n\n// String get string name\nfunc (m RotateMode) String() string {\n\tswitch m {\n\tcase ModeRename:\n\t\treturn \"rename\"\n\tcase ModeCreate:\n\t\treturn \"create\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}\n\n// MarshalJSON implement the JSON Marshal interface [encoding/json.Marshaler]\nfunc (m RotateMode) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + m.String() + `\"`), nil\n}\n\n// UnmarshalJSON implement the JSON Unmarshal interface [encoding/json.Unmarshaler]\nfunc (m *RotateMode) UnmarshalJSON(data []byte) error {\n\ts, err := strconv.Unquote(string(data))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*m, err = StringToRotateMode(s)\n\treturn err\n}\n\n// StringToRotateMode convert string to RotateMode\nfunc StringToRotateMode(s string) (RotateMode, error) {\n\tswitch s {\n\tcase \"rename\":\n\t\treturn ModeRename, nil\n\tcase \"create\", \"make\":\n\t\treturn ModeCreate, nil\n\tdefault:\n\t\t// is int value, try to parse as int\n\t\tif strutil.IsInt(s) {\n\t\t\tiVal := strutil.SafeInt(s)\n\t\t\tif iVal >= int(ModeRename) && iVal <= int(ModeCreate) {\n\t\t\t\treturn RotateMode(iVal), nil\n\t\t\t}\n\t\t}\n\t\treturn 0, fmt.Errorf(\"rotatefile: invalid rotate mode: %s\", s)\n\t}\n}\n\n//\n// ---------------------------- Clocker -------------------------------\n//\n\n// Clocker is the interface used for determine the current time\ntype Clocker interface {\n\tNow() time.Time\n}\n\n// ClockFn func\ntype ClockFn func() time.Time\n\n// Now implements the Clocker\nfunc (fn ClockFn) Now() time.Time {\n\treturn fn()\n}\n\n// ConfigFn for setting config\ntype ConfigFn func(c *Config)\n\n// Config struct for rotate dispatcher\ntype Config struct {\n\t// Filepath the log file path, will be rotating. eg: \"logs/error.log\"\n\tFilepath string `json:\"filepath\" yaml:\"filepath\"`\n\n\t// FilePerm for create log file. 
default DefaultFilePerm\n\tFilePerm os.FileMode `json:\"file_perm\" yaml:\"file_perm\"`\n\n\t// RotateMode for rotate file. default ModeRename\n\tRotateMode RotateMode `json:\"rotate_mode\" yaml:\"rotate_mode\"`\n\n\t// MaxSize file contents max size, unit is bytes.\n\t// If is equals zero, disable rotate file by size\n\t//\n\t// default see DefaultMaxSize\n\tMaxSize uint64 `json:\"max_size\" yaml:\"max_size\"`\n\n\t// RotateTime the file rotating interval time, unit is seconds.\n\t// If is equals zero, disable rotate file by time\n\t//\n\t// default: EveryHour\n\tRotateTime RotateTime `json:\"rotate_time\" yaml:\"rotate_time\"`\n\n\t// CloseLock use sync lock on writing contents, rotating file.\n\t//\n\t// default: false\n\tCloseLock bool `json:\"close_lock\" yaml:\"close_lock\"`\n\n\t// BackupNum max number for keep old files.\n\t//\n\t// 0 is not limit, default is DefaultBackNum\n\tBackupNum uint `json:\"backup_num\" yaml:\"backup_num\"`\n\n\t// BackupTime max time for keep old files, unit is hours.\n\t//\n\t// 0 is not limit, default is DefaultBackTime\n\tBackupTime uint `json:\"backup_time\" yaml:\"backup_time\"`\n\n\t// CleanOnClose determines if the rotated log files should be cleaned up when close.\n\tCleanOnClose bool `json:\"clean_on_close\" yaml:\"clean_on_close\"`\n\n\t// Compress determines if the rotated log files should be compressed using gzip.\n\t// The default is not to perform compression.\n\tCompress bool `json:\"compress\" yaml:\"compress\"`\n\n\t// RenameFunc you can custom-build filename for rotate file by size.\n\t//\n\t// Example:\n\t//\n\t//  c.RenameFunc = func(filepath string, rotateNum uint) string {\n\t// \t\tsuffix := time.Now().Format(\"06010215\")\n\t//\n\t// \t\t// eg: /tmp/error.log => /tmp/error.log.24032116_894136\n\t// \t\treturn filepath + fmt.Sprintf(\".%s_%d\", suffix, rotateNum)\n\t//  }\n\tRenameFunc func(filePath string, rotateNum uint) string `json:\"-\" yaml:\"-\"`\n\n\t// TimeClock for a rotating file by 
time.\n\tTimeClock Clocker `json:\"-\" yaml:\"-\"`\n\n\t// DebugMode for debug on development.\n\tDebugMode bool `json:\"debug_mode\" yaml:\"debug_mode\"`\n}\n\nfunc (c *Config) backupDuration() time.Duration {\n\tif c.BackupTime < 1 {\n\t\treturn 0\n\t}\n\treturn time.Duration(c.BackupTime) * time.Hour\n}\n\n// With more config setting func\nfunc (c *Config) With(fns ...ConfigFn) *Config {\n\tfor _, fn := range fns {\n\t\tfn(c)\n\t}\n\treturn c\n}\n\n// Create new Writer by config\nfunc (c *Config) Create() (*Writer, error) { return NewWriter(c) }\n\n// IsMode check rotate mode\nfunc (c *Config) IsMode(m RotateMode) bool { return c.RotateMode == m }\n\nvar (\n\t// DefaultFilePerm perm and flags for create log file\n\tDefaultFilePerm os.FileMode = 0664\n\t// DefaultFileFlags for open log file\n\tDefaultFileFlags = os.O_CREATE | os.O_WRONLY | os.O_APPEND\n\n\t// DefaultTimeClockFn for create time\n\tDefaultTimeClockFn = ClockFn(func() time.Time {\n\t\treturn time.Now()\n\t})\n)\n\n// NewDefaultConfig instance\nfunc NewDefaultConfig() *Config {\n\treturn &Config{\n\t\tMaxSize:    DefaultMaxSize,\n\t\tRotateTime: EveryHour,\n\t\tBackupNum:  DefaultBackNum,\n\t\tBackupTime: DefaultBackTime,\n\t\t// RenameFunc: DefaultFilenameFn,\n\t\tTimeClock: DefaultTimeClockFn,\n\t\tFilePerm:  DefaultFilePerm,\n\t}\n}\n\n// NewConfig by file path, and can with custom setting\nfunc NewConfig(filePath string, fns ...ConfigFn) *Config {\n\tif len(fns) == 0 {\n\t\treturn NewConfigWith(WithFilepath(filePath))\n\t}\n\treturn NewConfigWith(append(fns, WithFilepath(filePath))...)\n}\n\n// NewConfigWith custom func\nfunc NewConfigWith(fns ...ConfigFn) *Config {\n\treturn NewDefaultConfig().With(fns...)\n}\n\n// EmptyConfigWith new empty config with custom func\nfunc EmptyConfigWith(fns ...ConfigFn) *Config {\n\tc := &Config{\n\t\t// RenameFunc: DefaultFilenameFn,\n\t\tTimeClock: DefaultTimeClockFn,\n\t\tFilePerm:  DefaultFilePerm,\n\t}\n\n\treturn c.With(fns...)\n}\n\n// WithFilepath 
setting\nfunc WithFilepath(logfile string) ConfigFn {\n\treturn func(c *Config) { c.Filepath = logfile }\n}\n\n// WithDebugMode setting for debug mode\nfunc WithDebugMode(c *Config) { c.DebugMode = true }\n\n// WithCompress setting for compress\nfunc WithCompress(c *Config) { c.Compress = true }\n\n// WithBackupNum setting for backup number\nfunc WithBackupNum(num uint) ConfigFn {\n\treturn func(c *Config) { c.BackupNum = num }\n}\n"
  },
  {
    "path": "rotatefile/config_test.go",
    "content": "package rotatefile_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/dump\"\n\t\"github.com/gookit/goutil/jsonutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/timex\"\n\t\"github.com/gookit/goutil/x/fmtutil\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\nfunc TestNewDefaultConfig(t *testing.T) {\n\tsize := fmtutil.DataSize(1024 * 1024 * 10)\n\tdump.P(size)\n\n\tc := rotatefile.NewDefaultConfig()\n\tassert.Eq(t, rotatefile.DefaultMaxSize, c.MaxSize)\n}\n\nfunc TestNewConfig(t *testing.T) {\n\tcfg := rotatefile.NewConfig(\"testdata/test.log\")\n\tdump.P(cfg)\n\n\tassert.Eq(t, rotatefile.DefaultBackNum, cfg.BackupNum)\n\tassert.Eq(t, rotatefile.DefaultBackTime, cfg.BackupTime)\n\tassert.Eq(t, rotatefile.EveryHour, cfg.RotateTime)\n\tassert.Eq(t, rotatefile.DefaultMaxSize, cfg.MaxSize)\n\tassert.Eq(t, rotatefile.ModeRename, cfg.RotateMode)\n\n\tcfg = rotatefile.EmptyConfigWith(func(c *rotatefile.Config) {\n\t\tc.Compress = true\n\t})\n\tassert.True(t, cfg.Compress)\n\tassert.Eq(t, uint(0), cfg.BackupNum)\n\tassert.Eq(t, uint(0), cfg.BackupTime)\n\n\tcfg = &rotatefile.Config{}\n\tassert.Eq(t, rotatefile.ModeRename, cfg.RotateMode)\n\n\terr := jsonutil.DecodeString(`{\n\t\"debug_mode\": true,\n\t\"rotate_mode\": \"create\",\n\t\"rotate_time\": \"1day\"\n}`, cfg)\n\tdump.P(cfg)\n\tassert.NoErr(t, err)\n\tassert.Eq(t, rotatefile.ModeCreate, cfg.RotateMode)\n\tassert.Eq(t, \"Every 1 Day\", cfg.RotateTime.String())\n}\n\nfunc TestRotateMode_cases(t *testing.T) {\n\tt.Run(\"String\", func(t *testing.T) {\n\t\tassert.Eq(t, \"rename\", rotatefile.ModeRename.String())\n\t\tassert.Eq(t, \"create\", rotatefile.ModeCreate.String())\n\t\tassert.Eq(t, \"unknown\", rotatefile.RotateMode(9).String())\n\t})\n\n\tt.Run(\"UnmarshalJSON\", func(t *testing.T) {\n\t\trm := rotatefile.RotateMode(0)\n\n\t\t// UnmarshalJSON\n\t\terr := rm.UnmarshalJSON([]byte(`\"create\"`))\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, 
rotatefile.ModeCreate, rm)\n\n\t\trm = rotatefile.RotateMode(0)\n\t\t// use int\n\t\terr = rm.UnmarshalJSON([]byte(`\"1\"`))\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, rotatefile.ModeCreate, rm)\n\n\t\t// error case\n\t\tassert.Err(t, rm.UnmarshalJSON([]byte(`create`)))\n\t})\n\n\tt.Run(\"MarshalJSON\", func(t *testing.T) {\n\t\tbs, err := rotatefile.ModeRename.MarshalJSON()\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, `\"rename\"`, string(bs))\n\t\tbs, err = rotatefile.ModeCreate.MarshalJSON()\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, `\"create\"`, string(bs))\n\n\t\tbs, err = rotatefile.RotateMode(35).MarshalJSON()\n\t\tassert.NoErr(t, err)\n\t\tassert.Eq(t, `\"unknown\"`, string(bs))\n\t})\n}\n\nfunc TestRotateTime_encode(t *testing.T) {\n\trt := rotatefile.RotateTime(0)\n\n\t// UnmarshalJSON\n\terr := rt.UnmarshalJSON([]byte(`\"1h\"`))\n\tassert.NoErr(t, err)\n\tassert.Eq(t, \"Every 1 Hours\", rt.String())\n\terr = rt.UnmarshalJSON([]byte(`\"3600\"`))\n\tassert.NoErr(t, err)\n\tassert.Eq(t, \"Every 1 Hours\", rt.String())\n\n\t// error case\n\tassert.Err(t, rt.UnmarshalJSON([]byte(`a`)))\n\n\t// MarshalJSON\n\tbs, err := rt.MarshalJSON()\n\tassert.NoErr(t, err)\n\tassert.Eq(t, `\"3600s\"`, string(bs))\n}\n\nfunc TestRotateTime_TimeFormat(t *testing.T) {\n\tnow := timex.Now()\n\n\trt := rotatefile.EveryDay\n\tassert.Eq(t, \"20060102\", rt.TimeFormat())\n\tft := rt.FirstCheckTime(now.T())\n\tassert.True(t, now.DayEnd().Equal(ft))\n\n\trt = rotatefile.EveryHour\n\tassert.Eq(t, \"20060102_1500\", rt.TimeFormat())\n\n\trt = rotatefile.Every15Min\n\tassert.Eq(t, \"20060102_1504\", rt.TimeFormat())\n\tft = rt.FirstCheckTime(now.T())\n\tassert.Gt(t, ft.Unix(), 0)\n\n\trt = rotatefile.EverySecond\n\tassert.Eq(t, \"20060102_150405\", rt.TimeFormat())\n\tft = rt.FirstCheckTime(now.T())\n\tassert.Eq(t, now.Unix()+rt.Interval(), ft.Unix())\n}\n\nfunc TestRotateTime_String(t *testing.T) {\n\tassert.Eq(t, \"Every 1 Day\", rotatefile.EveryDay.String())\n\tassert.Eq(t, 
\"Every 1 Hours\", rotatefile.EveryHour.String())\n\tassert.Eq(t, \"Every 1 Minutes\", rotatefile.EveryMinute.String())\n\tassert.Eq(t, \"Every 1 Seconds\", rotatefile.EverySecond.String())\n\n\tassert.Eq(t, \"Every 2 Hours\", rotatefile.RotateTime(timex.OneHourSec*2).String())\n\tassert.Eq(t, \"Every 15 Minutes\", rotatefile.RotateTime(timex.OneMinSec*15).String())\n\tassert.Eq(t, \"Every 5 Minutes\", rotatefile.RotateTime(timex.OneMinSec*5).String())\n\tassert.Eq(t, \"Every 3 Seconds\", rotatefile.RotateTime(3).String())\n\tassert.Eq(t, \"Every 2 Day\", rotatefile.RotateTime(timex.OneDaySec*2).String())\n}\n\nfunc TestRotateTime_FirstCheckTime_Round(t *testing.T) {\n\t// log rotate interval minutes\n\tlogMin := 5\n\n\t// now := timex.Now()\n\t// nowMin := now.Minute()\n\tnowMin := 37\n\t// dur := time.Duration(now.Minute() + min)\n\tdur := time.Duration(nowMin + logMin)\n\tassert.Eq(t, time.Duration(40), dur.Round(time.Duration(logMin)))\n\n\tnowMin = 40\n\tdur = time.Duration(nowMin + logMin)\n\tassert.Eq(t, time.Duration(45), dur.Round(time.Duration(logMin)))\n\n\tnowMin = 41\n\tdur = time.Duration(nowMin + logMin)\n\tassert.Eq(t, time.Duration(45), dur.Round(time.Duration(logMin)))\n}\n"
  },
  {
    "path": "rotatefile/issues_test.go",
    "content": "package rotatefile_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/mathutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog/internal\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\n// https://github.com/gookit/slog/issues/138\n// 日志按everyday自动滚动，文件名的日期对应的是前一天的日志 #138\nfunc TestIssues_138(t *testing.T) {\n\tlogfile := \"testdata/iss138_rotate_day.log\"\n\n\tmt := rotatefile.NewMockClock(\"2023-11-16 23:59:55\")\n\tw, err := rotatefile.NewWriterWith(rotatefile.WithDebugMode, func(c *rotatefile.Config) {\n\t\tc.TimeClock = mt\n\t\t// c.MaxSize = 128\n\t\tc.Filepath = logfile\n\t\tc.RotateTime = rotatefile.EveryDay\n\t})\n\n\tassert.NoErr(t, err)\n\tdefer w.MustClose()\n\n\tfor i := 0; i < 15; i++ {\n\t\tdt := mt.Datetime()\n\t\t_, err = w.WriteString(dt + \" [INFO] this is a log message, idx=\" + mathutil.String(i) + \"\\n\")\n\t\tassert.NoErr(t, err)\n\t\t// increase time\n\t\tmt.Add(time.Second * 3)\n\t\t// mt.Add(time.Millisecond * 300)\n\t}\n\n\t// Out: rotate_day.log, rotate_day.log.20231116\n\tfiles := fsutil.Glob(internal.BuildGlobPattern(logfile))\n\tassert.Len(t, files, 2)\n\n\t// check contents\n\tassert.True(t, fsutil.IsFile(logfile))\n\ts := fsutil.ReadString(logfile)\n\tassert.StrContains(t, s, \"2023-11-17 00:00\")\n\n\toldFile := internal.AddSuffix2path(logfile, \"20231116\")\n\tassert.True(t, fsutil.IsFile(oldFile))\n\ts = fsutil.ReadString(oldFile)\n\tassert.StrContains(t, s, \"2023-11-16 23:\")\n}\n\n// https://github.com/gookit/slog/issues/150\n// 日志轮转时间设置为分钟时，FirstCheckTime计算单位错误，导致生成预期外的多个日志文件 #150\nfunc TestIssues_150(t *testing.T) {\n\tlogfile := \"testdata/iss150_rotate_min.log\"\n\n\tmt := rotatefile.NewMockClock(\"2024-09-14 18:39:55\")\n\tw, err := rotatefile.NewWriterWith(rotatefile.WithDebugMode, func(c *rotatefile.Config) {\n\t\tc.TimeClock = mt\n\t\t// c.MaxSize = 128\n\t\tc.Filepath = logfile\n\t\tc.RotateTime = 
rotatefile.EveryMinute * 3\n\t})\n\n\tassert.NoErr(t, err)\n\tdefer w.MustClose()\n\n\tfor i := 0; i < 15; i++ {\n\t\tdt := mt.Datetime()\n\t\t_, err = w.WriteString(dt + \" [INFO] this is a log message, idx=\" + mathutil.String(i) + \"\\n\")\n\t\tassert.NoErr(t, err)\n\t\t// increase time\n\t\tmt.Add(time.Minute * 1)\n\t}\n\n\tfiles := fsutil.Glob(internal.BuildGlobPattern(logfile))\n\tassert.LenGt(t, files, 3)\n\n\t// check contents\n\tassert.True(t, fsutil.IsFile(logfile))\n\ts := fsutil.ReadString(logfile)\n\tassert.StrContains(t, s, \"2024-09-14 18:\")\n\n\t// iss150_rotate_min.20240914_1842.log\n\toldFile := internal.AddSuffix2path(logfile, \"20240914_1842\")\n\tassert.True(t, fsutil.IsFile(oldFile))\n\ts = fsutil.ReadString(oldFile)\n\tassert.StrContains(t, s, \"2024-09-14 18:41\")\n}\n"
  },
  {
    "path": "rotatefile/rotatefile.go",
    "content": "// Package rotatefile provides simple file rotation, compression and cleanup.\npackage rotatefile\n\nimport (\n\t\"io\"\n)\n\n// RotateWriter interface\ntype RotateWriter interface {\n\tio.WriteCloser\n\tClean() error\n\tFlush() error\n\tRotate() error\n\tSync() error\n}\n\nconst (\n\t// OneMByte size\n\tOneMByte uint64 = 1024 * 1024\n\n\t// DefaultMaxSize of a log file. default is 20M.\n\tDefaultMaxSize = 20 * OneMByte\n\t// DefaultBackNum default backup numbers for old files.\n\tDefaultBackNum uint = 20\n\t// DefaultBackTime default backup time for old files. default keeps a week.\n\tDefaultBackTime uint = 24 * 7\n)\n"
  },
  {
    "path": "rotatefile/rotatefile_test.go",
    "content": "package rotatefile_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\nfunc TestMain(m *testing.M) {\n\tfmt.Println(\"TestMain: remove all test files in ./testdata\")\n\tgoutil.PanicErr(fsutil.RemoveSub(\"./testdata\", fsutil.ExcludeNames(\".keep\")))\n\tm.Run()\n}\n\nfunc ExampleNewWriter_on_other_logger() {\n\tlogFile := \"testdata/another_logger.log\"\n\twriter, err := rotatefile.NewConfig(logFile).Create()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlog.SetOutput(writer)\n\tlog.Println(\"log message\")\n}\n"
  },
  {
    "path": "rotatefile/util.go",
    "content": "package rotatefile\n\nimport (\n\t\"compress/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/timex\"\n)\n\nconst compressSuffix = \".gz\"\n\nfunc printErrln(pfx string, err error) {\n\tif err != nil {\n\t\t_, _ = fmt.Fprintln(os.Stderr, pfx, err)\n\t}\n}\n\nfunc compressFile(srcPath, dstPath string) error {\n\tsrcFile, err := os.OpenFile(srcPath, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer srcFile.Close()\n\n\t// create and open a gz file\n\tgzFile, err := fsutil.OpenTruncFile(dstPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gzFile.Close()\n\n\tsrcSt, err := srcFile.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tzw := gzip.NewWriter(gzFile)\n\tzw.Name = srcSt.Name()\n\tzw.ModTime = srcSt.ModTime()\n\n\t// do copy\n\tif _, err = io.Copy(zw, srcFile); err != nil {\n\t\t_ = zw.Close()\n\t\treturn err\n\t}\n\treturn zw.Close()\n}\n\n// TODO replace to fsutil.FileInfo\ntype fileInfo struct {\n\tfs.FileInfo\n\tfilePath string\n}\n\n// Path get file full path. 
eg: \"/path/to/file.go\"\nfunc (fi *fileInfo) Path() string {\n\treturn fi.filePath\n}\n\nfunc newFileInfo(filePath string, fi fs.FileInfo) fileInfo {\n\treturn fileInfo{filePath: filePath, FileInfo: fi}\n}\n\n// modTimeFInfos sorts by oldest time modified in the fileInfo.\n// eg: [old_220211, old_220212, old_220213]\ntype modTimeFInfos []fileInfo\n\n// Less check\nfunc (fis modTimeFInfos) Less(i, j int) bool {\n\treturn fis[j].ModTime().After(fis[i].ModTime())\n}\n\n// Swap value\nfunc (fis modTimeFInfos) Swap(i, j int) {\n\tfis[i], fis[j] = fis[j], fis[i]\n}\n\n// Len get\nfunc (fis modTimeFInfos) Len() int {\n\treturn len(fis)\n}\n\n// MockClocker mock clock for test\ntype MockClocker struct {\n\ttt time.Time\n}\n\n// NewMockClock create a mock time instance from datetime string.\nfunc NewMockClock(datetime string) *MockClocker {\n\tnt := goutil.Must(timex.FromString(datetime))\n\treturn &MockClocker{tt: nt.Time}\n}\n\n// Now get current time.\nfunc (mt *MockClocker) Now() time.Time {\n\treturn mt.tt\n}\n\n// Add progresses time by the given duration.\nfunc (mt *MockClocker) Add(d time.Duration) {\n\tmt.tt = mt.tt.Add(d)\n}\n\n// Datetime returns the current time in the format \"2006-01-02 15:04:05\".\nfunc (mt *MockClocker) Datetime() string {\n\treturn mt.tt.Format(\"2006-01-02 15:04:05\")\n}\n"
  },
  {
    "path": "rotatefile/util_test.go",
    "content": "package rotatefile\n\nimport (\n\t\"errors\"\n\t\"testing\"\n)\n\nfunc TestPrintErrln(t *testing.T) {\n\tprintErrln(\"test\", nil)\n\tprintErrln(\"test\", errors.New(\"an error\"))\n}\n"
  },
  {
    "path": "rotatefile/writer.go",
    "content": "package rotatefile\n\nimport (\n\t\"fmt\"\n\t\"io/fs\"\n\t\"math/rand\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/mathutil\"\n\t\"github.com/gookit/goutil/strutil\"\n\t\"github.com/gookit/goutil/x/stdio\"\n)\n\n// Writer a flush, close, writer and support rotate file.\n//\n// refer https://github.com/flike/golog/blob/master/filehandler.go\ntype Writer struct {\n\t// writer instance id, use for debug\n\tid string\n\tmu sync.RWMutex\n\t// config of the writer\n\tcfg *Config\n\n\t// current opened logfile\n\tfile *os.File\n\t// current opened file path. NOTE it maybe not equals Config.Filepath\n\tpath string\n\t// The original file dir path for the Config.Filepath\n\tfileDir string\n\t// The original name and ext information\n\tfileName, onlyName, fileExt string\n\n\t// logfile max backup time. equals Config.BackupTime * time.Hour\n\tbackupDur time.Duration\n\t// oldFiles []string\n\tcleanCh chan struct{}\n\tstopCh  chan struct{}\n\n\t// context use for rotating file by size\n\twritten   uint64 // written size\n\trotateNum uint   // rotate times number\n\n\t// ---- context use for rotating file by time ----\n\n\t// the rotating file name suffix format. 
eg: \"20210102\", \"20210102_1500\"\n\tsuffixFormat   string\n\tcheckInterval  int64     // check interval seconds.\n\tnextRotatingAt time.Time // next rotating time\n}\n\n// NewWriter create rotate write with config and init it.\nfunc NewWriter(c *Config) (*Writer, error) {\n\td := &Writer{cfg: c}\n\n\tif err := d.init(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn d, nil\n}\n\n// NewWriterWith create a rotated writer with some settings.\nfunc NewWriterWith(fns ...ConfigFn) (*Writer, error) {\n\treturn NewWriter(NewConfigWith(fns...))\n}\n\n// init rotate dispatcher\nfunc (d *Writer) init() error {\n\td.id = fmt.Sprintf(\"%p\", d)\n\n\tlogfile := d.cfg.Filepath\n\t// dirSep := filepath.Separator\n\t// d.fileDir = filepath.Dir(logfile)\n\td.fileDir, d.fileName = filepath.Split(d.cfg.Filepath)\n\td.fileExt = filepath.Ext(d.fileName)                   // eg: .log\n\td.onlyName = strings.TrimSuffix(d.fileName, d.fileExt) // eg: error\n\t// removes the trailing separator on the dir path\n\tif ln := len(d.fileDir); ln > 1 && d.fileDir[ln-1] == filepath.Separator {\n\t\td.fileDir = d.fileDir[:ln-1]\n\t}\n\n\td.backupDur = d.cfg.backupDuration()\n\td.suffixFormat = d.cfg.RotateTime.TimeFormat()\n\td.checkInterval = d.cfg.RotateTime.Interval()\n\n\t// calc and storage next rotating time\n\tif d.checkInterval > 0 {\n\t\tnow := d.cfg.TimeClock.Now()\n\t\t// next rotating time\n\t\td.nextRotatingAt = d.cfg.RotateTime.FirstCheckTime(now)\n\t\tif d.cfg.RotateMode == ModeCreate {\n\t\t\t// logfile = d.cfg.Filepath + \".\" + now.Format(d.suffixFormat)\n\t\t\tlogfile = d.buildFilePath(now.Format(d.suffixFormat))\n\t\t}\n\t}\n\n\t// open the current file\n\treturn d.openFile(logfile)\n}\n\n// Config gets the config\nfunc (d *Writer) Config() Config {\n\treturn *d.cfg\n}\n\n// Flush sync data to disk. 
alias of Sync()\nfunc (d *Writer) Flush() error {\n\treturn d.file.Sync()\n}\n\n// Sync data to disk.\nfunc (d *Writer) Sync() error {\n\treturn d.file.Sync()\n}\n\n// Close the writer. will sync data to disk, then close the file handle.\n// and it will stop the async clean backups.\nfunc (d *Writer) Close() error {\n\tif d.cfg.CleanOnClose {\n\t\td.debugLog(\"cfg.CleanOnClose=true: start clean old files\")\n\t\tprintErrln(\"files-clear-onClose: cleanup old files error:\", d.Clean())\n\t}\n\n\treturn d.close(true)\n}\n\n// MustClose the writer. alias of Close(), but will panic if has error.\nfunc (d *Writer) MustClose() {\n\tprintErrln(\"rotatefile: close writer -\", d.Close())\n}\n\nfunc (d *Writer) close(closeStopCh bool) error {\n\tif err := d.file.Sync(); err != nil {\n\t\treturn err\n\t}\n\n\t// stop the async clean backups\n\tif closeStopCh && d.stopCh != nil {\n\t\td.debugLog(\"close stopCh for stop async clean old files\")\n\t\tclose(d.stopCh)\n\t\td.stopCh = nil\n\t}\n\treturn d.file.Close()\n}\n\n//\n// ---------------------------------------------------------------------------\n// write and rotate file\n// ---------------------------------------------------------------------------\n//\n\n// WriteString implements the io.StringWriter\nfunc (d *Writer) WriteString(s string) (n int, err error) {\n\treturn d.Write([]byte(s))\n}\n\n// Write data to file. 
then check and do rotate file, then async clean backups\nfunc (d *Writer) Write(p []byte) (n int, err error) {\n\t// do write data\n\tif n, err = d.doWrite(p); err != nil {\n\t\treturn\n\t}\n\n\t// do rotate file\n\terr = d.doRotate()\n\t// async clean backup files.\n\tif err == nil && d.shouldClean(true) {\n\t\td.asyncClean()\n\t}\n\treturn\n}\n\nfunc (d *Writer) doWrite(p []byte) (n int, err error) {\n\t// if enable lock\n\tif !d.cfg.CloseLock {\n\t\td.mu.Lock()\n\t\tdefer d.mu.Unlock()\n\t}\n\n\tn, err = d.file.Write(p)\n\tif err == nil {\n\t\t// update size\n\t\td.written += uint64(n)\n\t}\n\treturn\n}\n\n// Rotate the file by config and async clean backups\nfunc (d *Writer) Rotate() error {\n\terr := d.doRotate()\n\n\t// async clean backup files.\n\tif err == nil && d.shouldClean(true) {\n\t\td.asyncClean()\n\t}\n\treturn err\n}\n\n// do rotate the logfile by config\nfunc (d *Writer) doRotate() (err error) {\n\t// if enable lock\n\tif !d.cfg.CloseLock {\n\t\td.mu.Lock()\n\t\tdefer d.mu.Unlock()\n\t}\n\n\t// do rotate a file by size\n\tif d.cfg.MaxSize > 0 && d.written >= d.cfg.MaxSize {\n\t\terr = d.rotatingBySize()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// do rotate a file by time\n\tif d.checkInterval > 0 && d.written > 0 {\n\t\terr = d.rotatingByTime()\n\t}\n\treturn\n}\n\n// TIP: should only call on d.checkInterval > 0\nfunc (d *Writer) rotatingByTime() error {\n\tnow := d.cfg.TimeClock.Now()\n\tif now.Before(d.nextRotatingAt) {\n\t\treturn nil\n\t}\n\n\t// generate new file path.\n\t// eg: /tmp/error.log => /tmp/error.20220423_1600.log\n\t// file := d.cfg.Filepath + \".\" + d.nextRotatingAt.Format(d.suffixFormat)\n\tfile := d.buildFilePath(d.nextRotatingAt.Format(d.suffixFormat))\n\terr := d.rotatingFile(file, false)\n\n\t// calc and storage next rotating time\n\td.nextRotatingAt = d.nextRotatingAt.Add(time.Duration(d.checkInterval) * time.Second)\n\treturn err\n}\n\nfunc (d *Writer) rotatingBySize() error {\n\td.rotateNum++\n\tnow := 
d.cfg.TimeClock.Now()\n\t// up: use now minutes + seconds as rotate number\n\tnumStr := fmt.Sprintf(\"%d%d%d\", now.Hour(), now.Minute(), now.Second())\n\tnumInt := strutil.IntOr(numStr, 0) + now.Nanosecond()/1000\n\trotateNum := uint(numInt) + d.rotateNum\n\n\tvar bakFile string\n\tif d.cfg.IsMode(ModeCreate) {\n\t\t// eg: /tmp/error.log => /tmp/error.894136.log\n\t\t// eg: /tmp/error.20220423_1600.log => /tmp/error.20220423_1600_894136.log\n\t\tpathNoExt := d.path[:len(d.path)-len(d.fileExt)]\n\t\tbakFile = fmt.Sprintf(\"%s_%d%s\", pathNoExt, rotateNum, d.fileExt)\n\t} else if d.cfg.RenameFunc != nil {\n\t\t// rename current to new file by custom RenameFunc\n\t\t// eg: /tmp/error.log => /tmp/error.163021_894136.log\n\t\tbakFile = d.cfg.RenameFunc(d.cfg.Filepath, rotateNum)\n\t} else {\n\t\t// eg: /tmp/error.log => /tmp/error.25031615_894136.log\n\t\tbakFile = d.buildFilePath(fmt.Sprintf(\"%s_%d\", now.Format(\"06010215\"), rotateNum))\n\t}\n\n\t// always rename current to a new file\n\treturn d.rotatingFile(bakFile, true)\n}\n\n// rotateFile closes the syncBuffer's file and starts a new one.\nfunc (d *Writer) rotatingFile(bakFile string, rename bool) error {\n\t// close the current file\n\tif err := d.close(false); err != nil {\n\t\treturn err\n\t}\n\n\t// record old files for clean.\n\t// d.oldFiles = append(d.oldFiles, bakFile)\n\n\t// rename current to a new file.\n\tif rename || d.cfg.RotateMode == ModeRename {\n\t\tif err := os.Rename(d.path, bakFile); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// filepath for reopening\n\tlogfile := d.path\n\tif d.cfg.RotateMode == ModeRename {\n\t\tlogfile = d.cfg.Filepath\n\t}\n\n\t// reopen log file\n\tif err := d.openFile(logfile); err != nil {\n\t\treturn err\n\t}\n\n\t// reset written\n\td.written = 0\n\treturn nil\n}\n\n//\n// ---------------------------------------------------------------------------\n// clean backup files\n// ---------------------------------------------------------------------------\n//\n\n// 
check should clean old files by config\nfunc (d *Writer) shouldClean(withRand bool) bool {\n\tcfgIsYes := d.cfg.BackupNum > 0 || d.cfg.BackupTime > 0\n\tif !withRand {\n\t\treturn cfgIsYes\n\t}\n\n\t// 20% probability trigger clean\n\treturn cfgIsYes && rand.Intn(100) < 20\n}\n\n// async clean old files by config. should be in lock.\nfunc (d *Writer) asyncClean() {\n\tif !d.shouldClean(false) {\n\t\treturn\n\t}\n\n\t// if already running, send a signal\n\tif d.cleanCh != nil {\n\t\td.notifyClean()\n\t\treturn\n\t}\n\n\t// add lock for deny concurrent clean\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\t// re-check d.cleanCh is not nil\n\tif d.cleanCh != nil {\n\t\td.notifyClean()\n\t\treturn\n\t}\n\n\t// init clean channel\n\td.debugLog(\"INIT clean and stop channels for clean old files\")\n\td.cleanCh = make(chan struct{})\n\td.stopCh = make(chan struct{})\n\n\t// start a goroutine to clean backups\n\tgo func() {\n\t\td.debugLog(\"START a goroutine consumer for clean old files\")\n\n\t\t// consume the signal until stop\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-d.cleanCh:\n\t\t\t\td.debugLog(\"receive signal - clean old files handling\")\n\t\t\t\tprintErrln(\"rotatefile: clean old files error:\", d.doClean())\n\t\t\tcase <-d.stopCh:\n\t\t\t\td.cleanCh = nil\n\t\t\t\td.debugLog(\"STOP consumer for clean old files\")\n\t\t\t\treturn // stop clean\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (d *Writer) notifyClean() {\n\tselect {\n\tcase d.cleanCh <- struct{}{}: // notify clean old files\n\t\td.debugLog(\"sent signal - start clean old files...\")\n\tdefault: // skip on blocking\n\t\td.debugLog(\"clean old files signal blocked, SKIP\")\n\t}\n}\n\n// Clean old files by config\nfunc (d *Writer) Clean() (err error) {\n\tif d.cfg.BackupNum == 0 && d.cfg.BackupTime == 0 {\n\t\treturn errorx.Err(\"clean: backupNum and backupTime are both 0\")\n\t}\n\n\t// up: 单独运行清理，不需要设置 skipSeconds\n\treturn d.doClean(0)\n}\n\n// do clean old files by config\n//\n// - skipSeconds: skip find files that 
are within the specified seconds\nfunc (d *Writer) doClean(skipSeconds ...int) (err error) {\n\t// oldFiles: xx.log.yy files, no gz file\n\tvar oldFiles, gzFiles []fileInfo\n\tfileDir, fileName := d.fileDir, d.fileName\n\tcurFileName := filepath.Base(d.path)\n\n\t// FIX: do not process recent changes to avoid conflicts\n\tskipSec := 30\n\tif len(skipSeconds) > 0 {\n\t\tskipSec = skipSeconds[0]\n\t}\n\tlimitTime := d.cfg.TimeClock.Now().Add(-time.Second * time.Duration(skipSec))\n\n\t// find and clean old files\n\td.debugLog(\"clean - find old files, match name:\", fileName, \", in dir:\", fileDir)\n\terr = fsutil.FindInDir(fileDir, func(fPath string, ent fs.DirEntry) error {\n\t\tfi, err := ent.Info()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// fix: exclude the current file\n\t\tif ent.Name() == curFileName {\n\t\t\treturn nil\n\t\t}\n\n\t\tif strings.HasSuffix(ent.Name(), compressSuffix) {\n\t\t\tgzFiles = append(gzFiles, newFileInfo(fPath, fi))\n\t\t} else if fi.ModTime().Before(limitTime) {\n\t\t\toldFiles = append(oldFiles, newFileInfo(fPath, fi))\n\t\t}\n\t\treturn nil\n\t}, d.buildFilterFns(fileName)...)\n\n\tgzNum := len(gzFiles)\n\toldNum := len(oldFiles)\n\tremNum := mathutil.Max(gzNum+oldNum-int(d.cfg.BackupNum), 0)\n\td.debugLog(\"clean old files, gzNum:\", gzNum, \"oldNum:\", oldNum, \"remNum:\", remNum)\n\n\tif remNum > 0 && d.cfg.BackupNum > 0 {\n\t\t// remove old gz files\n\t\tif gzNum > 0 {\n\t\t\tremNum, err = d.removeOldGzFiles(remNum, gzFiles)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// remove old log files\n\t\tif remNum > 0 && oldNum > 0 {\n\t\t\toldFiles, err = d.removeOldFiles(remNum, oldFiles)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif d.cfg.Compress && len(oldFiles) > 0 {\n\t\td.debugLog(\"compress old normal files to gz files\")\n\t\terr = d.compressFiles(oldFiles)\n\t}\n\treturn\n}\n\n// remove old gz files\nfunc (d *Writer) removeOldGzFiles(remNum int, gzFiles []fileInfo) 
(rn int, err error) {\n\tgzNum := len(gzFiles)\n\tsort.Sort(modTimeFInfos(gzFiles)) // sort by mod-time\n\td.debugLog(\"remove old gz files ...\")\n\n\tfor idx := 0; idx < gzNum; idx++ {\n\t\td.debugLog(\"remove old gz file:\", gzFiles[idx].filePath)\n\t\tif err = os.Remove(gzFiles[idx].filePath); err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tremNum--\n\t\tif remNum == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn remNum, errorx.Wrap(err, \"remove old gz file error\")\n\t}\n\treturn remNum, nil\n}\n\n// remove old log files\nfunc (d *Writer) removeOldFiles(remNum int, oldFiles []fileInfo) (files []fileInfo, err error) {\n\t// sort by mod-time, oldest at first.\n\tsort.Sort(modTimeFInfos(oldFiles))\n\td.debugLog(\"remove old normal files ...\")\n\n\tvar idx int\n\toldNum := len(oldFiles)\n\n\tfor idx = 0; idx < oldNum; idx++ {\n\t\td.debugLog(\"remove old file:\", oldFiles[idx].filePath)\n\t\tif err = os.Remove(oldFiles[idx].filePath); err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tremNum--\n\t\tif remNum == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\toldFiles = oldFiles[idx+1:]\n\tif err != nil {\n\t\treturn nil, errorx.Wrap(err, \"remove old file error\")\n\t}\n\treturn oldFiles, nil\n}\n\n//\n// ---------------------------------------------------------------------------\n// helper methods\n// ---------------------------------------------------------------------------\n//\n\n// open the current file. and set the d.file, d.path\nfunc (d *Writer) openFile(logfile string) error {\n\tfile, err := fsutil.OpenFile(logfile, DefaultFileFlags, d.cfg.FilePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.path = logfile\n\td.file = file\n\treturn nil\n}\n\n// return eg. 
logs/error.20220423_1600.log\nfunc (d *Writer) buildFilePath(suffix string) string {\n\tfileName := d.onlyName + \".\" + suffix + d.fileExt\n\treturn fmt.Sprintf(\"%s/%s\", d.fileDir, fileName)\n}\n\nfunc (d *Writer) buildFilterFns(fileName string) []fsutil.FilterFunc {\n\tonlyName := d.onlyName\n\tfilterFns := []fsutil.FilterFunc{\n\t\tfsutil.OnlyFindFile,\n\t\t// filter by name. match pattern like: error.log.* eg: error.log.xx, error.log.xx.gz\n\t\tfunc(fPath string, ent fs.DirEntry) bool {\n\t\t\t// ok, _ := path.Match(fileName+\".*\", ent.Name())\n\t\t\tif !strings.HasPrefix(ent.Name(), fileName) {\n\t\t\t\t// 自定义文件名 eg: error.log -> error.20220423_02.log\n\t\t\t\treturn strings.HasPrefix(ent.Name(), onlyName)\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t}\n\n\t// filter by mod-time, clear expired files\n\tif d.cfg.BackupTime > 0 {\n\t\tcutTime := d.cfg.TimeClock.Now().Add(-d.backupDur)\n\t\tfilterFns = append(filterFns, func(fPath string, ent fs.DirEntry) bool {\n\t\t\tfi, err := ent.Info()\n\t\t\tif err != nil {\n\t\t\t\treturn false // skip, not handle\n\t\t\t}\n\n\t\t\t// collect un-expired\n\t\t\tif fi.ModTime().After(cutTime) {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\t// remove expired files\n\t\t\td.debugLog(\"remove expired file:\", fPath)\n\t\t\tprintErrln(\"rotatefile: remove expired file error:\", os.Remove(fPath))\n\t\t\treturn false\n\t\t})\n\t}\n\n\treturn filterFns\n}\n\nfunc (d *Writer) compressFiles(oldFiles []fileInfo) error {\n\tfor _, fi := range oldFiles {\n\t\terr := compressFile(fi.filePath, fi.filePath+compressSuffix)\n\t\tif err != nil {\n\t\t\treturn errorx.Wrap(err, \"compress old file error\")\n\t\t}\n\n\t\t// remove an old log file\n\t\td.debugLog(\"compress and rm old file:\", fi.filePath)\n\t\tif err = os.Remove(fi.filePath); err != nil {\n\t\t\treturn errorx.Wrap(err, \"remove file error after compress\")\n\t\t}\n\t}\n\treturn nil\n}\n\n// Debug print debug message on development\nfunc (d *Writer) debugLog(vs ...any) {\n\tif 
d.cfg.DebugMode {\n\t\tstdio.WriteString(\"[rotatefile.DEBUG] ID:\" + d.id + \" | \" + fmt.Sprintln(vs...))\n\t}\n}\n"
  },
  {
    "path": "rotatefile/writer_test.go",
    "content": "package rotatefile_test\n\nimport (\n\t\"path/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/dump\"\n\t\"github.com/gookit/goutil/fsutil\"\n\t\"github.com/gookit/goutil/mathutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/slog/internal\"\n\t\"github.com/gookit/slog/rotatefile\"\n)\n\nfunc TestNewWriter(t *testing.T) {\n\ttestFile := \"testdata/test_writer.log\"\n\tassert.NoErr(t, fsutil.DeleteIfExist(testFile))\n\n\tw, err := rotatefile.NewConfig(testFile).Create()\n\tassert.NoErr(t, err)\n\n\tc := w.Config()\n\t// dump.P(c)\n\tassert.Eq(t, c.MaxSize, rotatefile.DefaultMaxSize)\n\n\t_, err = w.WriteString(\"info log message\\n\")\n\tassert.NoErr(t, err)\n\tassert.True(t, fsutil.IsFile(testFile))\n\n\tassert.NoErr(t, w.Sync())\n\tassert.NoErr(t, w.Flush())\n\tassert.NoErr(t, w.Close())\n\n\tw, err = rotatefile.NewWriterWith(rotatefile.WithFilepath(testFile))\n\tassert.NoErr(t, err)\n\tassert.Eq(t, w.Config().Filepath, testFile)\n}\n\nfunc TestWriter_Rotate_modeCreate(t *testing.T) {\n\tlogfile := \"testdata/mode_create.log\"\n\n\tc := rotatefile.NewConfig(logfile)\n\tc.RotateMode = rotatefile.ModeCreate\n\n\twr, err := c.Create()\n\tassert.NoErr(t, err)\n\t_, err = wr.WriteString(\"[INFO] this is a log message\\n\")\n\tassert.NoErr(t, err)\n\tassert.False(t, fsutil.IsFile(logfile))\n\n\tls, err := filepath.Glob(\"testdata/mode_create*\")\n\tassert.NoErr(t, err)\n\tassert.Len(t, ls, 1)\n\n\tfor i := 0; i < 20; i++ {\n\t\t_, err = wr.WriteString(\"[INFO] this is a log message, idx=\" + mathutil.String(i) + \"\\n\")\n\t\tassert.NoErr(t, err)\n\t}\n\n\t// test clean and backup\n\tc.BackupNum = 2\n\tc.MaxSize = 128\n\terr = wr.Rotate()\n\tassert.NoErr(t, err)\n\t_, err = wr.WriteString(\"hi, rotated\\n\")\n\tassert.NoErr(t, err)\n}\n\nfunc TestWriter_rotateByTime(t *testing.T) {\n\tlogfile := \"testdata/rotate-by-time.log\"\n\tc := rotatefile.NewConfig(logfile).With(func(c *rotatefile.Config) 
{\n\t\tc.DebugMode = true\n\t\tc.Compress = true\n\t\tc.CleanOnClose = true\n\t\tc.RotateTime = rotatefile.EverySecond * 2\n\t})\n\n\tw, err := c.Create()\n\tassert.NoErr(t, err)\n\tdefer func() {\n\t\t_ = w.Close()\n\t}()\n\n\tfor i := 0; i < 5; i++ {\n\t\t_, err = w.WriteString(\"[INFO] this is a log message, idx=\" + mathutil.String(i) + \"\\n\")\n\t\tassert.NoErr(t, err)\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tfiles := fsutil.Glob(internal.BuildGlobPattern(logfile))\n\tdump.P(files)\n\n}\n\nfunc TestWriter_Clean(t *testing.T) {\n\tlogfile := \"testdata/writer_clean.log\"\n\n\tc := rotatefile.NewConfig(logfile)\n\tc.MaxSize = 128 // will rotate by size\n\n\twr, err := c.Create()\n\tassert.NoErr(t, err)\n\tdefer func() {\n\t\t_ = wr.Close()\n\t}()\n\n\tfor i := 0; i < 20; i++ {\n\t\t_, err = wr.WriteString(\"[INFO] this is a log message, idx=\" + mathutil.String(i) + \"\\n\")\n\t\tassert.NoErr(t, err)\n\t}\n\n\tassert.True(t, fsutil.IsFile(logfile))\n\t_, err = wr.WriteString(\"hi\\n\")\n\tassert.NoErr(t, err)\n\n\tfiles := fsutil.Glob(internal.BuildGlobPattern(logfile))\n\tdump.P(files)\n\n\t// test clean error\n\tt.Run(\"clean error\", func(t *testing.T) {\n\t\tc.BackupNum = 0\n\t\tc.BackupTime = 0\n\t\tassert.Err(t, wr.Clean())\n\t})\n\n\t// test clean and compress backup\n\tt.Run(\"clean and compress\", func(t *testing.T) {\n\t\tc.BackupNum = 2\n\t\tc.Compress = true\n\t\terr = wr.Clean()\n\t\tassert.NoErr(t, err)\n\n\t\tfiles := fsutil.Glob(internal.BuildGlobPattern(logfile))\n\t\tassert.Lt(t, 2, len(files))\n\t})\n}\n\n// test writer compress\nfunc TestWriter_Compress(t *testing.T) {\n\tlogfile := \"testdata/test_compress.log\"\n\n\tc := rotatefile.NewConfig(logfile)\n\tc.MaxSize = 128 // will rotate by size\n\tc.With(rotatefile.WithDebugMode)\n\n\twr, err := c.Create()\n\tassert.NoErr(t, err)\n\n\tfor i := 0; i < 20; i++ {\n\t\t_, err = wr.WriteString(\"[INFO] this is a log message, idx=\" + mathutil.String(i) + \"\\n\")\n\t\tassert.NoErr(t, 
err)\n\t}\n\n\tassert.True(t, fsutil.IsFile(logfile))\n\t_, err = wr.WriteString(\"hi\\n\")\n\tassert.NoErr(t, err)\n\twr.MustClose()\n\n\tfiles := fsutil.Glob(internal.BuildGlobPattern(logfile))\n\tassert.NotEmpty(t, files)\n\tdump.P(files)\n\n\t// test clean and compress backup\n\tt.Run(\"compress backup\", func(t *testing.T) {\n\t\tc := rotatefile.NewConfig(logfile,\n\t\t\trotatefile.WithDebugMode, rotatefile.WithCompress,\n\t\t\trotatefile.WithBackupNum(2),\n\t\t)\n\n\t\twr, err := c.Create()\n\t\tassert.NoErr(t, err)\n\t\tdefer wr.MustClose()\n\n\t\terr = wr.Clean()\n\t\tassert.NoErr(t, err)\n\n\t\tfiles := fsutil.Glob(internal.BuildGlobPattern(logfile))\n\t\tassert.Lt(t, 2, len(files))\n\t\tdump.P(files)\n\t})\n}\n\n// TODO set github.com/benbjohnson/clock for mock clock\ntype constantClock time.Time\n\nfunc (c constantClock) Now() time.Time { return time.Time(c) }\nfunc (c constantClock) NewTicker(d time.Duration) *time.Ticker {\n\treturn &time.Ticker{}\n}\n"
  },
  {
    "path": "slog.go",
    "content": "/*\nPackage slog Lightweight, extensible, configurable logging library written in Go.\n\nSource code and other details for the project are available at GitHub:\n\n\thttps://github.com/gookit/slog\n\nQuick usage:\n\n\tpackage main\n\n\timport (\n\t\t\"github.com/gookit/slog\"\n\t)\n\n\tfunc main() {\n\t\tslog.Info(\"info log message\")\n\t\tslog.Warn(\"warning log message\")\n\t\tslog.Infof(\"info log %s\", \"message\")\n\t\tslog.Debugf(\"debug %s\", \"message\")\n\t}\n\nMore usage please see README.\n*/\npackage slog\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil\"\n)\n\n//\n// ------------------------------------------------------------\n// Global std logger operate\n// ------------------------------------------------------------\n//\n\n// std logger is a SugaredLogger.\n// It is directly available without any additional configuration\nvar std = NewStdLogger()\n\n// Std get std logger\nfunc Std() *SugaredLogger { return std }\n\n// Reset the std logger and reset exit handlers\nfunc Reset() {\n\tResetExitHandlers(true)\n\t// new std\n\tstd = NewStdLogger()\n}\n\n// Configure the std logger\nfunc Configure(fn func(l *SugaredLogger)) { std.Config(fn) }\n\n// SetExitFunc to the std logger\nfunc SetExitFunc(fn func(code int)) { std.ExitFunc = fn }\n\n// Exit runs all exit handlers and then terminates the program using os.Exit(code)\nfunc Exit(code int) { std.Exit(code) }\n\n// Close logger, flush and close all handlers.\n//\n// IMPORTANT: please call Close() before app exit.\nfunc Close() error { return std.Close() }\n\n// MustClose logger, flush and close all handlers.\n//\n// IMPORTANT: please call Close() before app exit.\nfunc MustClose() { goutil.PanicErr(Close()) }\n\n// Flush log messages\nfunc Flush() error { return std.Flush() }\n\n// MustFlush log messages\nfunc MustFlush() { goutil.PanicErr(Flush()) }\n\n// FlushTimeout flush logs with timeout.\nfunc FlushTimeout(timeout time.Duration) { std.FlushTimeout(timeout) 
}\n\n// FlushDaemon run flush handle on daemon.\n//\n// Usage please see slog_test.ExampleFlushDaemon()\nfunc FlushDaemon(onStops ...func()) { std.FlushDaemon(onStops...) }\n\n// StopDaemon stop flush daemon\nfunc StopDaemon() { std.StopDaemon() }\n\n// SetLogLevel max level for the std logger\nfunc SetLogLevel(l Level) { std.Level = l }\n\n// SetLevelByName set max log level by name. eg: \"info\", \"debug\" ...\nfunc SetLevelByName(name string) { std.Level = LevelByName(name) }\n\n// SetFormatter to std logger\nfunc SetFormatter(f Formatter) { std.Formatter = f }\n\n// GetFormatter of the std logger\nfunc GetFormatter() Formatter { return std.Formatter }\n\n// AddHandler to the std logger\nfunc AddHandler(h Handler) { std.AddHandler(h) }\n\n// PushHandler to the std logger\nfunc PushHandler(h Handler) { std.AddHandler(h) }\n\n// AddHandlers to the std logger\nfunc AddHandlers(hs ...Handler) { std.AddHandlers(hs...) }\n\n// PushHandlers to the std logger\nfunc PushHandlers(hs ...Handler) { std.PushHandlers(hs...) }\n\n// AddProcessor to the logger\nfunc AddProcessor(p Processor) { std.AddProcessor(p) }\n\n// AddProcessors to the logger\nfunc AddProcessors(ps ...Processor) { std.AddProcessors(ps...) 
}\n\n// -------------------------- New sub-logger -----------------------------\n\n// NewSub returns a new SubLogger on the std logger.\nfunc NewSub() *SubLogger { return NewSubWith(std.Logger) }\n\n// -------------------------- New record with log data, fields -----------------------------\n\n// WithExtra new record with extra data\nfunc WithExtra(ext M) *Record { return std.WithExtra(ext) }\n\n// WithData new record with data\nfunc WithData(data M) *Record { return std.WithData(data) }\n\n// WithValue new record with data value\nfunc WithValue(key string, value any) *Record { return std.WithValue(key, value) }\n\n// WithField new record with field.\n//\n// **NOTE**: add field need config Formatter template fields.\nfunc WithField(name string, value any) *Record { return std.WithField(name, value) }\n\n// WithFields new record with fields\n//\n// **NOTE**: add field need config Formatter template fields.\nfunc WithFields(fields M) *Record { return std.WithFields(fields) }\n\n// WithContext new record with context\nfunc WithContext(ctx context.Context) *Record { return std.WithContext(ctx) }\n\n// region Add log messages\n// -------------------------- Add log messages with level -----------------------------\n\n// Log logs a message with level\nfunc Log(level Level, args ...any) { std.log(level, args) }\n\n// Print logs a message at level PrintLevel\nfunc Print(args ...any) { std.log(PrintLevel, args) }\n\n// Println logs a message at level PrintLevel\nfunc Println(args ...any) { std.log(PrintLevel, args) }\n\n// Printf logs a message at level PrintLevel\nfunc Printf(format string, args ...any) { std.logf(PrintLevel, format, args) }\n\n// Trace logs a message at level TraceLevel\nfunc Trace(args ...any) { std.log(TraceLevel, args) }\n\n// Tracef logs a message at level TraceLevel\nfunc Tracef(format string, args ...any) { std.logf(TraceLevel, format, args) }\n\n// TraceCtx logs a message at level TraceLevel with context\nfunc TraceCtx(ctx context.Context, args 
...any) { std.logCtx(ctx, TraceLevel, args) }\n\n// TracefCtx logs a message at level TraceLevel with context\nfunc TracefCtx(ctx context.Context, format string, args ...any) {\n\tstd.logfCtx(ctx, TraceLevel, format, args)\n}\n\n// Debug logs a message at level DebugLevel\nfunc Debug(args ...any) { std.log(DebugLevel, args) }\n\n// Debugf logs a message at level DebugLevel\nfunc Debugf(format string, args ...any) { std.logf(DebugLevel, format, args) }\n\n// DebugCtx logs a message at level DebugLevel with context\nfunc DebugCtx(ctx context.Context, args ...any) { std.logCtx(ctx, DebugLevel, args) }\n\n// DebugfCtx logs a message at level DebugLevel with context\nfunc DebugfCtx(ctx context.Context, format string, args ...any) {\n\tstd.logfCtx(ctx, DebugLevel, format, args)\n}\n\n// Info logs a message at level InfoLevel\nfunc Info(args ...any) { std.log(InfoLevel, args) }\n\n// Infof logs a message at level InfoLevel\nfunc Infof(format string, args ...any) { std.logf(InfoLevel, format, args) }\n\n// InfoCtx logs a message at level InfoLevel with context\nfunc InfoCtx(ctx context.Context, args ...any) { std.logCtx(ctx, InfoLevel, args) }\n\n// InfofCtx logs a message at level InfoLevel with context\nfunc InfofCtx(ctx context.Context, format string, args ...any) {\n\tstd.logfCtx(ctx, InfoLevel, format, args)\n}\n\n// Notice logs a message at level NoticeLevel\nfunc Notice(args ...any) { std.log(NoticeLevel, args) }\n\n// Noticef logs a message at level NoticeLevel\nfunc Noticef(format string, args ...any) { std.logf(NoticeLevel, format, args) }\n\n// NoticeCtx logs a message at level NoticeLevel with context\nfunc NoticeCtx(ctx context.Context, args ...any) { std.logCtx(ctx, NoticeLevel, args) }\n\n// NoticefCtx logs a message at level NoticeLevel with context\nfunc NoticefCtx(ctx context.Context, format string, args ...any) {\n\tstd.logfCtx(ctx, NoticeLevel, format, args)\n}\n\n// Warn logs a message at level WarnLevel\nfunc Warn(args ...any) { std.log(WarnLevel, 
args) }\n\n// Warnf logs a message at level WarnLevel\nfunc Warnf(format string, args ...any) { std.logf(WarnLevel, format, args) }\n\n// WarnCtx logs a message at level Warn with a context\nfunc WarnCtx(ctx context.Context, args ...any) { std.logCtx(ctx, WarnLevel, args) }\n\n// WarnfCtx logs a message at level Warn with a context\nfunc WarnfCtx(ctx context.Context, format string, args ...any) {\n\tstd.logfCtx(ctx, WarnLevel, format, args)\n}\n\n// Error logs a message at level Error\nfunc Error(args ...any) { std.log(ErrorLevel, args) }\n\n// Errorf logs a message at level Error\nfunc Errorf(format string, args ...any) { std.logf(ErrorLevel, format, args) }\n\n// ErrorT logs a error type at level Error\nfunc ErrorT(err error) {\n\tif err != nil {\n\t\tstd.log(ErrorLevel, []any{err})\n\t}\n}\n\n// ErrorCtx logs a message at level Error with context\nfunc ErrorCtx(ctx context.Context, args ...any) { std.logCtx(ctx, ErrorLevel, args) }\n\n// ErrorfCtx logs a message at level Error with context\nfunc ErrorfCtx(ctx context.Context, format string, args ...any) {\n\tstd.logfCtx(ctx, ErrorLevel, format, args)\n}\n\n// EStack logs a error message and with call stack.\n// func EStack(args ...any) {\n// \tstd.WithExtra(map[string]any{\"stack\": goinfo.GetCallerInfo(2)}).\n// \t\tlog(ErrorLevel, args)\n// }\n\n// Fatal logs a message at level Fatal\nfunc Fatal(args ...any) { std.log(FatalLevel, args) }\n\n// Fatalf logs a message at level Fatal\nfunc Fatalf(format string, args ...any) { std.logf(FatalLevel, format, args) }\n\n// FatalErr logs a message at level Fatal on err is not nil\nfunc FatalErr(err error) {\n\tif err != nil {\n\t\tstd.log(FatalLevel, []any{err})\n\t}\n}\n\n// FatalCtx logs a message at level Fatal with context\nfunc FatalCtx(ctx context.Context, args ...any) { std.logCtx(ctx, FatalLevel, args) }\n\n// FatalfCtx logs a message at level Fatal with context\nfunc FatalfCtx(ctx context.Context, format string, args ...any) {\n\tstd.logfCtx(ctx, FatalLevel, 
format, args)\n}\n\n// Panic logs a message at level Panic\nfunc Panic(args ...any) { std.log(PanicLevel, args) }\n\n// Panicf logs a message at level Panic\nfunc Panicf(format string, args ...any) { std.logf(PanicLevel, format, args) }\n\n// PanicErr logs a message at level Panic when err is not nil\nfunc PanicErr(err error) {\n\tif err != nil {\n\t\tstd.log(PanicLevel, []any{err})\n\t}\n}\n\n// PanicCtx logs a message at level Panic with context\nfunc PanicCtx(ctx context.Context, args ...any) { std.logCtx(ctx, PanicLevel, args) }\n\n// PanicfCtx logs a message at level Panic with context\nfunc PanicfCtx(ctx context.Context, format string, args ...any) {\n\tstd.logfCtx(ctx, PanicLevel, format, args)\n}\n"
  },
  {
    "path": "slog_test.go",
    "content": "package slog_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gookit/goutil/byteutil\"\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/testutil\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/timex\"\n\t\"github.com/gookit/slog\"\n\t\"github.com/gookit/slog/handler\"\n)\n\nvar doNothing = func(code int) {\n\t// do nothing\n}\n\nfunc TestStd(t *testing.T) {\n\tdefer slog.Reset()\n\tassert.Eq(t, \"stdLogger\", slog.Std().Name())\n\n\t_, ok := slog.GetFormatter().(*slog.TextFormatter)\n\tassert.True(t, ok)\n\n\tslog.SetLogLevel(slog.WarnLevel)\n\tslog.SetFormatter(slog.NewJSONFormatter())\n\n\tassert.True(t, slog.Std().IsHandling(slog.WarnLevel))\n\tassert.True(t, slog.Std().IsHandling(slog.ErrorLevel))\n\tassert.False(t, slog.Std().IsHandling(slog.InfoLevel))\n\n\t_, ok = slog.GetFormatter().(*slog.JSONFormatter)\n\tassert.True(t, ok)\n\n\tbuf := new(bytes.Buffer)\n\tslog.Std().ExitFunc = func(code int) {\n\t\tbuf.WriteString(\"Exited,\")\n\t\tbuf.WriteString(strconv.Itoa(code))\n\t}\n\tslog.Exit(34)\n\tassert.Eq(t, \"Exited,34\", buf.String())\n}\n\nfunc TestTextFormatNoColor(t *testing.T) {\n\tdefer slog.Reset()\n\tslog.Configure(func(l *slog.SugaredLogger) {\n\t\tf := l.Formatter.(*slog.TextFormatter)\n\t\tf.EnableColor = false\n\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\n\tprintLogs(\"print log message\")\n\tprintfLogs(\"print log with %s\", \"params\")\n\n\tassert.NoErr(t, slog.Std().FlushAll())\n\tassert.NoErr(t, slog.Std().Close())\n}\n\nfunc TestFlushDaemon(t *testing.T) {\n\tdefer slog.Reset()\n\n\tbuf := byteutil.NewBuffer()\n\tslog.Configure(func(l *slog.SugaredLogger) {\n\t\tl.FlushInterval = timex.Millisecond * 100\n\t\tl.Output = buf\n\t})\n\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\tgo slog.FlushDaemon(func() {\n\t\tfmt.Println(\"flush daemon stopped\")\n\t\twg.Done()\n\t})\n\n\tgo func() 
{\n\t\t// mock app running\n\t\ttime.Sleep(time.Second * 1)\n\n\t\t// stop daemon\n\t\tfmt.Println(\"stop flush daemon\")\n\t\tslog.StopDaemon()\n\t}()\n\n\tslog.Info(\"print log message\")\n\n\twg.Wait()\n\n\tfmt.Print(buf.ResetGet())\n}\n\nfunc TestFlushTimeout(t *testing.T) {\n\tdefer slog.Reset()\n\tslog.Info(\"print log message\")\n\tslog.NewSub().KeepData(map[string]any{\"key\": \"value\"}).Warn(\"test message\")\n\tslog.FlushTimeout(timex.Second * 1)\n\tslog.MustFlush()\n}\n\nfunc TestNewSugaredLogger(t *testing.T) {\n\tbuf := byteutil.NewBuffer()\n\tl := slog.NewSugared(buf, slog.DebugLevel, func(sl *slog.SugaredLogger) {\n\t\tsl.SetName(\"test\")\n\t\tsl.ReportCaller = true\n\t\tsl.CallerFlag = slog.CallerFlagFcLine\n\t})\n\n\tl.Debug(\"debug message\")\n\tl.Info(\"info message\")\n\ts := buf.ResetAndGet()\n\tassert.StrContains(t, s, \"debug message\")\n\n\tl = slog.NewStd(func(sl *slog.SugaredLogger) {\n\t\tsl.SetName(\"test\")\n\t\tsl.ReportCaller = true\n\t\tsl.CallerFlag = slog.CallerFlagFunc\n\t})\n\tl.Info(\"info message1\")\n}\n\ntype logTest struct {\n\t*slog.SugaredLogger\n}\n\nfunc (l logTest) testPrint() {\n\tl.Logger.Info(\"print testing\")\n}\n\nfunc TestTextFormatWithColor(t *testing.T) {\n\tdefer slog.Reset()\n\n\tslog.Configure(func(l *slog.SugaredLogger) {\n\t\tl.Level = slog.TraceLevel\n\t\tl.DoNothingOnPanicFatal()\n\t})\n\n\tprintLogs(\"this is a simple log message\")\n\tfmt.Println()\n\n\tslog.Std().Trace(\"this is a simple log message\")\n\tlt := &logTest{slog.Std()}\n\tlt.testPrint()\n\n\tfmt.Println()\n\tslog.GetFormatter().(*slog.TextFormatter).SetTemplate(slog.NamedTemplate)\n\tprintfLogs(\"print log with %s\", \"params\")\n\n\tfmt.Println()\n\ttpl := \"[{{datetime}}] [{{channel}}] [{{level}}] [{{func}}] {{message}} {{data}} {{extra}}\\n\"\n\tslog.GetFormatter().(*slog.TextFormatter).SetTemplate(tpl)\n\tprintfLogs(\"print log with %s\", \"params\")\n\n\tlt = &logTest{\n\t\tslog.Std(),\n\t}\n\tlt.testPrint()\n}\n\nfunc 
printLogs(msg string) {\n\tslog.Log(slog.TraceLevel, msg)\n\tslog.Print(msg)\n\tslog.Println(msg)\n\tslog.Trace(msg)\n\tslog.Debug(msg)\n\tslog.Info(msg)\n\tslog.Notice(msg)\n\tslog.Warn(msg)\n\tslog.Error(msg)\n\tslog.Fatal(msg)\n\tslog.FatalErr(errorx.Rawf(\"Fatal Err: %s\", msg))\n\tslog.Panic(msg)\n\tslog.PanicErr(errorx.Rawf(\"Panic Err: %s\", msg))\n\tslog.ErrorT(errors.New(msg))\n\tslog.ErrorT(errorx.Newf(\"Traced Err: %s\", msg))\n}\n\nfunc printfLogs(msg string, args ...any) {\n\tslog.Printf(msg, args...)\n\tslog.Tracef(msg, args...)\n\tslog.Debugf(msg, args...)\n\tslog.Infof(msg, args...)\n\tslog.Noticef(msg, args...)\n\tslog.Warnf(msg, args...)\n\tslog.Errorf(msg, args...)\n\tslog.Panicf(msg, args...)\n\tslog.Fatalf(msg, args...)\n}\n\nfunc TestSetFormatter_jsonFormat(t *testing.T) {\n\tdefer slog.Reset()\n\tslog.SetLevelByName(\"trace\")\n\tslog.SetFormatter(slog.NewJSONFormatter())\n\n\tth := newTestHandler()\n\tth.SetFormatter(slog.NewJSONFormatter().Configure(func(f *slog.JSONFormatter) {\n\t\tf.Fields = slog.NoTimeFields\n\t}))\n\tslog.PushHandler(th)\n\n\tassert.Eq(t, 2, slog.Std().HandlersNum())\n\n\tslog.Info(\"info log message1\")\n\tslog.Warn(\"warning log message2\")\n\ts := th.ResetGet()\n\tassert.StrContains(t, s, `\"level\":\"INFO\"`)\n\tassert.StrContains(t, s, `info log message1`)\n\tassert.StrContains(t, s, `\"level\":\"WARNING\"`)\n\tassert.StrContains(t, s, `warning log message2`)\n\n\t// WithData\n\tslog.WithData(slog.M{\n\t\t\"key0\": 134,\n\t\t\"key1\": \"abc\",\n\t}).Infof(\"info log %s\", \"message\")\n\ts = th.ResetGet()\n\tassert.StrContains(t, s, `\"key1\":\"abc\"`)\n\n\t// reused record\n\tr := slog.WithFields(slog.M{\n\t\t\"category\": \"service\",\n\t\t\"IP\":       \"127.0.0.1\",\n\t}).Reused()\n\tr.Infof(\"info %s\", \"message\")\n\tr.Debugf(\"debug %s\", \"message\")\n\tr.Release()\n\ts = th.ResetGet()\n\tassert.StrContains(t, s, `\"category\"`)\n\tassert.StrCount(t, s, `\"127.0.0.1\"`, 2)\n\n\t// reused record\n\tr = 
slog.WithField(\"category\", \"app-service\").Reused()\n\tr.Infof(\"info %s\", \"message\")\n\tr.Debugf(\"debug %s\", \"message\")\n\tr.Release()\n\ts = th.ResetGet()\n\tassert.StrContains(t, s, `\"category\"`)\n\tassert.StrCount(t, s, `\"app-service\"`, 2)\n\n\t// sub logger\n\tsub := slog.NewSub().KeepField(\"app\", \"order\")\n\tsub.Trace(\"trace message\")\n\tsub.Print(\"print message\")\n\tsub.Release()\n\ts = th.ResetGet()\n\tassert.StrContains(t, s, `\"app\":\"order\"`)\n\tassert.StrCount(t, s, `\"app\":\"order\"`, 2)\n\n\t// WithContext\n\tctx := context.WithValue(context.Background(), \"ctxField\", \"ctx1-value\")\n\tslog.AddProcessor(slog.CtxKeysProcessor(\"fields\", \"ctxField\"))\n\tslog.WithContext(ctx).Print(\"print message with ctx\")\n\ts = th.ResetGet()\n\tassert.StrContains(t, s, \"print message with ctx\")\n\tassert.StrContains(t, s, \"ctxField\")\n}\n\nfunc TestAddHandler(t *testing.T) {\n\tdefer slog.Reset()\n\tslog.AddHandler(handler.NewConsoleHandler(slog.AllLevels))\n\n\th2 := handler.NewConsoleHandler(slog.AllLevels)\n\th2.SetFormatter(slog.NewJSONFormatter().Configure(func(f *slog.JSONFormatter) {\n\t\tf.Aliases = slog.StringMap{\n\t\t\t\"level\":   \"levelName\",\n\t\t\t\"message\": \"msg\",\n\t\t\t\"data\":    \"params\",\n\t\t}\n\t}))\n\n\tslog.AddHandlers(h2)\n\tslog.Infof(\"info %s\", \"message\")\n}\n\nfunc TestWithExtra(t *testing.T) {\n\tdefer slog.Reset()\n\n\tth := newTestHandler()\n\tslog.AddHandler(th)\n\n\tslog.WithExtra(slog.M{\"ext1\": \"val1\"}).\n\t\tAddValue(\"key1\", \"val2\").\n\t\tInfo(\"info message\")\n\ts := th.ResetGet()\n\tassert.StrContains(t, s, `ext1:val1`)\n\tassert.StrContains(t, s, `{key1:val2}`)\n\n\tslog.WithValue(\"key1\", \"val2\").Info(\"info message\")\n\ts = th.ResetGet()\n\tassert.StrContains(t, s, `{key1:val2}`)\n}\n\nfunc TestAddProcessor(t *testing.T) {\n\tdefer slog.Reset()\n\n\tbuf := new(bytes.Buffer)\n\tslog.Configure(func(logger *slog.SugaredLogger) {\n\t\tlogger.Level = 
slog.TraceLevel\n\t\tlogger.Output = buf\n\t\tlogger.Formatter = slog.NewJSONFormatter()\n\t})\n\n\tslog.AddProcessor(slog.AddHostname())\n\tslog.Trace(\"Trace message\")\n\tslog.Tracef(\"Tracef %s\", \"message\")\n\n\tstr := buf.String()\n\tbuf.Reset()\n\tfmt.Println(str)\n\tassert.Contains(t, str, `\"hostname\":`)\n\tassert.Contains(t, str, \"Trace message\")\n\tassert.Contains(t, str, \"Tracef message\")\n\n\tslog.AddProcessors(slog.ProcessorFunc(func(r *slog.Record) {\n\t\tr.AddField(\"newField\", \"newValue\")\n\t}))\n\tslog.Debug(\"Debug message\")\n\tslog.Debugf(\"Debugf %s\", \"message\")\n\tstr = buf.String()\n\tbuf.Reset()\n\n\tassert.Contains(t, str, `\"newField\":\"newValue\"`)\n\tassert.Contains(t, str, \"Debug message\")\n\tassert.Contains(t, str, \"Debugf message\")\n}\n\n\nfunc TestPrependExitHandler(t *testing.T) {\n\tdefer slog.Reset()\n\n\tassert.Len(t, slog.ExitHandlers(), 0)\n\n\tbuf := new(bytes.Buffer)\n\tslog.PrependExitHandler(func() {\n\t\tbuf.WriteString(\"HANDLER1-\")\n\t})\n\tslog.PrependExitHandler(func() {\n\t\tbuf.WriteString(\"HANDLER2-\")\n\t})\n\tassert.Len(t, slog.ExitHandlers(), 2)\n\n\tslog.SetExitFunc(func(code int) {\n\t\tbuf.WriteString(\"Exited\")\n\t})\n\tslog.Exit(23)\n\tassert.Eq(t, \"HANDLER2-HANDLER1-Exited\", buf.String())\n}\n\nfunc TestRegisterExitHandler(t *testing.T) {\n\tdefer slog.Reset()\n\n\tassert.Len(t, slog.ExitHandlers(), 0)\n\n\tbuf := new(bytes.Buffer)\n\tslog.RegisterExitHandler(func() {\n\t\tbuf.WriteString(\"HANDLER1-\")\n\t})\n\tslog.RegisterExitHandler(func() {\n\t\tbuf.WriteString(\"HANDLER2-\")\n\t})\n\t// prepend\n\tslog.PrependExitHandler(func() {\n\t\tbuf.WriteString(\"HANDLER3-\")\n\t})\n\tassert.Len(t, slog.ExitHandlers(), 3)\n\n\tslog.SetExitFunc(func(code int) {\n\t\tbuf.WriteString(\"Exited\")\n\t})\n\tslog.Exit(23)\n\tassert.Eq(t, \"HANDLER3-HANDLER1-HANDLER2-Exited\", buf.String())\n}\n\nfunc TestExitHandlerWithError(t *testing.T) {\n\tdefer slog.Reset()\n\tassert.Len(t, 
slog.ExitHandlers(), 0)\n\n\tslog.RegisterExitHandler(func() {\n\t\tpanic(\"test error\")\n\t})\n\n\tslog.SetExitFunc(func(code int) {})\n\n\ttestutil.RewriteStderr()\n\tslog.Exit(23)\n\tstr := testutil.RestoreStderr()\n\tassert.Eq(t, \"slog: run exit handler(global) recovered, error: test error\\n\", str)\n}\n\nfunc TestLogger_ExitHandlerWithError(t *testing.T) {\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.ExitFunc = doNothing\n\t})\n\n\tassert.Len(t, l.ExitHandlers(), 0)\n\n\tl.RegisterExitHandler(func() {\n\t\tpanic(\"test error\")\n\t})\n\n\ttestutil.RewriteStderr()\n\tl.Exit(23)\n\tstr := testutil.RestoreStderr()\n\tassert.Eq(t, \"slog: run exit handler recovered, error: test error\\n\", str)\n}\n\nfunc TestLogger_PrependExitHandler(t *testing.T) {\n\tl := slog.NewWithConfig(func(l *slog.Logger) {\n\t\tl.ExitFunc = doNothing\n\t})\n\n\tassert.Len(t, l.ExitHandlers(), 0)\n\n\tl.PrependExitHandler(func() {\n\t\tpanic(\"test error2\")\n\t})\n\n\ttestutil.RewriteStderr()\n\tl.Exit(23)\n\tstr := testutil.RestoreStderr()\n\tassert.Eq(t, \"slog: run exit handler recovered, error: test error2\\n\", str)\n}\n\nfunc TestSugaredLogger_Close(t *testing.T) {\n\th := newTestHandler()\n\n\tsl := slog.NewStd(func(sl *slog.SugaredLogger) {\n\t\tsl.PushHandler(h)\n\t\tsl.Formatter = newTestFormatter()\n\t})\n\n\th.errOnClose = true\n\terr := sl.Close()\n\tassert.Err(t, err)\n\tassert.Err(t, sl.LastErr())\n\tassert.Eq(t, \"close error\", err.Error())\n}\n\nfunc TestSugaredLogger_Handle(t *testing.T) {\n\tbuf := byteutil.NewBuffer()\n\tsl := slog.NewStd(func(sl *slog.SugaredLogger) {\n\t\tsl.Output = buf\n\t\tsl.Formatter = newTestFormatter(true)\n\t})\n\n\t// Handle error: format error\n\tsl.WithField(\"key\", \"value\").Error(\"error message\")\n\terr := sl.LastErr()\n\tassert.Err(t, err)\n\tassert.Eq(t, \"format error\", err.Error())\n}\n\nfunc TestAddWithCtx(t *testing.T) {\n\th := 
newTestHandler()\n\n\tslog.Reset()\n\tslog.PushHandler(h)\n\tslog.Std().DoNothingOnPanicFatal()\n\tslog.AddProcessor(slog.CtxKeysProcessor(\"data\", \"ctx1\", \"ctx2\"))\n\n\tctx := context.WithValue(context.Background(), \"ctx1\", \"ctx1-value\")\n\tctx = context.WithValue(ctx, \"ctx2\", \"ctx2-value\")\n\n\tt.Run(\"normal\", func(t *testing.T) {\n\t\tslog.TraceCtx(ctx, \"A message\", \"test\")\n\t\tslog.DebugCtx(ctx, \"A message\", \"test\")\n\t\tslog.InfoCtx(ctx, \"A message\", \"test\")\n\t\tslog.NoticeCtx(ctx, \"A message\", \"test\")\n\t\tslog.WarnCtx(ctx, \"A message\", \"test\")\n\t\tslog.ErrorCtx(ctx, \"A message\", \"test\")\n\t\tslog.FatalCtx(ctx, \"A message\", \"test\")\n\t\tslog.PanicCtx(ctx, \"A message\", \"test\")\n\n\t\ts := h.ResetGet()\n\t\tassert.StrContains(t, s, \"ctx1-value\")\n\t\tassert.StrContains(t, s, \"ctx2-value\")\n\t\tfor _, level := range slog.AllLevels {\n\t\t\tassert.StrContains(t, s, level.Name())\n\t\t}\n\t})\n\n\tt.Run(\"with format\", func(t *testing.T) {\n\t\tslog.TracefCtx(ctx, \"A message %s\", \"test\")\n\t\tslog.DebugfCtx(ctx, \"A message %s\", \"test\")\n\t\tslog.InfofCtx(ctx, \"A message %s\", \"test\")\n\t\tslog.NoticefCtx(ctx, \"A message %s\", \"test\")\n\t\tslog.WarnfCtx(ctx, \"A message %s\", \"test\")\n\t\tslog.ErrorfCtx(ctx, \"A message %s\", \"test\")\n\t\tslog.PanicfCtx(ctx, \"A message %s\", \"test\")\n\t\tslog.FatalfCtx(ctx, \"A message %s\", \"test\")\n\n\t\ts := h.ResetGet()\n\t\tassert.StrContains(t, s, \"ctx1-value\")\n\t\tassert.StrContains(t, s, \"ctx2-value\")\n\t\tfor _, level := range slog.AllLevels {\n\t\t\tassert.StrContains(t, s, level.Name())\n\t\t}\n\t})\n\tslog.Reset()\n}"
  },
  {
    "path": "sugared.go",
    "content": "package slog\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/gookit/color\"\n)\n\n// SugaredLoggerFn func type.\ntype SugaredLoggerFn func(sl *SugaredLogger)\n\n// SugaredLogger Is a fast and usable Logger, which already contains\n// the default formatting and handling capabilities\ntype SugaredLogger struct {\n\t*Logger\n\t// Formatter log message formatter. default use TextFormatter\n\tFormatter Formatter\n\t// Output writer\n\tOutput io.Writer\n\t// Level for log handling. if log record level <= Level, it will be record. default: DebugLevel\n\t//\n\t// TIP: setting the level to lower will ignore more logs.\n\tLevel Level\n}\n\n// NewStd logger instance, alias of NewStdLogger()\nfunc NewStd(fns ...SugaredLoggerFn) *SugaredLogger {\n\treturn NewStdLogger(fns...)\n}\n\n// NewStdLogger instance\nfunc NewStdLogger(fns ...SugaredLoggerFn) *SugaredLogger {\n\tsetFns := []SugaredLoggerFn{\n\t\tfunc(sl *SugaredLogger) {\n\t\t\tsl.SetName(\"stdLogger\")\n\t\t\t// sl.CallerSkip += 1\n\t\t\tsl.ReportCaller = true\n\t\t\t// auto enable console color\n\t\t\tsl.Formatter.(*TextFormatter).EnableColor = color.SupportColor()\n\t\t},\n\t}\n\n\tif len(fns) > 0 {\n\t\tsetFns = append(setFns, fns...)\n\t}\n\treturn NewSugaredLogger(os.Stdout, DebugLevel, setFns...)\n}\n\n// NewSugared create new SugaredLogger. 
alias of NewSugaredLogger()\nfunc NewSugared(out io.Writer, level Level, fns ...SugaredLoggerFn) *SugaredLogger {\n\treturn NewSugaredLogger(out, level, fns...)\n}\n\n// NewSugaredLogger create new SugaredLogger\nfunc NewSugaredLogger(output io.Writer, level Level, fns ...SugaredLoggerFn) *SugaredLogger {\n\tsl := &SugaredLogger{\n\t\tLevel:  level,\n\t\tOutput: output,\n\t\tLogger: New(),\n\t\t// default value\n\t\tFormatter: NewTextFormatter(),\n\t}\n\n\t// NOTICE: use self as a log handler\n\tsl.AddHandler(sl)\n\treturn sl.Config(fns...)\n}\n\n// NewJSONSugared create new SugaredLogger with JSONFormatter\nfunc NewJSONSugared(out io.Writer, level Level, fns ...SugaredLoggerFn) *SugaredLogger {\n\tsl := NewSugaredLogger(out, level)\n\tsl.Formatter = NewJSONFormatter()\n\n\treturn sl.Config(fns...)\n}\n\n// Config current logger\nfunc (sl *SugaredLogger) Config(fns ...SugaredLoggerFn) *SugaredLogger {\n\tfor _, fn := range fns {\n\t\tfn(sl)\n\t}\n\treturn sl\n}\n\n// Reset the logger\nfunc (sl *SugaredLogger) Reset() {\n\tsl.Level = DebugLevel\n\tsl.Output = os.Stdout\n\tsl.Formatter = NewTextFormatter()\n}\n\n// IsHandling Check if the current level can be handling\nfunc (sl *SugaredLogger) IsHandling(level Level) bool {\n\treturn sl.Level.ShouldHandling(level)\n}\n\n// Handle log record\nfunc (sl *SugaredLogger) Handle(record *Record) error {\n\tbts, err := sl.Formatter.Format(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = sl.Output.Write(bts)\n\treturn err\n}\n\n// Close all log handlers, will flush and close all handlers.\n//\n// IMPORTANT:\n//\n//\tif enable async/buffer mode, please call the Close() before exit.\nfunc (sl *SugaredLogger) Close() error {\n\t_ = sl.Logger.VisitAll(func(handler Handler) error {\n\t\t// TIP: must exclude self, because self is a handler\n\t\tif _, ok := handler.(*SugaredLogger); !ok {\n\t\t\tif err := handler.Close(); err != nil {\n\t\t\t\tsl.err = err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn 
sl.err\n}\n\n// Flush all logs. alias of the FlushAll()\nfunc (sl *SugaredLogger) Flush() error { return sl.FlushAll() }\n\n// FlushAll all logs\nfunc (sl *SugaredLogger) FlushAll() error {\n\treturn sl.Logger.VisitAll(func(handler Handler) error {\n\t\tif _, ok := handler.(*SugaredLogger); !ok {\n\t\t\t_ = handler.Flush()\n\t\t}\n\t\treturn nil\n\t})\n}\n"
  },
  {
    "path": "util.go",
    "content": "package slog\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/gookit/goutil/byteutil\"\n\t\"github.com/gookit/goutil/strutil\"\n\t\"github.com/valyala/bytebufferpool\"\n)\n\n// const (\n// \tdefaultMaxCallerDepth  int = 15\n// \tdefaultKnownSlogFrames int = 4\n// )\n\n// Stack that attempts to recover the data for all goroutines.\n// func getCallStacks(callerSkip int) []byte {\n// \treturn nil\n// }\n\n// FormatLevelName Format the level name, specify the length returned,\n// fill the space with less length, and truncate than the length\nfunc FormatLevelName(name string, length int) string {\n\tif len(name) < length {\n\t\treturn fmt.Sprintf(\"%-\"+strconv.Itoa(length)+\"s\", name)\n\t}\n\treturn name[:length]\n}\n\nfunc buildLowerLevelName() map[Level]string {\n\tmp := make(map[Level]string, len(LevelNames))\n\tfor level, s := range LevelNames {\n\t\tmp[level] = strings.ToLower(s)\n\t}\n\treturn mp\n}\n\n// getCaller retrieves the name of the first non-slog calling function\nfunc getCaller(callerSkip int) (fr runtime.Frame, ok bool) {\n\tpcs := make([]uintptr, 1) // alloc 1 times\n\tnum := runtime.Callers(callerSkip, pcs)\n\tif num > 0 {\n\t\tfr, _ = runtime.CallersFrames(pcs).Next()\n\t\tok = fr.PC != 0\n\t}\n\treturn\n}\n\nfunc formatCaller(rf *runtime.Frame, flag uint8, userFn CallerFormatFn) (cs string) {\n\tif userFn != nil {\n\t\treturn userFn(rf)\n\t}\n\n\tlineNum := strconv.FormatInt(int64(rf.Line), 10)\n\tswitch flag {\n\tcase CallerFlagFull:\n\t\treturn rf.Function + \",\" + filepath.Base(rf.File) + \":\" + lineNum\n\tcase CallerFlagFunc:\n\t\treturn rf.Function\n\tcase CallerFlagFcLine:\n\t\treturn rf.Function + \":\" + lineNum\n\tcase CallerFlagPkg:\n\t\ti := strings.LastIndex(rf.Function, \"/\")\n\t\ti += strings.IndexByte(rf.Function[i+1:], '.')\n\t\treturn rf.Function[:i+1]\n\tcase CallerFlagPkgFnl:\n\t\ti := strings.LastIndex(rf.Function, \"/\")\n\t\ti += 
strings.IndexByte(rf.Function[i+1:], '.')\n\t\treturn rf.Function[:i+1] + \",\" + filepath.Base(rf.File) + \":\" + lineNum\n\tcase CallerFlagFnlFcn:\n\t\tss := strings.Split(rf.Function, \".\")\n\t\treturn filepath.Base(rf.File) + \":\" + lineNum + \",\" + ss[len(ss)-1]\n\tcase CallerFlagFnLine:\n\t\treturn filepath.Base(rf.File) + \":\" + lineNum\n\tcase CallerFlagFcName:\n\t\tss := strings.Split(rf.Function, \".\")\n\t\treturn ss[len(ss)-1]\n\tdefault: // CallerFlagFpLine\n\t\treturn rf.File + \":\" + lineNum\n\t}\n}\n\nvar msgBufPool bytebufferpool.Pool\n\n// it like Println, will add spaces for each argument\nfunc formatArgsWithSpaces(vs []any) string {\n\tln := len(vs)\n\tif ln == 0 {\n\t\treturn \"\"\n\t}\n\n\tif ln == 1 {\n\t\t// cast is string, return it. NOT ALLOC MEMORY\n\t\tif str, ok := vs[0].(string); ok {\n\t\t\treturn str\n\t\t}\n\t\treturn strutil.SafeString(vs[0])\n\t}\n\n\t// buf = make([]byte, 0, ln*8)\n\tbb := msgBufPool.Get()\n\tdefer msgBufPool.Put(bb)\n\n\t// TIP:\n\t// `float` to string - will alloc 2 times memory\n\t// `int <0`, `int > 100` to string -  will alloc 1 times memory\n\tfor i := range vs {\n\t\tif i > 0 { // add space\n\t\t\tbb.B = append(bb.B, ' ')\n\t\t}\n\t\tbb.B = byteutil.AppendAny(bb.B, vs[i])\n\t}\n\n\treturn string(bb.B)\n\t// return byteutil.String(bb.B) // perf: Reduce one memory allocation\n}\n\n// EncodeToString data to string\nfunc EncodeToString(v any) string {\n\tif mp, ok := v.(map[string]any); ok {\n\t\treturn mapToString(mp)\n\t}\n\treturn strutil.SafeString(v)\n}\n\nfunc mapToString(mp map[string]any) string {\n\tln := len(mp)\n\tif ln == 0 {\n\t\treturn \"{}\"\n\t}\n\n\t// TODO use bytebufferpool\n\tbuf := make([]byte, 0, ln*8)\n\tbuf = append(buf, '{')\n\n\tfor k, val := range mp {\n\t\tbuf = append(buf, k...)\n\t\tbuf = append(buf, ':')\n\n\t\tstr, _ := strutil.AnyToString(val, false)\n\t\tbuf = append(buf, str...)\n\t\tbuf = append(buf, ',', ' ')\n\t}\n\n\t// remove last ', '\n\tbuf = 
append(buf[:len(buf)-2], '}')\n\treturn strutil.Byte2str(buf)\n}\n\nfunc parseTemplateToFields(tplStr string) []string {\n\tss := strings.Split(tplStr, \"{{\")\n\n\tvars := make([]string, 0, len(ss)*2)\n\tfor _, s := range ss {\n\t\tif len(s) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldAndOther := strings.SplitN(s, \"}}\", 2)\n\t\tif len(fieldAndOther) < 2 {\n\t\t\tvars = append(vars, s)\n\t\t} else {\n\t\t\tvars = append(vars, fieldAndOther[0], \"}}\"+fieldAndOther[1])\n\t\t}\n\t}\n\n\treturn vars\n}\n\nfunc printStderr(args ...any) {\n\t_, _ = fmt.Fprintln(os.Stderr, args...)\n}\n"
  },
  {
    "path": "util_test.go",
    "content": "package slog\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/gookit/goutil/errorx\"\n\t\"github.com/gookit/goutil/testutil/assert\"\n\t\"github.com/gookit/goutil/timex\"\n)\n\nfunc revertTemplateString(ss []string) string {\n\tvar sb strings.Builder\n\tfor _, s := range ss {\n\t\t// is field\n\t\tif s[0] >= 'a' && s[0] <= 'z' {\n\t\t\tsb.WriteString(\"{{\")\n\t\t\tsb.WriteString(s)\n\t\t\t// sb.WriteString(\"}}\")\n\t\t} else {\n\t\t\tsb.WriteString(s)\n\t\t}\n\t}\n\n\t// sb.WriteByte('\\n')\n\treturn sb.String()\n}\n\nfunc TestInner_parseTemplateToFields(t *testing.T) {\n\tss := parseTemplateToFields(NamedTemplate)\n\tstr := revertTemplateString(ss)\n\t// dump.P(ss, str)\n\tassert.Eq(t, NamedTemplate, str)\n\n\tss = parseTemplateToFields(DefaultTemplate)\n\tstr = revertTemplateString(ss)\n\t// dump.P(ss, str)\n\tassert.Eq(t, DefaultTemplate, str)\n\n\ttestTemplate := \"[{{datetime}}] [{{level}}] {{message}} {{data}} {{extra}}\"\n\tss = parseTemplateToFields(testTemplate)\n\tstr = revertTemplateString(ss)\n\tassert.Eq(t, testTemplate, str)\n\t// dump.P(ss, str)\n}\n\nfunc TestUtil_EncodeToString(t *testing.T) {\n\tassert.Eq(t, \"{a:1}\", EncodeToString(map[string]any{\"a\": 1}))\n}\n\nfunc TestUtil_formatArgsWithSpaces(t *testing.T) {\n\t// tests for formatArgsWithSpaces\n\ttests := []struct {\n\t\targs []any\n\t\twant string\n\t}{\n\t\t{nil, \"\"},\n\t\t{[]any{\"a\", \"b\", \"c\"}, \"a b c\"},\n\t\t{[]any{\"a\", \"b\", \"c\", 1, 2, 3}, \"a b c 1 2 3\"},\n\t\t{[]any{\"a\", 1, nil}, \"a 1 <nil>\"},\n\t\t{[]any{12, int8(12), int16(12), int32(12), int64(12)}, \"12 12 12 12 12\"},\n\t\t{[]any{uint(12), uint8(12), uint16(12), uint32(12), uint64(12)}, \"12 12 12 12 12\"},\n\t\t{[]any{float32(12.12), 12.12}, \"12.12 12.12\"},\n\t\t{[]any{true, false}, \"true false\"},\n\t\t{[]any{[]byte(\"abc\"), []byte(\"123\")}, \"abc 123\"},\n\t\t{[]any{timex.OneHour}, \"3600000000000\"},\n\t\t{[]any{errorx.Raw(\"a error message\")}, \"a error 
message\"},\n\t\t{[]any{[]int{1, 2, 3}}, \"[1 2 3]\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tassert.Eq(t, tt.want, formatArgsWithSpaces(tt.args))\n\t}\n\n\tassert.NotEmpty(t, formatArgsWithSpaces([]any{timex.Now().T()}))\n}\n"
  }
]