Showing preview only (268K chars total). Download the full file or copy to clipboard to get everything.
Repository: hanchuanchuan/bingo2sql
Branch: master
Commit: 56d70d7af3cf
Files: 29
Total size: 248.1 KB
Directory structure:
gitextract_sccp6i0i/
├── .editorconfig
├── .gitignore
├── Makefile
├── README.md
├── circle.yml
├── cmd/
│ ├── bingo2sql/
│ │ └── main.go
│ ├── local.go
│ ├── remote.go
│ ├── root.go
│ ├── server.go
│ └── stats.go
├── cnf/
│ └── config.ini
├── core/
│ ├── parseFile.go
│ ├── parser.go
│ ├── parserV2.go
│ ├── parser_stats.go
│ ├── parser_test.go
│ ├── socket.go
│ └── time.go
├── docs/
│ └── test.md
├── go.mod
├── go.sum
├── main.go
├── parse/
│ ├── bingo2sql.go
│ └── log.go
└── utils/
└── uuid/
├── codec.go
├── generator.go
├── sql.go
└── uuid.go
================================================
FILE CONTENTS
================================================
================================================
FILE: .editorconfig
================================================
[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8
# tab_size = 4 spaces
[*.go]
indent_style = tab
indent_size = 4
trim_trailing_whitespace = true
================================================
FILE: .gitignore
================================================
files
bin
coverage.out
.idea/
*.iml
*.swp
*.log
*.fail.go
.DS_Store
.vscode/
*.sql
================================================
FILE: Makefile
================================================
PROJECT=bingo2sql
GOPATH ?= $(shell go env GOPATH)
# Ensure GOPATH is set before running build process.
ifeq "$(GOPATH)" ""
$(error Please set the environment variable GOPATH before running `make`)
endif
FAIL_ON_STDOUT := awk '{ print } END { if (NR > 0) { exit 1 } }'
CURDIR := $(shell pwd)
path_to_add := $(addsuffix /bin,$(subst :,/bin:,$(GOPATH)))
export PATH := $(path_to_add):$(PATH)
GO := GO111MODULE=on go
GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
VERSION := $(shell git describe --tags --dirty)
# 指定部分单元测试跳过
ifeq ("$(SHORT)", "1")
GOTEST := CGO_ENABLED=1 $(GO) test -p 3 -short
else
GOTEST := CGO_ENABLED=1 $(GO) test -p 3
endif
OVERALLS := CGO_ENABLED=1 GO111MODULE=on overalls
GOVERALLS := goveralls
ARCH := "`uname -s`"
LINUX := "Linux"
MAC := "Darwin"
PACKAGE_LIST := go list ./...| grep -vE "vendor"
PACKAGES := $$($(PACKAGE_LIST))
PACKAGE_DIRECTORIES := $(PACKAGE_LIST) | sed 's|github.com/hanchuanchuan/$(PROJECT)/||'
FILES := $$(find $$($(PACKAGE_DIRECTORIES)) -name "*.go" | grep -vE "vendor")
GOFAIL_ENABLE := $$(find $$PWD/ -type d | grep -vE "(\.git|vendor)" | xargs gofail enable)
GOFAIL_DISABLE := $$(find $$PWD/ -type d | grep -vE "(\.git|vendor)" | xargs gofail disable)
# LDFLAGS += -X "github.com/hanchuanchuan/$(PROJECT)//mysql.TiDBReleaseVersion=$(shell git describe --tags --dirty)"
# LDFLAGS += -X "github.com/hanchuanchuan/$(PROJECT)//util/printer.TiDBBuildTS=$(shell date '+%Y-%m-%d %H:%M:%S')"
# LDFLAGS += -X "github.com/hanchuanchuan/$(PROJECT)//util/printer.TiDBGitHash=$(shell git rev-parse HEAD)"
# LDFLAGS += -X "github.com/hanchuanchuan/$(PROJECT)//util/printer.TiDBGitBranch=$(shell git rev-parse --abbrev-ref HEAD)"
# LDFLAGS += -X "github.com/hanchuanchuan/$(PROJECT)//util/printer.GoVersion=$(shell go version)"
CHECK_LDFLAGS += $(LDFLAGS)
.PHONY: all build update clean test gotest server check
default: server buildsucc
build:
GOOS=linux GOARCH=amd64 $(GOBUILD) -ldflags '-s -w $(LDFLAGS)' -o bin/$(PROJECT) main.go
server-admin-check: server_check buildsucc
buildsucc:
@echo Build bingo2sql successfully!
# The retool tools.json is setup from hack/retool-install.sh
check-setup:
@which retool >/dev/null 2>&1 || go get github.com/twitchtv/retool
@retool sync
check: check-setup fmt lint vet
# These need to be fixed before they can be ran regularly
check-fail: goword check-static check-slow
fmt:
@echo "gofmt (simplify)"
@gofmt -s -l -w $(FILES) 2>&1 | $(FAIL_ON_STDOUT)
goword:
retool do goword $(FILES) 2>&1 | $(FAIL_ON_STDOUT)
check-static:
@ # vet and fmt have problems with vendor when ran through metalinter
CGO_ENABLED=0 retool do gometalinter.v2 --disable-all --deadline 120s \
--enable misspell \
--enable megacheck \
--enable ineffassign \
$$($(PACKAGE_DIRECTORIES))
check-slow:
CGO_ENABLED=0 retool do gometalinter.v2 --disable-all \
--enable errcheck \
$$($(PACKAGE_DIRECTORIES))
CGO_ENABLED=0 retool do gosec $$($(PACKAGE_DIRECTORIES))
lint:
@echo "linting"
@CGO_ENABLED=0 retool do revive -formatter friendly -config revive.toml $(PACKAGES)
# NOTE: the old `-all -shadow` flags were removed from `go vet` in Go 1.12+
# (shadow checking moved to golang.org/x/tools/.../passes/shadow), so run the
# default analyzer set. Uses $(GO) for consistency with the other targets
# (keeps GO111MODULE=on).
vet:
	@echo "vet"
	@$(GO) vet $(PACKAGES) 2>&1 | $(FAIL_ON_STDOUT)
clean:
$(GO) clean -i ./...
rm -rf *.out
test: gotest
gotest:
$(GO) get github.com/etcd-io/gofail@v0.0.0-20180808172546-51ce9a71510a
@$(GOFAIL_ENABLE)
ifeq ("$(TRAVIS_COVERAGE)", "1")
@echo "Running in TRAVIS_COVERAGE mode."
@export log_level=error; \
go get github.com/go-playground/overalls
# go get github.com/mattn/goveralls
# $(OVERALLS) -project=github.com/hanchuanchuan/$(PROJECT)/ -covermode=count -ignore='.git,vendor,cmd,docs,LICENSES' || { $(GOFAIL_DISABLE); exit 1; }
# $(GOVERALLS) -service=travis-ci -coverprofile=overalls.coverprofile || { $(GOFAIL_DISABLE); exit 1; }
$(OVERALLS) -project=github.com/hanchuanchuan/$(PROJECT)/ -covermode=count -ignore='.git,vendor,cmd,docs,LICENSES' -concurrency=1 -- -short || { $(GOFAIL_DISABLE); exit 1; }
else
@echo "Running in native mode."
@export log_level=error; \
$(GOTEST) -timeout 30m -cover $(PACKAGES) || { $(GOFAIL_DISABLE); exit 1; }
endif
@$(GOFAIL_DISABLE)
race:
$(GO) get github.com/etcd-io/gofail@v0.0.0-20180808172546-51ce9a71510a
@$(GOFAIL_ENABLE)
@export log_level=debug; \
$(GOTEST) -timeout 30m -race $(PACKAGES) || { $(GOFAIL_DISABLE); exit 1; }
@$(GOFAIL_DISABLE)
leak:
$(GO) get github.com/etcd-io/gofail@v0.0.0-20180808172546-51ce9a71510a
@$(GOFAIL_ENABLE)
@export log_level=debug; \
$(GOTEST) -tags leak $(PACKAGES) || { $(GOFAIL_DISABLE); exit 1; }
@$(GOFAIL_DISABLE)
tikv_integration_test:
$(GO) get github.com/etcd-io/gofail@v0.0.0-20180808172546-51ce9a71510a
@$(GOFAIL_ENABLE)
$(GOTEST) ./store/tikv/. -with-tikv=true || { $(GOFAIL_DISABLE); exit 1; }
@$(GOFAIL_DISABLE)
RACE_FLAG =
ifeq ("$(WITH_RACE)", "1")
RACE_FLAG = -race
GOBUILD = GOPATH=$(GOPATH) CGO_ENABLED=1 $(GO) build
endif
server:
$(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS)' -o bin/$(PROJECT) main.go
server_old:
$(GOBUILD) $(RACE_FLAG) -ldflags '$(LDFLAGS)' -o bin/$(PROJECT) cmd/bingo2sql/main.go
server_check:
$(GOBUILD) $(RACE_FLAG) -ldflags '$(CHECK_LDFLAGS)' -o bin/$(PROJECT) main.go
update:
which dep 2>/dev/null || go get -u github.com/golang/dep/cmd/dep
ifdef PKG
dep ensure -add ${PKG}
else
dep ensure -update
endif
@echo "removing test files"
dep prune
bash ./hack/clean_vendor.sh
gofail-enable:
# Converting gofail failpoints...
@$(GOFAIL_ENABLE)
gofail-disable:
# Restoring gofail failpoints...
@$(GOFAIL_DISABLE)
upload-coverage: SHELL:=/bin/bash
upload-coverage:
ifeq ("$(TRAVIS_COVERAGE)", "1")
mv overalls.coverprofile coverage.txt
bash <(curl -s https://codecov.io/bash)
endif
# windows无法build,github.com/outbrain/golib有引用syslog.Writer,其在windows未实现.
.PHONY: release
# Cross-compile for each supported OS and tar each binary up with the version
# from `git describe`. Builds main.go — the path `cmd/bingo2sql.go` used
# previously does not exist (the tree has cmd/bingo2sql/main.go), and the
# sibling `build`/`server` targets already build main.go.
release:
	@echo "$(CGREEN)Cross platform building for release ...$(CEND)"
	@for GOOS in windows darwin linux; do \
		echo "Building $${GOOS}-amd64 ..."; \
		GOOS=$${GOOS} GOARCH=amd64 $(GOBUILD) -ldflags '-s -w $(LDFLAGS)' -o bin/$(PROJECT) main.go; \
		cd bin; \
		tar -czf $(PROJECT)-$${GOOS}-$(VERSION).tar.gz $(PROJECT); \
		rm -f $(PROJECT); \
		cd ..; \
	done
.PHONY: docker
# Build the linux binary and a Docker image tagged with the last git tag and
# `latest`. Fixes: awk's `$1` was being eaten by make's `$(shell ...)`
# expansion (it must be `$$1`, done here via a runtime shell substitution);
# the repository name had an invalid trailing slash (`.../bingo2sql/:tag`);
# and `cmd/bingo2sql.go` does not exist — build main.go like `build` does.
docker:
	GOOS=linux GOARCH=amd64 $(GOBUILD) -ldflags '-s -w $(LDFLAGS)' -o bin/$(PROJECT) main.go
	v1=$$(git tag | awk -F'-' '{print $$1}' | tail -1) && docker build -t hanchuanchuan/$(PROJECT):$${v1} . \
		&& docker tag hanchuanchuan/$(PROJECT):$${v1} hanchuanchuan/$(PROJECT):latest
.PHONY: docker-push
# Push the versioned image and `latest`. The repository name previously
# contained an invalid trailing slash before the tag separator
# (`hanchuanchuan/bingo2sql/:v1`), which Docker rejects.
docker-push:
	v1=$$(git tag | tail -1) && docker push hanchuanchuan/$(PROJECT):$${v1} \
		&& docker push hanchuanchuan/$(PROJECT):latest
================================================
FILE: README.md
================================================
# bingo2sql
MySQL Binlog 解析工具
从MySQL binlog解析出原始SQL,对应的回滚SQL等。
#### 功能说明
- 本地离线解析:指定本地binlog文件和要解析的表结构即可
- 远程在线解析:指定远程数据库地址,起止时间范围或binlog范围,可指定库/表和操作类型,GTID/线程号等
- 解析服务API:提供HTTP协议方式的解析接口,支持解析和打包下载
#### 限制和要求
- MySQL必须开启binlog
- binlog_format = row
- binlog_row_image = full
#### 测试对比
测试步骤及结果详见 [效率测试](docs/test.md)
### 支持模式
#### 1. 本地解析
```sh
bingo2sql --start-file=~/db_cmdb/blog/mysql-bin.000001 -t table.sql
```
其中`-t`参数指定的是建表语句文件,内容类似:
```sql
-- 需要解析哪个表,提供哪个表的建表语句
CREATE TABLE `tt` (
id int auto_increment primary key,
`TABLE_NAME` varchar(64) NOT NULL DEFAULT ''
) ;
```
#### 2. 远程解析
远程解析的参数及使用均与binlog2sql类似
```
bingo2sql -h=127.0.0.1 -P 3306 -u test -p test -d db1_3306_test_inc \
--start-time="2006-01-02 15:04:05" -t t1 -B
```
#### 3. 解析服务
bingo2sql 支持以服务方式运行,提供解析的HTTP接口支持
```sh
bingo2sql --server --config=config.ini
```
### 支持选项
**解析模式**
- --stop-never 持续解析binlog。可选。默认false,同步至执行命令时最新的binlog位置。
- -K, --no-primary-key 对INSERT语句去除主键。可选。默认false
- -B, --flashback 生成回滚SQL,可解析大文件,不受内存限制。可选。默认false。与stop-never或no-primary-key不能同时添加。
- -M, --minimal-update 最小化update语句. 可选. (default true)
- -I, --minimal-insert 使用包含多个VALUES列表的多行语法编写INSERT语句. (default true)
**解析范围控制**
- --start-file 起始解析文件,只需文件名,无需全路径。可选,如果指定了起止时间,可以忽略该参数。
- --start-pos 起始解析位置。可选。默认为start-file的起始位置。
- --stop-file 终止解析文件。可选。若解析模式为stop-never,此选项失效。
- --stop-pos 终止解析位置。可选。默认为stop-file的最末位置;若解析模式为stop-never,此选项失效。
- --start-time 起始解析时间,格式'%Y-%m-%d %H:%M:%S'等。可选。默认不过滤。
- --stop-time 终止解析时间,格式'%Y-%m-%d %H:%M:%S'等。可选。默认不过滤。
- -C, --connection-id 线程号,可解析指定线程的SQL。
- -g, --gtid GTID范围.格式为uuid:编号[-编号],多个时以逗号分隔,例如:6573bb29-9d94-11e9-9e0c-0242ac130002:1-100
- --max 解析的最大行数,设置为0则不限制,以避免解析范围过大 (default 100000)
**对象过滤**
-d, --databases 只解析目标db的sql,多个库用逗号隔开,如-d db1,db2。可选。默认为空。
-t, --tables 只解析目标table的sql,多张表用逗号隔开,如-t tbl1,tbl2。可选。默认为空。
--ddl 解析ddl,仅支持正向解析。可选。默认false。
--sql-type 只解析指定类型,支持 insert,update,delete。多个类型用逗号隔开,如--sql-type=insert,delete。可选。默认为增删改都解析。
**附加信息**
- --show-gtid 显示gtid (default true)
- --show-time 显示执行时间,同一时间仅显示首次 (default true)
- --show-all-time 显示每条SQL的执行时间 (default false)
- --show-thread 显示线程号,便于区别同一进程操作 (default false)
- -o, --output 本地或远程解析时,可输出到指定文件(置空则输出到控制台,可通过 > file重定向)
**mysql连接配置** (仅远程解析需要)
```
-h host
-P port
-u user
-p password
```
#### 致谢
bingo2sql借鉴和学习了很多业界知名的开源项目,在此表示感谢!
- [go-mysql](https://github.com/siddontang/go-mysql) handle MySQL network protocol and replication
- [binlog2sql](https://github.com/danfengcao/binlog2sql) Parse MySQL binlog to SQL you want
- [binlog_rollback](https://github.com/GoDannyLai/binlog_rollback) mysql binlog rollback | flashback | redo | dml report | ddl info
================================================
FILE: circle.yml
================================================
version: 2
general:
branches:
ignore:
- gh-pages
jobs:
build:
branches:
ignore: gh-pages
environment:
TZ: "Asia/Shanghai"
docker:
- image: circleci/golang:1.14
- image: circleci/mysql:5.7.31
# - image: mysql:5.7
command: mysqld --lower_case_table_names=1 --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --innodb-large-prefix=true --log-bin=on --server_id=111 --explicit_defaults_for_timestamp=true --gtid-mode=1 --enforce_gtid_consistency=1 --log-slave-updates=1 --innodb_buffer_pool_size=512M --max_allowed_packet=64M
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: true
TZ: "Asia/Shanghai"
# volumes:
# - /var/lib/mysql:/var/lib/mysql
working_directory: /go/src/github.com/hanchuanchuan/bingo2sql
steps:
- run:
name: Install mysql-client
command: sudo apt-get update -y --allow-releaseinfo-change && sudo apt upgrade -y && sudo apt-get install default-mysql-client && sudo apt-get install --reinstall ca-certificates libgnutls30 -y
- add_ssh_keys:
fingerprints:
- "42:94:88:f4:be:ff:0b:b9:e8:ae:05:26:f1:e7:fd:57"
- checkout
- run:
name: Waiting for MySQL to be ready
command: |
for i in `seq 1 10`;
do
nc -z localhost 3306 && echo Success && exit 0
echo -n .
sleep 1
done
echo Failed waiting for MySQL && exit 1
- run:
name: mysql init
command: mysql -h 127.0.0.1 -u root -e "select version();create database if not exists test DEFAULT CHARACTER SET utf8;create database if not exists test_inc DEFAULT CHARACTER SET utf8;grant all on *.* to test@'127.0.0.1' identified by 'test';FLUSH PRIVILEGES;show databases;show variables like 'explicit_defaults_for_timestamp';"
- run:
name: "Show Variables"
command: mysql -h 127.0.0.1 -u root -e "show variables"
- run: rm -f go.sum
- run: mysql -h 127.0.0.1 -u root -e "set global GTID_MODE = ON_PERMISSIVE;"
- run: mysql -h 127.0.0.1 -u root -e "set global GTID_MODE = ON;"
- run:
name: "Build & Test"
command: make test
no_output_timeout: 1200
# - setup_remote_docker:
# docker_layer_caching: false
# - run:
# name: Publish Docker Image to Docker Hub
# command: |
# if [ "${CIRCLE_BRANCH}" == "master" ]; then
# echo "$DOCKERHUB_USERNAME"
# echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
# make docker
# make docker-push
# fi
================================================
FILE: cmd/bingo2sql/main.go
================================================
package main
import (
"context"
"fmt"
"math"
"os"
"sync"
"github.com/hanchuanchuan/bingo2sql/core"
_ "github.com/jinzhu/gorm/dialects/mysql"
"github.com/pkg/profile"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
// var parserProcess map[string]*parser.MyBinlogParser
var parserProcess sync.Map
// ParseInfo describes the progress of one running binlog-parse task
// (tasks are tracked in the package-level parserProcess map).
type ParseInfo struct {
	ID        string `json:"id"`         // unique task identifier
	ParseRows int    `json:"parse_rows"` // rows parsed so far
	Percent   int    `json:"percent"`    // completion percentage
}
var (
logHeader = `${time_rfc3339} ${prefix} ${level} ${short_file} ${line} `
requestHeader = `${time_rfc3339} ${remote_ip} ${method} ${uri} ${status} ${error} ${latency_human}` + "\n"
flag = pflag.NewFlagSet("bingo2sql", pflag.ExitOnError)
)
var (
host = flag.StringP("host", "h", "", "数据库地址")
port = flag.IntP("port", "P", 3306, "端口号")
user = flag.StringP("user", "u", "", "用户名")
password = flag.StringP("password", "p", "", "密码")
startFile = flag.String("start-file", "", "起始binlog文件")
stopFile = flag.String("stop-file", "", "结束binlog文件")
startPosition = flag.Int("start-pos", 0, "起始位置")
stopPosition = flag.Int("stop-pos", 0, "结束位置")
startTime = flag.String("start-time", "", "开始时间")
stopTime = flag.String("stop-time", "", "结束时间")
databases = flag.StringP("databases", "d", "", "数据库列表,多个时以逗号分隔")
tables = flag.StringP("tables", "t", "", "表名或表结构文件.远程解析时指定表名(可多个,以逗号分隔),本地解析时指定表结构文件")
threadID = flag.Uint64P("connection-id", "C", 0, "指定线程ID")
flashback = flagBoolean("flashback", "B", false, "逆向语句")
parseDDL = flagBoolean("ddl", "", false, "解析DDL语句(仅正向SQL)")
sqlType = flag.String("sql-type", "insert,delete,update", "解析的语句类型")
maxRows = flag.Int("max", 100000, "解析的最大行数,设置为0则不限制")
threads = flag.Int("threads", 64, "解析线程数,影响文件解析速度")
output = flag.StringP("output", "o", "", "输出到指定文件")
gtid = flag.StringP("gtid", "g", "", "GTID范围.格式为uuid:编号[-编号],多个时以逗号分隔")
// removePrimary = flagBoolean("no-primary-key", "K", false, "对INSERT语句去除主键. 可选.")
minimalUpdate = flagBoolean("minimal-update", "M", true, "最小化update语句. 可选.")
minimalInsert = flagBoolean("minimal-insert", "I", true, "使用包含多个VALUES列表的多行语法编写INSERT语句.")
stopNever = flagBoolean("stop-never", "N", false, "持续解析binlog")
showGTID = flagBoolean("show-gtid", "", true, "显示gtid")
showTime = flagBoolean("show-time", "", true, "显示执行时间,同一时间仅显示首次")
showAllTime = flagBoolean("show-all-time", "", false, "显示每条SQL的执行时间")
showThread = flagBoolean("show-thread", "", false, "显示线程号,便于区别同一进程操作")
runServer = flagBoolean("server", "s", false, "启动API服务")
summary = flagBoolean("summary", "S", false, "统计binlog文件中的DML次数等信息")
configFile = flag.StringP("config", "c", "config.ini", "以服务方式启动时需指定配置文件")
debug = flagBoolean("debug", "", false, "调试模式,输出详细日志")
mode = flag.String("profile-mode", "", "enable profiling mode, one of [cpu, mem, mutex, block]")
// cpuProfile = flagBoolean("cpu-profile", "", false, "调试模式,开启CPU性能跟踪")
)
// main parses the command line, optionally enables pprof profiling and log
// debugging, then runs a single binlog-parse pass via runParse.
func main() {
	flag.SortFlags = false

	if err := flag.Parse(os.Args[1:]); err != nil {
		log.Error(err)
		return
	}
	// With no arguments at all, print usage instead of running a parse.
	if len(os.Args) < 2 {
		fmt.Fprint(os.Stderr, "Usage of bingo2sql:\n")
		flag.PrintDefaults()
		return
	}

	// Optionally profile the whole run; the deferred Stop flushes the
	// profile when main returns.
	profileModes := map[string]func(*profile.Profile){
		"cpu":   profile.CPUProfile,
		"mem":   profile.MemProfile,
		"mutex": profile.MutexProfile,
		"block": profile.BlockProfile,
	}
	if opt, ok := profileModes[*mode]; ok {
		defer profile.Start(opt, profile.ProfilePath(".")).Stop()
	}

	// Map --debug onto log verbosity.
	level := log.ErrorLevel
	if *debug {
		level = log.DebugLevel
	}
	log.SetLevel(level)

	// Run as a one-shot command-line tool.
	runParse()
}
// runParse assembles a core.BinlogParserConfig from the parsed command-line
// flags and runs the binlog parser once, logging any failure.
func runParse() {
	cfg := &core.BinlogParserConfig{
		Host:          *host,
		Port:          uint16(*port),
		User:          *user,
		Password:      *password,
		StartFile:     *startFile,
		StopFile:      *stopFile,
		StartPosition: *startPosition,
		StopPosition:  *stopPosition,
		StartTime:     *startTime,
		StopTime:      *stopTime,
		Flashback:     *flashback,
		ParseDDL:      *parseDDL,
		IncludeGtids:  *gtid,
		Databases:     *databases,
		Tables:        *tables,
		SqlType:       *sqlType,
		MaxRows:       *maxRows,
		Threads:       *threads,
		OutputFileStr: *output,
		MinimalUpdate: *minimalUpdate,
		MinimalInsert: *minimalInsert,
		ShowGTID:      *showGTID,
		ShowTime:      *showTime,
		ShowAllTime:   *showAllTime,
		ShowThread:    *showThread,
		StopNever:     *stopNever,
	}

	// A MySQL connection id is 32 bits; fold oversized values into range.
	if *threadID > math.MaxUint32 {
		cfg.ThreadID = uint32(*threadID % (1 << 32))
	} else {
		cfg.ThreadID = uint32(*threadID)
	}

	p, err := core.NewBinlogParser(context.Background(), cfg)
	if err != nil {
		log.Error("binlog解析操作失败")
		log.Error(err)
		return
	}
	if err = p.Parser(); err != nil {
		log.Error("binlog解析操作失败")
		log.Error(err)
	}
}
func flagBoolean(name string, shorthand string, defaultVal bool, usage string) *bool {
if !defaultVal {
// Fix #4125, golang do not print default false value in usage, so we append it.
usage = fmt.Sprintf("%s (default false)", usage)
}
return flag.BoolP(name, shorthand, defaultVal, usage)
}
================================================
FILE: cmd/local.go
================================================
/*
Copyright © 2020 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"math"
"github.com/hanchuanchuan/bingo2sql/core"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var threadID uint64
// localCmd parses a binlog file available on the local filesystem, using a
// table-structure SQL file instead of a live database connection.
var localCmd = &cobra.Command{
	Use:   "local",
	Short: "本地解析",
	Long:  `指定binlog文件和表结构文件`,
	Run: func(cmd *cobra.Command, args []string) {
		// Map the --debug flag onto log verbosity.
		level := log.ErrorLevel
		if cfg.Debug {
			level = log.DebugLevel
		}
		log.SetLevel(level)

		// A MySQL connection id is 32 bits; fold oversized values into range.
		if threadID > math.MaxUint32 {
			cfg.ThreadID = uint32(threadID % (1 << 32))
		} else {
			cfg.ThreadID = uint32(threadID)
		}

		p, err := core.NewBinlogParser(context.Background(), &cfg)
		if err != nil {
			log.Error("binlog解析操作失败")
			log.Error(err)
			return
		}
		if err = p.Parser(); err != nil {
			log.Error("binlog解析操作失败")
			log.Error(err)
		}
	},
}
// init wires the local sub-command into the root command and registers the
// shared flag set (unsorted, to keep help output in declaration order).
func init() {
	rootCmd.AddCommand(localCmd)

	flags := localCmd.Flags()
	flags.SortFlags = false
	initCommonFalg(flags)
}
================================================
FILE: cmd/remote.go
================================================
/*
Copyright © 2020 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"math"
"github.com/hanchuanchuan/bingo2sql/core"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// remoteCmd parses binlogs from a running MySQL instance over the network;
// it needs host/port/user/password connection flags.
var remoteCmd = &cobra.Command{
	Use:   "remote",
	Short: "远程解析",
	Long:  `远程解析需要指定数据库地址/端口等信息`,
	Run: func(cmd *cobra.Command, args []string) {
		// A MySQL connection id is 32 bits; fold oversized values into range.
		if threadID > math.MaxUint32 {
			cfg.ThreadID = uint32(threadID % (1 << 32))
		} else {
			cfg.ThreadID = uint32(threadID)
		}

		p, err := core.NewBinlogParser(context.Background(), &cfg)
		if err != nil {
			log.Error("binlog解析操作失败")
			log.Error(err)
			return
		}
		if err = p.Parser(); err != nil {
			log.Error("binlog解析操作失败")
			log.Error(err)
		}
	},
}
// init wires the remote sub-command into the root command and registers its
// connection flags plus the shared flag set.
func init() {
	rootCmd.AddCommand(remoteCmd)

	flag := remoteCmd.Flags()
	flag.SortFlags = false

	// Connection settings for the remote MySQL instance.
	flag.StringVarP(&cfg.Host, "host", "h", "", "host")
	flag.Uint16VarP(&cfg.Port, "port", "P", 3306, "port")
	flag.StringVarP(&cfg.User, "user", "u", "", "user")
	flag.StringVarP(&cfg.Password, "password", "p", "", "password")
	// Registered explicitly so -h can be used for --host instead of help.
	flag.Bool("help", false, "help for remote")
	// Keep following the newest binlog instead of stopping at the end.
	flag.BoolVarP(&cfg.StopNever, "stop-never", "N", false, "持续解析binlog")
	initCommonFalg(flag)
}
================================================
FILE: cmd/root.go
================================================
/*
Copyright © 2020 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"fmt"
"math"
"os"
"github.com/hanchuanchuan/bingo2sql/core"
"github.com/pkg/profile"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/viper"
)
var mode string
// rootCmd is the base command; invoked without a sub-command it behaves like
// `remote` and parses binlogs from a live MySQL instance.
var rootCmd = &cobra.Command{
	Use:   "bingo2sql",
	Short: "mysql binlog解析工具",
	Long:  `用于解析mysql binlog,支持正向,逆向. 可本地或远程解析.`,
	Run: func(cmd *cobra.Command, args []string) {
		// A MySQL connection id is 32 bits; fold oversized values into range.
		if threadID > math.MaxUint32 {
			cfg.ThreadID = uint32(threadID % (1 << 32))
		} else {
			cfg.ThreadID = uint32(threadID)
		}

		p, err := core.NewBinlogParser(context.Background(), &cfg)
		if err != nil {
			log.Error("binlog解析操作失败")
			log.Error(err)
			return
		}
		if err = p.Parser(); err != nil {
			log.Error("binlog解析操作失败")
			log.Error(err)
		}
	},
}
// Execute runs the root command, dispatching to any sub-command named on the
// command line. It is called once from main.main(); on error the message is
// printed and the process exits with status 1.
func Execute() {
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
var cfg core.BinlogParserConfig
// init configures the root command: config-file loading, the persistent
// --debug flag, connection flags and the shared flag set.
func init() {
	cobra.OnInitialize(initConfig)

	// Defaults applied before any flag parsing happens.
	cfg = core.BinlogParserConfig{
		Port: 3306,
	}

	// --debug is persistent so sub-commands (local/remote/stats/server)
	// inherit it.
	rootCmd.PersistentFlags().BoolVarP(&cfg.Debug, "debug", "", false, "调试模式,输出详细日志.sets log level to debug")

	flag := rootCmd.Flags()
	flag.SortFlags = false

	// Connection flags for the default (remote-style) invocation.
	flag.StringVarP(&cfg.Host, "host", "h", "", "host")
	flag.Uint16VarP(&cfg.Port, "port", "P", 3306, "port")
	flag.StringVarP(&cfg.User, "user", "u", "", "user")
	flag.StringVarP(&cfg.Password, "password", "p", "", "password")
	// Registered explicitly so -h can be used for --host instead of help.
	// Fixed copy-paste: this is the root command's help, not "remote"'s.
	flag.Bool("help", false, "help for bingo2sql")

	initCommonFalg(flag)
	flag.BoolVarP(&cfg.StopNever, "stop-never", "N", false, "持续解析binlog")
}
// initConfig reads in the config file and matching environment variables.
// It runs via cobra.OnInitialize, i.e. after flags are parsed.
func initConfig() {
	if cfgFile != "" {
		// Use config file from the flag (set by the server sub-command).
		viper.SetConfigFile(cfgFile)
	} else {
		// Find home directory.
		home, err := homedir.Dir()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		// Search config in home directory with name ".bingo2sql"
		// (without extension).
		viper.AddConfigPath(home)
		viper.SetConfigName(".bingo2sql")
	}

	viper.AutomaticEnv() // read in environment variables that match

	// If a config file is found, read it in; a missing file is not an error.
	if err := viper.ReadInConfig(); err == nil {
		fmt.Println("Using config file:", viper.ConfigFileUsed())
	}
}
// flagBoolean registers a boolean flag on the given flag set. pflag omits
// "default false" from usage text, so it is appended explicitly for
// default-false flags (see tidb issue #4125, which this idiom follows).
func flagBoolean(flag *pflag.FlagSet, p *bool, name string, shorthand string, defaultVal bool, usage string) {
	if !defaultVal {
		usage = usage + " (default false)"
	}
	flag.BoolVarP(p, name, shorthand, defaultVal, usage)
}
// initCommonFalg registers the flags shared by the root, local and remote
// commands onto the given flag set. (The "Falg" typo in the name is kept
// because callers in other files reference it.)
func initCommonFalg(flag *pflag.FlagSet) {
	// Parse-range controls.
	flag.StringVar(&cfg.StartFile, "start-file", "", "起始binlog文件")
	flag.StringVar(&cfg.StopFile, "stop-file", "", "结束binlog文件")
	flag.IntVar(&cfg.StartPosition, "start-pos", 0, "起始位置")
	flag.IntVar(&cfg.StopPosition, "stop-pos", 0, "结束位置")
	flag.StringVar(&cfg.StartTime, "start-time", "", "开始时间")
	flag.StringVar(&cfg.StopTime, "stop-time", "", "结束时间")
	// Object filters.
	flag.StringVarP(&cfg.Databases, "databases", "d", "", "数据库列表,多个时以逗号分隔")
	flag.StringVarP(&cfg.Tables, "tables", "t", "", "表名,如果数据库为多个,则需指名表前缀,多个时以逗号分隔")
	flag.Uint64VarP(&threadID, "connection-id", "C", 0, "指定线程ID")
	flag.BoolVarP(&cfg.Flashback, "flashback", "B", false, "逆向语句")
	flag.BoolVarP(&cfg.ParseDDL, "ddl", "", false, "解析DDL语句(仅正向SQL)")
	flag.StringVar(&cfg.SqlType, "sql-type", "insert,delete,update", "解析的语句类型")
	flag.IntVar(&cfg.MaxRows, "max", 100000, "解析的最大行数,设置为0则不限制")
	flag.IntVar(&cfg.Threads, "threads", 64, "解析线程数,影响文件解析速度")
	flag.StringVarP(&cfg.OutputFileStr, "output", "o", "", "输出到指定文件")
	flag.StringVarP(&cfg.IncludeGtids, "gtid", "g", "", "GTID范围.格式为uuid:编号[-编号],多个时以逗号分隔")
	// Output-shaping options (flagBoolean appends "(default false)" to usage).
	flagBoolean(flag, &cfg.RemovePrimary, "no-primary-key", "K", false, "对INSERT语句去除主键. 可选.")
	flagBoolean(flag, &cfg.MinimalUpdate, "minimal-update", "M", true, "最小化update语句. 可选.")
	flagBoolean(flag, &cfg.MinimalInsert, "minimal-insert", "I", true, "使用包含多个VALUES列表的多行语法编写INSERT语句.")
	flagBoolean(flag, &cfg.ShowGTID, "show-gtid", "", true, "显示gtid")
	flagBoolean(flag, &cfg.ShowTime, "show-time", "", true, "显示执行时间,同一时间仅显示首次")
	flagBoolean(flag, &cfg.ShowAllTime, "show-all-time", "", false, "显示每条SQL的执行时间")
	flagBoolean(flag, &cfg.ShowThread, "show-thread", "", false, "显示线程号,便于区别同一进程操作")
	flag.StringVar(&mode, "profile-mode", "", "enable profiling mode, one of [cpu, mem, mutex, block]")
	// NOTE(review): this switch is dead code. It runs during flag
	// registration — before cobra parses the command line — so `mode` is
	// always "" here; and even if it matched, the deferred Stop() would run
	// when initCommonFalg returns, ending profiling immediately. Profiling
	// should instead be started in a PersistentPreRun after parsing —
	// confirm and move it there.
	switch mode {
	case "cpu":
		defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()
	case "mem":
		defer profile.Start(profile.MemProfile, profile.ProfilePath(".")).Stop()
	case "mutex":
		defer profile.Start(profile.MutexProfile, profile.ProfilePath(".")).Stop()
	case "block":
		defer profile.Start(profile.BlockProfile, profile.ProfilePath(".")).Stop()
	}
}
================================================
FILE: cmd/server.go
================================================
/*
Copyright © 2020 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"net/http"
"os"
"time"
"github.com/hanchuanchuan/bingo2sql/parse"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"github.com/siddontang/go/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var cfgFile string
// serverCmd runs bingo2sql as a long-lived HTTP service exposing the parse
// API; see startServer for the configured routes.
// NOTE(review): the Long text below is copied from the root command —
// consider a server-specific description (it is a runtime string, so it is
// left unchanged here).
var serverCmd = &cobra.Command{
	Use:   "server",
	Short: "以服务方式运行",
	Long:  `用于解析mysql binlog,支持正向,逆向. 可本地或远程解析.`,
	Run: func(cmd *cobra.Command, args []string) {
		startServer()
	},
}
// init wires the server sub-command into the root command and registers its
// single flag: the path of the ini config file.
func init() {
	rootCmd.AddCommand(serverCmd)
	serverCmd.Flags().StringVarP(&cfgFile, "config", "c", "config.ini", "以服务方式启动时需指定配置文件")
}
// startServer reads the ini config given by --config, opens the configured
// log files, and serves the binlog-parse HTTP API on port 8077 (blocking).
func startServer() {
	viper := viper.New()
	viper.SetConfigFile(cfgFile)
	viper.SetConfigType("ini")
	if err := viper.ReadInConfig(); err != nil {
		log.Fatalf("Error: %s", err.Error())
		return
	}

	logDir := viper.GetString("Bingo.log")
	httpLogDir := viper.GetString("Bingo.httplog")

	// echo's output log file.
	// NOTE(review): elog/httplog are opened but never attached to echo's
	// logger (the LoggerWithConfig wiring was removed as dead code) —
	// confirm whether they should be wired in or the opens dropped.
	elog, err := os.OpenFile(logDir, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Printf("open echo log file %s error: %s\n", logDir, err.Error())
		return
	}
	defer elog.Close()

	httplog, err := os.OpenFile(httpLogDir, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// Fixed copy-paste: this is the HTTP access log, not the echo log.
		fmt.Printf("open http log file %s error: %s\n", httpLogDir, err.Error())
		return
	}
	defer httplog.Close()

	router := echo.New()
	router.Server.WriteTimeout = time.Duration(30) * time.Second

	// Middleware: request logging and panic recovery.
	router.Use(middleware.Logger())
	router.Use(middleware.Recover())

	log.Info(`parse binlog tool is started`)

	group := router.Group("/binlog")
	group.GET("/", func(c echo.Context) error {
		return c.String(http.StatusOK, "Hello, World!\n")
	})

	// Start a binlog-parse job.
	group.POST("/parse", parse.ParseBinlog)
	// Progress of one parse job.
	group.GET("/parse/:id", parse.GetParseInfo)
	// List all parse jobs.
	group.GET("/parse", parse.GetAllParse)
	// Stop a running parse job.
	group.DELETE("/parse/:id", parse.ParseBinlogStop)
	// Download the file produced by a parse job.
	group.GET("/parse/download/:id", parse.Download)

	// NOTE(review): the listen address is hard-coded; consider reading it
	// from the config file like the log paths above.
	router.Logger.Fatal(router.Start(":8077"))
}
================================================
FILE: cmd/stats.go
================================================
/*
Copyright © 2020 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/hanchuanchuan/bingo2sql/core"
"github.com/pkg/profile"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// runServer = flagBoolean(flag,, "server", "s", false, "启动API服务")
// summary = flagBoolean(flag, "summary", "S", false, "统计binlog文件中的DML次数等信息")
// configFile = flag.StringP("config", "c", "config.ini", "以服务方式启动时需指定配置文件")
// rootCmd represents the base command when called without any subcommands
// summaryCmd implements `bingo2sql stats`: it counts DML events
// (insert/update/delete, etc.) in the requested binlog range.
var summaryCmd = &cobra.Command{
	Use:   "stats",
	Short: "stats binlog events",
	Long:  `统计binlog文件中的DML次数等信息`,
	Run: func(cmd *cobra.Command, args []string) {
		// Start optional profiling here rather than in init(): the flag
		// value is only available after command-line parsing, and a defer
		// inside init() would stop the profiler before any work was done.
		switch mode {
		case "cpu":
			defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()
		case "mem":
			defer profile.Start(profile.MemProfile, profile.ProfilePath(".")).Stop()
		case "mutex":
			defer profile.Start(profile.MutexProfile, profile.ProfilePath(".")).Stop()
		case "block":
			defer profile.Start(profile.BlockProfile, profile.ProfilePath(".")).Stop()
		}

		if cfg.Debug {
			log.SetLevel(log.DebugLevel)
		} else {
			log.SetLevel(log.ErrorLevel)
		}

		p, err := core.NewBinlogParserStats(&cfg)
		if err != nil {
			log.Error("binlog解析操作失败")
			log.Error(err)
			return
		}
		if err = p.ParserStats(); err != nil {
			log.Error("binlog解析操作失败")
			log.Error(err)
			return
		}
	},
}
// init registers the `stats` subcommand and its flags on the root command.
func init() {
	rootCmd.AddCommand(summaryCmd)

	flag := summaryCmd.Flags()
	flag.SortFlags = false

	flag.StringVarP(&cfg.Host, "host", "h", "", "host")
	flag.Uint16VarP(&cfg.Port, "port", "P", 3306, "port")
	flag.StringVarP(&cfg.User, "user", "u", "", "user")
	flag.StringVarP(&cfg.Password, "password", "p", "", "password")
	flag.Bool("help", false, "help for remote")
	flag.StringVar(&cfg.StartFile, "start-file", "", "起始binlog文件")
	flag.StringVar(&cfg.StopFile, "stop-file", "", "结束binlog文件")
	flag.IntVar(&cfg.StartPosition, "start-pos", 0, "起始位置")
	flag.IntVar(&cfg.StopPosition, "stop-pos", 0, "结束位置")
	flag.StringVar(&cfg.StartTime, "start-time", "", "开始时间")
	flag.StringVar(&cfg.StopTime, "stop-time", "", "结束时间")
	flag.StringVarP(&cfg.Databases, "databases", "d", "", "数据库列表,多个时以逗号分隔")
	flag.StringVarP(&cfg.Tables, "tables", "t", "", "表名,如果数据库为多个,则需指名表前缀,多个时以逗号分隔")
	flag.BoolVarP(&cfg.ParseDDL, "ddl", "", false, "解析DDL语句(仅正向SQL)")
	flag.IntVar(&cfg.Threads, "threads", 64, "解析线程数,影响文件解析速度")
	flag.StringVarP(&cfg.OutputFileStr, "output", "o", "", "输出到指定文件")
	flag.StringVar(&mode, "profile-mode", "", "enable profiling mode, one of [cpu, mem, mutex, block]")

	// BUG FIX: the profiling switch that used to live here was dead code:
	// `mode` is always empty at init time (flags are parsed later), and a
	// defer inside init() would have stopped the profiler as soon as init
	// returned. Profiling must be started in the command's Run function.
}
================================================
FILE: cnf/config.ini
================================================
[Bingo]
addr = :8077
log = logs/go.log
httplog = logs/http.log
writeTimeout = 30
socketAddr = 127.0.0.1:8090
# 日志级别: debug info warn error fatal
logLevel = debug
# 数据库配置
[DBConfig]
Host = 127.0.0.1
Port = 3306
User = test
Password = test
DB = dbmonitor
# 数据库配置
[BackupDBConfig]
Host = 127.0.0.1
Port = 20001
User = test
Password = test
================================================
FILE: core/parseFile.go
================================================
package core
import (
"bufio"
"os"
"strings"
"sync"
"time"
"github.com/go-mysql-org/go-mysql/mysql"
"github.com/go-mysql-org/go-mysql/replication"
"github.com/mholt/archiver/v3"
log "github.com/sirupsen/logrus"
)
// parserFile parses a local binlog file (p.startFile) starting at
// cfg.StartPosition, feeding every event through parseSingleEvent and a
// dedicated writer goroutine. When cfg.SocketUser is set, progress
// notifications are pushed over websocket and the result file is
// tar.gz-archived for download when parsing finishes.
func (p *MyBinlogParser) parserFile() error {
	var wg sync.WaitGroup

	defer timeTrack(time.Now(), "parserFile")
	defer func() {
		if p.outputFile != nil {
			p.outputFile.Close()
		}
	}()

	var err error
	p.running = true

	if len(p.outputFileName) > 0 {
		if Exists(p.outputFileName) {
			// Truncate instead of appending when the file already exists.
			p.outputFile, err = os.OpenFile(p.outputFileName,
				os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
		} else {
			p.outputFile, err = os.Create(p.outputFileName)
		}
		if err != nil {
			return err
		}
		p.bufferWriter = bufio.NewWriter(p.outputFile)
	}

	b := replication.NewBinlogParser()
	b.SetUseDecimal(true)

	p.currentPosition = mysql.Position{
		Name: p.startFile,
		Pos:  uint32(p.cfg.StartPosition),
	}

	sendTime := time.Now().Add(time.Second * 5)
	sendCount := 0

	wg.Add(1)
	go p.ProcessChan(&wg)

	f := func(e *replication.BinlogEvent) error {
		ok, err := p.parseSingleEvent(e)
		if err != nil {
			return err
		}
		if !ok {
			b.Stop()
		}

		if e.Header.EventType == replication.QUERY_EVENT ||
			e.Header.EventType == replication.UPDATE_ROWS_EVENTv2 ||
			e.Header.EventType == replication.WRITE_ROWS_EVENTv2 ||
			e.Header.EventType == replication.DELETE_ROWS_EVENTv2 {

			if len(p.cfg.SocketUser) > 0 {
				// Push a websocket progress notification at most every 5s.
				if p.changeRows > sendCount && time.Now().After(sendTime) {
					sendCount = p.changeRows
					sendTime = time.Now().Add(time.Second * 5)

					kwargs := map[string]interface{}{"rows": p.changeRows}
					if p.stopTimestamp > 0 && p.startTimestamp > 0 && p.stopTimestamp > p.startTimestamp {
						kwargs["pct"] = (e.Header.Timestamp - p.startTimestamp) * 100 / (p.stopTimestamp - p.startTimestamp)
					}
					go sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度", "", kwargs)
				}
			}

			if p.cfg.MaxRows > 0 && p.changeRows >= p.cfg.MaxRows {
				log.Info("已超出最大行数")
				if !p.cfg.StopNever {
					b.Stop()
					return nil
				}
			}
		}

		// Re-check the stop time so the parser does not hang on the
		// boundary when no new events arrive.
		if e.Header.Timestamp > 0 {
			if p.stopTimestamp > 0 && e.Header.Timestamp >= p.stopTimestamp {
				log.Warn("已超出结束时间")
				b.Stop()
				return nil
			}
		}

		return nil
	}

	err = b.ParseFile(p.startFile, int64(p.cfg.StartPosition), f)
	if err != nil {
		println(err.Error())
	}

	log.Info("操作完成")

	close(p.ch)
	wg.Wait()

	if len(p.cfg.SocketUser) > 0 {
		if p.changeRows > 0 {
			kwargs := map[string]interface{}{"rows": p.changeRows}
			kwargs["pct"] = 99
			go sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度", "", kwargs)

			// Derive the archive name by replacing the file extension.
			var url string
			if strings.Count(p.outputFileName, ".") > 0 {
				a := strings.Split(p.outputFileName, ".")
				url = strings.Join(a[0:len(a)-1], ".")
			} else {
				url = p.outputFileName
			}
			url = url + ".tar.gz"

			err := archiver.Archive([]string{p.outputFileName}, url)
			if err != nil {
				return err
			}

			// BUG FIX: the Stat error used to be discarded, which could
			// dereference a nil FileInfo below.
			fileInfo, err := os.Stat(url)
			if err != nil {
				return err
			}
			// Archive size reported to the client.
			filesize := fileInfo.Size()

			// Remove the raw SQL file once it has been archived.
			p.clear()
			log.Info("压缩完成")
			kwargs = map[string]interface{}{"ok": "1", "pct": 100, "rows": p.changeRows,
				"url": "/go/download/" + strings.Replace(url, "../", "", -1), "size": filesize}
			go sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度",
				"", kwargs)
		} else {
			// No rows parsed: remove the (empty) output file.
			p.clear()
			kwargs := map[string]interface{}{"ok": "1", "size": 0, "pct": 100, "rows": 0}
			go sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度", "", kwargs)
		}
	}
	log.Info("操作结束")
	return nil
}
================================================
FILE: core/parser.go
================================================
package core
import (
"bufio"
"bytes"
"context"
"database/sql/driver"
"errors"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/go-mysql-org/go-mysql/mysql"
"github.com/go-mysql-org/go-mysql/replication"
uuid "github.com/hanchuanchuan/bingo2sql/utils/uuid"
"github.com/jinzhu/gorm"
"github.com/jinzhu/now"
"github.com/mholt/archiver/v3"
"github.com/shopspring/decimal"
log "github.com/sirupsen/logrus"
"github.com/hanchuanchuan/goInception/ast"
tidb "github.com/hanchuanchuan/goInception/mysql"
tidbParser "github.com/hanchuanchuan/goInception/parser"
)
// digits01/digits10 are lookup tables for fast two-digit decimal formatting
// (ones digit and tens digit of 0-99). NOTE(review): presumably consumed by
// the SQL value interpolation code (not visible in this chunk) — the same
// idiom as go-sql-driver/mysql's datetime formatting; confirm against
// InterpolateParams.
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
// Column is one row of information_schema.COLUMNS for a parsed table.
type Column struct {
	gorm.Model
	ColumnName       string `gorm:"Column:COLUMN_NAME"`
	CollationName    string `gorm:"Column:COLLATION_NAME"`
	CharacterSetName string `gorm:"Column:CHARACTER_SET_NAME"`
	ColumnComment    string `gorm:"Column:COLUMN_COMMENT"`
	ColumnType       string `gorm:"Column:COLUMN_TYPE"`
	ColumnKey        string `gorm:"Column:COLUMN_KEY"`
	Extra            string `gorm:"Column:EXTRA"`

	// isGenerated caches the result of IsGenerated (nil = not computed yet).
	isGenerated *bool `gorm:"-"`
}
// IsGenerated reports whether the column is a MySQL generated column, i.e.
// its EXTRA metadata contains "VIRTUAL GENERATED" or "STORED GENERATED".
// The answer is computed once and memoized in f.isGenerated.
func (f *Column) IsGenerated() bool {
	if cached := f.isGenerated; cached != nil {
		return *cached
	}
	generated := strings.Contains(f.Extra, "VIRTUAL GENERATED") ||
		strings.Contains(f.Extra, "STORED GENERATED")
	f.isGenerated = &generated
	return generated
}
// IsUnsigned reports whether the column type is unsigned.
// MySQL treats zerofill columns as unsigned as well.
func (f *Column) IsUnsigned() bool {
	return strings.Contains(f.ColumnType, "unsigned") ||
		strings.Contains(f.ColumnType, "zerofill")
}
// Table caches the schema of one binlog table, keyed by the table id taken
// from the TableMapEvent.
type Table struct {
	tableID uint64

	// TableName/Schema are used only in local (offline) parse mode.
	TableName string
	Schema    string

	Columns    []Column
	hasPrimary bool
	// primarys holds the column indexes of the primary-key columns.
	primarys map[int]bool
}
// ValidColumns returns the table's columns with generated (virtual/stored)
// columns filtered out. A nil receiver yields an empty result.
func (t *Table) ValidColumns() (result []Column) {
	if t == nil {
		return nil
	}
	for _, col := range t.Columns {
		if col.IsGenerated() {
			continue
		}
		result = append(result, col)
	}
	return result
}
// MasterStatus holds the binlog coordinates of the source server (which,
// relative to bingo2sql, may itself be a read-only replica). The column
// names match the output of SHOW MASTER STATUS.
type MasterStatus struct {
	gorm.Model
	File              string `gorm:"Column:File"`
	Position          int    `gorm:"Column:Position"`
	Binlog_Do_DB      string `gorm:"Column:Binlog_Do_DB"`
	Binlog_Ignore_DB  string `gorm:"Column:Binlog_Ignore_DB"`
	Executed_Gtid_Set string `gorm:"Column:Executed_Gtid_Set"`
}
// MasterLog is one entry of the source server's binlog file list
// (log name and file size).
type MasterLog struct {
	gorm.Model
	Name string `gorm:"Column:Log_name"`
	Size int    `gorm:"Column:File_size"`
}
// GtidSetInfo is one parsed uuid:from[-to] range from --include-gtids,
// used to include or exclude a GTID range during parsing.
type GtidSetInfo struct {
	uuid       []byte // 16-byte binary server uuid (SID)
	startSeqNo int64  // first sequence number of the range (inclusive)
	stopSeqNo  int64  // last sequence number of the range (inclusive)
}
// BinlogParserConfig holds all user-facing parse options.
type BinlogParserConfig struct {
	Flavor string

	Host     string `json:"host" form:"host"`
	Port     uint16 `json:"port" form:"port"`
	User     string `json:"user" form:"user"`
	Password string `json:"password" form:"password"`

	// binlog file / position range
	StartFile     string `json:"start_file" form:"start_file"`
	StopFile      string `json:"stop_file" form:"stop_file"`
	StartPosition int    `json:"start_position" form:"start_position"`
	StopPosition  int    `json:"stop_position" form:"stop_position"`

	// time range
	StartTime string `json:"start_time" form:"start_time"`
	StopTime  string `json:"stop_time" form:"stop_time"`

	// generate rollback (flashback) SQL instead of forward SQL
	Flashback bool `json:"flashback" form:"flashback"`

	// also parse DDL statements (forward SQL only)
	ParseDDL bool `json:"parse_ddl" form:"parse_ddl"`

	// database filter (comma separated)
	Databases string `json:"databases" form:"databases"`
	// table filter (comma separated; db-qualified names allowed)
	Tables string `json:"tables" form:"tables"`
	// statement-type filter (insert/update/delete)
	SqlType string `json:"sql_type" form:"sql_type"`
	// stop after this many rows (0 = unlimited)
	MaxRows int `json:"max_rows" form:"max_rows"`

	// worker threads; affects file-parse throughput
	Threads int `json:"threads" form:"threads"`

	// output file; empty means stdout
	OutputFileStr string

	// debug mode: verbose logging
	Debug bool `json:"debug" form:"debug"`

	// websocket user name used for progress notifications
	SocketUser string `json:"socket_user" form:"socket_user"`
	// parse-task start time (unix seconds); used to build ID()
	beginTime int64

	ThreadID uint32 `json:"thread_id" form:"thread_id"`

	IncludeGtids string `json:"include_gtids" form:"include_gtids"`

	// strip primary-key columns from INSERT statements (default false)
	RemovePrimary bool

	// keep parsing forever instead of stopping at end of binlog
	StopNever bool

	// minimal UPDATEs: omit unchanged columns from the generated statement
	MinimalUpdate bool

	// write INSERTs using multi-row syntax (one statement, many VALUES lists)
	MinimalInsert bool

	// output options
	ShowGTID    bool // print GTIDs
	ShowTime    bool // print execution time (first occurrence per timestamp)
	ShowAllTime bool // print every execution time
	ShowThread  bool // print thread id, to correlate one session's changes

	// custom unique key; when set, ID() returns it verbatim
	uniqueKey string
}
// Parser is the sink interface for generated SQL: implementations receive
// the SQL bytes together with the binlog event they were derived from.
type Parser interface {
	write([]byte, *replication.BinlogEvent)
}
// row is the unit passed over the channel from the parsing goroutine to the
// writer goroutine.
type row struct {
	sql  []byte
	e    *replication.BinlogEvent
	gtid []byte
	opid string
	// threadID is printed alongside the statement; left zero when thread
	// output is not requested.
	threadID uint32
}
// baseParser carries the state shared by the different parser flavors.
type baseParser struct {
	ctx      context.Context
	cancelFn context.CancelFunc

	// allTables caches table schemas by table id so they can be reused.
	allTables map[uint64]*Table

	cfg *BinlogParserConfig

	startFile string
	stopFile  string

	// db is the connection to the binlog source database.
	db *gorm.DB

	running bool

	lastTimestamp uint32

	write1 Parser

	// ch feeds generated rows to the writer goroutine.
	ch chan *row

	OnlyDatabases map[string]bool
	OnlyTables    map[string]string
	SqlTypes      map[string]bool

	// localMode: parse a local binlog file with a user-supplied schema file.
	localMode bool
}
// MyBinlogParser is the main binlog-to-SQL parser.
type MyBinlogParser struct {
	baseParser

	masterStatus *MasterStatus

	startTimestamp uint32
	stopTimestamp  uint32

	// output file handle and name ("" = stdout)
	outputFile     *os.File
	outputFileName string

	// bufferWriter wraps outputFile. Buffered writes proved roughly 5x
	// faster than direct file writes on large (~600MB / 5M row) binlogs.
	bufferWriter *bufio.Writer

	gtid      []byte
	lastGtid  []byte
	gtidEvent *replication.GTIDEvent

	includeGtids []*GtidSetInfo
	jumpGtids    map[*GtidSetInfo]bool

	// tableCacheList caches table schemas by name (local parse mode only).
	tableCacheList map[string]*Table

	// transient parse state
	currentPosition  mysql.Position // current binlog position
	currentThreadID  uint32         // thread id of the current event
	changeRows       int            // number of SQL rows produced so far
	currentTimtstamp uint32         // timestamp of the current event

	// binlogs lists the master's binlog files, used to compute percent done.
	binlogs []MasterLog

	// ignoreTables records tables whose schema changed and can no longer be
	// parsed correctly (warned once, then skipped).
	ignoreTables map[string]interface{}
}
var (
	// TimeFormat is the timestamp layout used throughout the parser.
	TimeFormat string = "2006-01-02 15:04:05"
	// TimeLocation is the time zone used when parsing --start-time/--stop-time.
	TimeLocation *time.Location
)
// init loads the default CST (+08:00) time zone used for parsing
// --start-time/--stop-time values. BUG FIX: the LoadLocation error used to
// be discarded, which could leave TimeLocation nil on systems without a tz
// database (e.g. minimal containers); fall back to a fixed +08:00 zone so
// TimeLocation is never nil.
func init() {
	var err error
	TimeLocation, err = time.LoadLocation("Asia/Shanghai")
	if err != nil {
		TimeLocation = time.FixedZone("CST", 8*60*60)
	}
}
// SetUniqueKey installs a custom unique identifier for this parse task.
// Once set, ID() returns this value verbatim instead of a derived one.
func (cfg *BinlogParserConfig) SetUniqueKey(key string) {
	cfg.uniqueKey = key
}
// ID returns the unique identifier of this parse task. A key installed via
// SetUniqueKey wins; otherwise the id is derived from the host (dots
// replaced with underscores, truncated to 20 chars), the port, and the task
// begin time, with a "_rollback" suffix for flashback jobs.
func (cfg *BinlogParserConfig) ID() string {
	if cfg.uniqueKey != "" {
		return cfg.uniqueKey
	}

	host := strings.Replace(cfg.Host, ".", "_", -1)
	if len(host) > 20 {
		host = host[:20]
	}

	suffix := strconv.FormatInt(cfg.beginTime, 10)
	if cfg.Flashback {
		suffix += "_rollback"
	}

	return fmt.Sprintf("%s_%d_%s", host, cfg.Port, suffix)
}
// Parser runs a remote parse: it connects to the configured MySQL server as
// a replica, streams binlog events, converts them to SQL via a writer
// goroutine, and optionally reports progress / archives the output for a
// websocket user. When no host is configured but a start file is, it falls
// back to local file parsing (parserFile).
func (p *MyBinlogParser) Parser() error {
	if p.cfg.Host == "" && p.cfg.StartFile != "" {
		_, err := os.Stat(p.cfg.StartFile)
		if err != nil {
			return fmt.Errorf("open start_file: %w", err)
		}
		return p.parserFile()
	}

	var wg sync.WaitGroup

	defer timeTrack(time.Now(), "Parser")
	defer func() {
		if p.outputFile != nil {
			p.outputFile.Close()
		}
	}()

	var err error
	p.running = true

	p.db, err = p.getDB()
	if err != nil {
		return err
	}
	defer p.db.Close()

	if len(p.outputFileName) > 0 {
		if Exists(p.outputFileName) {
			// Truncate instead of appending when the file already exists.
			p.outputFile, err = os.OpenFile(p.outputFileName,
				os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
		} else {
			p.outputFile, err = os.Create(p.outputFileName)
		}
		if err != nil {
			return fmt.Errorf("create output file: %w", err)
		}
		p.bufferWriter = bufio.NewWriter(p.outputFile)
	}

	cfg := replication.BinlogSyncerConfig{
		ServerID:   2000000111,
		Flavor:     p.cfg.Flavor,
		Host:       p.cfg.Host,
		Port:       p.cfg.Port,
		User:       p.cfg.User,
		Password:   p.cfg.Password,
		UseDecimal: true,
	}

	b := replication.NewBinlogSyncer(cfg)
	defer b.Close()

	p.currentPosition = mysql.Position{
		Name: p.startFile,
		Pos:  uint32(p.cfg.StartPosition),
	}

	s, err := b.StartSync(p.currentPosition)
	if err != nil {
		log.Infof("Start sync error: %v\n", err)
		return fmt.Errorf("Start sync: %w", err)
	}

	sendTime := time.Now().Add(time.Second * 5)
	sendCount := 0

	wg.Add(1)
	go p.ProcessChan(&wg)

	var finishError error

FOR:
	for {
		e, err := s.GetEvent(p.ctx)
		if err != nil {
			if err == context.DeadlineExceeded {
				// No new event before the timeout: stop normally.
				log.Warnf("Waiting for timeout(10s), no new event is generated, automatically stop: %v\n", err)
				err = nil
			} else {
				log.Errorf("Get event error: %v\n", err)
			}
			// BUG FIX: only wrap a real error. Wrapping nil with %w used to
			// produce a spurious non-nil "...%!w(<nil>)" error on the
			// timeout path above.
			if err != nil {
				finishError = fmt.Errorf("get binlog event: %w", err)
			}
			break FOR
		}

		select {
		case <-p.ctx.Done():
			finishError = context.Canceled
			break FOR
		default:
			ok, err := p.parseSingleEvent(e)
			if err != nil {
				finishError = err
				break FOR
			}
			if !ok {
				break FOR
			}

			if len(p.cfg.SocketUser) > 0 {
				// Push a websocket progress notification at most every 5s.
				if p.changeRows > sendCount && time.Now().After(sendTime) {
					sendCount = p.changeRows
					sendTime = time.Now().Add(time.Second * 5)

					kwargs := map[string]interface{}{"rows": p.changeRows}
					if p.stopTimestamp > 0 && p.startTimestamp > 0 && p.stopTimestamp > p.startTimestamp {
						kwargs["pct"] = (e.Header.Timestamp - p.startTimestamp) * 100 / (p.stopTimestamp - p.startTimestamp)
					}
					go sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度", "", kwargs)
				}
			}
		}
	}

	close(p.ch)
	wg.Wait()

	if len(p.cfg.SocketUser) > 0 {
		if p.changeRows > 0 {
			kwargs := map[string]interface{}{"rows": p.changeRows}
			kwargs["pct"] = 99
			go sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度", "", kwargs)

			// Archive the output so it can be downloaded.
			fileSize, err := p.Archive()
			if err != nil {
				return err
			}
			kwargs = map[string]interface{}{
				"ok":   "1",
				"pct":  100,
				"rows": p.changeRows,
				"size": fileSize}
			go sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度",
				"", kwargs)
		} else {
			// No rows parsed: remove the (empty) output file.
			p.clear()
			kwargs := map[string]interface{}{"ok": "1", "size": 0, "pct": 100, "rows": 0}
			go sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度", "", kwargs)
		}
	}

	p.running = false
	log.WithField("parsed_rows", p.changeRows).Info("解析完成")
	return finishError
}
// checkFinish compares the current binlog position against the configured
// stop file/position and against the master's latest position, and reports
// whether the parse loop should end. It returns:
//
//	 1 - past the stop boundary
//	 0 - exactly at the stop boundary
//	-1 - before the stop boundary
//
// Callers apply the >-result at event start and the ==-result at event end,
// so the loop cannot hang when the boundary falls exactly on an event edge.
func (p *MyBinlogParser) checkFinish(currentPosition *mysql.Position) int {
	returnValue := -1

	var stopMsg string
	// Stop when the current file name sorts past the configured stop file...
	if p.stopFile != "" && currentPosition.Name > p.stopFile {
		stopMsg = "超出指定结束文件"
		returnValue = 1
	} else if p.cfg.StopPosition > 0 && currentPosition.Name == p.stopFile {
		// ...or when the position inside the stop file is reached/passed.
		if currentPosition.Pos > uint32(p.cfg.StopPosition) {
			stopMsg = "超出指定位置"
			returnValue = 1
		} else if currentPosition.Pos == uint32(p.cfg.StopPosition) {
			stopMsg = "超出指定位置"
			returnValue = 0
		}
	}

	// Also stop at the master's most recent binlog coordinates.
	if p.masterStatus != nil {
		if currentPosition.Name > p.masterStatus.File ||
			currentPosition.Name == p.masterStatus.File &&
				currentPosition.Pos > uint32(p.masterStatus.Position) {
			stopMsg = "超出最新binlog位置"
			returnValue = 1
		} else if currentPosition.Name == p.masterStatus.File &&
			currentPosition.Pos == uint32(p.masterStatus.Position) {
			stopMsg = "超出最新binlog位置"
			returnValue = 0
		}
	}

	if stopMsg != "" {
		log.WithFields(log.Fields{
			"当前文件": currentPosition.Name,
			"结束文件": p.stopFile,
			"当前位置": currentPosition.Pos,
			"结束位置": p.cfg.StopPosition,
		}).Info(stopMsg)
	}
	return returnValue
}
// clear removes the generated output file; called after archiving succeeds
// or when the parse produced no rows.
func (p *MyBinlogParser) clear() {
	if err := os.Remove(p.outputFileName); err != nil {
		log.Error(err)
		log.Errorf("删除文件失败! %s", p.outputFileName)
	}
}
// isGtidEventInGtidSet classifies the current GTID event against the
// configured --include-gtids ranges:
//
//	0 - inside a range: parse this transaction
//	1 - outside all ranges: skip this transaction
//	2 - every range has been passed: stop parsing entirely
func (p *MyBinlogParser) isGtidEventInGtidSet() (status uint8) {
	e := p.gtidEvent

	// With no filter configured, or no GTID event seen yet, parse everything.
	if len(p.includeGtids) == 0 || e == nil {
		return 0
	}

	for _, info := range p.includeGtids {
		passed := p.jumpGtids[info]
		if passed || !bytes.Equal(e.SID, info.uuid) {
			continue
		}
		switch {
		case e.GNO < info.startSeqNo:
			// Before the range: skip, but keep the range alive.
			return 1
		case e.GNO > info.stopSeqNo:
			// Past the range: mark it as exhausted.
			p.jumpGtids[info] = true
		default:
			return 0
		}
	}

	// When every configured range has been exceeded, nothing more can match.
	allPassed := true
	for _, passed := range p.jumpGtids {
		if !passed {
			allPassed = false
		}
	}
	if allPassed {
		return 2
	}
	return 1
}
// Stop cancels the running parse via its context and marks it not running.
func (p *MyBinlogParser) Stop() {
	log.Warn("Process killed")

	if cancel := p.cancelFn; cancel != nil {
		cancel()
	}
	p.running = false
}
// write queues one generated SQL statement for the writer goroutine,
// attaching thread-id / GTID metadata when the output options request them.
func (p *MyBinlogParser) write(b []byte, binEvent *replication.BinlogEvent) {
	r := &row{
		sql: b,
		e:   binEvent,
	}
	cfg := p.Config()
	if cfg.ShowThread {
		r.threadID = p.currentThreadID
	}
	if cfg.ShowGTID {
		r.gtid = p.gtid
	}
	p.ch <- r
}
// byteEquals reports whether two byte slices have identical contents.
// It delegates to bytes.Equal (the package is already imported) instead of
// the previous hand-rolled length-then-loop comparison.
func byteEquals(v1, v2 []byte) bool {
	return bytes.Equal(v1, v2)
}
// myWrite renders one queued row to text: an optional "# <gtid>" header
// (printed only when the GTID changed since the last row), the SQL itself,
// then a trailing "; [# time] [# thread_id=N]" suffix according to the
// ShowAllTime/ShowTime/ShowThread output options.
func (p *MyBinlogParser) myWrite(data *row) {
	var buf bytes.Buffer

	// Print the GTID as a comment line, but only when it differs from the
	// previously printed one.
	if p.Config().ShowGTID && len(data.gtid) > 0 {
		if len(p.lastGtid) == 0 {
			p.lastGtid = data.gtid

			buf.WriteString("# ")
			buf.Write(data.gtid)
			buf.WriteString("\n")
		} else if !byteEquals(data.gtid, p.lastGtid) {
			p.lastGtid = data.gtid
			buf.WriteString("\n# ")
			buf.Write(data.gtid)
			buf.WriteString("\n")
		}
	}

	buf.Write(data.sql)

	if p.Config().ShowAllTime {
		// Every statement gets its execution time.
		timeinfo := fmt.Sprintf("; # %s",
			time.Unix(int64(data.e.Header.Timestamp), 0).Format(TimeFormat))
		buf.WriteString(timeinfo)
		if p.Config().ShowThread {
			buf.WriteString(" # thread_id=")
			buf.WriteString(strconv.Itoa(int(data.threadID)))
		}
		buf.WriteString("\n")
	} else if p.Config().ShowTime {
		// Only the first statement of each distinct timestamp gets the time.
		if p.lastTimestamp != data.e.Header.Timestamp {
			timeinfo := fmt.Sprintf("; # %s",
				time.Unix(int64(data.e.Header.Timestamp), 0).Format(TimeFormat))
			buf.WriteString(timeinfo)
			p.lastTimestamp = data.e.Header.Timestamp
		} else {
			buf.WriteString(";")
		}
		if p.Config().ShowThread {
			buf.WriteString(" # thread_id=")
			buf.WriteString(strconv.Itoa(int(data.threadID)))
		}
		buf.WriteString("\n")
	} else {
		if p.Config().ShowThread {
			buf.WriteString(" # thread_id=")
			buf.WriteString(strconv.Itoa(int(data.threadID)))
		} else {
			buf.WriteString(";")
		}
		buf.WriteString("\n")
	}

	p.write2(buf.Bytes())
}
// write2 sends raw bytes either to the buffered output file (when an output
// file name is configured) or to stdout.
func (p *MyBinlogParser) write2(b []byte) {
	if len(p.outputFileName) == 0 {
		fmt.Print(string(b))
		return
	}
	p.bufferWriter.Write(b)
}
// NewBinlogParser builds a MyBinlogParser from cfg. It validates the GTID
// filter, decides local vs remote mode (local when Host is empty, in which
// case --tables must name a schema SQL file), snapshots the master status in
// remote mode, and finishes setup via parserInit. Returns an error for any
// invalid configuration.
func NewBinlogParser(ctx context.Context, cfg *BinlogParserConfig) (*MyBinlogParser, error) {
	p := new(MyBinlogParser)

	p.allTables = make(map[uint64]*Table)
	p.jumpGtids = make(map[*GtidSetInfo]bool)

	p.ch = make(chan *row, cfg.Threads)

	p.write1 = p

	cfg.beginTime = time.Now().Unix()
	p.cfg = cfg

	p.OnlyDatabases = make(map[string]bool)
	// OnlyTables maps [table_name] = db_name ("" = any database).
	p.OnlyTables = make(map[string]string)
	p.SqlTypes = make(map[string]bool)
	p.ignoreTables = make(map[string]interface{})

	p.startFile = cfg.StartFile
	p.stopFile = cfg.StopFile

	if err := p.parseGtidSets(); err != nil {
		return nil, err
	}

	if len(cfg.SocketUser) > 0 {
		// Websocket mode always writes to a downloadable file named after
		// the task id.
		p.outputFileName = fmt.Sprintf("files/%s.sql", p.cfg.ID())
	} else {
		if cfg.OutputFileStr != "" {
			p.outputFileName = cfg.OutputFileStr
		} else {
			p.outputFile = os.Stdout
		}
	}

	// Local parse mode: Host is empty and --tables names a schema SQL file.
	if p.cfg.Host == "" {
		if p.cfg.Tables == "" {
			return nil, fmt.Errorf("本地解析模式请指定表结构文件--tables")
		}
		_, err := os.Stat(p.cfg.Tables)
		if err != nil {
			return nil, fmt.Errorf("读取表结构文件失败(%s): %v", p.cfg.Tables, err)
		}
		if err := p.readTableSchema(p.cfg.Tables); err != nil {
			return nil, fmt.Errorf("读取表结构文件失败(%s): %v", p.cfg.Tables, err)
		}
		if len(p.tableCacheList) == 0 {
			return nil, fmt.Errorf("未找到建表语句! 请提供需要解析的表对应的建表语句,并以分号分隔.")
		} else {
			log.Infof("共读取到表结构 %d 个", len(p.tableCacheList))
		}
		p.localMode = true
	}

	if !p.localMode {
		var err error
		p.db, err = p.getDB()
		if err != nil {
			return nil, err
		}
		// This connection is only needed to snapshot the master status;
		// Parser() opens its own connection later.
		defer p.db.Close()

		p.masterStatus, err = p.mysqlMasterStatus()
		if err != nil {
			return nil, err
		}
		if p.cfg.Debug {
			p.db.LogMode(true)
		}
	}

	if err := p.parserInit(); err != nil {
		return nil, err
	}

	p.ctx, p.cancelFn = context.WithCancel(ctx)
	return p, nil
}
// ProcessChan is the writer goroutine: it drains the row channel and writes
// each entry. A nil row (delivered when the channel is closed) flushes the
// output buffer and signals completion on wg.
func (p *MyBinlogParser) ProcessChan(wg *sync.WaitGroup) {
	for {
		r := <-p.ch
		if r != nil {
			p.myWrite(r)
			continue
		}
		if len(p.outputFileName) > 0 {
			p.bufferWriter.Flush()
		}
		wg.Done()
		return
	}
}
// parseGtidSets parses cfg.IncludeGtids — "uuid:from[-to]" entries separated
// by commas — into p.includeGtids, and seeds p.jumpGtids with false for each
// range.
func (p *MyBinlogParser) parseGtidSets() error {
	if len(p.cfg.IncludeGtids) == 0 {
		return nil
	}

	for _, set := range strings.Split(p.cfg.IncludeGtids, ",") {
		parts := strings.Split(set, ":")
		if len(parts) != 2 {
			return errors.New("错误GTID格式!正确格式为uuid:编号[-编号],多个时以逗号分隔")
		}

		// Decode the uuid into its 16-byte binary form.
		u2, err := uuid.FromString(parts[0])
		if err != nil {
			return fmt.Errorf("GTID解析失败!(%s)", err.Error())
		}
		info := &GtidSetInfo{uuid: u2.Bytes()}

		// A single number means a one-element range (from == to).
		nos := strings.Split(parts[1], "-")
		start, err := strconv.ParseInt(nos[0], 10, 64)
		if err != nil {
			return fmt.Errorf("GTID解析失败!(%s)", err.Error())
		}
		info.startSeqNo = start
		info.stopSeqNo = start
		if len(nos) > 1 {
			stop, err := strconv.ParseInt(nos[1], 10, 64)
			if err != nil {
				return fmt.Errorf("GTID解析失败!(%s)", err.Error())
			}
			info.stopSeqNo = stop
		}

		p.jumpGtids[info] = false
		p.includeGtids = append(p.includeGtids, info)
	}

	log.Debugf("gtid集合数量: %d", len(p.includeGtids))
	return nil
}
// getDB opens a gorm connection to the configured MySQL instance (mysql
// schema, utf8mb4, parseTime enabled). Host, port and user are required.
func (p *baseParser) getDB() (*gorm.DB, error) {
	switch {
	case p.cfg.Host == "":
		return nil, fmt.Errorf("请指定数据库地址")
	case p.cfg.Port == 0:
		return nil, fmt.Errorf("请指定数据库端口")
	case p.cfg.User == "":
		return nil, fmt.Errorf("请指定数据库用户名")
	}

	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/mysql?charset=utf8mb4&parseTime=True&loc=Local",
		p.cfg.User, p.cfg.Password, p.cfg.Host, p.cfg.Port)
	return gorm.Open("mysql", dsn)
}
// parserInit prepares the parse run: it opens the output file, converts the
// start/stop times to unix timestamps, auto-detects the start/stop binlog
// files from those timestamps when no start file was given, and builds the
// statement-type / database / table filter maps.
func (p *MyBinlogParser) parserInit() error {
	defer timeTrack(time.Now(), "parserInit")

	if len(p.outputFileName) > 0 {
		var err error
		if Exists(p.outputFileName) {
			// Truncate instead of appending when the file already exists.
			p.outputFile, err = os.OpenFile(p.outputFileName,
				os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
		} else {
			p.outputFile, err = os.Create(p.outputFileName)
		}
		p.checkError(err)
		// NOTE(review): this handle is closed when parserInit returns;
		// Parser()/parserFile() re-open the file for the actual run.
		defer p.outputFile.Close()
		p.bufferWriter = bufio.NewWriter(p.outputFile)
	}

	if p.cfg.StartTime != "" {
		t, err := now.Parse(p.cfg.StartTime)
		if err != nil {
			return err
		}
		p.startTimestamp = uint32(t.Unix())
	}
	if p.cfg.StopTime != "" {
		t, err := now.Parse(p.cfg.StopTime)
		if err != nil {
			return err
		}
		p.stopTimestamp = uint32(t.Unix())
	}

	// When no start file was specified, derive it from the master's binlog
	// list and the first timestamp of each file.
	if len(p.startFile) == 0 {
		binlogs := p.autoParseBinlogPosition()
		if len(binlogs) == 0 {
			p.checkError(errors.New("无法获取master binlog"))
		}

		p.startFile = binlogs[0].Name

		if p.startTimestamp > 0 || p.stopTimestamp > 0 {
			for _, masterLog := range binlogs {
				timestamp, err := p.getBinlogFirstTimestamp(masterLog.Name)
				p.checkError(err)

				log.WithFields(log.Fields{
					"起始时间":       time.Unix(int64(timestamp), 0).Format(TimeFormat),
					"binlogFile": masterLog.Name,
				}).Info("binlog信息")

				// The last file starting at or before startTimestamp wins.
				if timestamp <= p.startTimestamp {
					p.startFile = masterLog.Name
				}

				// The first file starting after stopTimestamp is the stop file.
				if p.stopFile == "" && p.stopTimestamp > 0 && timestamp > p.stopTimestamp {
					p.stopFile = masterLog.Name
				}
			}
		}

		if len(p.startFile) == 0 {
			p.checkError(errors.New("未能解析指定时间段的binlog文件,请检查后重试!"))
		}

		log.Infof("根据指定的时间段,解析出的开始binlog文件是:%s,结束文件是:%s\n",
			p.startFile, p.stopFile)
	}

	// Statement-type filter; defaults to insert/update/delete.
	if len(p.cfg.SqlType) > 0 {
		for _, s := range strings.Split(p.cfg.SqlType, ",") {
			p.SqlTypes[s] = true
		}
	} else {
		p.SqlTypes["insert"] = true
		p.SqlTypes["update"] = true
		p.SqlTypes["delete"] = true
	}

	// Database filter: names normalized to lower case, back-quotes stripped.
	if len(p.cfg.Databases) > 0 {
		for _, db := range strings.Split(p.cfg.Databases, ",") {
			db = strings.ToLower(strings.Trim(db, " `"))
			p.OnlyDatabases[db] = true
		}
	}

	// Table filter: "db.table" pins the table to one database; a bare table
	// name matches in any database.
	if len(p.cfg.Tables) > 0 {
		for _, s := range strings.Split(p.cfg.Tables, ",") {
			if strings.Contains(s, ".") {
				names := strings.SplitN(s, ".", 2)
				db, table := names[0], names[1]
				db = strings.ToLower(strings.Trim(db, " `"))
				table = strings.ToLower(strings.Trim(table, " `"))
				p.OnlyTables[table] = db
			} else {
				key := strings.ToLower(strings.Trim(s, " `"))
				p.OnlyTables[key] = ""
			}
		}
	}

	return nil
}
func Exists(filename string) bool {
_, err := os.Stat(filename)
return err == nil || os.IsExist(err)
}
// getBinlogFirstTimestamp connects as a replica, syncs from position 4 of
// the given binlog file, and returns the timestamp of the first event that
// carries a non-zero one. The log level is raised to Fatal for the duration
// to silence the syncer's noisy close/sync messages.
func (p *MyBinlogParser) getBinlogFirstTimestamp(file string) (uint32, error) {
	logLevel := log.GetLevel()
	defer func() {
		// Restore the caller's log level on exit.
		log.SetLevel(logLevel)
	}()
	// Temporarily mute everything below Fatal (close/sync noise).
	log.SetLevel(log.FatalLevel)

	cfg := replication.BinlogSyncerConfig{
		ServerID: 2000000110,
		Flavor:   p.cfg.Flavor,

		Host:     p.cfg.Host,
		Port:     p.cfg.Port,
		User:     p.cfg.User,
		Password: p.cfg.Password,
	}

	b := replication.NewBinlogSyncer(cfg)

	// Position 4 is the first event after the binlog magic header.
	pos := mysql.Position{Name: file,
		Pos: uint32(4)}

	s, err := b.StartSync(pos)
	if err != nil {
		return 0, err
	}
	defer func() {
		b.Close()
	}()
	for {
		e, err := s.GetEvent(context.Background())
		if err != nil {
			return 0, err
		}
		// Leading events (e.g. format description) may carry timestamp 0.
		if e.Header.Timestamp > 0 {
			return e.Header.Timestamp, nil
		}
	}
}
// check 读取文件的函数调用大多数都需要检查错误,
// 使用下面这个错误检查方法可以方便一点
func check(e error) {
if e != nil {
panic(e)
}
}
// timeParseToUnix parses a "2006-01-02 15:04:05" string in TimeLocation and
// returns its unix timestamp as uint32.
func timeParseToUnix(timeStr string) (uint32, error) {
	parsed, err := time.ParseInLocation(TimeFormat, timeStr, TimeLocation)
	if err != nil {
		return 0, err
	}
	return uint32(parsed.Unix()), nil
}
// checkError logs a non-nil error, notifies the websocket user when one is
// configured, and then panics; callers rely on the panic to abort setup.
func (p *MyBinlogParser) checkError(e error) {
	if e == nil {
		return
	}
	log.Error(e)

	if len(p.cfg.SocketUser) > 0 {
		kwargs := map[string]interface{}{"error": e.Error()}
		sendMsg(p.cfg.SocketUser, "binlog_parse_progress", "binlog解析进度",
			"", kwargs)
	}
	panic(e)
}
// schemaFilter reports whether events on the given table should be parsed,
// according to the configured database/table filters (case-insensitive).
// With no filters configured, everything passes.
func (p *MyBinlogParser) schemaFilter(table *replication.TableMapEvent) bool {
	if len(p.OnlyDatabases) == 0 && len(p.OnlyTables) == 0 {
		return true
	}

	dbName := strings.ToLower(string(table.Schema))
	if len(p.OnlyDatabases) > 0 {
		if _, found := p.OnlyDatabases[dbName]; !found {
			return false
		}
	}

	if len(p.OnlyTables) > 0 {
		tableName := strings.ToLower(string(table.Table))
		db, found := p.OnlyTables[tableName]
		if !found {
			return false
		}
		// A non-empty value pins the table to a specific database.
		if db != "" && db != dbName {
			return false
		}
	}
	return true
}
// generateInsertSQL builds INSERT statements for a WRITE_ROWS event.
// Generated columns are skipped; when RemovePrimary is set and the table has
// a primary key, the PK columns are omitted. With MinimalInsert and more
// than one row, a single multi-VALUES statement is emitted instead of one
// statement per row.
func (p *MyBinlogParser) generateInsertSQL(t *Table, e *replication.RowsEvent,
	binEvent *replication.BinlogEvent) error {
	tableName := getTableName(e)

	var columnNames []string
	c := "`%s`"
	template := "INSERT INTO %s(%s) VALUES(%s)"
	if p.cfg.MinimalInsert && len(e.Rows) > 1 {
		template = "INSERT INTO %s(%s) VALUES%s"
	}
	for i, col := range t.Columns {
		if i < int(e.ColumnCount) && !col.IsGenerated() {
			// Honour --remove-primary when the table has a PK.
			if t.hasPrimary && p.cfg.RemovePrimary {
				if _, ok := t.primarys[i]; !ok {
					columnNames = append(columnNames, fmt.Sprintf(c, col.ColumnName))
				}
			} else {
				columnNames = append(columnNames, fmt.Sprintf(c, col.ColumnName))
			}
		}
	}

	// One "?" placeholder per emitted column; MinimalInsert repeats the
	// parenthesized group once per row.
	paramValues := strings.Repeat("?,", len(columnNames))
	paramValues = strings.TrimRight(paramValues, ",")
	if p.cfg.MinimalInsert && len(e.Rows) > 1 {
		paramValues = strings.Repeat("("+paramValues+"),", len(e.Rows))
		paramValues = strings.TrimRight(paramValues, ",")
	}

	sql := fmt.Sprintf(template, tableName, strings.Join(columnNames, ","), paramValues)

	var vv []driver.Value
	for _, rows := range e.Rows {
		for i, d := range rows {
			if t.Columns[i].IsGenerated() {
				continue
			}
			if t.hasPrimary && p.cfg.RemovePrimary {
				if _, ok := t.primarys[i]; ok {
					continue
				}
			}
			// Convert signed binlog values back to unsigned where needed.
			if t.Columns[i].IsUnsigned() {
				d = processValue(d, GetDataTypeBase(t.Columns[i].ColumnType))
			}
			vv = append(vv, d)
		}

		// One statement per row unless MinimalInsert batches them all.
		if !p.cfg.MinimalInsert || len(e.Rows) == 1 {
			r, err := InterpolateParams(sql, vv)
			if err != nil {
				log.Error(err)
			}
			p.write(r, binEvent)
			vv = nil
		}
	}

	if p.cfg.MinimalInsert && len(e.Rows) > 1 {
		r, err := InterpolateParams(sql, vv)
		if err != nil {
			log.Error(err)
		}
		p.write(r, binEvent)
	}

	return nil
}
// checkCanParse reports whether the cached table schema has at least as
// many columns as the binlog event declares. A table with fewer cached
// columns cannot be parsed; it is skipped and warned about only once
// (tracked via p.ignoreTables).
func (p *MyBinlogParser) checkCanParse(t *Table, e *replication.RowsEvent) bool {
	if len(t.Columns) >= int(e.ColumnCount) {
		return true
	}
	name := getTableName(e)
	if _, warned := p.ignoreTables[name]; !warned {
		p.ignoreTables[name] = nil
		log.Warn(fmt.Sprintf("表%s缺少列,已忽略.(当前列数:%d,binlog的列数%d)",
			name, len(t.Columns), e.ColumnCount))
	}
	return false
}
// generateDeleteSQL renders one DELETE statement per row of the event
// (also used as the flashback form of an insert event) and emits each
// via p.write. When the table has a primary/unique key, only the key
// columns appear in the WHERE clause; otherwise every column does.
// NULL values are matched with "IS ?" instead of "=?".
func (p *MyBinlogParser) generateDeleteSQL(t *Table, e *replication.RowsEvent,
	binEvent *replication.BinlogEvent) error {
	tableName := getTableName(e)
	prefix := fmt.Sprintf("DELETE FROM %s WHERE", tableName)
	condNull := " `%s` IS ?"
	condEq := " `%s`=?"
	for _, row := range e.Rows {
		var conds []string
		var values []driver.Value
		for i, d := range row {
			// with a key available, restrict the WHERE clause to key columns
			if t.hasPrimary {
				if _, isKey := t.primarys[i]; !isKey {
					continue
				}
			}
			if t.Columns[i].IsUnsigned() {
				d = processValue(d, GetDataTypeBase(t.Columns[i].ColumnType))
			}
			values = append(values, d)
			if d == nil {
				conds = append(conds, fmt.Sprintf(condNull, t.Columns[i].ColumnName))
			} else {
				conds = append(conds, fmt.Sprintf(condEq, t.Columns[i].ColumnName))
			}
		}
		stmt := prefix + strings.Join(conds, " AND")
		r, err := InterpolateParams(stmt, values)
		if err != nil {
			log.Error(err)
		}
		p.write(r, binEvent)
	}
	return nil
}
// processValue maps a negative signed integer decoded from the binlog back
// to the unsigned value it represents (the binlog stores unsigned columns
// as signed two's-complement). Non-negative values, nil, and any other
// dynamic type are returned unchanged. dataType distinguishes mediumint
// (24-bit) from int (32-bit) for the int32 case.
func processValue(value driver.Value, dataType string) driver.Value {
	if value == nil {
		return value
	}
	switch v := value.(type) {
	case int8:
		if v < 0 {
			return int64(1<<8 + int64(v))
		}
	case int16:
		if v < 0 {
			return int64(1<<16 + int64(v))
		}
	case int32:
		if v < 0 {
			// mediumint is stored in 3 bytes, plain int in 4
			if dataType == "mediumint" {
				return int64(1<<24 + int64(v))
			}
			return int64(1<<32 + int64(v))
		}
	case int64:
		if v < 0 {
			return math.MaxUint64 - uint64(abs(v)) + 1
		}
	}
	return value
}

// abs returns the absolute value of n using a branch-free bit trick.
func abs(n int64) int64 {
	y := n >> 63
	return (n ^ y) - y
}
// generateUpdateSQL renders UPDATE statements for an update-rows event and
// emits them via p.write.
//
// e.Rows holds pairs: even indexes are the before-image (used for the WHERE
// clause), odd indexes the after-image (used for the SET list). With
// cfg.MinimalUpdate enabled, unchanged columns are omitted from SET; the
// WHERE clause uses key columns when available, otherwise all columns.
// NULL values are matched with "IS ?" instead of "=?".
func (p *MyBinlogParser) generateUpdateSQL(t *Table, e *replication.RowsEvent,
	binEvent *replication.BinlogEvent) error {
	tableName := getTableName(e)
	template := "UPDATE %s SET%s WHERE"
	setValue := " `%s`=?"
	var columnNames []string
	c_null := " `%s` IS ?"
	c := " `%s`=?"
	var sql string
	var sets []string
	// MinimalUpdate: only changed columns are written to the SET list
	minimalMode := p.cfg.MinimalUpdate
	if !minimalMode {
		for i, col := range t.Columns {
			if i < int(e.ColumnCount) && !col.IsGenerated() {
				sets = append(sets, fmt.Sprintf(setValue, col.ColumnName))
			}
		}
		sql = fmt.Sprintf(template, tableName, strings.Join(sets, ","))
	}
	var (
		oldValues []driver.Value
		newValues []driver.Value
		newSql    string
	)
	// pairs: even index = before-image, odd index = after-image
	for i, rows := range e.Rows {
		if i%2 == 0 {
			// before-image -> WHERE clause
			columnNames = nil
			for j, d := range rows {
				if t.Columns[j].IsGenerated() {
					continue
				}
				if t.hasPrimary {
					if _, ok := t.primarys[j]; ok {
						// FIX: normalize unsigned key columns, matching the
						// non-key path below and generateDeleteSQL /
						// generateUpdateRollbackSQL; previously a
						// negative-decoded unsigned key produced a wrong
						// WHERE value so the UPDATE matched nothing.
						if t.Columns[j].IsUnsigned() {
							d = processValue(d, GetDataTypeBase(t.Columns[j].ColumnType))
						}
						oldValues = append(oldValues, d)
						if d == nil {
							columnNames = append(columnNames,
								fmt.Sprintf(c_null, t.Columns[j].ColumnName))
						} else {
							columnNames = append(columnNames,
								fmt.Sprintf(c, t.Columns[j].ColumnName))
						}
					}
				} else {
					if t.Columns[j].IsUnsigned() {
						d = processValue(d, GetDataTypeBase(t.Columns[j].ColumnType))
					}
					oldValues = append(oldValues, d)
					if d == nil {
						columnNames = append(columnNames,
							fmt.Sprintf(c_null, t.Columns[j].ColumnName))
					} else {
						columnNames = append(columnNames,
							fmt.Sprintf(c, t.Columns[j].ColumnName))
					}
				}
			}
		} else {
			// after-image -> SET values
			for j, d := range rows {
				if t.Columns[j].IsGenerated() {
					continue
				}
				if minimalMode {
					// minimal mode: skip columns whose value did not change
					if !compareValue(d, e.Rows[i-1][j]) {
						if t.Columns[j].IsUnsigned() {
							d = processValue(d, GetDataTypeBase(t.Columns[j].ColumnType))
						}
						newValues = append(newValues, d)
						if j < len(t.Columns) {
							sets = append(sets, fmt.Sprintf(setValue, t.Columns[j].ColumnName))
						}
					}
				} else {
					if t.Columns[j].IsUnsigned() {
						d = processValue(d, GetDataTypeBase(t.Columns[j].ColumnType))
					}
					newValues = append(newValues, d)
				}
			}
			// parameter order: SET values first, then the WHERE values
			newValues = append(newValues, oldValues...)
			if minimalMode {
				// minimal mode rebuilds the SET list per row pair
				sql = fmt.Sprintf(template, tableName,
					strings.Join(sets, ","))
				sets = nil
			}
			newSql = strings.Join([]string{sql, strings.Join(columnNames, " AND")}, "")
			r, err := InterpolateParams(newSql, newValues)
			p.checkError(err)
			p.write(r, binEvent)
			oldValues = nil
			newValues = nil
		}
	}
	return nil
}
// generateUpdateRollbackSQL renders the flashback (rollback) form of an
// update-rows event: for each before/after pair it emits an UPDATE whose SET
// list restores the before-image and whose WHERE clause matches the
// after-image. Emitted via p.write.
//
// e.Rows holds pairs: even indexes are the before-image (restored values),
// odd indexes the after-image (match conditions). With cfg.MinimalUpdate
// enabled, unchanged columns are omitted from the SET list. NULL values are
// matched with "IS ?" instead of "=?".
func (p *MyBinlogParser) generateUpdateRollbackSQL(t *Table, e *replication.RowsEvent,
	binEvent *replication.BinlogEvent) error {
	tableName := getTableName(e)
	template := "UPDATE %s SET%s WHERE"
	setValue := " `%s`=?"
	var columnNames []string
	c_null := " `%s` IS ?"
	c := " `%s`=?"
	var sql string
	var sets []string
	// MinimalUpdate: only changed columns are written to the SET list
	minimalMode := p.cfg.MinimalUpdate
	if !minimalMode {
		for i, col := range t.Columns {
			if i < int(e.ColumnCount) && !col.IsGenerated() {
				sets = append(sets, fmt.Sprintf(setValue, col.ColumnName))
			}
		}
		sql = fmt.Sprintf(template, tableName, strings.Join(sets, ","))
	}
	var (
		oldValues []driver.Value
		newValues []driver.Value
		newSql    string
	)
	// pairs: even index = before-image, odd index = after-image
	for i, rows := range e.Rows {
		if i%2 == 0 {
			// before-image -> values to restore in the SET list
			for j, d := range rows {
				if t.Columns[j].IsGenerated() {
					continue
				}
				if minimalMode {
					// minimal mode: skip columns equal to the after-image
					if !compareValue(d, e.Rows[i+1][j]) {
						if t.Columns[j].IsUnsigned() {
							d = processValue(d, GetDataTypeBase(t.Columns[j].ColumnType))
						}
						newValues = append(newValues, d)
						if j < len(t.Columns) {
							sets = append(sets, fmt.Sprintf(setValue, t.Columns[j].ColumnName))
						}
					}
				} else {
					if t.Columns[j].IsUnsigned() {
						d = processValue(d, GetDataTypeBase(t.Columns[j].ColumnType))
					}
					newValues = append(newValues, d)
				}
			}
		} else {
			// after-image -> WHERE clause (key columns when available)
			// NOTE(review): columnNames is only reset in flashback mode —
			// this function is only invoked when cfg.Flashback is set
			// (see parseSingleEvent), so the guard is effectively always true.
			if p.cfg.Flashback {
				columnNames = nil
			}
			for j, d := range rows {
				if t.hasPrimary {
					if _, ok := t.primarys[j]; ok {
						if t.Columns[j].IsGenerated() {
							continue
						}
						if t.Columns[j].IsUnsigned() {
							d = processValue(d, GetDataTypeBase(t.Columns[j].ColumnType))
						}
						oldValues = append(oldValues, d)
						if d == nil {
							columnNames = append(columnNames,
								fmt.Sprintf(c_null, t.Columns[j].ColumnName))
						} else {
							columnNames = append(columnNames,
								fmt.Sprintf(c, t.Columns[j].ColumnName))
						}
					}
				} else {
					if t.Columns[j].IsGenerated() {
						continue
					}
					if t.Columns[j].IsUnsigned() {
						d = processValue(d, GetDataTypeBase(t.Columns[j].ColumnType))
					}
					oldValues = append(oldValues, d)
					if d == nil {
						columnNames = append(columnNames,
							fmt.Sprintf(c_null, t.Columns[j].ColumnName))
					} else {
						columnNames = append(columnNames,
							fmt.Sprintf(c, t.Columns[j].ColumnName))
					}
				}
			}
			// parameter order: SET values first, then the WHERE values
			newValues = append(newValues, oldValues...)
			if minimalMode {
				sql = fmt.Sprintf(template, tableName, strings.Join(sets, ","))
				sets = nil
			}
			newSql = strings.Join([]string{sql, strings.Join(columnNames, " AND")}, "")
			r, err := InterpolateParams(newSql, newValues)
			p.checkError(err)
			p.write(r, binEvent)
			oldValues = nil
			newValues = nil
		}
	}
	return nil
}
// tableInformation resolves the column metadata for the table referenced by
// a table-map event.
//
// Lookup order:
//  1. the per-tableID cache p.allTables;
//  2. in local mode, the schema-file cache p.tableCacheList, keyed by
//     lower-cased "schema.table" first and by the bare table name second —
//     an unknown table yields (nil, nil), so callers must tolerate a nil
//     *Table;
//  3. otherwise information_schema.columns on the live connection.
//
// When the table has no true primary key, a unique key (if any) is promoted
// to serve as one. The result is memoized in p.allTables.
func (p *MyBinlogParser) tableInformation(tableId uint64, schema []byte, tableName []byte) (*Table, error) {
	table, ok := p.allTables[tableId]
	if ok {
		return table, nil
	}
	if p.localMode && len(p.tableCacheList) > 0 {
		// local (offline) mode: resolve purely from the schema-file cache
		key := strings.ToLower(fmt.Sprintf("%s.%s", string(schema), string(tableName)))
		if table, ok := p.tableCacheList[key]; ok {
			p.allTables[tableId] = table
			return table, nil
		} else {
			// fall back to the schema-less key
			key = strings.ToLower(string(tableName))
			if table, ok := p.tableCacheList[key]; ok {
				p.allTables[tableId] = table
				return table, nil
			}
		}
		// unknown table in local mode: no error, but no metadata either
		return nil, nil
	}
	sql := `SELECT COLUMN_NAME, ifnull(COLLATION_NAME,'') as COLLATION_NAME,
	ifnull(CHARACTER_SET_NAME,'') as CHARACTER_SET_NAME,
	ifnull(COLUMN_COMMENT,'') as COLUMN_COMMENT, COLUMN_TYPE,
	ifnull(COLUMN_KEY,'') as COLUMN_KEY,
	ifnull(EXTRA,'') as EXTRA
	FROM information_schema.columns
	WHERE table_schema = ? and table_name = ?
	ORDER BY ORDINAL_POSITION`
	var columns []Column
	p.db.Raw(sql, string(schema), string(tableName)).Scan(&columns)
	primarys := make(map[int]bool)
	uniques := make(map[int]bool)
	for i, r := range columns {
		if r.ColumnKey == "PRI" {
			primarys[i] = true
		}
		if r.ColumnKey == "UNI" {
			uniques[i] = true
		}
	}
	newTable := new(Table)
	newTable.tableID = tableId
	newTable.Columns = columns
	// prefer the primary key; fall back to a unique key as row identifier
	if len(primarys) > 0 {
		newTable.primarys = primarys
		newTable.hasPrimary = true
	} else if len(uniques) > 0 {
		newTable.primarys = uniques
		newTable.hasPrimary = true
	} else {
		newTable.hasPrimary = false
	}
	p.allTables[tableId] = newTable
	return newTable, nil
}
// mysqlMasterStatus runs `SHOW MASTER STATUS` on the connection and returns
// the scanned result. Execution time is logged through timeTrack.
func (p *MyBinlogParser) mysqlMasterStatus() (*MasterStatus, error) {
	defer timeTrack(time.Now(), "mysqlMasterStatus")
	status := &MasterStatus{}
	p.db.Raw("SHOW MASTER STATUS").Scan(status)
	return status, nil
}
// autoParseBinlogPosition returns the server's binlog file list
// (`SHOW MASTER LOGS`), querying it at most once and caching the
// result in p.binlogs.
func (p *MyBinlogParser) autoParseBinlogPosition() []MasterLog {
	if p.binlogs == nil {
		var index []MasterLog
		p.db.Raw("SHOW MASTER LOGS").Scan(&index)
		p.binlogs = index
	}
	return p.binlogs
}
// InterpolateParams substitutes the "?" placeholders in query with the
// literal SQL rendering of args and returns the resulting statement bytes.
//
// Supported argument types: nil (NULL), the signed integer types, uint64,
// int, decimal.Decimal, float32/float64, bool (1/0), time.Time (quoted
// 'YYYY-MM-DD hh:mm:ss[.uuuuuu]', zero value as '0000-00-00'), string and
// []byte (quoted, backslash-escaped). Any other type is an error.
//
// An error is returned when the placeholder count does not match len(args);
// the error text mirrors go-sql-driver's fast-path skip message.
func InterpolateParams(query string, args []driver.Value) ([]byte, error) {
	// the number of "?" must match the number of provided arguments
	if strings.Count(query, "?") != len(args) {
		log.WithFields(log.Fields{
			"需要参数": strings.Count(query, "?"),
			"提供参数": len(args),
		}).Error("sql的参数个数不匹配")
		return nil, errors.New("driver: skip fast-path; continue as if unimplemented")
	}
	var buf []byte
	argPos := 0
	for i := 0; i < len(query); i++ {
		// copy the literal text up to the next placeholder
		q := strings.IndexByte(query[i:], '?')
		if q == -1 {
			buf = append(buf, query[i:]...)
			break
		}
		buf = append(buf, query[i:i+q]...)
		i += q
		arg := args[argPos]
		argPos++
		if arg == nil {
			buf = append(buf, "NULL"...)
			continue
		}
		switch v := arg.(type) {
		case int8:
			buf = strconv.AppendInt(buf, int64(v), 10)
		case int16:
			buf = strconv.AppendInt(buf, int64(v), 10)
		case int32:
			buf = strconv.AppendInt(buf, int64(v), 10)
		case int64:
			buf = strconv.AppendInt(buf, v, 10)
		case uint64:
			buf = strconv.AppendUint(buf, uint64(v), 10)
		case int:
			buf = strconv.AppendInt(buf, int64(v), 10)
		case decimal.Decimal:
			buf = append(buf, v.String()...)
		case float32:
			buf = strconv.AppendFloat(buf, float64(v), 'g', -1, 32)
		case float64:
			buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
		case bool:
			if v {
				buf = append(buf, '1')
			} else {
				buf = append(buf, '0')
			}
		case time.Time:
			if v.IsZero() {
				buf = append(buf, "'0000-00-00'"...)
			} else {
				v := v.In(time.UTC)
				v = v.Add(time.Nanosecond * 500) // To round under microsecond
				year := v.Year()
				year100 := year / 100
				year1 := year % 100
				month := v.Month()
				day := v.Day()
				hour := v.Hour()
				minute := v.Minute()
				second := v.Second()
				micro := v.Nanosecond() / 1000
				// render 'YYYY-MM-DD hh:mm:ss' via digit lookup tables
				buf = append(buf, []byte{
					'\'',
					digits10[year100], digits01[year100],
					digits10[year1], digits01[year1],
					'-',
					digits10[month], digits01[month],
					'-',
					digits10[day], digits01[day],
					' ',
					digits10[hour], digits01[hour],
					':',
					digits10[minute], digits01[minute],
					':',
					digits10[second], digits01[second],
				}...)
				if micro != 0 {
					// append the fractional '.uuuuuu' part only when non-zero
					micro10000 := micro / 10000
					micro100 := micro / 100 % 100
					micro1 := micro % 100
					buf = append(buf, []byte{
						'.',
						digits10[micro10000], digits01[micro10000],
						digits10[micro100], digits01[micro100],
						digits10[micro1], digits01[micro1],
					}...)
				}
				buf = append(buf, '\'')
			}
		case string:
			buf = append(buf, '\'')
			buf = escapeBytesBackslash(buf, []byte(v))
			buf = append(buf, '\'')
		case []byte:
			if v == nil {
				buf = append(buf, "NULL"...)
			} else {
				buf = append(buf, '\'')
				buf = escapeBytesBackslash(buf, v)
				buf = append(buf, '\'')
			}
		default:
			// unsupported argument type
			log.Printf("%T", v)
			log.Info("解析错误")
			return nil, errors.New("driver: skip fast-path; continue as if unimplemented")
		}
	}
	// defensive: all args must have been consumed
	if argPos != len(args) {
		log.Info("解析错误")
		return nil, errors.New("driver: skip fast-path; continue as if unimplemented")
	}
	return buf, nil
}
// reserveBuffer extends buf by appendSize bytes and returns the extended
// slice. When the capacity is insufficient, a new backing array of size
// len(buf)*2+appendSize is allocated (exponential growth) and the existing
// contents are copied over.
func reserveBuffer(buf []byte, appendSize int) []byte {
	need := len(buf) + appendSize
	if need > cap(buf) {
		grown := make([]byte, len(buf)*2+appendSize)
		copy(grown, buf)
		buf = grown
	}
	return buf[:need]
}

// escapeBytesBackslash appends v to buf with MySQL backslash escaping:
// NUL, newline, carriage return, Ctrl-Z, quotes and backslash become
// two-character escape sequences.
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
func escapeBytesBackslash(buf, v []byte) []byte {
	pos := len(buf)
	buf = reserveBuffer(buf, len(v)*2)
	for _, c := range v {
		var esc byte
		switch c {
		case '\x00':
			esc = '0'
		case '\n':
			esc = 'n'
		case '\r':
			esc = 'r'
		case '\x1a':
			esc = 'Z'
		case '\'':
			esc = '\''
		case '"':
			esc = '"'
		case '\\':
			esc = '\\'
		default:
			// ordinary byte: copy through unchanged
			buf[pos] = c
			pos++
			continue
		}
		buf[pos] = '\\'
		buf[pos+1] = esc
		pos += 2
	}
	return buf[:pos]
}

// escapeStringBackslash is the string-input variant of escapeBytesBackslash.
func escapeStringBackslash(buf []byte, v string) []byte {
	pos := len(buf)
	buf = reserveBuffer(buf, len(v)*2)
	for i := 0; i < len(v); i++ {
		var esc byte
		switch c := v[i]; c {
		case '\x00':
			esc = '0'
		case '\n':
			esc = 'n'
		case '\r':
			esc = 'r'
		case '\x1a':
			esc = 'Z'
		case '\'':
			esc = '\''
		case '"':
			esc = '"'
		case '\\':
			esc = '\\'
		default:
			buf[pos] = c
			pos++
			continue
		}
		buf[pos] = '\\'
		buf[pos+1] = esc
		pos += 2
	}
	return buf[:pos]
}

// escapeBytesQuotes appends v to buf doubling every apostrophe — the
// escaping required when NO_BACKSLASH_ESCAPES is active on the server.
// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
func escapeBytesQuotes(buf, v []byte) []byte {
	pos := len(buf)
	buf = reserveBuffer(buf, len(v)*2)
	for _, c := range v {
		if c != '\'' {
			buf[pos] = c
			pos++
			continue
		}
		buf[pos] = '\''
		buf[pos+1] = '\''
		pos += 2
	}
	return buf[:pos]
}

// escapeStringQuotes is the string-input variant of escapeBytesQuotes.
func escapeStringQuotes(buf []byte, v string) []byte {
	pos := len(buf)
	buf = reserveBuffer(buf, len(v)*2)
	for i := 0; i < len(v); i++ {
		c := v[i]
		if c != '\'' {
			buf[pos] = c
			pos++
			continue
		}
		buf[pos] = '\''
		buf[pos+1] = '\''
		pos += 2
	}
	return buf[:pos]
}
// GetDataTypeBase strips a trailing "(...)" length/precision suffix from a
// MySQL column type, e.g. "int(11)" -> "int". A type starting with "(" (or
// containing no "(") is returned unchanged.
func GetDataTypeBase(dataType string) string {
	if idx := strings.IndexByte(dataType, '('); idx > 0 {
		return dataType[:idx]
	}
	return dataType
}
// readTableSchema loads table definitions from the SQL file at path and
// caches them (via cacheNewTable) for local/offline parsing. Lines are
// accumulated into statements and handed to the TiDB SQL parser; only
// CREATE TABLE statements are cached, everything else is ignored.
//
// quotaIsDouble tracks unbalanced single quotes so that a ";" inside a
// string literal does not terminate a statement prematurely.
//
// NOTE(review): reader.ReadString('\n') keeps the trailing newline, so the
// HasSuffix(line, ";") / HasSuffix(line, ";\r") checks can only match on the
// file's final, newline-less line — in practice the whole file is parsed as
// a single batch at EOF (which the parser handles, since it returns multiple
// statements). Confirm whether per-statement splitting was intended.
func (p *MyBinlogParser) readTableSchema(path string) error {
	fileObj, err := os.Open(path)
	if err != nil {
		return err
	}
	defer fileObj.Close()
	reader := bufio.NewReader(fileObj)
	p.tableCacheList = make(map[string]*Table)
	var buf []string
	quotaIsDouble := true
	parser := tidbParser.New()
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				log.Println(err)
				break
			}
		}
		// toggle on every unbalanced single quote in the line
		if strings.Count(line, "'")%2 == 1 {
			quotaIsDouble = !quotaIsDouble
		}
		if ((strings.HasSuffix(line, ";") || strings.HasSuffix(line, ";\r")) &&
			quotaIsDouble) || err == io.EOF {
			// statement complete (or end of file): parse the accumulated text
			buf = append(buf, line)
			s1 := strings.Join(buf, "\n")
			s1 = strings.TrimRight(s1, ";")
			buf = nil
			stmtNodes, _, err := parser.Parse(s1, "utf8mb4", "utf8mb4_bin")
			if err != nil {
				return fmt.Errorf("解析失败: %v", err)
			}
			for _, stmtNode := range stmtNodes {
				// only CREATE TABLE statements contribute schema information
				switch node := stmtNode.(type) {
				case *ast.CreateTableStmt:
					p.cacheNewTable(buildTableInfo(node))
				}
			}
		} else {
			buf = append(buf, line)
		}
		if err == io.EOF {
			break
		}
	}
	return nil
}
// cacheNewTable registers a parsed table definition in the local schema
// cache. The cache key is the lower-cased "schema.table" when a schema is
// present, otherwise just the table name; the table is also recorded in
// OnlyTables so the schema filter admits it.
func (p *MyBinlogParser) cacheNewTable(t *Table) {
	key := t.TableName
	if t.Schema != "" {
		key = fmt.Sprintf("%s.%s", t.Schema, t.TableName)
	}
	p.tableCacheList[strings.ToLower(key)] = t
	p.OnlyTables[strings.ToLower(t.TableName)] = t.Schema
}
// buildTableInfo converts a parsed CREATE TABLE statement into a *Table.
//
// Table-level PRIMARY KEY / UNIQUE constraints are pushed down onto the
// matching column definitions by OR-ing flag bits into field.Tp.Flag —
// this mutates the AST in place — so that buildNewColumnToCache can derive
// each column's ColumnKey uniformly from the flags. Finally
// configPrimaryKey computes the table's row-identifier column set.
func buildTableInfo(node *ast.CreateTableStmt) *Table {
	table := &Table{
		Schema:    node.Table.Schema.String(),
		TableName: node.Table.Name.String(),
	}
	for _, ct := range node.Constraints {
		switch ct.Tp {
		// mark primary-key columns on their column definitions
		case ast.ConstraintPrimaryKey:
			for _, col := range ct.Keys {
				for _, field := range node.Cols {
					if field.Name.Name.L == col.Column.Name.L {
						field.Tp.Flag |= tidb.PriKeyFlag
						break
					}
				}
			}
		// mark unique-key columns likewise
		case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey:
			for _, col := range ct.Keys {
				for _, field := range node.Cols {
					if field.Name.Name.L == col.Column.Name.L {
						field.Tp.Flag |= tidb.UniqueKeyFlag
						break
					}
				}
			}
		}
	}
	table.Columns = make([]Column, 0, len(node.Cols))
	for _, field := range node.Cols {
		table.Columns = append(table.Columns, *buildNewColumnToCache(table, field))
	}
	table.configPrimaryKey()
	return table
}
// buildNewColumnToCache converts an AST column definition into a Column.
// Column-level options set the key kind, comment and generated-column Extra
// text; afterwards the flag bits planted by buildTableInfo (table-level
// constraints) are consulted so a column covered only by a table-level
// PRIMARY KEY / UNIQUE constraint still gets its ColumnKey.
func buildNewColumnToCache(t *Table, field *ast.ColumnDef) *Column {
	col := &Column{
		ColumnName: field.Name.Name.String(),
		ColumnType: field.Tp.InfoSchemaStr(),
	}
	for _, opt := range field.Options {
		switch opt.Tp {
		case ast.ColumnOptionComment:
			col.ColumnComment = opt.Expr.GetDatum().GetString()
		case ast.ColumnOptionPrimaryKey:
			col.ColumnKey = "PRI"
		case ast.ColumnOptionUniqKey:
			col.ColumnKey = "UNI"
		case ast.ColumnOptionGenerated:
			// generated columns are skipped during SQL generation
			if opt.Stored {
				col.Extra += " STORED GENERATED"
			} else {
				col.Extra += " VIRTUAL GENERATED"
			}
		}
	}
	if col.ColumnKey != "PRI" && tidb.HasPriKeyFlag(field.Tp.Flag) {
		col.ColumnKey = "PRI"
	} else if tidb.HasUniKeyFlag(field.Tp.Flag) {
		col.ColumnKey = "UNI"
	}
	return col
}
// compareValue reports whether two row values decoded from the binlog are
// equal. nil equals only nil; []byte values are compared byte-wise;
// decimal.Decimal values numerically; everything else via interface
// equality.
func compareValue(v1 interface{}, v2 interface{}) bool {
	if v1 == nil && v2 == nil {
		return true
	}
	if v1 == nil || v2 == nil {
		return false
	}
	switch v := v1.(type) {
	case []byte:
		// FIX: the previous bare v2.([]byte) assertion panicked when the
		// two values had different dynamic types; treat a type mismatch
		// as "not equal" instead.
		if b2, ok := v2.([]byte); ok {
			return byteEquals(v, b2)
		}
		return false
	case decimal.Decimal:
		if d2, ok := v2.(decimal.Decimal); ok {
			return v.Equal(d2)
		}
		return false
	default:
		return v1 == v2
	}
}
// configPrimaryKey computes the table's row-identifier column set from the
// per-column ColumnKey values: primary-key columns are preferred, unique-key
// columns are the fallback, otherwise the table has no usable identifier.
func (t *Table) configPrimaryKey() {
	primaryCols := make(map[int]bool)
	uniqueCols := make(map[int]bool)
	for i, col := range t.Columns {
		switch col.ColumnKey {
		case "PRI":
			primaryCols[i] = true
		case "UNI":
			uniqueCols[i] = true
		}
	}
	switch {
	case len(primaryCols) > 0:
		t.primarys = primaryCols
		t.hasPrimary = true
	case len(uniqueCols) > 0:
		// no true primary key: promote the unique key
		t.primarys = uniqueCols
		t.hasPrimary = true
	default:
		t.hasPrimary = false
	}
}
// parseSingleEvent processes one binlog event: it maintains the current
// position/timestamp, applies the time / position / GTID / thread filters,
// and dispatches row events to the SQL generators (or their flashback
// counterparts). It returns ok=false when parsing should stop (end of
// range, row limit reached, or manual stop) and a non-nil err on failure.
func (p *MyBinlogParser) parseSingleEvent(e *replication.BinlogEvent) (ok bool, err error) {
	// whether to continue parsing; defaults to true
	ok = true
	finishFlag := -1
	if e.Header.LogPos > 0 {
		p.currentPosition.Pos = e.Header.LogPos
	}
	if e.Header.EventType == replication.ROTATE_EVENT {
		// rotate event: switch the current position to the next binlog file
		if event, ok := e.Event.(*replication.RotateEvent); ok {
			p.currentPosition = mysql.Position{
				Name: string(event.NextLogName),
				Pos:  uint32(event.Position)}
		}
	}
	if !p.cfg.StopNever {
		if e.Header.Timestamp > 0 {
			// skip events before the start time; stop past the stop time
			if p.startTimestamp > 0 && e.Header.Timestamp < p.startTimestamp {
				return
			}
			if p.stopTimestamp > 0 && e.Header.Timestamp > p.stopTimestamp {
				log.Warn("已超出结束时间")
				return false, nil
			}
		}
		p.currentTimtstamp = e.Header.Timestamp
		finishFlag = p.checkFinish(&p.currentPosition)
		if finishFlag == 1 {
			log.Warn("is finish")
			return false, nil
		}
	}
	// when a GTID set is requested, only events inside it need parsing
	if len(p.includeGtids) > 0 {
		switch e.Header.EventType {
		case replication.TABLE_MAP_EVENT, replication.QUERY_EVENT,
			replication.WRITE_ROWS_EVENTv1, replication.WRITE_ROWS_EVENTv2,
			replication.DELETE_ROWS_EVENTv1, replication.DELETE_ROWS_EVENTv2,
			replication.UPDATE_ROWS_EVENTv1, replication.UPDATE_ROWS_EVENTv2:
			// 1 = outside the set (skip), 2 = past the set (stop unless StopNever)
			status := p.isGtidEventInGtidSet()
			if status == 1 {
				return
			} else if status == 2 {
				log.Info("已超出GTID范围,自动结束")
				if !p.cfg.StopNever {
					return false, nil
				}
			}
		}
	}
	switch event := e.Event.(type) {
	case *replication.GTIDEvent:
		if len(p.includeGtids) > 0 {
			p.gtidEvent = event
		}
		// remember the current transaction's GTID as "uuid:gno"
		u, _ := uuid.FromBytes(event.SID)
		p.gtid = append([]byte(u.String()), []byte(fmt.Sprintf(":%d", event.GNO))...)
	case *replication.TableMapEvent:
		if !p.schemaFilter(event) {
			return
		}
		// warm the table metadata cache for the upcoming rows events
		_, err = p.tableInformation(event.TableID, event.Schema, event.Table)
		if err != nil {
			return false, err
		}
	case *replication.QueryEvent:
		if p.cfg.ThreadID > 0 || p.cfg.ShowThread {
			// NOTE(review): with ShowThread set but ThreadID zero, this
			// compares 0 != currentThreadID and returns — confirm intended.
			p.currentThreadID = event.SlaveProxyID
			if p.cfg.ThreadID != p.currentThreadID {
				return
			}
		}
		// DDL is skipped when flashing back or when DDL parsing is disabled
		if p.cfg.Flashback || !p.cfg.ParseDDL {
			return
		}
		if string(event.Query) != "BEGIN" && string(event.Query) != "COMMIT" {
			if len(event.Schema) > 0 {
				// prefix the DDL with a USE so it runs in the right schema
				p.write(append([]byte(fmt.Sprintf("USE `%s`;\n", event.Schema)), event.Query...), e)
			}
		} else {
			// BEGIN / COMMIT markers carry no DDL; nothing to emit
		}
	case *replication.RowsEvent:
		if !p.schemaFilter(event.Table) {
			return
		}
		if p.cfg.ThreadID > 0 && p.cfg.ThreadID != p.currentThreadID {
			return
		}
		table, err := p.tableInformation(event.TableID, event.Table.Schema, event.Table.Table)
		if err != nil {
			return false, err
		}
		// dispatch by DML kind; in flashback mode the inverse statement is
		// generated (insert<->delete, update reversed)
		switch e.Header.EventType {
		case replication.WRITE_ROWS_EVENTv1, replication.WRITE_ROWS_EVENTv2:
			if _, ok := p.SqlTypes["insert"]; ok && p.checkCanParse(table, event) {
				if p.cfg.Flashback {
					err = p.generateDeleteSQL(table, event, e)
				} else {
					err = p.generateInsertSQL(table, event, e)
				}
				p.changeRows = p.changeRows + len(event.Rows)
			}
		case replication.DELETE_ROWS_EVENTv1, replication.DELETE_ROWS_EVENTv2:
			if _, ok := p.SqlTypes["delete"]; ok && p.checkCanParse(table, event) {
				if p.cfg.Flashback {
					err = p.generateInsertSQL(table, event, e)
				} else {
					err = p.generateDeleteSQL(table, event, e)
				}
				p.changeRows = p.changeRows + len(event.Rows)
			}
		case replication.UPDATE_ROWS_EVENTv1, replication.UPDATE_ROWS_EVENTv2:
			if _, ok := p.SqlTypes["update"]; ok && p.checkCanParse(table, event) {
				if p.cfg.Flashback {
					err = p.generateUpdateRollbackSQL(table, event, e)
				} else {
					err = p.generateUpdateSQL(table, event, e)
				}
				// update events carry before+after pairs, hence the /2
				p.changeRows = p.changeRows + len(event.Rows)/2
			}
		}
		if err != nil {
			return false, err
		}
	}
	if p.cfg.MaxRows > 0 && p.changeRows >= p.cfg.MaxRows {
		log.Info("已超出最大行数")
		return false, nil
	}
	// re-check the stop time so the loop cannot hang at the boundary when
	// no new events arrive
	if e.Header.Timestamp > 0 {
		if p.stopTimestamp > 0 && e.Header.Timestamp > p.stopTimestamp {
			log.Warn("已超出结束时间")
			return false, nil
		}
	}
	if !p.running {
		log.Warn("进程手动中止")
		return false, nil
	}
	if finishFlag > -1 {
		return false, nil
	}
	return
}
// Config returns the parser's configuration.
// (The original comment was a copy-paste of ParseRows's.)
func (p *MyBinlogParser) Config() *BinlogParserConfig {
	return p.cfg
}
// ParseRows returns the number of rows parsed so far.
func (p *MyBinlogParser) ParseRows() int {
	return p.changeRows
}
// Percent reports the parsing progress as an integer percentage (0-100).
//
// Progress source, in priority order:
//  1. a stopped parser always reports 100;
//  2. explicit start/stop binlog positions: delegated to ComputePercent;
//  3. a time range: proportion of the elapsed timestamp window;
//  4. a MaxRows limit: proportion of parsed rows;
//  5. otherwise 0 (no way to estimate).
func (p *MyBinlogParser) Percent() int {
	if !p.running {
		return 100
	}
	if p.cfg.StartFile != "" {
		start := mysql.Position{
			Name: p.cfg.StartFile,
			Pos:  uint32(p.cfg.StartPosition),
		}
		stop := mysql.Position{
			Name: p.cfg.StopFile,
			Pos:  uint32(p.cfg.StopPosition),
		}
		return ComputePercent(start, stop, p.currentPosition,
			*p.masterStatus, p.autoParseBinlogPosition())
	}
	if p.stopTimestamp > 0 {
		if p.currentTimtstamp < p.startTimestamp || p.stopTimestamp == p.startTimestamp {
			return 0
		}
		return int((p.currentTimtstamp - p.startTimestamp) * 100 / (p.stopTimestamp - p.startTimestamp))
	}
	if p.cfg.MaxRows > 0 {
		if p.changeRows < p.cfg.MaxRows {
			// FIX: multiply before dividing — the original
			// "changeRows / MaxRows * 100" used integer division and
			// always truncated to 0 until the limit was reached.
			return p.changeRows * 100 / p.cfg.MaxRows
		}
		return 100
	}
	return 0
}
// Archive compresses the output file into "<base>.tar.gz" (the base name is
// the output file name with its last extension stripped), deletes the
// uncompressed file via p.clear(), and returns the archive size in bytes.
func (p *MyBinlogParser) Archive() (fileSize int64, err error) {
	var url string
	if strings.Count(p.outputFileName, ".") > 0 {
		// strip the last extension: "a.b.sql" -> "a.b"
		a := strings.Split(p.outputFileName, ".")
		url = strings.Join(a[0:len(a)-1], ".")
	} else {
		url = p.outputFileName
	}
	url = url + ".tar.gz"
	err = archiver.Archive([]string{p.outputFileName}, url)
	if err != nil {
		return 0, err
	}
	// FIX: the os.Stat error was previously ignored, which nil-dereferenced
	// fileInfo below whenever the stat failed.
	fileInfo, err := os.Stat(url)
	if err != nil {
		return 0, err
	}
	// archive size in bytes
	fileSize = fileInfo.Size()
	// remove the uncompressed file once the archive exists
	p.clear()
	log.Info("打包完成")
	return fileSize, nil
}
// getTableName returns the fully-qualified, backtick-quoted table name of a
// rows event, e.g. "`db`.`tbl`".
func getTableName(e *replication.RowsEvent) string {
	return fmt.Sprintf("`%s`.`%s`", e.Table.Schema, e.Table.Table)
}
// ComputePercent estimates parsing progress (0-100) from binlog positions.
//
// When start and stop lie in the same file (and stop.Pos is set) the
// percentage is computed directly from the offsets; otherwise the sizes of
// all files in [start.Name, stopFile] (taken from `SHOW MASTER LOGS`) are
// summed and the bytes parsed so far are measured against that total. A
// missing stop position falls back to the master's current file/position.
func ComputePercent(start, stop, currnet mysql.Position,
	masterStatus MasterStatus, binlogs []MasterLog) int {
	if start.Name == stop.Name && stop.Pos > 0 {
		// FIX: guard the single-file fast path — the original divided by
		// (stop.Pos - start.Pos) unconditionally (division by zero when
		// equal) and underflowed the unsigned subtraction when the current
		// position was before the start.
		if stop.Pos <= start.Pos {
			if currnet.Pos >= stop.Pos {
				return 100
			}
			return 0
		}
		if currnet.Pos <= start.Pos {
			return 0
		}
		if currnet.Pos >= stop.Pos {
			return 100
		}
		return int((currnet.Pos - start.Pos) * 100 / (stop.Pos - start.Pos))
	}
	stopFile := stop.Name
	stopPosition := int(stop.Pos)
	if stopFile == "" {
		// no explicit stop: measure up to the master's current position
		stopFile = masterStatus.File
		stopPosition = masterStatus.Position
	}
	total := 0
	parsed := 0
	for _, binlog := range binlogs {
		// only files inside [start.Name, stopFile] contribute
		if binlog.Name < start.Name {
			continue
		}
		if binlog.Name > stopFile {
			break
		}
		// accumulate the total byte span to be parsed
		if binlog.Name == start.Name && binlog.Size > int(start.Pos) {
			// first file: only the part after the start offset counts
			total += binlog.Size - int(start.Pos)
		} else if binlog.Name == stopFile {
			// last file: up to the stop offset (or the whole file)
			if stopPosition > 0 {
				total += stopPosition
			} else {
				total += binlog.Size
			}
		} else if binlog.Name < stopFile {
			total += binlog.Size
		}
		// accumulate the bytes already parsed
		if currnet.Name == binlog.Name {
			if binlog.Name == start.Name {
				if currnet.Pos > start.Pos {
					parsed += int(currnet.Pos - start.Pos)
				}
			} else {
				parsed += int(currnet.Pos)
			}
		} else if binlog.Name < currnet.Name {
			if binlog.Name == start.Name {
				if binlog.Size > int(start.Pos) {
					parsed += binlog.Size - int(start.Pos)
				}
			} else {
				parsed += binlog.Size
			}
		}
	}
	pecent := 0
	if total > 0 {
		pecent = parsed * 100 / total
	}
	if parsed >= total {
		pecent = 100
	}
	return pecent
}
================================================
FILE: core/parserV2.go
================================================
package core
import (
"bytes"
"database/sql/driver"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/go-mysql-org/go-mysql/replication"
"github.com/shopspring/decimal"
log "github.com/sirupsen/logrus"
)
// generateInsertSQL_2 is the buffer-based variant of generateInsertSQL: it
// renders INSERT statements directly into a bytes.Buffer (one statement per
// row, or a single multi-row VALUES list with cfg.MinimalInsert) and emits
// them via p.write. When the table has a primary/unique key and
// cfg.RemovePrimary is set, key columns are omitted.
//
// NOTE(review): unlike generateInsertSQL, generated columns are NOT skipped
// here — confirm whether that difference is intentional.
func (p *MyBinlogParser) generateInsertSQL_2(t *Table, e *replication.RowsEvent,
	binEvent *replication.BinlogEvent) error {
	if len(t.Columns) < int(e.ColumnCount) {
		return fmt.Errorf("表%s.%s缺少列!当前列数:%d,binlog的列数%d",
			e.Table.Schema, e.Table.Table, len(t.Columns), e.ColumnCount)
	}
	var columnNames []string
	c := "`%s`"
	buf := &bytes.Buffer{}
	buf.WriteString("INSERT ")
	buf.WriteString("INTO ")
	buf.WriteString("`")
	buf.Write(e.Table.Schema)
	buf.WriteString("`.`")
	buf.Write(e.Table.Table)
	buf.WriteString("`")
	buf.WriteString("(")
	// FIX: removed a dead `strings.Join(columnNames, ",")` call here whose
	// result was discarded (columnNames was still empty at that point).
	for i, col := range t.Columns {
		if i < int(e.ColumnCount) {
			// with a key present and RemovePrimary on, key columns are dropped
			if t.hasPrimary && p.cfg.RemovePrimary {
				if _, ok := t.primarys[i]; !ok {
					columnNames = append(columnNames, fmt.Sprintf(c, col.ColumnName))
				}
			} else {
				columnNames = append(columnNames, fmt.Sprintf(c, col.ColumnName))
			}
		}
	}
	buf.WriteString(strings.Join(columnNames, ","))
	buf.WriteString(") VALUES")
	var insertSQL string
	if !p.Config().MinimalInsert {
		// per-row layout: remember the statement head for reuse on later rows
		insertSQL = buf.String()
	}
	for rowIndex, rows := range e.Rows {
		if rowIndex > 0 {
			if p.Config().MinimalInsert {
				buf.WriteString(",")
			} else {
				buf.WriteString(insertSQL)
			}
		}
		buf.WriteString("(")
		written := 0
		for colIndex, d := range rows {
			if t.hasPrimary && p.cfg.RemovePrimary {
				if _, ok := t.primarys[colIndex]; ok {
					continue
				}
			}
			if t.Columns[colIndex].IsUnsigned() {
				d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
			}
			v, err := valueSerialize(d)
			if err != nil {
				log.Error(err)
				return err
			}
			// FIX: the comma separator was keyed on colIndex > 0, which
			// produced "(,v..." whenever RemovePrimary skipped the leading
			// key column(s); count emitted values instead.
			if written > 0 {
				buf.WriteByte(',')
			}
			written++
			buf.Write(v)
		}
		buf.WriteString(")")
		if !p.cfg.MinimalInsert {
			// flush one statement per row
			p.write(buf.Bytes(), binEvent)
			buf = &bytes.Buffer{}
		}
	}
	if p.cfg.MinimalInsert {
		// flush the single multi-row statement
		p.write(buf.Bytes(), binEvent)
		buf.Reset()
	}
	return nil
}
// generateDeleteSQL_2 is the buffer-based variant of generateDeleteSQL: it
// renders one DELETE statement per row directly into a bytes.Buffer and
// emits each via p.write. When the table has a primary/unique key, only key
// columns appear in the WHERE clause; NULL values are matched with
// "IS NULL".
func (p *MyBinlogParser) generateDeleteSQL_2(t *Table, e *replication.RowsEvent,
	binEvent *replication.BinlogEvent) error {
	if len(t.Columns) < int(e.ColumnCount) {
		return fmt.Errorf("表%s.%s缺少列!当前列数:%d,binlog的列数%d",
			e.Table.Schema, e.Table.Table, len(t.Columns), e.ColumnCount)
	}
	for _, row := range e.Rows {
		buf := &bytes.Buffer{}
		buf.WriteString("DELETE FROM ")
		buf.WriteString("`")
		buf.Write(e.Table.Schema)
		buf.WriteString("`.`")
		buf.Write(e.Table.Table)
		buf.WriteString("` WHERE ")
		first := true
		for colIndex, d := range row {
			// with a key available, restrict the WHERE clause to key columns
			if t.hasPrimary {
				if _, isKey := t.primarys[colIndex]; !isKey {
					continue
				}
			}
			if !first {
				buf.WriteString(" AND ")
			}
			first = false
			if t.Columns[colIndex].IsUnsigned() {
				d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
			}
			if d == nil {
				buf.WriteString("`")
				buf.WriteString(t.Columns[colIndex].ColumnName)
				buf.WriteString("` IS NULL")
			} else {
				v, err := valueSerialize(d)
				if err != nil {
					log.Error(err)
				}
				buf.WriteString("`")
				buf.WriteString(t.Columns[colIndex].ColumnName)
				buf.WriteString("`=")
				buf.Write(v)
			}
		}
		p.write(buf.Bytes(), binEvent)
	}
	return nil
}
// generateUpdateSQL_2 builds UPDATE statements from an update rows event.
// For update events e.Rows holds pairs: even indexes carry the before-image
// (used for the WHERE clause), odd indexes the after-image (used for SET).
// Returns an error when the cached table has fewer columns than the event.
func (p *MyBinlogParser) generateUpdateSQL_2(t *Table, e *replication.RowsEvent,
	binEvent *replication.BinlogEvent) error {
	if len(t.Columns) < int(e.ColumnCount) {
		return fmt.Errorf("表%s.%s缺少列!当前列数:%d,binlog的列数%d",
			e.Table.Schema, e.Table.Table, len(t.Columns), e.ColumnCount)
	}

	// Minimal mode: columns whose value did not change are omitted from SET.
	minimalMode := p.cfg.MinimalUpdate

	buf := &bytes.Buffer{}
	bufWhere := &bytes.Buffer{}
	for rowIndex, row := range e.Rows {
		if rowIndex%2 == 0 {
			// Before-image: build the WHERE clause. When the table has a
			// primary key, match on primary-key columns only.
			for colIndex, d := range row {
				if t.hasPrimary {
					if _, ok := t.primarys[colIndex]; ok {
						if bufWhere.Len() > 0 {
							bufWhere.WriteString(" AND ")
						}
						// BUGFIX: unsigned columns must be normalized before
						// serialization, as is done in the non-primary branch
						// below and in generateUpdateRollbackSQL_2; the
						// original skipped this for primary-key columns.
						if t.Columns[colIndex].IsUnsigned() {
							d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
						}
						if d == nil {
							bufWhere.WriteString("`")
							bufWhere.WriteString(t.Columns[colIndex].ColumnName)
							bufWhere.WriteString("` IS NULL")
						} else {
							bufWhere.WriteString("`")
							bufWhere.WriteString(t.Columns[colIndex].ColumnName)
							bufWhere.WriteString("`=")
							v, err := valueSerialize(d)
							if err != nil {
								log.Error(err)
							}
							bufWhere.Write(v)
						}
					}
				} else {
					// No primary key: match on every column.
					if colIndex > 0 {
						bufWhere.WriteString(" AND ")
					}
					if t.Columns[colIndex].IsUnsigned() {
						d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
					}
					if d == nil {
						bufWhere.WriteString("`")
						bufWhere.WriteString(t.Columns[colIndex].ColumnName)
						bufWhere.WriteString("` IS NULL")
					} else {
						v, err := valueSerialize(d)
						if err != nil {
							log.Error(err)
						}
						bufWhere.WriteString("`")
						bufWhere.WriteString(t.Columns[colIndex].ColumnName)
						bufWhere.WriteString("`=")
						bufWhere.Write(v)
					}
				}
			}
		} else {
			// After-image: build the SET clause and emit the statement.
			initFirst := false
			buf.WriteString("UPDATE ")
			buf.WriteString("`")
			buf.Write(e.Table.Schema)
			buf.WriteString("`.`")
			buf.Write(e.Table.Table)
			buf.WriteString("` SET ")
			for colIndex, d := range row {
				if minimalMode {
					// Minimal mode: skip columns equal to the before-image.
					if !compareValue(d, e.Rows[rowIndex-1][colIndex]) {
						if initFirst {
							buf.WriteByte(',')
						}
						initFirst = true
						if t.Columns[colIndex].IsUnsigned() {
							d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
						}
						v, err := valueSerialize(d)
						if err != nil {
							log.Error(err)
						}
						buf.WriteString("`")
						buf.WriteString(t.Columns[colIndex].ColumnName)
						buf.WriteString("`=")
						buf.Write(v)
					}
				} else {
					if colIndex > 0 {
						buf.WriteByte(',')
					}
					if t.Columns[colIndex].IsUnsigned() {
						d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
					}
					v, err := valueSerialize(d)
					if err != nil {
						log.Error(err)
					}
					buf.WriteString("`")
					buf.WriteString(t.Columns[colIndex].ColumnName)
					buf.WriteString("`=")
					buf.Write(v)
				}
			}
			buf.WriteString(" WHERE ")
			buf.Write(bufWhere.Bytes())
			p.write(buf.Bytes(), binEvent)
			buf = &bytes.Buffer{}
			bufWhere = &bytes.Buffer{}
		}
	}
	return nil
}
// generateUpdateRollbackSQL_2 builds the rollback statements for an update
// rows event: an UPDATE whose SET clause restores the before-image and whose
// WHERE clause matches the after-image.
// Returns an error when the cached table has fewer columns than the event.
func (p *MyBinlogParser) generateUpdateRollbackSQL_2(t *Table, e *replication.RowsEvent,
	binEvent *replication.BinlogEvent) error {
	if len(t.Columns) < int(e.ColumnCount) {
		return fmt.Errorf("表%s.%s缺少列!当前列数:%d,binlog的列数%d",
			e.Table.Schema, e.Table.Table, len(t.Columns), e.ColumnCount)
	}
	// Minimal rollback: when enabled, columns whose value did not change are
	// omitted from the rollback UPDATE's SET clause.
	minimalMode := p.cfg.MinimalUpdate
	// For update events e.Rows holds pairs: even index = before-image (old
	// values), odd index = after-image (new values).
	buf := &bytes.Buffer{}
	bufWhere := &bytes.Buffer{}
	for rowIndex, rows := range e.Rows {
		// Before-image: becomes the SET clause of the rollback statement.
		if rowIndex%2 == 0 {
			buf.WriteString("UPDATE ")
			buf.WriteString("`")
			buf.Write(e.Table.Schema)
			buf.WriteString("`.`")
			buf.Write(e.Table.Table)
			buf.WriteString("` SET ")
			initFirst := false
			for colIndex, d := range rows {
				if minimalMode {
					// Minimal mode: skip columns equal to the after-image.
					if !compareValue(d, e.Rows[rowIndex+1][colIndex]) {
						if initFirst {
							buf.WriteString(",")
						}
						initFirst = true
						if t.Columns[colIndex].IsUnsigned() {
							d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
						}
						v, err := valueSerialize(d)
						if err != nil {
							log.Error(err)
						}
						buf.WriteString("`")
						buf.WriteString(t.Columns[colIndex].ColumnName)
						buf.WriteString("`=")
						buf.Write(v)
					}
				} else {
					if colIndex > 0 {
						buf.WriteString(",")
					}
					if t.Columns[colIndex].IsUnsigned() {
						d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
					}
					v, err := valueSerialize(d)
					if err != nil {
						log.Error(err)
					}
					buf.WriteString("`")
					buf.WriteString(t.Columns[colIndex].ColumnName)
					buf.WriteString("`=")
					buf.Write(v)
				}
			}
		} else { // After-image: becomes the WHERE clause; emit the statement.
			for colIndex, d := range rows {
				if t.hasPrimary {
					// With a primary key, match only on primary-key columns.
					_, ok := t.primarys[colIndex]
					if ok {
						if t.Columns[colIndex].IsUnsigned() {
							d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
						}
						if bufWhere.Len() > 0 {
							bufWhere.WriteString(" AND ")
						}
						if d == nil {
							bufWhere.WriteString("`")
							bufWhere.WriteString(t.Columns[colIndex].ColumnName)
							bufWhere.WriteString("` IS NULL")
						} else {
							bufWhere.WriteString("`")
							bufWhere.WriteString(t.Columns[colIndex].ColumnName)
							bufWhere.WriteString("`=")
							v, err := valueSerialize(d)
							if err != nil {
								log.Error(err)
							}
							bufWhere.Write(v)
						}
					}
				} else {
					// No primary key: match on every column.
					if t.Columns[colIndex].IsUnsigned() {
						d = processValue(d, GetDataTypeBase(t.Columns[colIndex].ColumnType))
					}
					if bufWhere.Len() > 0 {
						bufWhere.WriteString(" AND ")
					}
					if d == nil {
						bufWhere.WriteString("`")
						bufWhere.WriteString(t.Columns[colIndex].ColumnName)
						bufWhere.WriteString("` IS NULL")
					} else {
						bufWhere.WriteString("`")
						bufWhere.WriteString(t.Columns[colIndex].ColumnName)
						bufWhere.WriteString("`=")
						v, err := valueSerialize(d)
						if err != nil {
							log.Error(err)
						}
						bufWhere.Write(v)
					}
				}
			}
			buf.WriteString(" WHERE ")
			buf.Write(bufWhere.Bytes())
			p.write(buf.Bytes(), binEvent)
			buf = &bytes.Buffer{}
			bufWhere = &bytes.Buffer{}
		}
	}
	return nil
}
// valueSerialize converts a single row value into the bytes of its SQL
// literal representation (the form that can be embedded in a statement).
//
// nil serializes as NULL. Supported types: signed and unsigned integers,
// decimal.Decimal, floats, bool (0/1), time.Time (quoted, UTC, microsecond
// precision), string and []byte (quoted and backslash-escaped). Any other
// type — or a serialized value exceeding 4MB — returns the driver's
// "skip fast-path" sentinel error.
func valueSerialize(arg driver.Value) (buf []byte, err error) {
	if arg == nil {
		buf = append(buf, "NULL"...)
		return buf, nil
	}

	switch v := arg.(type) {
	case int8:
		buf = strconv.AppendInt(buf, int64(v), 10)
	case int16:
		buf = strconv.AppendInt(buf, int64(v), 10)
	case int32:
		buf = strconv.AppendInt(buf, int64(v), 10)
	case int64:
		buf = strconv.AppendInt(buf, v, 10)
	case int:
		buf = strconv.AppendInt(buf, int64(v), 10)
	// BUGFIX: the original only handled uint64; the smaller unsigned kinds
	// (and uint) fell through to the error default.
	case uint8:
		buf = strconv.AppendUint(buf, uint64(v), 10)
	case uint16:
		buf = strconv.AppendUint(buf, uint64(v), 10)
	case uint32:
		buf = strconv.AppendUint(buf, uint64(v), 10)
	case uint64:
		buf = strconv.AppendUint(buf, v, 10)
	case uint:
		buf = strconv.AppendUint(buf, uint64(v), 10)
	case decimal.Decimal:
		buf = append(buf, v.String()...)
	case float32:
		buf = strconv.AppendFloat(buf, float64(v), 'g', -1, 32)
	case float64:
		buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
	case bool:
		if v {
			buf = append(buf, '1')
		} else {
			buf = append(buf, '0')
		}
	case time.Time:
		if v.IsZero() {
			buf = append(buf, "'0000-00-00'"...)
		} else {
			v := v.In(time.UTC)
			v = v.Add(time.Nanosecond * 500) // round to the nearest microsecond
			year := v.Year()
			year100 := year / 100
			year1 := year % 100
			month := v.Month()
			day := v.Day()
			hour := v.Hour()
			minute := v.Minute()
			second := v.Second()
			micro := v.Nanosecond() / 1000
			buf = append(buf, []byte{
				'\'',
				digits10[year100], digits01[year100],
				digits10[year1], digits01[year1],
				'-',
				digits10[month], digits01[month],
				'-',
				digits10[day], digits01[day],
				' ',
				digits10[hour], digits01[hour],
				':',
				digits10[minute], digits01[minute],
				':',
				digits10[second], digits01[second],
			}...)
			// Append the fractional part only when non-zero.
			if micro != 0 {
				micro10000 := micro / 10000
				micro100 := micro / 100 % 100
				micro1 := micro % 100
				buf = append(buf, []byte{
					'.',
					digits10[micro10000], digits01[micro10000],
					digits10[micro100], digits01[micro100],
					digits10[micro1], digits01[micro1],
				}...)
			}
			buf = append(buf, '\'')
		}
	case string:
		buf = append(buf, '\'')
		buf = escapeBytesBackslash(buf, []byte(v))
		buf = append(buf, '\'')
	case []byte:
		if v == nil {
			buf = append(buf, "NULL"...)
		} else {
			buf = append(buf, '\'')
			buf = escapeBytesBackslash(buf, v)
			buf = append(buf, '\'')
		}
	default:
		// Unknown type: log and report the sentinel error.
		log.Printf("%T", v)
		log.Info("解析错误")
		return nil, errors.New("driver: skip fast-path; continue as if unimplemented")
	}

	// Reject values larger than 4MB (4 << 20).
	if len(buf)+4 > 4<<20 {
		log.Info("解析错误")
		return nil, errors.New("driver: skip fast-path; continue as if unimplemented")
	}
	return buf, nil
}
================================================
FILE: core/parser_stats.go
================================================
package core
import (
"bufio"
"context"
"errors"
"fmt"
"os"
"sort"
"strings"
"time"
"github.com/go-mysql-org/go-mysql/mysql"
"github.com/go-mysql-org/go-mysql/replication"
"github.com/jinzhu/now"
log "github.com/sirupsen/logrus"
)
// SummaryStats aggregates DML counters over the parsed binlog range.
type SummaryStats struct {
	Inserts int `json:"insert"` // total inserted rows
	Updates int `json:"update"` // total updated rows (one per before/after pair)
	Deletes int `json:"delete"` // total deleted rows
	// Total counts raw row images, so update events contribute both the
	// before- and after-image (i.e. Total is not Inserts+Updates+Deletes).
	Total int `json:"total"`
	// First and last event timestamps observed while parsing.
	StartTime time.Time `json:"start_time"`
	EndTime   time.Time `json:"end_time"`
	// Per-table breakdown of the same counters.
	Tables []TableStats `json:"tables"`
}
// TableStats holds DML counters for a single table.
// NOTE(review): Table and DB carry no json tags, so they marshal as "Table"
// and "DB" while the other fields use lowercase keys — confirm intended.
type TableStats struct {
	Table string // table name as recorded in the binlog event
	DB    string // schema name as recorded in the binlog event
	Inserts int `json:"insert"`
	Updates int `json:"update"` // counted per before/after pair
	Deletes int `json:"delete"`
	Total   int `json:"total"` // raw row-image count (update pairs count twice)
}
// String renders one line per table (sorted ascending by Total, stable)
// followed by an overall summary footer.
func (s *SummaryStats) String() string {
	if len(s.Tables) > 1 {
		sort.SliceStable(s.Tables, func(i, j int) bool {
			return s.Tables[i].Total < s.Tables[j].Total
		})
	}
	var sb strings.Builder
	for _, tbl := range s.Tables {
		sb.WriteString(tbl.String())
	}
	sb.WriteString(fmt.Sprintf("\n"+`Summary: %d
insert: %d
update: %d
delete: %d`+"\n", s.Total, s.Inserts, s.Updates, s.Deletes))
	return sb.String()
}
// String renders the table's counters as a single line:
// "db.table: total [insert:i, update:u, delete:d]\n".
func (t *TableStats) String() string {
	const line = "%s.%s: %d [insert:%d, update:%d, delete:%d]\n"
	return fmt.Sprintf(line, t.DB, t.Table, t.Total, t.Inserts, t.Updates, t.Deletes)
}
// BinlogParserStats parses a binlog range and aggregates per-table row
// statistics instead of generating SQL.
type BinlogParserStats struct {
	baseParser

	masterStatus *MasterStatus // master status captured at construction time

	startTimestamp uint32 // requested start of the time range (unix seconds)
	stopTimestamp  uint32 // requested end of the time range (unix seconds)

	// Output destination: a named file, or stdout when no name is configured.
	outputFile     *os.File
	outputFileName string

	// Buffered writer over outputFile. Buffering matters: parsing a ~600MB
	// log (~5M rows) took 5min writing the file directly vs 1min buffered.
	bufferWriter *bufio.Writer

	// Table structure cache (used for local parsing only).
	tableCacheList map[string]*Table

	// Parsing scratch state.
	currentPosition  mysql.Position // current binlog position
	currentTimtstamp uint32         // timestamp of the event being parsed (field-name typo kept for compatibility)

	// Known binlog files and positions, used to compute progress percent.
	binlogs []MasterLog

	stats       SummaryStats           // aggregated totals
	statsTables map[string]*TableStats // per-table counters, keyed by getDBTableKey
}
// ParserStats runs the statistics parse. In local mode (no host configured
// and StartFile set) it delegates to parserFile; otherwise it streams events
// from the remote server until a stop condition is reached or the context is
// canceled. Returns nil on a clean stop (including the idle timeout).
func (p *BinlogParserStats) ParserStats() error {
	if p.cfg.Host == "" && p.cfg.StartFile != "" {
		_, err := os.Stat(p.cfg.StartFile)
		if err != nil {
			return fmt.Errorf("open start_file: %w", err)
		}
		return p.parserFile()
	}

	defer timeTrack(time.Now(), "Parser")
	defer func() {
		if p.outputFile != nil {
			p.outputFile.Close()
		}
	}()

	var err error
	p.running = true

	p.db, err = p.getDB()
	if err != nil {
		return err
	}
	defer p.db.Close()

	if len(p.outputFileName) > 0 {
		if Exists(p.outputFileName) {
			// Truncate an existing file rather than appending (O_APPEND
			// deliberately not used).
			p.outputFile, err = os.OpenFile(p.outputFileName,
				os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
		} else {
			p.outputFile, err = os.Create(p.outputFileName)
		}
		if err != nil {
			return fmt.Errorf("create output file: %w", err)
		}
		p.bufferWriter = bufio.NewWriter(p.outputFile)
	}

	cfg := replication.BinlogSyncerConfig{
		ServerID:   2000000211,
		Flavor:     p.cfg.Flavor,
		Host:       p.cfg.Host,
		Port:       p.cfg.Port,
		User:       p.cfg.User,
		Password:   p.cfg.Password,
		UseDecimal: true,
	}

	b := replication.NewBinlogSyncer(cfg)
	defer b.Close()

	p.currentPosition = mysql.Position{
		Name: p.startFile,
		Pos:  uint32(p.cfg.StartPosition),
	}

	s, err := b.StartSync(p.currentPosition)
	if err != nil {
		log.Infof("Start sync error: %v\n", err)
		return fmt.Errorf("Start sync: %w", err)
	}

	var finishError error
FOR:
	for {
		e, err := s.GetEvent(p.ctx)
		if err != nil {
			if err == context.DeadlineExceeded {
				// Idle timeout: stop gracefully with no error.
				// BUGFIX: the original set err = nil and still wrapped it
				// with %w, producing a bogus non-nil finishError containing
				// "%!w(<nil>)".
				log.Warnf("Waiting for timeout(10s), no new event is generated, automatically stop: %v\n", err)
				break FOR
			}
			log.Errorf("Get event error: %v\n", err)
			finishError = fmt.Errorf("get binlog event: %w", err)
			break FOR
		}
		select {
		case <-p.ctx.Done():
			finishError = context.Canceled
			break FOR
		default:
			ok, err := p.parseSingleEvent(e)
			if err != nil {
				finishError = err
				break FOR
			}
			if !ok {
				break FOR
			}
		}
	}

	p.running = false
	return finishError
}
// checkFinish decides whether parsing should stop at currentPosition.
//
// The return value compares the current position against the configured stop
// position and the master's latest position:
//
//	 1 — past the boundary (checked at event start)
//	 0 — exactly at the boundary (checked at event end)
//	-1 — before it, keep going
//
// Parsing stops when any of these holds:
//   - the binlog file name is beyond the configured stop file
//   - the position is beyond the stop position within the stop file
//   - the position is beyond the master's latest binlog position
func (p *BinlogParserStats) checkFinish(currentPosition *mysql.Position) int {
	returnValue := -1
	var stopMsg string
	// Configured stop file / stop position.
	if p.stopFile != "" && currentPosition.Name > p.stopFile {
		stopMsg = "超出指定结束文件"
		returnValue = 1
	} else if p.cfg.StopPosition > 0 && currentPosition.Name == p.stopFile {
		if currentPosition.Pos > uint32(p.cfg.StopPosition) {
			stopMsg = "超出指定位置"
			returnValue = 1
		} else if currentPosition.Pos == uint32(p.cfg.StopPosition) {
			stopMsg = "超出指定位置"
			returnValue = 0
		}
	}
	// Master's latest binlog position (remote mode only).
	if p.masterStatus != nil {
		if currentPosition.Name > p.masterStatus.File ||
			currentPosition.Name == p.masterStatus.File &&
				currentPosition.Pos > uint32(p.masterStatus.Position) {
			stopMsg = "超出最新binlog位置"
			returnValue = 1
		} else if currentPosition.Name == p.masterStatus.File &&
			currentPosition.Pos == uint32(p.masterStatus.Position) {
			stopMsg = "超出最新binlog位置"
			returnValue = 0
		}
	}
	if stopMsg != "" {
		log.WithFields(log.Fields{
			"当前文件": currentPosition.Name,
			"结束文件": p.stopFile,
			"当前位置": currentPosition.Pos,
			"结束位置": p.cfg.StopPosition,
		}).Info(stopMsg)
	}
	return returnValue
}
// clear removes the output file; called once compression finished or no
// data was produced.
func (p *BinlogParserStats) clear() {
	if err := os.Remove(p.outputFileName); err != nil {
		log.Error(err)
		log.Errorf("删除文件失败! %s", p.outputFileName)
	}
}
// Stop aborts a running parse: it cancels the context (when one was created)
// and clears the running flag.
func (p *BinlogParserStats) Stop() {
	log.Warn("Process killed")
	if cancel := p.cancelFn; cancel != nil {
		cancel()
	}
	p.running = false
}
// writeString appends str to the buffered output file when one is
// configured, otherwise prints it to stdout.
func (p *BinlogParserStats) writeString(str string) {
	if len(p.outputFileName) == 0 {
		fmt.Print(str)
		return
	}
	p.bufferWriter.WriteString(str)
}
// NewBinlogParserStats builds a statistics-only binlog parser from cfg.
// With an empty cfg.Host the parser runs in local mode (StartFile is a
// binlog file on disk); otherwise it verifies DB connectivity and captures
// the master status up front. Filters and timestamps are resolved by
// parserInit.
func NewBinlogParserStats(cfg *BinlogParserConfig) (*BinlogParserStats, error) {
	p := new(BinlogParserStats)
	p.allTables = make(map[uint64]*Table)
	cfg.beginTime = time.Now().Unix()
	p.cfg = cfg
	p.OnlyDatabases = make(map[string]bool)
	// Keyed by table name; value is the database name ("" = any database).
	p.OnlyTables = make(map[string]string)
	p.SqlTypes = make(map[string]bool)
	p.startFile = cfg.StartFile
	p.stopFile = cfg.StopFile
	if cfg.OutputFileStr != "" {
		p.outputFileName = cfg.OutputFileStr
	} else {
		// NOTE(review): with no output file configured, outputFile is
		// os.Stdout, and deferred Close() calls elsewhere will close stdout —
		// confirm this is intended.
		p.outputFile = os.Stdout
	}
	// Local parse mode: host is empty and the "tables" option names a SQL file.
	if p.cfg.Host == "" {
		p.localMode = true
	}
	if !p.localMode {
		var err error
		// Connectivity check only: the handle is closed on return and
		// reopened by ParserStats.
		p.db, err = p.getDB()
		if err != nil {
			return nil, err
		}
		defer p.db.Close()
		p.masterStatus, err = p.mysqlMasterStatus()
		if err != nil {
			return nil, err
		}
		if p.cfg.Debug {
			p.db.LogMode(true)
		}
	}
	if err := p.parserInit(); err != nil {
		return nil, err
	}
	p.ctx, p.cancelFn = context.WithCancel(context.Background())
	p.stats = SummaryStats{}
	p.statsTables = make(map[string]*TableStats)
	return p, nil
}
// parserInit initializes the stats parser: prepares the output file,
// resolves the start/stop timestamps into binlog files, and builds the
// SQL-type / database / table filters from the config.
func (p *BinlogParserStats) parserInit() error {
	defer timeTrack(time.Now(), "parserInit")
	if len(p.outputFileName) > 0 {
		var err error
		if Exists(p.outputFileName) {
			// Truncate an existing file rather than appending (O_APPEND
			// deliberately not used).
			p.outputFile, err = os.OpenFile(p.outputFileName,
				os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
		} else {
			p.outputFile, err = os.Create(p.outputFileName)
		}
		p.checkError(err)
		// NOTE(review): this defer closes the file as soon as parserInit
		// returns, leaving bufferWriter wrapping a closed file. ParserStats
		// and parserFile reopen the file later, which masks this — confirm
		// whether opening the file here is needed at all.
		defer p.outputFile.Close()
		p.bufferWriter = bufio.NewWriter(p.outputFile)
	}
	// Parse the optional time range boundaries.
	if p.cfg.StartTime != "" {
		t, err := now.Parse(p.cfg.StartTime)
		if err != nil {
			return err
		}
		p.startTimestamp = uint32(t.Unix())
	}
	if p.cfg.StopTime != "" {
		t, err := now.Parse(p.cfg.StopTime)
		if err != nil {
			return err
		}
		p.stopTimestamp = uint32(t.Unix())
	}
	// With no explicit start file, derive it from the server's binlog list
	// and the requested time range.
	if len(p.startFile) == 0 {
		binlogs := p.autoParseBinlogPosition()
		if len(binlogs) == 0 {
			p.checkError(errors.New("无法获取master binlog"))
		}
		p.startFile = binlogs[0].Name
		if p.startTimestamp > 0 || p.stopTimestamp > 0 {
			// Pick the latest file starting at or before startTimestamp, and
			// the first file starting after stopTimestamp as the stop file.
			for _, masterLog := range binlogs {
				timestamp, err := p.getBinlogFirstTimestamp(masterLog.Name)
				p.checkError(err)
				log.WithFields(log.Fields{
					"起始时间":       time.Unix(int64(timestamp), 0).Format(TimeFormat),
					"binlogFile": masterLog.Name,
				}).Info("binlog信息")
				if timestamp <= p.startTimestamp {
					p.startFile = masterLog.Name
				}
				if p.stopFile == "" && p.stopTimestamp > 0 && timestamp > p.stopTimestamp {
					p.stopFile = masterLog.Name
				}
			}
		}
		if len(p.startFile) == 0 {
			p.checkError(errors.New("未能解析指定时间段的binlog文件,请检查后重试!"))
		}
		log.Infof("根据指定的时间段,解析出的开始binlog文件是:%s,结束文件是:%s\n",
			p.startFile, p.stopFile)
	}
	// SQL-type filter; defaults to insert+update+delete when unspecified.
	if len(p.cfg.SqlType) > 0 {
		for _, s := range strings.Split(p.cfg.SqlType, ",") {
			p.SqlTypes[s] = true
		}
	} else {
		p.SqlTypes["insert"] = true
		p.SqlTypes["update"] = true
		p.SqlTypes["delete"] = true
	}
	// Database filter (lowercased, backquotes/spaces trimmed).
	if len(p.cfg.Databases) > 0 {
		for _, db := range strings.Split(p.cfg.Databases, ",") {
			db = strings.ToLower(strings.Trim(db, " `"))
			p.OnlyDatabases[db] = true
		}
	}
	// Table filter: "db.table" pins the database, a bare name matches any.
	if len(p.cfg.Tables) > 0 {
		for _, s := range strings.Split(p.cfg.Tables, ",") {
			if strings.Contains(s, ".") {
				names := strings.SplitN(s, ".", 2)
				db, table := names[0], names[1]
				db = strings.ToLower(strings.Trim(db, " `"))
				table = strings.ToLower(strings.Trim(table, " `"))
				p.OnlyTables[table] = db
			} else {
				key := strings.ToLower(strings.Trim(s, " `"))
				p.OnlyTables[key] = ""
			}
		}
	}
	return nil
}
// getBinlogFirstTimestamp returns the first non-zero event timestamp in the
// given binlog file by streaming events from position 4 (right after the
// file header).
func (p *BinlogParserStats) getBinlogFirstTimestamp(file string) (uint32, error) {
	previousLevel := log.GetLevel()
	defer func() {
		log.SetLevel(previousLevel)
	}()
	// Temporarily raise the log level so close/sync noise from the
	// short-lived syncer is muted.
	log.SetLevel(log.FatalLevel)

	syncer := replication.NewBinlogSyncer(replication.BinlogSyncerConfig{
		ServerID: 2000000110,
		Flavor:   p.cfg.Flavor,
		Host:     p.cfg.Host,
		Port:     p.cfg.Port,
		User:     p.cfg.User,
		Password: p.cfg.Password,
	})
	defer func() {
		syncer.Close()
	}()

	stream, err := syncer.StartSync(mysql.Position{Name: file,
		Pos: uint32(4)})
	if err != nil {
		return 0, err
	}
	for {
		ev, err := stream.GetEvent(context.Background())
		if err != nil {
			return 0, err
		}
		// The first event carrying a timestamp wins.
		if ts := ev.Header.Timestamp; ts > 0 {
			return ts, nil
		}
	}
}
// checkError logs a non-nil error and panics with it; nil is a no-op.
func (p *BinlogParserStats) checkError(e error) {
	if e == nil {
		return
	}
	log.Error(e)
	panic(e)
}
// mysqlMasterStatus runs SHOW MASTER STATUS and returns the scanned result.
func (p *BinlogParserStats) mysqlMasterStatus() (*MasterStatus, error) {
	defer timeTrack(time.Now(), "mysqlMasterStatus")
	status := &MasterStatus{}
	p.db.Raw("SHOW MASTER STATUS").Scan(status)
	return status, nil
}
// autoParseBinlogPosition lists the server's binlog files via
// SHOW MASTER LOGS, caching the result after the first call.
func (p *BinlogParserStats) autoParseBinlogPosition() []MasterLog {
	if p.binlogs == nil {
		var index []MasterLog
		p.db.Raw("SHOW MASTER LOGS").Scan(&index)
		p.binlogs = index
	}
	return p.binlogs
}
// getDBTableKey builds the canonical lowercase "`db`.`table`" key used to
// index per-table statistics.
func getDBTableKey(db, table string) string {
	return "`" + strings.ToLower(db) + "`.`" + strings.ToLower(table) + "`"
}
// parseSingleEvent processes one binlog event for statistics gathering.
// It tracks the current position and timestamp, counts rows per table for
// insert/update/delete events, and returns ok=false when parsing should
// stop (time range exceeded, stop position reached, or manual stop).
func (p *BinlogParserStats) parseSingleEvent(e *replication.BinlogEvent) (ok bool, err error) {
	// Keep going by default.
	ok = true
	finishFlag := -1
	if e.Header.LogPos > 0 {
		p.currentPosition.Pos = e.Header.LogPos
	}
	// A rotate event switches the current position to the next binlog file.
	if e.Header.EventType == replication.ROTATE_EVENT {
		if event, ok := e.Event.(*replication.RotateEvent); ok {
			p.currentPosition = mysql.Position{
				Name: string(event.NextLogName),
				Pos:  uint32(event.Position)}
		}
	}
	if e.Header.Timestamp > 0 {
		// Skip events before the requested start time.
		if p.startTimestamp > 0 && e.Header.Timestamp < p.startTimestamp {
			return
		}
		// Record the observed time range for the summary.
		if p.stats.StartTime.IsZero() {
			p.stats.StartTime = time.Unix(int64(e.Header.Timestamp), 0)
		}
		p.stats.EndTime = time.Unix(int64(e.Header.Timestamp), 0)
		if p.stopTimestamp > 0 && e.Header.Timestamp > p.stopTimestamp {
			log.Warn("已超出结束时间")
			return false, nil
		}
	}
	p.currentTimtstamp = e.Header.Timestamp
	finishFlag = p.checkFinish(&p.currentPosition)
	if finishFlag == 1 {
		log.Warn("is finish")
		return false, nil
	}
	// Count rows per table for DML events.
	switch event := e.Event.(type) {
	case *replication.RowsEvent:
		key := getDBTableKey(string(event.Table.Schema), string(event.Table.Table))
		if _, ok := p.statsTables[key]; !ok {
			p.statsTables[key] = &TableStats{
				DB:    string(event.Table.Schema),
				Table: string(event.Table.Table),
			}
		}
		currentTable := p.statsTables[key]
		switch e.Header.EventType {
		case replication.WRITE_ROWS_EVENTv1, replication.WRITE_ROWS_EVENTv2:
			currentTable.Inserts += len(event.Rows)
			currentTable.Total += len(event.Rows)
		case replication.DELETE_ROWS_EVENTv1, replication.DELETE_ROWS_EVENTv2:
			currentTable.Deletes += len(event.Rows)
			currentTable.Total += len(event.Rows)
		case replication.UPDATE_ROWS_EVENTv1, replication.UPDATE_ROWS_EVENTv2:
			// Update events carry before/after row pairs, hence /2 for the
			// update count while Total keeps the raw row-image count.
			currentTable.Updates += len(event.Rows) / 2
			currentTable.Total += len(event.Rows)
		}
	}
	// Re-check the stop time so we don't hang at the boundary while no new
	// events arrive.
	if e.Header.Timestamp > 0 {
		if p.stopTimestamp > 0 && e.Header.Timestamp > p.stopTimestamp {
			log.Warn("已超出结束时间")
			return false, nil
		}
	}
	if !p.running {
		log.Warn("进程手动中止")
		return false, nil
	}
	if finishFlag > -1 {
		return false, nil
	}
	return
}
// parserFile parses a local binlog file (offline mode), then aggregates the
// per-table counters and writes the summary to the configured output.
func (p *BinlogParserStats) parserFile() error {
	defer timeTrack(time.Now(), "parserFile")
	defer func() {
		if p.outputFile != nil {
			p.outputFile.Close()
		}
	}()
	var err error
	p.running = true
	if len(p.outputFileName) > 0 {
		if Exists(p.outputFileName) {
			// Truncate an existing file rather than appending.
			p.outputFile, err = os.OpenFile(p.outputFileName,
				os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666)
		} else {
			p.outputFile, err = os.Create(p.outputFileName)
		}
		if err != nil {
			return err
		}
		p.bufferWriter = bufio.NewWriter(p.outputFile)
	}
	b := replication.NewBinlogParser()
	b.SetUseDecimal(true)
	p.currentPosition = mysql.Position{
		Name: p.startFile,
		Pos:  uint32(p.cfg.StartPosition),
	}
	// Per-event callback: collect stats and stop the parser once a stop
	// condition is met.
	f := func(e *replication.BinlogEvent) error {
		ok, err := p.parseSingleEvent(e)
		if err != nil {
			return err
		}
		if !ok {
			b.Stop()
		}
		// Re-check the stop time so we don't hang at the boundary while no
		// new events arrive.
		if e.Header.Timestamp > 0 {
			if p.stopTimestamp > 0 && e.Header.Timestamp >= p.stopTimestamp {
				log.Warn("已超出结束时间")
				b.Stop()
				return nil
			}
		}
		return nil
	}
	err = b.ParseFile(p.startFile, int64(p.cfg.StartPosition), f)
	if err != nil {
		// NOTE(review): a parse error is only printed here; partial stats
		// are still emitted and nil is returned — confirm this best-effort
		// behavior is intended.
		println(err.Error())
	}
	// Roll the per-table counters up into the overall summary.
	p.stats.Tables = make([]TableStats, 0, len(p.statsTables))
	for _, t := range p.statsTables {
		p.stats.Inserts += t.Inserts
		p.stats.Updates += t.Updates
		p.stats.Deletes += t.Deletes
		p.stats.Total += t.Total
		p.stats.Tables = append(p.stats.Tables, *t)
	}
	p.writeString(p.stats.String())
	return nil
}
================================================
FILE: core/parser_test.go
================================================
package core_test
import (
"bufio"
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"testing"
"time"
"github.com/go-mysql-org/go-mysql/client"
"github.com/go-mysql-org/go-mysql/mysql"
_ "github.com/go-sql-driver/mysql"
"github.com/hanchuanchuan/bingo2sql/core"
. "github.com/pingcap/check"
log "github.com/sirupsen/logrus"
)
// Use docker mysql to test, mysql is 3306, mariadb is 3316
var testHost = flag.String("host", "127.0.0.1", "MySQL master host")
var testOutputLogs = flag.Bool("out", false, "output binlog event")

// allTables maps table name -> CREATE TABLE DDL used to provision the test
// schema; the set covers JSON, numeric, temporal, text/blob, enum/set,
// geometry and generated columns.
var allTables = map[string]string{
	"test_json_v2": `CREATE TABLE IF NOT EXISTS test_json_v2 (
id INT,
c JSON,
PRIMARY KEY (id)
) ENGINE=InnoDB`,
	"test_replication": `CREATE TABLE IF NOT EXISTS test_replication (
id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
str VARCHAR(256),
f FLOAT,
d DOUBLE,
de DECIMAL(10,2),
i INT,
bi BIGINT,
e enum ("e1", "e2"),
b BIT(8),
y YEAR,
da DATE,
ts TIMESTAMP,
dt DATETIME,
tm TIME,
t TEXT,
bb BLOB,
se SET('a', 'b', 'c'),
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8`,
	"test_json": `CREATE TABLE IF NOT EXISTS test_json (
id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
c1 JSON,
c2 DECIMAL(10, 0),
PRIMARY KEY (id)
) ENGINE=InnoDB`,
	"test_geo": `CREATE TABLE IF NOT EXISTS test_geo (id int auto_increment primary key, g GEOMETRY)`,
	"test_parse_time": `CREATE TABLE IF NOT EXISTS test_parse_time (
id int auto_increment primary key,
a1 DATETIME,
a2 DATETIME(3),
a3 DATETIME(6),
b1 TIMESTAMP,
b2 TIMESTAMP(3) ,
b3 TIMESTAMP(6))`,
	"test_simple": `CREATE TABLE IF NOT EXISTS test_simple (
id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
c1 varchar(100),
c2 int,
PRIMARY KEY (id)
) ENGINE=InnoDB`,
	"test_long_text": `CREATE TABLE IF NOT EXISTS test_long_text (
id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
c1 longtext,
PRIMARY KEY (id)
) ENGINE=InnoDB`,
	"test_generated": `CREATE TABLE IF NOT EXISTS test_generated (
id int primary key,
price int,
number int,
total int generated always as (price*number))`,
}
var (
	// SQL file produced by remote (server) binlog parsing.
	binlogOutputFile string = "binlog_output.sql"
	// SQL file produced by local (file) binlog parsing.
	localOutputFile string = "binlog_output_local.sql"
	// Table schema file, consumed by local parsing.
	tableSchemaFile string = "table_schema.sql"
)

var (
	// Pristine config templates, restored by reset() after each test.
	defaultConfig core.BinlogParserConfig
	localConfig   core.BinlogParserConfig
)
// Register the suite with gocheck.
var _ = Suite(&testParserSuite{})

// TestBinLogSyncer is the `go test` entry point that runs all gocheck suites.
func TestBinLogSyncer(t *testing.T) {
	TestingT(t)
}
// testParserSuite drives the parser tests against a live MySQL/MariaDB
// instance, comparing remote (server) and local (file) parsing results.
type testParserSuite struct {
	c      *client.Conn // live connection to the test server
	flavor string       // mysql.MySQLFlavor or mysql.MariaDBFlavor

	config      core.BinlogParserConfig // remote (server) parse config
	localConfig core.BinlogParserConfig // local (file) parse config
}
// SetUpSuite runs once: it prepares the remote and local parser configs,
// writes the table-schema file, resolves the binlog file names from the
// server and creates the test tables.
func (t *testParserSuite) SetUpSuite(c *C) {
	defaultConfig = core.BinlogParserConfig{
		Host:          *testHost,
		Port:          3306,
		User:          "test",
		Password:      "test",
		StartFile:     "mysql-bin.000001",
		Databases:     "test",
		OutputFileStr: binlogOutputFile,
	}
	localConfig = core.BinlogParserConfig{
		// StartFile is resolved automatically by setBinlogDir.
		Databases:     "test",
		Tables:        tableSchemaFile,
		OutputFileStr: localOutputFile,
	}
	t.config = defaultConfig
	t.localConfig = localConfig
	t.initTableSchema()
	t.setBinlogDir(c)
	t.createTables(c)
	log.SetLevel(log.InfoLevel)
}
// TearDownSuite removes every file generated during the test run.
func (t *testParserSuite) TearDownSuite(c *C) {
	for _, name := range []string{binlogOutputFile, localOutputFile, tableSchemaFile} {
		os.Remove(name)
	}
}
// SetUpTest is the per-test hook; nothing needs preparing.
func (t *testParserSuite) SetUpTest(c *C) {
}
// TearDownTest restores the default configs and closes the test connection.
func (t *testParserSuite) TearDownTest(c *C) {
	t.reset()
	if conn := t.c; conn != nil {
		conn.Close()
		t.c = nil
	}
}
// testExecute runs every statement on the test connection, failing the test
// on the first error.
func (t *testParserSuite) testExecute(c *C, query ...string) {
	for _, stmt := range query {
		_, err := t.c.Execute(stmt)
		c.Assert(err, IsNil)
	}
}
// setBinlogDir queries the server for its binlog basename and current binlog
// file, filling StartFile for both the local and the remote configs.
func (t *testParserSuite) setBinlogDir(c *C) {
	if t.c == nil {
		t.setupTest(c, mysql.MySQLFlavor)
	}
	result, err := t.c.Execute("show variables like 'log_bin_basename'")
	c.Assert(err, IsNil)
	basename, err := result.GetString(0, 1)
	c.Assert(err, IsNil)
	// NOTE(review): hard-coded remap of the server's binlog directory onto a
	// developer-machine path; local parsing only works where that path
	// exists — confirm.
	basename = strings.Replace(basename, "/data/mysql", "/Users/hanchuanchuan", 1)
	t.localConfig.StartFile = fmt.Sprintf("%s.000001", basename)
	localConfig.StartFile = fmt.Sprintf("%s.000001", basename)
	log.Infof("开始本地日志文件:%s", localConfig.StartFile)
	result, err = t.c.Execute("show master logs")
	c.Assert(err, IsNil)
	basename, err = result.GetString(0, 0)
	c.Assert(err, IsNil)
	t.config.StartFile = basename
	defaultConfig.StartFile = basename
	log.Infof("开始日志文件:%s", basename)
}
// SetFlashback toggles flashback (rollback SQL) mode on both configs.
func (t *testParserSuite) SetFlashback(v bool) {
	t.config.Flashback = v
	t.localConfig.Flashback = v
}

// SetMinimalUpdate toggles minimal-update mode on both configs.
func (t *testParserSuite) SetMinimalUpdate(v bool) {
	t.config.MinimalUpdate = v
	t.localConfig.MinimalUpdate = v
}

// SetRemovePrimary toggles primary-key removal on both configs.
func (t *testParserSuite) SetRemovePrimary(v bool) {
	t.config.RemovePrimary = v
	t.localConfig.RemovePrimary = v
}

// SetSQLType sets the DML-type filter (comma-separated) on both configs.
func (t *testParserSuite) SetSQLType(v string) {
	t.config.SqlType = v
	t.localConfig.SqlType = v
}

// SetIncludeGtids sets the GTID filter on both configs.
func (t *testParserSuite) SetIncludeGtids(v string) {
	t.config.IncludeGtids = v
	t.localConfig.IncludeGtids = v
}
// setupTest (re)connects to the dockerized test server for the given flavor
// (MySQL on 3306, MariaDB on 3316) and forces row-based binlog format on
// the session.
func (t *testParserSuite) setupTest(c *C, flavor string) {
	port := uint16(3306)
	if flavor == mysql.MariaDBFlavor {
		port = 3316
	}
	t.flavor = flavor

	if t.c != nil {
		t.c.Close()
	}
	var err error
	t.c, err = client.Connect(fmt.Sprintf("%s:%d", *testHost, port), "test", "test", "")
	c.Assert(err, IsNil)

	_, err = t.c.Execute("USE test")
	c.Assert(err, IsNil)
	_, err = t.c.Execute("set binlog_format = 'row'")
	c.Assert(err, IsNil)
}
// getThreadID returns the server-side connection id of the test connection.
func (t *testParserSuite) getThreadID(c *C) uint32 {
	res, err := t.c.Execute("select connection_id()")
	c.Assert(err, IsNil)
	id, err := res.GetInt(0, 0)
	c.Assert(err, IsNil)
	return uint32(id)
}
// getServerUUID fetches server_uuid, logging it together with the current
// master binlog file and executed GTID set for debugging.
func (t *testParserSuite) getServerUUID(c *C) string {
	res, err := t.c.Execute("show variables like 'server_uuid'")
	c.Assert(err, IsNil)
	serverUUID, err := res.GetString(0, 1)
	c.Assert(err, IsNil)
	log.Infof("server uuid: %#v", serverUUID)

	res, err = t.c.Execute("show master status;")
	c.Assert(err, IsNil)
	file, _ := res.GetString(0, 0)
	gtidSet, _ := res.GetString(0, 4)
	log.Infof("binlog: %s, gtid set: %s", file, gtidSet)
	return serverUUID
}
// func (t *testParserSuite) testPositionSync(c *C) {
// //get current master binlog file and position
// r, err := t.c.Execute("SHOW MASTER STATUS")
// c.Assert(err, IsNil)
// binFile, _ := r.GetString(0, 0)
// binPos, _ := r.GetInt(0, 1)
// s, err := t.b.StartSync(mysql.Position{Name: binFile, Pos: uint32(binPos)})
// c.Assert(err, IsNil)
// // Test re-sync.
// time.Sleep(100 * time.Millisecond)
// t.b.c.SetReadDeadline(time.Now().Add(time.Millisecond))
// time.Sleep(100 * time.Millisecond)
// t.testSync(c, s)
// }
// func (t *testParserSuite) TestMysqlPositionSync(c *C) {
// t.setupTest(c, mysql.MySQLFlavor)
// t.testPositionSync(c)
// }
// func (t *testParserSuite) TestMysqlGTIDSync(c *C) {
// t.setupTest(c, mysql.MySQLFlavor)
// r, err := t.c.Execute("SELECT @@gtid_mode")
// c.Assert(err, IsNil)
// modeOn, _ := r.GetString(0, 0)
// if modeOn != "ON" {
// c.Skip("GTID mode is not ON")
// }
// r, err = t.c.Execute("SHOW GLOBAL VARIABLES LIKE 'SERVER_UUID'")
// c.Assert(err, IsNil)
// var masterUuid uuid.UUID
// if s, _ := r.GetString(0, 1); len(s) > 0 && s != "NONE" {
// masterUuid, err = uuid.FromString(s)
// c.Assert(err, IsNil)
// }
// set, _ := mysql.ParseMysqlGTIDSet(fmt.Sprintf("%s:%d-%d", masterUuid.String(), 1, 2))
// s, err := t.b.StartSyncGTID(set)
// c.Assert(err, IsNil)
// t.testSync(c, s)
// }
// func (t *testParserSuite) TestMariadbPositionSync(c *C) {
// t.setupTest(c, mysql.MariaDBFlavor)
// t.testPositionSync(c)
// }
// func (t *testParserSuite) TestMariadbGTIDSync(c *C) {
// t.setupTest(c, mysql.MariaDBFlavor)
// // get current master gtid binlog pos
// r, err := t.c.Execute("SELECT @@gtid_binlog_pos")
// c.Assert(err, IsNil)
// str, _ := r.GetString(0, 0)
// set, _ := mysql.ParseMariadbGTIDSet(str)
// s, err := t.b.StartSyncGTID(set)
// c.Assert(err, IsNil)
// t.testSync(c, s)
// }
// func (t *testParserSuite) TestMariadbAnnotateRows(c *C) {
// t.setupTest(c, mysql.MariaDBFlavor)
// t.b.cfg.DumpCommandFlag = BINLOG_SEND_ANNOTATE_ROWS_EVENT
// t.testPositionSync(c)
// }
// func (t *testParserSuite) TestMysqlSemiPositionSync(c *C) {
// t.setupTest(c, mysql.MySQLFlavor)
// t.b.cfg.SemiSyncEnabled = true
// t.testPositionSync(c)
// }
// func (t *testParserSuite) TestMysqlBinlogCodec(c *C) {
// t.setupTest(c, mysql.MySQLFlavor)
// t.testExecute(c, "RESET MASTER")
// var wg sync.WaitGroup
// wg.Add(1)
// defer wg.Wait()
// go func() {
// defer wg.Done()
// t.testSync(c, nil)
// t.testExecute(c, "FLUSH LOGS")
// t.testSync(c, nil)
// }()
// binlogDir := "./var"
// os.RemoveAll(binlogDir)
// err := t.b.StartBackup(binlogDir, mysql.Position{Name: "", Pos: uint32(0)}, 2*time.Second)
// c.Assert(err, IsNil)
// p := NewBinlogParser()
// p.SetVerifyChecksum(true)
// f := func(e *BinlogEvent) error {
// if *testOutputLogs {
// e.Dump(os.Stdout)
// os.Stdout.Sync()
// }
// return nil
// }
// dir, err := os.Open(binlogDir)
// c.Assert(err, IsNil)
// defer dir.Close()
// files, err := dir.Readdirnames(-1)
// c.Assert(err, IsNil)
// for _, file := range files {
// err = p.ParseFile(path.Join(binlogDir, file), 0, f)
// c.Assert(err, IsNil)
// }
// }
// checkBinlog parses the binlog and asserts the produced statements match
// sqls exactly, line by line.
func (t *testParserSuite) checkBinlog(c *C, sqls ...string) {
	got := t.getBinlog(c)
	c.Assert(len(got), Equals, len(sqls), Commentf("%#v", got))
	for i, stmt := range got {
		c.Assert(stmt, Equals, sqls[i], Commentf("%#v", got))
	}
}
// getBinlog parses the binlog remotely and — when the local binlog file is
// reachable on this machine — also parses it locally, asserting that both
// results agree line by line. Returns the remote result.
func (t *testParserSuite) getBinlog(c *C) []string {
	// Remote (server) parse.
	resultOnline := t.getBinlogWithConfig(c, &t.config)
	// Local parse, only when the binlog file exists locally.
	if _, err := os.Stat(t.localConfig.StartFile); err == nil {
		resultLocal := t.getBinlogWithConfig(c, &t.localConfig)
		c.Assert(len(resultOnline), Equals, len(resultLocal), Commentf("local: %#v, online: %#v", resultLocal, resultOnline))
		for i, line := range resultOnline {
			c.Assert(line, Equals, resultLocal[i], Commentf("local: %#v, online: %#v", resultLocal[i], resultOnline))
		}
	} else {
		log.Warnf("跳过本地文件解析! 本地文件不存在:%s", t.localConfig.StartFile)
	}
	return resultOnline
}
// getBinlogWithConfig runs the parser with the given config and returns the
// generated SQL statements from the output file, skipping blank lines and
// comment-only lines and stripping trailing "# ..." annotations.
func (t *testParserSuite) getBinlogWithConfig(c *C, config *core.BinlogParserConfig) []string {
	p, err := core.NewBinlogParser(context.Background(), config)
	c.Assert(err, IsNil)
	err = p.Parser()
	c.Assert(err, IsNil)

	fileObj, err := os.Open(config.OutputFileStr)
	c.Assert(err, IsNil)
	defer fileObj.Close()

	reader := bufio.NewReader(fileObj)
	var buf []string
	// collect normalizes one raw line and appends it when it carries content.
	collect := func(line string) {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "# ") || line == "" {
			return
		}
		// Drop trailing inline annotations ("statement  # comment").
		if idx := strings.Index(line, "# "); idx >= 0 {
			line = line[:idx]
		}
		buf = append(buf, strings.TrimSpace(line))
	}
	for {
		line, err := reader.ReadString('\n')
		// BUGFIX: ReadString returns any partial final line together with
		// io.EOF; the original discarded it, losing the last statement when
		// the file lacks a trailing newline.
		if len(line) > 0 {
			collect(line)
		}
		if err != nil {
			if err != io.EOF {
				c.Assert(err, IsNil)
				return nil
			}
			break
		}
	}
	return buf
}
// reset restores both parser configurations to their package-level defaults.
func (t *testParserSuite) reset() {
	t.localConfig = localConfig
	t.config = defaultConfig
	// log.SetLevel(log.ErrorLevel)
}
// TestSync exercises row-event SQL generation for a table covering most MySQL
// column types, including flashback (rollback) output and primary-key removal.
func (t *testParserSuite) TestSync(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c, `RESET MASTER;`,
		// "SET SESSION binlog_format = 'MIXED'",
		`DROP TABLE IF EXISTS test_replication`,
		`CREATE TABLE test_replication (
			id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
			str VARCHAR(256),
			f FLOAT,
			d DOUBLE,
			de DECIMAL(10,2),
			i INT,
			bi BIGINT,
			e enum ("e1", "e2"),
			b BIT(8),
			y YEAR,
			da DATE,
			ts TIMESTAMP,
			dt DATETIME,
			tm TIME,
			t TEXT,
			bb BLOB,
			se SET('a', 'b', 'c'),
			PRIMARY KEY (id)
		) ENGINE=InnoDB DEFAULT CHARSET=utf8`)
	// use row format
	t.testExecute(c,
		`INSERT INTO test_replication (str, f, i, e, b, y, da, ts, dt, tm, de, t, bb, se)
			VALUES ("3", -3.14, 10, "e1", 0b0011, 1985,
			"2012-05-07", "2012-05-07 14:01:01", "2012-05-07 14:01:01",
			"14:01:01", -45363.64, "abc", "12345", "a,b")`)
	// Forward parse regenerates the INSERT (checked twice for stability).
	t.checkBinlog(c, "INSERT INTO `test`.`test_replication`(`id`,`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES(1,'3',-3.14,NULL,-45363.64,10,NULL,1,3,1985,'2012-05-07','2012-05-07 14:01:01','2012-05-07 14:01:01','14:01:01','abc','12345',3);")
	t.checkBinlog(c, "INSERT INTO `test`.`test_replication`(`id`,`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES(1,'3',-3.14,NULL,-45363.64,10,NULL,1,3,1985,'2012-05-07','2012-05-07 14:01:01','2012-05-07 14:01:01','14:01:01','abc','12345',3);")
	// Flashback mode inverts the INSERT into a DELETE.
	t.SetFlashback(true)
	t.checkBinlog(c, "DELETE FROM `test`.`test_replication` WHERE `id`=1;")
	t.SetFlashback(false)
	// Removing the primary key drops `id` from the generated INSERT.
	t.SetRemovePrimary(true)
	t.checkBinlog(c, "INSERT INTO `test`.`test_replication`(`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES('3',-3.14,NULL,-45363.64,10,NULL,1,3,1985,'2012-05-07','2012-05-07 14:01:01','2012-05-07 14:01:01','14:01:01','abc','12345',3);")
}
// TestParseDDL verifies that DDL statements (DROP/CREATE TABLE) are emitted
// when ParseDDL is enabled, and that flashback output still only contains the
// row-event rollback (DDL is not reversed).
func (t *testParserSuite) TestParseDDL(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c, `RESET MASTER;`,
		`DROP TABLE IF EXISTS test_replication`,
		`CREATE TABLE test_replication (
			id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
			str VARCHAR(256),
			f FLOAT,
			d DOUBLE,
			de DECIMAL(10,2),
			i INT,
			bi BIGINT,
			e enum ("e1", "e2"),
			b BIT(8),
			y YEAR,
			da DATE,
			ts TIMESTAMP,
			dt DATETIME,
			tm TIME,
			t TEXT,
			bb BLOB,
			se SET('a', 'b', 'c'),
			PRIMARY KEY (id)
		) ENGINE=InnoDB DEFAULT CHARSET=utf8`)
	// use row format
	t.testExecute(c,
		`INSERT INTO test_replication (str, f, i, e, b, y, da, ts, dt, tm, de, t, bb, se)
			VALUES ("3", -3.14, 10, "e1", 0b0011, 1985,
			"2012-05-07", "2012-05-07 14:01:01", "2012-05-07 14:01:01",
			"14:01:01", -45363.64, "abc", "12345", "a,b")`)
	t.config.ParseDDL = true
	t.localConfig.ParseDDL = true
	// Expected output: the DDL statements line-by-line (output is trimmed and
	// split per line by getBinlogWithConfig), then the row INSERT.
	t.checkBinlog(c,
		"USE `test`;",
		"DROP TABLE IF EXISTS `test_replication` /* generated by server */;",
		"USE `test`;",
		"CREATE TABLE test_replication (",
		"id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,",
		"str VARCHAR(256),",
		"f FLOAT,", "d DOUBLE,",
		"de DECIMAL(10,2),", "i INT,",
		"bi BIGINT,", "e enum (\"e1\", \"e2\"),",
		"b BIT(8),", "y YEAR,", "da DATE,",
		"ts TIMESTAMP,", "dt DATETIME,",
		"tm TIME,", "t TEXT,", "bb BLOB,",
		"se SET('a', 'b', 'c'),", "PRIMARY KEY (id)",
		") ENGINE=InnoDB DEFAULT CHARSET=utf8;",
		"INSERT INTO `test`.`test_replication`(`id`,`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES(1,'3',-3.14,NULL,-45363.64,10,NULL,1,3,1985,'2012-05-07','2012-05-07 14:01:01','2012-05-07 14:01:01','14:01:01','abc','12345',3);",
	)
	// Flashback only rolls back row events; DDL is not emitted.
	t.SetFlashback(true)
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_replication` WHERE `id`=1;",
	)
}
// TestStopTime verifies the StartTime/StopTime window filters: events outside
// the window are excluded, events inside are parsed normally.
func (t *testParserSuite) TestStopTime(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c, `RESET MASTER;`,
		`DROP TABLE IF EXISTS test_replication`,
		`CREATE TABLE test_replication (
			id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
			str VARCHAR(256),
			f FLOAT,
			d DOUBLE,
			de DECIMAL(10,2),
			i INT,
			bi BIGINT,
			e enum ("e1", "e2"),
			b BIT(8),
			y YEAR,
			da DATE,
			ts TIMESTAMP,
			dt DATETIME,
			tm TIME,
			t TEXT,
			bb BLOB,
			se SET('a', 'b', 'c'),
			PRIMARY KEY (id)
		) ENGINE=InnoDB DEFAULT CHARSET=utf8`)
	// use row format
	t.testExecute(c,
		`INSERT INTO test_replication (str, f, i, e, b, y, da, ts, dt, tm, de, t, bb, se)
			VALUES ("3", -3.14, 10, "e1", 0b0011, 1985,
			"2012-05-07", "2012-05-07 14:01:01", "2012-05-07 14:01:01",
			"14:01:01", -45363.64, "abc", "12345", "a,b")`)
	// StartTime 10 minutes in the past: the event falls inside the window.
	t.config.StartTime = time.Now().Add(-10 * time.Minute).Format("2006-01-02 15:04")
	t.localConfig.StartTime = t.config.StartTime
	t.checkBinlog(c, "INSERT INTO `test`.`test_replication`(`id`,`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES(1,'3',-3.14,NULL,-45363.64,10,NULL,1,3,1985,'2012-05-07','2012-05-07 14:01:01','2012-05-07 14:01:01','14:01:01','abc','12345',3);")
	// StopTime 5 minutes in the past: the window now ends before the event.
	t.config.StopTime = time.Now().Add(-5 * time.Minute).Format("2006-01-02 15:04")
	t.localConfig.StopTime = t.config.StopTime
	t.SetFlashback(true)
	// No data inside the restricted time window.
	t.checkBinlog(c)
	// StopTime one minute in the future: the event is back inside the window.
	t.config.StopTime = time.Now().Add(time.Minute).Format("2006-01-02 15:04:05")
	t.localConfig.StopTime = t.config.StopTime
	t.checkBinlog(c, "DELETE FROM `test`.`test_replication` WHERE `id`=1;")
	t.SetFlashback(false)
	t.SetRemovePrimary(true)
	t.checkBinlog(c, "INSERT INTO `test`.`test_replication`(`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES('3',-3.14,NULL,-45363.64,10,NULL,1,3,1985,'2012-05-07','2012-05-07 14:01:01','2012-05-07 14:01:01','14:01:01','abc','12345',3);")
}
// TestGeometry verifies flashback output for GEOMETRY columns; the expected
// INSERT values are the raw WKB bytes rendered as escaped string literals.
func (t *testParserSuite) TestGeometry(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c,
		"RESET MASTER",
		"DROP TABLE IF EXISTS test_geo",
		`CREATE TABLE test_geo (id int auto_increment primary key, g GEOMETRY)`,
	)
	tbls := []string{
		`INSERT INTO test_geo(g) VALUES (POINT(1, 1))`,
		`INSERT INTO test_geo(g) VALUES (LINESTRING(POINT(0,0), POINT(1,1), POINT(2,2)))`,
		`DELETE from test_geo where id>0`,
	}
	t.testExecute(c, tbls...)
	t.SetFlashback(true)
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_geo` WHERE `id`=1;",
		"DELETE FROM `test`.`test_geo` WHERE `id`=2;",
		"INSERT INTO `test`.`test_geo`(`id`,`g`) VALUES(1,'\\0\\0\\0\\0\x01\x01\\0\\0\\0\\0\\0\\0\\0\\0\\0\xf0?\\0\\0\\0\\0\\0\\0\xf0?');",
		"INSERT INTO `test`.`test_geo`(`id`,`g`) VALUES(2,'\\0\\0\\0\\0\x01\x02\\0\\0\\0\x03\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\xf0?\\0\\0\\0\\0\\0\\0\xf0?\\0\\0\\0\\0\\0\\0\\0@\\0\\0\\0\\0\\0\\0\\0@');",
	)
}
// TestDatetime verifies fractional-second handling for DATETIME/TIMESTAMP with
// precision 0, 3 and 6, including zero dates (allowed because sql_mode='').
func (t *testParserSuite) TestDatetime(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c,
		"RESET MASTER",
		`SET sql_mode=''`,
		`DROP TABLE IF EXISTS test_parse_time`,
		`CREATE TABLE test_parse_time (
			id int auto_increment primary key,
			a1 DATETIME,
			a2 DATETIME(3),
			a3 DATETIME(6),
			b1 TIMESTAMP,
			b2 TIMESTAMP(3) ,
			b3 TIMESTAMP(6))`,
	)
	t.testExecute(c, `INSERT INTO test_parse_time(a1,a2,a3,b1,b2,b3) VALUES
		("2014-09-08 17:51:04.123456", "2014-09-08 17:51:04.123456", "2014-09-08 17:51:04.123456",
		"2014-09-08 17:51:04.123456","2014-09-08 17:51:04.123456","2014-09-08 17:51:04.123456"),
		("0000-00-00 00:00:00.000000", "0000-00-00 00:00:00.000000", "0000-00-00 00:00:00.000000",
		"0000-00-00 00:00:00.000000", "0000-00-00 00:00:00.000000", "0000-00-00 00:00:00.000000"),
		("2014-09-08 17:51:04.000456", "2014-09-08 17:51:04.000456", "2014-09-08 17:51:04.000456",
		"2014-09-08 17:51:04.000456","2014-09-08 17:51:04.000456","2014-09-08 17:51:04.000456")`,
		`delete from test_parse_time where id > 0`)
	t.SetFlashback(true)
	// Rollback of the DELETE restores each row with the column's declared
	// precision (truncated, not rounded, to 0/3/6 fractional digits).
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_parse_time` WHERE `id`=1;",
		"DELETE FROM `test`.`test_parse_time` WHERE `id`=2;",
		"DELETE FROM `test`.`test_parse_time` WHERE `id`=3;",
		"INSERT INTO `test`.`test_parse_time`(`id`,`a1`,`a2`,`a3`,`b1`,`b2`,`b3`) VALUES(1,'2014-09-08 17:51:04','2014-09-08 17:51:04.123','2014-09-08 17:51:04.123456','2014-09-08 17:51:04','2014-09-08 17:51:04.123','2014-09-08 17:51:04.123456');",
		"INSERT INTO `test`.`test_parse_time`(`id`,`a1`,`a2`,`a3`,`b1`,`b2`,`b3`) VALUES(2,'0000-00-00 00:00:00','0000-00-00 00:00:00.000','0000-00-00 00:00:00.000000','0000-00-00 00:00:00','0000-00-00 00:00:00.000','0000-00-00 00:00:00.000000');",
		"INSERT INTO `test`.`test_parse_time`(`id`,`a1`,`a2`,`a3`,`b1`,`b2`,`b3`) VALUES(3,'2014-09-08 17:51:04','2014-09-08 17:51:04.000','2014-09-08 17:51:04.000456','2014-09-08 17:51:04','2014-09-08 17:51:04.000','2014-09-08 17:51:04.000456');",
	)
}
// TestBinlogRowImageMinimal verifies UPDATE generation when the server logs
// only changed columns (binlog_row_image = MINIMAL): absent columns appear as
// NULL in full output, and MinimalUpdate restricts output to changed columns.
// NOTE(review): relies on test_replication already existing — presumably
// created by setupTest or a previously-run test; confirm ordering assumptions.
func (t *testParserSuite) TestBinlogRowImageMinimal(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	id := 100
	t.testExecute(c, `RESET MASTER;`,
		"SET SESSION binlog_row_image = 'MINIMAL'",
		fmt.Sprintf(`INSERT INTO test_replication (id, str, f, i, bb, de) VALUES (%d, "4", -3.14, 100, "abc", -45635.64)`, id),
		fmt.Sprintf(`UPDATE test_replication SET f = -12.14, de = 555.34 WHERE id = %d`, id),
		fmt.Sprintf(`DELETE FROM test_replication WHERE id = %d`, id))
	t.SetSQLType("update")
	// Full row image output: unlogged columns become NULL.
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `id`=NULL, `str`=NULL, `f`=-12.14, `d`=NULL, `de`=555.34, `i`=NULL, `bi`=NULL, `e`=NULL, `b`=NULL, `y`=NULL, `da`=NULL, `ts`=NULL, `dt`=NULL, `tm`=NULL, `t`=NULL, `bb`=NULL, `se`=NULL WHERE `id`=100;")
	t.SetFlashback(true)
	t.SetMinimalUpdate(true)
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `id`=100, `f`=NULL, `de`=NULL WHERE `id` IS NULL;",
	)
	t.SetFlashback(false)
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `id`=NULL, `f`=-12.14, `de`=555.34 WHERE `id`=100;",
	)
	// Restore the default row image for subsequent tests.
	t.testExecute(c, "SET SESSION binlog_row_image = 'FULL'")
}
// TestMinimalUpdate verifies the MinimalUpdate option: generated UPDATEs list
// only the columns whose values changed, in both forward and flashback mode.
func (t *testParserSuite) TestMinimalUpdate(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	id := 100
	t.testExecute(c, `RESET MASTER;`,
		fmt.Sprintf(`INSERT INTO test_replication (id, str, f, i, bb, de) VALUES (%d, "4", -3.14, 100, "abc", -45635.64)`, id),
		fmt.Sprintf(`UPDATE test_replication SET f = -12.14, de = 555.34 WHERE id = %d`, id),
		fmt.Sprintf(`UPDATE test_replication SET str=null WHERE id = %d`, id),
		fmt.Sprintf(`DELETE FROM test_replication WHERE id = %d`, id))
	t.SetSQLType("update")
	// Default (full) output lists every column.
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `id`=100, `str`='4', `f`=-12.14, `d`=NULL, `de`=555.34, `i`=100, `bi`=NULL, `e`=NULL, `b`=NULL, `y`=NULL, `da`=NULL, `ts`=NULL, `dt`=NULL, `tm`=NULL, `t`=NULL, `bb`='abc', `se`=NULL WHERE `id`=100;",
		"UPDATE `test`.`test_replication` SET `id`=100, `str`=NULL, `f`=-12.14, `d`=NULL, `de`=555.34, `i`=100, `bi`=NULL, `e`=NULL, `b`=NULL, `y`=NULL, `da`=NULL, `ts`=NULL, `dt`=NULL, `tm`=NULL, `t`=NULL, `bb`='abc', `se`=NULL WHERE `id`=100;")
	t.SetFlashback(true)
	t.SetMinimalUpdate(true)
	// Flashback + minimal: only the changed columns, restored to old values.
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `f`=-3.14, `de`=-45635.64 WHERE `id`=100;",
		"UPDATE `test`.`test_replication` SET `str`='4' WHERE `id`=100;",
	)
	t.SetFlashback(false)
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `f`=-12.14, `de`=555.34 WHERE `id`=100;",
		"UPDATE `test`.`test_replication` SET `str`=NULL WHERE `id`=100;",
	)
}
// TestFieldGenerated verifies SQL generation for a table with generated
// columns (test_generated): generated columns are excluded from the output.
func (t *testParserSuite) TestFieldGenerated(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	id := 1
	t.testExecute(c, `RESET MASTER;`,
		`INSERT INTO test_generated(id,price,number) VALUES (1,1,1),(2,3,4)`,
		fmt.Sprintf(`UPDATE test_generated SET price = 10, number = 20 WHERE id = %d`, id),
		fmt.Sprintf(`delete from test_generated WHERE id = %d`, id),
	)
	// t.SetSQLType("insert")
	t.SetSQLType("insert,update,delete")
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_generated`(`id`,`price`,`number`) VALUES(1,1,1);", "INSERT INTO `test`.`test_generated`(`id`,`price`,`number`) VALUES(2,3,4);", "UPDATE `test`.`test_generated` SET `id`=1, `price`=10, `number`=20 WHERE `id`=1;",
		"DELETE FROM `test`.`test_generated` WHERE `id`=1;",
	)
	// Flashback reverses each statement in binlog order.
	t.SetFlashback(true)
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_generated` WHERE `id`=1;",
		"DELETE FROM `test`.`test_generated` WHERE `id`=2;",
		"UPDATE `test`.`test_generated` SET `id`=1, `price`=1, `number`=1 WHERE `id`=1;",
		"INSERT INTO `test`.`test_generated`(`id`,`price`,`number`) VALUES(1,10,20);",
	)
}
// TestTextMax verifies parsing of a very large LONGTEXT value, ensuring long
// row events (spanning multiple binlog packets) round-trip intact.
func (t *testParserSuite) TestTextMax(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	id := 100
	// 10 MiB payload — well beyond the 64 KB TEXT limit, hence LONGTEXT below.
	value := strings.Repeat("a", 1024*1024*10)
	t.testExecute(c, `RESET MASTER;`,
		`DROP TABLE IF EXISTS test_long_text`,
		`CREATE TABLE test_long_text (
			id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
			c1 longtext,
			PRIMARY KEY (id)
		) ENGINE=InnoDB`,
		fmt.Sprintf(`INSERT INTO test_long_text (id, c1) VALUES (%d, "")`, id),
		fmt.Sprintf(`UPDATE test_long_text SET c1 = '%s' WHERE id = %d`, value, id),
		fmt.Sprintf(`DELETE FROM test_long_text WHERE id = %d`, id))
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_long_text`(`id`,`c1`) VALUES(100,'');",
		fmt.Sprintf("UPDATE `test`.`test_long_text` SET `id`=100, `c1`='%s' WHERE `id`=100;", value),
		"DELETE FROM `test`.`test_long_text` WHERE `id`=100;",
	)
	t.SetFlashback(true)
	t.SetMinimalUpdate(true)
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_long_text` WHERE `id`=100;",
		"UPDATE `test`.`test_long_text` SET `c1`='' WHERE `id`=100;",
		fmt.Sprintf("INSERT INTO `test`.`test_long_text`(`id`,`c1`) VALUES(100,'%s');", value),
	)
}
// TestUpdate2Null verifies UPDATEs that set many columns to NULL, in full,
// minimal-flashback and minimal-forward modes.
func (t *testParserSuite) TestUpdate2Null(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	id := 100
	t.testExecute(c, `RESET MASTER;`,
		fmt.Sprintf(`INSERT INTO test_replication (id,str, f, i, e, b, y, da, ts, dt, tm, de, t, bb, se)
			VALUES (%d,"3", -3.14, 10, "e1", 0b0011, 1985,
			"2012-05-07", "2012-05-07 14:01:01", "2012-05-07 14:01:01",
			"14:01:01", -45363.64, "abc", "12345", "a,b")`, id),
		fmt.Sprintf(`UPDATE test_replication SET str = null,f = null,d = null,de = null,i = null,bi = null,e = null,b = null,y = null,da = null,ts = null,dt = null,tm = null,t = null,bb = null WHERE id = %d`, id),
		fmt.Sprintf(`DELETE FROM test_replication WHERE id = %d`, id))
	t.SetSQLType("update")
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `id`=100, `str`=NULL, `f`=NULL, `d`=NULL, `de`=NULL, `i`=NULL, `bi`=NULL, `e`=NULL, `b`=NULL, `y`=NULL, `da`=NULL, `ts`=NULL, `dt`=NULL, `tm`=NULL, `t`=NULL, `bb`=NULL, `se`=3 WHERE `id`=100;")
	t.SetFlashback(true)
	t.SetMinimalUpdate(true)
	// Minimal flashback restores only the columns that were nulled out.
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `str`='3', `f`=-3.14, `de`=-45363.64, `i`=10, `e`=1, `b`=3, `y`=1985, `da`='2012-05-07', `ts`='2012-05-07 14:01:01', `dt`='2012-05-07 14:01:01', `tm`='14:01:01', `t`='abc', `bb`='12345' WHERE `id`=100;",
	)
	t.SetFlashback(false)
	t.checkBinlog(c,
		"UPDATE `test`.`test_replication` SET `str`=NULL, `f`=NULL, `de`=NULL, `i`=NULL, `e`=NULL, `b`=NULL, `y`=NULL, `da`=NULL, `ts`=NULL, `dt`=NULL, `tm`=NULL, `t`=NULL, `bb`=NULL WHERE `id`=100;",
	)
}
// TestRemovePrimary verifies the RemovePrimary option: the primary-key column
// is dropped from generated INSERT column lists (but kept in DELETE WHERE).
func (t *testParserSuite) TestRemovePrimary(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	id := 100
	t.testExecute(c, `RESET MASTER;`,
		fmt.Sprintf(`INSERT INTO test_replication (id, str, f, i, bb, de) VALUES (%d, "4", -3.14, 100, "abc", -45635.64)`, id),
		fmt.Sprintf(`DELETE FROM test_replication WHERE id = %d`, id))
	t.checkBinlog(c, "INSERT INTO `test`.`test_replication`(`id`,`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES(100,'4',-3.14,NULL,-45635.64,100,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'abc',NULL);",
		"DELETE FROM `test`.`test_replication` WHERE `id`=100;")
	t.SetFlashback(true)
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_replication` WHERE `id`=100;",
		"INSERT INTO `test`.`test_replication`(`id`,`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES(100,'4',-3.14,NULL,-45635.64,100,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'abc',NULL);",
	)
	t.SetFlashback(false)
	t.SetRemovePrimary(true)
	// With RemovePrimary, `id` disappears from the INSERT column list.
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_replication`(`str`,`f`,`d`,`de`,`i`,`bi`,`e`,`b`,`y`,`da`,`ts`,`dt`,`tm`,`t`,`bb`,`se`) VALUES('4',-3.14,NULL,-45635.64,100,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'abc',NULL);",
		"DELETE FROM `test`.`test_replication` WHERE `id`=100;")
}
// TestThreadID verifies filtering by connection thread ID: after executing
// statements, disconnecting and reconnecting (new thread ID), only events from
// the recorded thread are returned when ThreadID is set.
func (t *testParserSuite) TestThreadID(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	// NOTE(review): DROP TABLE appears twice below — presumably a copy-paste
	// remnant; harmless because of IF EXISTS.
	t.testExecute(c, `RESET MASTER;`,
		`DROP TABLE IF EXISTS test_simple`,
		`DROP TABLE IF EXISTS test_simple`,
		`CREATE TABLE test_simple (
			id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
			c1 varchar(100),
			c2 int,
			PRIMARY KEY (id)
		) ENGINE=InnoDB`)
	t.testExecute(c,
		`INSERT INTO test_simple (c1, c2) VALUES ('test',1)`,
		`DELETE FROM test_simple WHERE id > 0`)
	// Capture the current connection's thread ID, then force a reconnect so
	// the following statements run under a different thread ID.
	threadID := t.getThreadID(c)
	if t.c != nil {
		t.c.Close()
		t.c = nil
	}
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c,
		`INSERT INTO test_simple (c1, c2) VALUES ('test2',2)`,
		`DELETE FROM test_simple WHERE id > 0`)
	// Without a thread-ID filter, all events are returned.
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'test',1);",
		"DELETE FROM `test`.`test_simple` WHERE `id`=1;",
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(2,'test2',2);",
		"DELETE FROM `test`.`test_simple` WHERE `id`=2;")
	// With the filter, only the first connection's events are returned.
	t.config.ThreadID = threadID
	t.localConfig.ThreadID = t.config.ThreadID
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'test',1);",
		"DELETE FROM `test`.`test_simple` WHERE `id`=1;")
	t.SetFlashback(true)
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_simple` WHERE `id`=1;",
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'test',1);",
	)
}
// TestInsert verifies the MinimalInsert option: when enabled, rows from one
// multi-row INSERT event are merged into a single multi-values INSERT.
func (t *testParserSuite) TestInsert(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c, `RESET MASTER;`,
		`DROP TABLE IF EXISTS test_simple`,
		`CREATE TABLE test_simple (
			id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
			c1 varchar(100),
			c2 int,
			PRIMARY KEY (id)
		) ENGINE=InnoDB`)
	t.testExecute(c,
		`INSERT INTO test_simple (c1, c2) VALUES ('a1',1),('a2',2),('a3',3),('a4',4),('a5',5)`,
		`DELETE FROM test_simple WHERE id > 0`)
	// MinimalInsert off: one INSERT statement per row.
	t.config.MinimalInsert = false
	t.localConfig.MinimalInsert = false
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'a1',1);",
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(2,'a2',2);",
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(3,'a3',3);",
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(4,'a4',4);",
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(5,'a5',5);",
		"DELETE FROM `test`.`test_simple` WHERE `id`=1;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=2;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=3;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=4;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=5;")
	// MinimalInsert on: all rows merged into one multi-values INSERT.
	t.config.MinimalInsert = true
	t.localConfig.MinimalInsert = true
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'a1',1),(2,'a2',2),(3,'a3',3),(4,'a4',4),(5,'a5',5);",
		"DELETE FROM `test`.`test_simple` WHERE `id`=1;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=2;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=3;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=4;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=5;")
	t.SetFlashback(true)
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_simple` WHERE `id`=1;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=2;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=3;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=4;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=5;",
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'a1',1),(2,'a2',2),(3,'a3',3),(4,'a4',4),(5,'a5',5);",
	)
}
// TestGTID verifies parsing restricted to specific GTID sets, including
// rejection of malformed GTID strings and single/range/multi-set filters.
// (The original header comment was a copy-paste of TestThreadID's.)
func (t *testParserSuite) TestGTID(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c, `RESET MASTER;`,
		`DROP TABLE IF EXISTS test_simple`,
		`CREATE TABLE test_simple (
			id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
			c1 varchar(100),
			c2 int,
			PRIMARY KEY (id)
		) ENGINE=InnoDB`)
	t.testExecute(c,
		`INSERT INTO test_simple (c1, c2) VALUES ('test',1)`,
		`DELETE FROM test_simple WHERE id > 0`)
	t.testExecute(c,
		`INSERT INTO test_simple (c1, c2) VALUES ('test2',2)`,
		`DELETE FROM test_simple WHERE id > 0`)
	uuid := t.getServerUUID(c)
	// ---- invalid GTID formats must be rejected ----
	t.SetIncludeGtids("123")
	_, err := core.NewBinlogParser(context.Background(), &t.config)
	c.Assert(err, NotNil)
	c.Assert(err.Error(), Equals, "错误GTID格式!正确格式为uuid:编号[-编号],多个时以逗号分隔")
	t.SetIncludeGtids(uuid)
	_, err = core.NewBinlogParser(context.Background(), &t.localConfig)
	c.Assert(err, NotNil)
	c.Assert(err.Error(), Equals, "错误GTID格式!正确格式为uuid:编号[-编号],多个时以逗号分隔")
	t.SetIncludeGtids(uuid + ":abc")
	_, err = core.NewBinlogParser(context.Background(), &t.localConfig)
	c.Assert(err, NotNil)
	c.Assert(err.Error(), Equals, "GTID解析失败!(strconv.ParseInt: parsing \"abc\": invalid syntax)")
	// ------ end ------
	// Single transaction number.
	t.SetIncludeGtids(uuid + ":3")
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'test',1);")
	// Transaction range.
	t.SetIncludeGtids(uuid + ":3-4")
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'test',1);",
		"DELETE FROM `test`.`test_simple` WHERE `id`=1;")
	t.SetIncludeGtids(uuid + ":5")
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(2,'test2',2);")
	// Multiple comma-separated GTID sets.
	t.SetIncludeGtids(uuid + ":3-4," + uuid + ":6-7")
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_simple`(`id`,`c1`,`c2`) VALUES(1,'test',1);",
		"DELETE FROM `test`.`test_simple` WHERE `id`=1;",
		"DELETE FROM `test`.`test_simple` WHERE `id`=2;",
	)
}
// TestJson verifies flashback output for JSON columns; JSON values are emitted
// in compact form with escaped double quotes.
func (t *testParserSuite) TestJson(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c, `RESET MASTER;`,
		`DROP TABLE IF EXISTS test_json`,
		`CREATE TABLE test_json (
			id BIGINT(64) UNSIGNED NOT NULL AUTO_INCREMENT,
			c1 JSON,
			c2 DECIMAL(10, 0),
			PRIMARY KEY (id)
		) ENGINE=InnoDB`)
	t.testExecute(c,
		`INSERT INTO test_json (c2) VALUES (1)`,
		`INSERT INTO test_json (c1, c2) VALUES ('{"key1": "value1", "key2": "value2"}', 1)`,
		`update test_json set c1 = '{"key1": "value123"}',c2=2000 where id =2`,
		`delete from test_json where id > 0`)
	t.SetFlashback(true)
	t.checkBinlog(c,
		"DELETE FROM `test`.`test_json` WHERE `id`=1;",
		"DELETE FROM `test`.`test_json` WHERE `id`=2;",
		"UPDATE `test`.`test_json` SET `id`=2, `c1`='{\\\"key1\\\":\\\"value1\\\",\\\"key2\\\":\\\"value2\\\"}', `c2`=1 WHERE `id`=2;",
		"INSERT INTO `test`.`test_json`(`id`,`c1`,`c2`) VALUES(1,NULL,1);",
		"INSERT INTO `test`.`test_json`(`id`,`c1`,`c2`) VALUES(2,'{\\\"key1\\\":\\\"value123\\\"}',2000);")
}
// TestJsonV2 exercises the full range of MySQL JSON binary encodings: scalars,
// signed/unsigned integers at type-width boundaries, doubles, objects, arrays,
// opaque temporal/geometry/binary casts, and a large-key object.
func (t *testParserSuite) TestJsonV2(c *C) {
	t.setupTest(c, mysql.MySQLFlavor)
	t.testExecute(c, `RESET MASTER;`,
		"DROP TABLE IF EXISTS test_json_v2",
		`CREATE TABLE test_json_v2 (
			id INT,
			c JSON,
			PRIMARY KEY (id)
		) ENGINE=InnoDB`)
	tbls := []string{
		`INSERT INTO test_json_v2 VALUES (0, NULL)`,
		`INSERT INTO test_json_v2 VALUES (1, '{\"a\": 2}')`,
		`INSERT INTO test_json_v2 VALUES (2, '[1,2]')`,
		`INSERT INTO test_json_v2 VALUES (3, '{\"a\":\"b\", \"c\":\"d\",\"ab\":\"abc\", \"bc\": [\"x\", \"y\"]}')`,
		`INSERT INTO test_json_v2 VALUES (4, '[\"here\", [\"I\", \"am\"], \"!!!\"]')`,
		`INSERT INTO test_json_v2 VALUES (5, '\"scalar string\"')`,
		`INSERT INTO test_json_v2 VALUES (6, 'true')`,
		`INSERT INTO test_json_v2 VALUES (7, 'false')`,
		`INSERT INTO test_json_v2 VALUES (8, 'null')`,
		`INSERT INTO test_json_v2 VALUES (9, '-1')`,
		`INSERT INTO test_json_v2 VALUES (10, CAST(CAST(1 AS UNSIGNED) AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (11, '32767')`,
		`INSERT INTO test_json_v2 VALUES (12, '32768')`,
		`INSERT INTO test_json_v2 VALUES (13, '-32768')`,
		`INSERT INTO test_json_v2 VALUES (14, '-32769')`,
		`INSERT INTO test_json_v2 VALUES (15, '2147483647')`,
		`INSERT INTO test_json_v2 VALUES (16, '2147483648')`,
		`INSERT INTO test_json_v2 VALUES (17, '-2147483648')`,
		`INSERT INTO test_json_v2 VALUES (18, '-2147483649')`,
		`INSERT INTO test_json_v2 VALUES (19, '18446744073709551615')`,
		`INSERT INTO test_json_v2 VALUES (20, '18446744073709551616')`,
		`INSERT INTO test_json_v2 VALUES (21, '3.14')`,
		`INSERT INTO test_json_v2 VALUES (22, '{}')`,
		`INSERT INTO test_json_v2 VALUES (23, '[]')`,
		`INSERT INTO test_json_v2 VALUES (24, CAST(CAST('2015-01-15 23:24:25' AS DATETIME) AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (25, CAST(CAST('23:24:25' AS TIME) AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (125, CAST(CAST('23:24:25.12' AS TIME(3)) AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (225, CAST(CAST('23:24:25.0237' AS TIME(3)) AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (26, CAST(CAST('2015-01-15' AS DATE) AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (27, CAST(TIMESTAMP'2015-01-15 23:24:25' AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (127, CAST(TIMESTAMP'2015-01-15 23:24:25.12' AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (227, CAST(TIMESTAMP'2015-01-15 23:24:25.0237' AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (327, CAST(UNIX_TIMESTAMP('2015-01-15 23:24:25') AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (28, CAST(ST_GeomFromText('POINT(1 1)') AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (29, CAST('[]' AS CHAR CHARACTER SET 'ascii'))`,
		// TODO: 30 and 31 are BIT type from JSON_TYPE, may support later.
		`INSERT INTO test_json_v2 VALUES (30, CAST(x'cafe' AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (31, CAST(x'cafebabe' AS JSON))`,
		`INSERT INTO test_json_v2 VALUES (100, CONCAT('{\"', REPEAT('a', 2 * 100 - 1), '\":123}'))`,
	}
	t.testExecute(c, tbls...)
	// Note: id 20 loses precision (value exceeds uint64, stored as double);
	// ids 30/31 decode non-UTF-8 opaque bytes as replacement characters.
	t.checkBinlog(c,
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(0,NULL);",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(1,'{\\\"a\\\":2}');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(2,'[1,2]');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(3,'{\\\"a\\\":\\\"b\\\",\\\"ab\\\":\\\"abc\\\",\\\"bc\\\":[\\\"x\\\",\\\"y\\\"],\\\"c\\\":\\\"d\\\"}');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(4,'[\\\"here\\\",[\\\"I\\\",\\\"am\\\"],\\\"!!!\\\"]');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(5,'\\\"scalar string\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(6,'true');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(7,'false');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(8,'null');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(9,'-1');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(10,'1');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(11,'32767');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(12,'32768');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(13,'-32768');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(14,'-32769');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(15,'2147483647');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(16,'2147483648');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(17,'-2147483648');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(18,'-2147483649');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(19,'18446744073709551615');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(20,'18446744073709552000');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(21,'3.14');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(22,'{}');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(23,'[]');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(24,'\\\"2015-01-15 23:24:25.000000\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(25,'\\\"23:24:25.000000\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(125,'\\\"23:24:25.120000\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(225,'\\\"23:24:25.024000\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(26,'\\\"2015-01-15 00:00:00.000000\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(27,'\\\"2015-01-15 23:24:25.000000\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(127,'\\\"2015-01-15 23:24:25.120000\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(227,'\\\"2015-01-15 23:24:25.023700\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(327,'1421335465');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(28,'{\\\"coordinates\\\":[1,1],\\\"type\\\":\\\"Point\\\"}');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(29,'[]');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(30,'\\\"\\\\ufffd\\\\ufffd\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(31,'\\\"\\\\ufffd\\\\ufffd\\\\ufffd\\\\ufffd\\\"');",
		"INSERT INTO `test`.`test_json_v2`(`id`,`c`) VALUES(100,'{\\\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\\\":123}');")
	// Clean up so later tests are unaffected.
	t.testExecute(c, "delete from test_json_v2 where id >0")
}
// initTableSchema writes the CREATE TABLE statements for the requested tables
// (or for every known table when none are named) to tableSchemaFile,
// joined by ";\n". Unknown table names are silently ignored.
func (t *testParserSuite) initTableSchema(tableName ...string) {
	var schemas []string
	if len(tableName) == 0 {
		// No filter: dump every known table definition.
		for _, ddl := range allTables {
			schemas = append(schemas, ddl)
		}
	} else {
		// Keep only the definitions of the named tables.
		for _, name := range tableName {
			if ddl, found := allTables[name]; found {
				schemas = append(schemas, ddl)
			}
		}
	}
	if err := ioutil.WriteFile(tableSchemaFile, []byte(strings.Join(schemas, ";\n")), 0666); err != nil {
		log.Fatal(err)
	}
}
// createTables creates every known test table and empties it via TRUNCATE.
func (t *testParserSuite) createTables(c *C) {
	stmts := make([]string, 0, len(allTables)*2)
	for name, ddl := range allTables {
		stmts = append(stmts, ddl, fmt.Sprintf("truncate table `%s`;", name))
	}
	t.testExecute(c, stmts...)
}
func TestComputePercent(t *testing.T) {
start := mysql.Position{
Name: "001",
Pos: 0,
}
stop := mysql.Position{
Name: "001",
Pos: 0,
}
current := mysql.Position{
Name: "001",
Pos: 100,
}
master := core.MasterStatus{
File: "002",
Position: 600,
}
binlogs := []core.MasterLog{
{
Name: "001",
Size: 1000,
},
{
Name: "002",
Size: 1000,
},
}
pct := core.ComputePercent(start, stop, current,
master, binlogs)
expected := 10
if pct != expected {
t.Errorf("got %v expected %v", pct, expected)
}
stop = mysql.Position{
Name: "002",
Pos: 0,
}
pct = core.ComputePercent(start, stop, current,
master, binlogs)
expected = 5
if pct != expected {
t.Errorf("got %v expected %v", pct, expected)
}
stop = mysql.Position{
Name: "003",
Pos: 0,
}
pct = core.ComputePercent(start, stop, current,
master, binlogs)
expected = 5
if pct != expected {
t.Errorf("got %v expected %v", pct, expected)
}
start = mysql.Position{
Name: "001",
Pos: 200,
}
stop = mysql.Position{
Name: "",
Pos: 0,
}
current = mysql.Position{
Name: "002",
Pos: 400,
}
pct = core.ComputePercent(start, stop, current,
master, binlogs)
expected = 85
if pct != expected {
t.Errorf("got %v expected %v", pct, expected)
}
}
================================================
FILE: core/socket.go
================================================
// go-mysqlbinlog: a simple binlog tool to sync remote MySQL binlog.
// go-mysqlbinlog supports semi-sync mode like facebook mysqlbinlog.
// see http://yoshinorimatsunobu.blogspot.com/2014/04/semi-synchronous-replication-at-facebook.html
package core
// time go run mainRemote.go -start-time="2018-09-17 00:00:00" -stop-time="2018-09-25 00:00:00" -o=1.sql
import (
"fmt"
"github.com/imroc/req"
log "github.com/sirupsen/logrus"
)
// URL is the base address of the websocket push service; when empty,
// sendMsg silently skips delivery and reports success.
var URL string

// header is the default request header attached to every push request.
var header = req.Header{"Accept": "application/json"}
// func init() {
// cnf, err := ini.Load("../cnf/config.ini")
// if err != nil {
// log.Fatal().Err(err).Msg("加载配置文件失败!")
// return
// }
// addr := cnf.Section("Bingo").Key("socketAddr").String()
// URL = fmt.Sprintf("http://%s/socket", addr)
// req.SetTimeout(5 * time.Second)
// }
// 向客户端推送消息
func sendMsg(user string, event string, title string, text string, kwargs map[string]interface{}) bool {
if user == "" {
return true
}
if URL == "" {
return true
}
url := fmt.Sprintf("%s/room/%s", URL, user)
param := req.Param{
"event": event,
"title": title,
"content": text,
}
if kwargs != nil && len(kwargs) > 0 {
for k, v := range kwargs {
param[k] = v
}
}
r, err := req.Post(url, header, param)
if err != nil {
log.Error("请求websocket失败!")
log.Print(err)
return false
}
// r.ToJSON(&foo) // 响应体转成对象
// log.Printf("%+v", r) // 打印详细信息
resp := r.Response()
if resp.StatusCode == 200 {
return true
} else {
log.Error("请求websocket失败!")
log.Printf("%+v", r) // 打印详细信息
return false
}
}
================================================
FILE: core/time.go
================================================
package core
import (
"time"
)
// timeTrack is a disabled profiling hook, intended to be used as
// `defer timeTrack(time.Now(), "name")`. The body is intentionally commented
// out to silence timing output; the function is kept so existing call sites
// need not change.
func timeTrack(start time.Time, name string) {
	// elapsed := time.Since(start)
	// log.Printf("%s took %s", name, time.Since(start))
}
================================================
FILE: docs/test.md
================================================
# 效率测试
测试结果
mysqlbinlog > bingo2sql > binlog_rollback > binlog2sql
**说明:**
该测试结果可能存在的偏差如下:
- 输出格式略有差异
- 测试所在环境的状态变化
- binlog_rollback 额外输出了 binlog 分析等信息
### 测试数据准备
- 准备100w行基础数据
- 更新
- 删除
- 获取binlog位置
#### 创建测试表
```sql
use test;
reset master;
drop table if exists tt;
CREATE TABLE `tt` (
ID bigint unsigned auto_increment primary key,
`TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '',
`TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '',
`TABLE_NAME` varchar(64) NOT NULL DEFAULT '',
`COLUMN_NAME` varchar(64) NOT NULL DEFAULT '',
`ORDINAL_POSITION` bigint(21) unsigned NOT NULL DEFAULT '0',
`COLUMN_DEFAULT` longtext,
`IS_NULLABLE` varchar(3) NOT NULL DEFAULT '',
`DATA_TYPE` varchar(64) NOT NULL DEFAULT '',
`CHARACTER_MAXIMUM_LENGTH` bigint(21) unsigned DEFAULT NULL,
`CHARACTER_OCTET_LENGTH` bigint(21) unsigned DEFAULT NULL,
`NUMERIC_PRECISION` bigint(21) unsigned DEFAULT NULL,
`NUMERIC_SCALE` bigint(21) unsigned DEFAULT NULL,
`DATETIME_PRECISION` bigint(21) unsigned DEFAULT NULL,
`CHARACTER_SET_NAME` varchar(32) DEFAULT NULL,
`COLLATION_NAME` varchar(32) DEFAULT NULL,
`COLUMN_TYPE` longtext NOT NULL,
`COLUMN_KEY` varchar(3) NOT NULL DEFAULT '',
`EXTRA` varchar(30) NOT NULL DEFAULT '',
`PRIVILEGES` varchar(80) NOT NULL DEFAULT '',
`COLUMN_COMMENT` varchar(1024) NOT NULL DEFAULT '',
`GENERATION_EXPRESSION` longtext NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
```
#### 搭建测试数据
```sql
insert into tt(TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,CHARACTER_OCTET_LENGTH,NUMERIC_PRECISION,NUMERIC_SCALE,DATETIME_PRECISION,CHARACTER_SET_NAME,COLLATION_NAME,COLUMN_TYPE,COLUMN_KEY,EXTRA,PRIVILEGES,COLUMN_COMMENT,GENERATION_EXPRESSION)
select * from information_schema.columns limit 1000;
# 准备100w行基础数据
insert into tt(TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,CHARACTER_OCTET_LENGTH,NUMERIC_PRECISION,NUMERIC_SCALE,DATETIME_PRECISION,CHARACTER_SET_NAME,COLLATION_NAME,COLUMN_TYPE,COLUMN_KEY,EXTRA,PRIVILEGES,COLUMN_COMMENT,GENERATION_EXPRESSION)
select tt.TABLE_CATALOG,tt.TABLE_SCHEMA,tt.TABLE_NAME,tt.COLUMN_NAME,tt.ORDINAL_POSITION,tt.COLUMN_DEFAULT,tt.IS_NULLABLE,tt.DATA_TYPE,tt.CHARACTER_MAXIMUM_LENGTH,tt.CHARACTER_OCTET_LENGTH,tt.NUMERIC_PRECISION,tt.NUMERIC_SCALE,tt.DATETIME_PRECISION,tt.CHARACTER_SET_NAME,tt.COLLATION_NAME,tt.COLUMN_TYPE,tt.COLUMN_KEY,tt.EXTRA,tt.PRIVILEGES,tt.COLUMN_COMMENT,tt.GENERATION_EXPRESSION from tt,tt t1;
update tt set ORDINAL_POSITION=ORDINAL_POSITION+10 where id%5=0;
update tt set TABLE_NAME=concat(TABLE_NAME,"1"),COLLATION_NAME=concat(COLLATION_NAME,"2"),
COLUMN_COMMENT = TABLE_NAME where id%4=0;
delete from tt where id%2=0;
# 获取binlog位置以便设置解析参数
show BINARY LOGS;
```
Log_name | File_size
-----------| -----------
mysql-bin.000001 | 357491554
#### bingo2sql 测试脚本
```sh
for i in {1..10}; do
echo $i;
/usr/bin/time -f "real %E | user %U | sys %S | cpu %P" -a -o out.txt bin/bingo2sql -h 127.0.0.1 -P 3306 -u test -p test --start-file=mysql-bin.000001 --start-pos=4 --stop-file=mysql-bin.000001 --stop-pos=357490000 -o /tmp/binlog2/out.sql --max=0 --show-gtid=false --show-time=false;
done
cat out.txt;
```
执行结果
real | user | sys | cpu
---- | ---- | ---- | ----
real 0:13.49 | user 33.65 | sys 2.68 | cpu 269%
real 0:13.61 | user 34.21 | sys 2.75 | cpu 271%
real 0:13.42 | user 33.26 | sys 2.61 | cpu 267%
real 0:13.61 | user 33.99 | sys 2.64 | cpu 269%
real 0:13.85 | user 34.77 | sys 2.65 | cpu 270%
real 0:13.58 | user 33.58 | sys 2.62 | cpu 266%
real 0:13.61 | user 34.06 | sys 2.59 | cpu 269%
real 0:13.47 | user 33.39 | sys 2.65 | cpu 267%
real 0:11.68 | user 28.75 | sys 2.29 | cpu 265%
real 0:11.45 | user 28.68 | sys 2.11 | cpu 268%
#### binlog_rollback 测试脚本
```sh
rm -f out.txt;
for i in {1..10}; do
echo $i;
/usr/bin/time -f "real %E | user %U | sys %S | cpu %P" -a -o out.txt ./binlog_rollback -m repl -w 2sql -M mysql -t 4 -H 127.0.0.1 -P 3306 -u test -p test -sbin mysql-bin.000001 -spos 4 -ebin mysql-bin.000001 -epos 357490000 -o /tmp/binlog -dj=""
done
cat out.txt;
```
执行结果
real | user | sys | cpu
---- | ---- | ---- | ----
real 0:28.79 | user 68.74 | sys 8.46 | cpu 268%
real 0:27.35 | user 66.22 | sys 7.86 | cpu 270%
real 0:28.58 | user 69.29 | sys 8.31 | cpu 271%
real 0:29.19 | user 70.78 | sys 8.47 | cpu 271%
real 0:28.31 | user 68.91 | sys 8.06 | cpu 271%
real 0:28.67 | user 68.67 | sys 8.33 | cpu 268%
real 0:28.40 | user 68.92 | sys 7.73 | cpu 269%
real 0:28.44 | user 69.04 | sys 8.05 | cpu 271%
real 0:28.43 | user 68.97 | sys 7.94 | cpu 270%
real 0:28.87 | user 69.27 | sys 7.99 | cpu 267%
#### binlog2sql 测试脚本
```sh
rm -f out.txt;
for i in {1..10}; do
echo $i;
/usr/bin/time -f "real %E | user %U | sys %S | cpu %P" -a -o out.txt python binlog2sql.py -h127.0.0.1 -P3306 -utest -p'test' --start-file='mysql-bin.000001' --start-position=4 --stop-file='mysql-bin.000001' --stop-position=357490000 --only-dml > /tmp/binlog3/out.sql
done
cat out.txt;
```
执行结果
**无**
感兴趣的同学可以自行测试。
#### mysqlbinlog 测试脚本
```sh
rm -f out.txt;
for i in {1..10}; do
echo $i;
/usr/bin/time -f "real %E | user %U | sys %S | cpu %P" -a -o out.txt mysqlbinlog -h127.0.0.1 -P3306 -utest -p'test' --start-position=4 --stop-position=357490000 --base64-output=DECODE-ROWS -v -r /tmp/binlog4/out.sql /data/mysql/db_cmdb/blog/mysql-bin.000001
done
cat out.txt;
```
执行结果
real | user | sys | cpu
---- | ---- | ---- | ----
real 0:13.42 | user 11.43 | sys 1.96 | cpu 99%
real 0:13.29 | user 11.37 | sys 1.88 | cpu 99%
real 0:13.59 | user 11.54 | sys 1.94 | cpu 99%
real 0:13.17 | user 11.20 | sys 1.94 | cpu 99%
real 0:13.27 | user 11.37 | sys 1.83 | cpu 99%
real 0:13.76 | user 11.62 | sys 2.04 | cpu 99%
real 0:13.98 | user 12.01 | sys 1.93 | cpu 99%
real 0:14.05 | user 11.86 | sys 1.95 | cpu 98%
real 0:13.76 | user 11.67 | sys 1.96 | cpu 99%
real 0:13.79 | user 11.72 | sys 2.01 | cpu 99%
================================================
FILE: go.mod
================================================
module github.com/hanchuanchuan/bingo2sql
replace github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.2.0
go 1.18
// replace golang.org/x/sys => github.com/golang/sys v0.0.0-20200201011859-915c9c3d4ccf
require (
github.com/go-mysql-org/go-mysql v1.6.0
github.com/go-sql-driver/mysql v1.5.0
github.com/hanchuanchuan/goInception v1.2.0
github.com/imroc/req v0.3.0
github.com/jinzhu/gorm v1.9.12
github.com/jinzhu/now v1.1.1
github.com/labstack/echo/v4 v4.1.14
github.com/mholt/archiver/v3 v3.3.0
github.com/mitchellh/go-homedir v1.1.0
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8
github.com/pkg/profile v1.4.0
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24
github.com/sirupsen/logrus v1.4.2
github.com/spf13/cobra v0.0.6
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.6.2
)
require github.com/etcd-io/gofail v0.0.0-20180808172546-51ce9a71510a // indirect
require (
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6 // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/frankban/quicktest v1.7.2 // indirect
github.com/fsnotify/fsnotify v1.4.7 // indirect
github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721 // indirect
github.com/golang/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/google/go-cmp v0.3.1 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9 // indirect
github.com/klauspost/compress v1.9.7 // indirect
github.com/klauspost/pgzip v1.2.1 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.1 // indirect
github.com/labstack/gommon v0.3.0 // indirect
github.com/magiconair/properties v1.8.1 // indirect
github.com/mattn/go-colorable v0.1.4 // indirect
github.com/mattn/go-isatty v0.0.11 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/nwaples/rardecode v1.0.0 // indirect
github.com/pelletier/go-toml v1.6.0 // indirect
github.com/pierrec/lz4 v2.4.1+incompatible // indirect
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726
github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect
github.com/spf13/afero v1.2.2 // indirect
github.com/spf13/cast v1.3.1 // indirect
// github.com/spf13/cobra v0.0.6
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/ulikunitz/xz v0.5.6 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.1.0 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/youtube/vitess v2.1.1+incompatible // indirect
go.uber.org/atomic v1.7.0 // indirect
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
golang.org/x/net v0.0.0-20201021035429-f5854403a974 // indirect
golang.org/x/sys v0.0.0-20220624220833-87e55d714810 // indirect
golang.org/x/text v0.3.6 // indirect
gopkg.in/ini.v1 v1.52.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.2.8 // indirect
)
================================================
FILE: go.sum
================================================
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/CorgiMan/json2 v0.0.0-20150213135156-e72957aba209 h1:rRZPHlNlREFwuEtpYikMNZPs4l5g6zic54l2XDAK4ws=
github.com/CorgiMan/json2 v0.0.0-20150213135156-e72957aba209/go.mod h1:VwmVPvMIZlx23Q7F1umyYmkhNDqf6WQKfMUhGEdVcLA=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6 h1:bZ28Hqta7TFAK3Q08CMvv8y3/8ATaEqv2nGoc6yff6c=
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/blacktear23/go-proxyprotocol v0.0.0-20171102103907-62e368e1c470/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
github.com/cznic/mathutil v0.0.0-20181021201202-eba54fb065b7/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
github.com/cznic/parser v0.0.0-20181122101858-d773202d5b1f/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4=
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
github.com/cznic/y v0.0.0-20181122101901-b05e8c2e8d7b/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/etcd-io/gofail v0.0.0-20180808172546-51ce9a71510a h1:QNEenQIsGDEEfFNSnN+h6hE1OwnHqTg7Dl9gEk1Cko4=
github.com/etcd-io/gofail v0.0.0-20180808172546-51ce9a71510a/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE=
github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-mysql-org/go-mysql v1.6.0 h1:19B5fojzZcri/1wj9G/1+ws8RJ3N6rJs2X5c/+kBLuQ=
github.com/go-mysql-org/go-mysql v1.6.0/go.mod h1:GX0clmylJLdZEYAojPCDTCvwZxbTBrke93dV55715u0=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721 h1:KRMr9A3qfbVM7iV/WcLY/rL5LICqwMHLhwRXKu99fXw=
github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hanchuanchuan/gh-ost v1.0.49-0.20200114083508-62a578b91654 h1:PRc0l9OaUyPNPoMo0nruucADDwpFZkVvxMbGCZXpjFg=
github.com/hanchuanchuan/gh-ost v1.0.49-0.20200114083508-62a578b91654/go.mod h1:9NockcUOKVL+JLrnCsXtpRAuPseKoMVqfDmn251PbG0=
github.com/hanchuanchuan/go-mysql v0.0.0-20200114082439-6d0d8d3a982e h1:IjLlaPxsWKkYuePu8Ed2YbHLuBAuKLW6lclhmeq2A6I=
github.com/hanchuanchuan/go-mysql v0.0.0-20200114082439-6d0d8d3a982e/go.mod h1:HJV3Ej7p/Ck/htb5F5VwYAHDL02jA4J6uAufArtmsos=
github.com/hanchuanchuan/goInception v1.2.0 h1:uAai3jAT8D7zwE6YhmMDaJg+3eTPfQauYiNMq9xtztY=
github.com/hanchuanchuan/goInception v1.2.0/go.mod h1:idDo5pLO0j10hNjc2xf1fht01QEbUNTTyPVHGdDp9Z4=
github.com/hanchuanchuan/golib v0.0.0-20200113085747-47643bc243f1 h1:h4yTn7Lu9roQdSZezhf8BwMi3R/KWikL6xV02rmZj6Q=
github.com/hanchuanchuan/golib v0.0.0-20200113085747-47643bc243f1/go.mod h1:6V4x6V71pCU4o9UfPmemdrU9sEhSF6AmXt+PiFr8x4Q=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imroc/req v0.2.3/go.mod h1:J9FsaNHDTIVyW/b5r6/Df5qKEEEq2WzZKIgKSajd1AE=
github.com/imroc/req v0.3.0 h1:3EioagmlSG+z+KySToa+Ylo3pTFZs+jh3Brl7ngU12U=
github.com/imroc/req v0.3.0/go.mod h1:F+NZ+2EFSo6EFXdeIbpfE9hcC233id70kf0byW97Caw=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jinzhu/gorm v1.9.2/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
github.com/jinzhu/gorm v1.9.12 h1:Drgk1clyWT9t9ERbzHza6Mj/8FY/CqMyVzOiHviMo6Q=
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v0.0.0-20181116074157-8ec929ed50c3/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc=
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E=
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9 h1:hJix6idebFclqlfZCHE7EUX7uqLCyb70nHNHH1XKGBg=
github.com/juju/errors v0.0.0-20190930114154-d42613fe1ab9/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug26aA=
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/labstack/echo/v4 v4.1.14 h1:h8XP66UfB3tUm+L3QPw7tmwAu3pJaA/nyfHPCcz46ic=
github.com/labstack/echo/v4 v4.1.14/go.mod h1:Q5KZ1vD3V5FEzjM79hjwVrC3ABr7F5IdM23bXQMRDGg=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/labstack/gommon v0.3.0/go.mod h1:M
gitextract_sccp6i0i/
├── .editorconfig
├── .gitignore
├── Makefile
├── README.md
├── circle.yml
├── cmd/
│ ├── bingo2sql/
│ │ └── main.go
│ ├── local.go
│ ├── remote.go
│ ├── root.go
│ ├── server.go
│ └── stats.go
├── cnf/
│ └── config.ini
├── core/
│ ├── parseFile.go
│ ├── parser.go
│ ├── parserV2.go
│ ├── parser_stats.go
│ ├── parser_test.go
│ ├── socket.go
│ └── time.go
├── docs/
│ └── test.md
├── go.mod
├── go.sum
├── main.go
├── parse/
│ ├── bingo2sql.go
│ └── log.go
└── utils/
└── uuid/
├── codec.go
├── generator.go
├── sql.go
└── uuid.go
SYMBOL INDEX (226 symbols across 20 files)
FILE: cmd/bingo2sql/main.go
type ParseInfo (line 20) | type ParseInfo struct
function main (line 87) | func main() {
function runParse (line 126) | func runParse() {
function flagBoolean (line 188) | func flagBoolean(name string, shorthand string, defaultVal bool, usage s...
FILE: cmd/local.go
function init (line 64) | func init() {
FILE: cmd/remote.go
function init (line 56) | func init() {
FILE: cmd/root.go
function Execute (line 67) | func Execute() {
function init (line 76) | func init() {
function initConfig (line 108) | func initConfig() {
function flagBoolean (line 133) | func flagBoolean(flag *pflag.FlagSet, p *bool, name string, shorthand st...
function initCommonFalg (line 141) | func initCommonFalg(flag *pflag.FlagSet) {
FILE: cmd/server.go
function init (line 47) | func init() {
function startServer (line 55) | func startServer() {
FILE: cmd/stats.go
function init (line 56) | func init() {
FILE: core/parseFile.go
method parserFile (line 16) | func (p *MyBinlogParser) parserFile() error {
FILE: core/parser.go
constant digits01 (line 32) | digits01 = "012345678901234567890123456789012345678901234567890123456789...
constant digits10 (line 33) | digits10 = "000000000011111111112222222222333333333344444444445555555555...
type Column (line 36) | type Column struct
method IsGenerated (line 49) | func (f *Column) IsGenerated() bool {
method IsUnsigned (line 59) | func (f *Column) IsUnsigned() bool {
type Table (line 68) | type Table struct
method ValidColumns (line 81) | func (t *Table) ValidColumns() (result []Column) {
method configPrimaryKey (line 2212) | func (t *Table) configPrimaryKey() {
type MasterStatus (line 94) | type MasterStatus struct
type MasterLog (line 104) | type MasterLog struct
type GtidSetInfo (line 111) | type GtidSetInfo struct
type BinlogParserConfig (line 118) | type BinlogParserConfig struct
method SetUniqueKey (line 297) | func (cfg *BinlogParserConfig) SetUniqueKey(key string) {
method ID (line 302) | func (cfg *BinlogParserConfig) ID() string {
type Parser (line 191) | type Parser interface
type row (line 196) | type row struct
type baseParser (line 206) | type baseParser struct
method getDB (line 913) | func (p *baseParser) getDB() (*gorm.DB, error) {
type MyBinlogParser (line 243) | type MyBinlogParser struct
method Parser (line 330) | func (p *MyBinlogParser) Parser() error {
method checkFinish (line 523) | func (p *MyBinlogParser) checkFinish(currentPosition *mysql.Position) ...
method clear (line 577) | func (p *MyBinlogParser) clear() {
method isGtidEventInGtidSet (line 589) | func (p *MyBinlogParser) isGtidEventInGtidSet() (status uint8) {
method Stop (line 636) | func (p *MyBinlogParser) Stop() {
method write (line 644) | func (p *MyBinlogParser) write(b []byte, binEvent *replication.BinlogE...
method myWrite (line 672) | func (p *MyBinlogParser) myWrite(data *row) {
method write2 (line 729) | func (p *MyBinlogParser) write2(b []byte) {
method ProcessChan (line 844) | func (p *MyBinlogParser) ProcessChan(wg *sync.WaitGroup) {
method parseGtidSets (line 860) | func (p *MyBinlogParser) parseGtidSets() error {
method parserInit (line 930) | func (p *MyBinlogParser) parserInit() error {
method getBinlogFirstTimestamp (line 1059) | func (p *MyBinlogParser) getBinlogFirstTimestamp(file string) (uint32,...
method checkError (line 1128) | func (p *MyBinlogParser) checkError(e error) {
method schemaFilter (line 1141) | func (p *MyBinlogParser) schemaFilter(table *replication.TableMapEvent...
method generateInsertSQL (line 1164) | func (p *MyBinlogParser) generateInsertSQL(t *Table, e *replication.Ro...
method checkCanParse (line 1242) | func (p *MyBinlogParser) checkCanParse(t *Table, e *replication.RowsEv...
method generateDeleteSQL (line 1256) | func (p *MyBinlogParser) generateDeleteSQL(t *Table, e *replication.Ro...
method generateUpdateSQL (line 1368) | func (p *MyBinlogParser) generateUpdateSQL(t *Table, e *replication.Ro...
method generateUpdateRollbackSQL (line 1500) | func (p *MyBinlogParser) generateUpdateRollbackSQL(t *Table, e *replic...
method tableInformation (line 1624) | func (p *MyBinlogParser) tableInformation(tableId uint64, schema []byt...
method mysqlMasterStatus (line 1691) | func (p *MyBinlogParser) mysqlMasterStatus() (*MasterStatus, error) {
method autoParseBinlogPosition (line 1723) | func (p *MyBinlogParser) autoParseBinlogPosition() []MasterLog {
method readTableSchema (line 2035) | func (p *MyBinlogParser) readTableSchema(path string) error {
method cacheNewTable (line 2092) | func (p *MyBinlogParser) cacheNewTable(t *Table) {
method parseSingleEvent (line 2239) | func (p *MyBinlogParser) parseSingleEvent(e *replication.BinlogEvent) ...
method Config (line 2414) | func (p *MyBinlogParser) Config() *BinlogParserConfig {
method ParseRows (line 2419) | func (p *MyBinlogParser) ParseRows() int {
method Percent (line 2424) | func (p *MyBinlogParser) Percent() int {
method Archive (line 2460) | func (p *MyBinlogParser) Archive() (fileSize int64, err error) {
function init (line 292) | func init() {
function byteEquals (line 660) | func byteEquals(v1, v2 []byte) bool {
function NewBinlogParser (line 738) | func NewBinlogParser(ctx context.Context, cfg *BinlogParserConfig) (*MyB...
function Exists (line 1053) | func Exists(filename string) bool {
function check (line 1111) | func check(e error) {
function timeParseToUnix (line 1117) | func timeParseToUnix(timeStr string) (uint32, error) {
function processValue (line 1322) | func processValue(value driver.Value, dataType string) driver.Value {
function abs (line 1362) | func abs(n int64) int64 {
function InterpolateParams (line 1736) | func InterpolateParams(query string, args []driver.Value) ([]byte, error) {
function reserveBuffer (line 1878) | func reserveBuffer(buf []byte, appendSize int) []byte {
function escapeBytesBackslash (line 1894) | func escapeBytesBackslash(buf, v []byte) []byte {
function escapeStringBackslash (line 1938) | func escapeStringBackslash(buf []byte, v string) []byte {
function escapeBytesQuotes (line 1987) | func escapeBytesQuotes(buf, v []byte) []byte {
function escapeStringQuotes (line 2006) | func escapeStringQuotes(buf []byte, v string) []byte {
function GetDataTypeBase (line 2026) | func GetDataTypeBase(dataType string) string {
function buildTableInfo (line 2108) | func buildTableInfo(node *ast.CreateTableStmt) *Table {
function buildNewColumnToCache (line 2149) | func buildNewColumnToCache(t *Table, field *ast.ColumnDef) *Column {
function compareValue (line 2184) | func compareValue(v1 interface{}, v2 interface{}) bool {
function getTableName (line 2488) | func getTableName(e *replication.RowsEvent) string {
function ComputePercent (line 2498) | func ComputePercent(start, stop, currnet mysql.Position,
FILE: core/parserV2.go
method generateInsertSQL_2 (line 18) | func (p *MyBinlogParser) generateInsertSQL_2(t *Table, e *replication.Ro...
method generateDeleteSQL_2 (line 112) | func (p *MyBinlogParser) generateDeleteSQL_2(t *Table, e *replication.Ro...
method generateUpdateSQL_2 (line 189) | func (p *MyBinlogParser) generateUpdateSQL_2(t *Table, e *replication.Ro...
method generateUpdateRollbackSQL_2 (line 318) | func (p *MyBinlogParser) generateUpdateRollbackSQL_2(t *Table, e *replic...
function valueSerialize (line 451) | func valueSerialize(arg driver.Value) (buf []byte, err error) {
FILE: core/parser_stats.go
type SummaryStats (line 19) | type SummaryStats struct
method String (line 38) | func (s *SummaryStats) String() string {
type TableStats (line 29) | type TableStats struct
method String (line 58) | func (t *TableStats) String() string {
type BinlogParserStats (line 62) | type BinlogParserStats struct
method ParserStats (line 95) | func (p *BinlogParserStats) ParserStats() error {
method checkFinish (line 204) | func (p *BinlogParserStats) checkFinish(currentPosition *mysql.Positio...
method clear (line 258) | func (p *BinlogParserStats) clear() {
method Stop (line 266) | func (p *BinlogParserStats) Stop() {
method writeString (line 274) | func (p *BinlogParserStats) writeString(str string) {
method parserInit (line 341) | func (p *BinlogParserStats) parserInit() error {
method getBinlogFirstTimestamp (line 465) | func (p *BinlogParserStats) getBinlogFirstTimestamp(file string) (uint...
method checkError (line 515) | func (p *BinlogParserStats) checkError(e error) {
method mysqlMasterStatus (line 522) | func (p *BinlogParserStats) mysqlMasterStatus() (*MasterStatus, error) {
method autoParseBinlogPosition (line 533) | func (p *BinlogParserStats) autoParseBinlogPosition() []MasterLog {
method parseSingleEvent (line 549) | func (p *BinlogParserStats) parseSingleEvent(e *replication.BinlogEven...
method parserFile (line 634) | func (p *BinlogParserStats) parserFile() error {
function NewBinlogParserStats (line 283) | func NewBinlogParserStats(cfg *BinlogParserConfig) (*BinlogParserStats, ...
function getDBTableKey (line 545) | func getDBTableKey(db, table string) string {
FILE: core/parser_test.go
function TestBinLogSyncer (line 102) | func TestBinLogSyncer(t *testing.T) {
type testParserSuite (line 106) | type testParserSuite struct
method SetUpSuite (line 115) | func (t *testParserSuite) SetUpSuite(c *C) {
method TearDownSuite (line 148) | func (t *testParserSuite) TearDownSuite(c *C) {
method SetUpTest (line 154) | func (t *testParserSuite) SetUpTest(c *C) {
method TearDownTest (line 157) | func (t *testParserSuite) TearDownTest(c *C) {
method testExecute (line 171) | func (t *testParserSuite) testExecute(c *C, query ...string) {
method setBinlogDir (line 178) | func (t *testParserSuite) setBinlogDir(c *C) {
method SetFlashback (line 206) | func (t *testParserSuite) SetFlashback(v bool) {
method SetMinimalUpdate (line 211) | func (t *testParserSuite) SetMinimalUpdate(v bool) {
method SetRemovePrimary (line 216) | func (t *testParserSuite) SetRemovePrimary(v bool) {
method SetSQLType (line 221) | func (t *testParserSuite) SetSQLType(v string) {
method SetIncludeGtids (line 226) | func (t *testParserSuite) SetIncludeGtids(v string) {
method setupTest (line 231) | func (t *testParserSuite) setupTest(c *C, flavor string) {
method getThreadID (line 263) | func (t *testParserSuite) getThreadID(c *C) uint32 {
method getServerUUID (line 273) | func (t *testParserSuite) getServerUUID(c *C) string {
method checkBinlog (line 426) | func (t *testParserSuite) checkBinlog(c *C, sqls ...string) {
method getBinlog (line 434) | func (t *testParserSuite) getBinlog(c *C) []string {
method getBinlogWithConfig (line 456) | func (t *testParserSuite) getBinlogWithConfig(c *C, config *core.Binlo...
method reset (line 498) | func (t *testParserSuite) reset() {
method TestSync (line 504) | func (t *testParserSuite) TestSync(c *C) {
method TestParseDDL (line 551) | func (t *testParserSuite) TestParseDDL(c *C) {
method TestStopTime (line 612) | func (t *testParserSuite) TestStopTime(c *C) {
method TestGeometry (line 664) | func (t *testParserSuite) TestGeometry(c *C) {
method TestDatetime (line 690) | func (t *testParserSuite) TestDatetime(c *C) {
method TestBinlogRowImageMinimal (line 727) | func (t *testParserSuite) TestBinlogRowImageMinimal(c *C) {
method TestMinimalUpdate (line 757) | func (t *testParserSuite) TestMinimalUpdate(c *C) {
method TestFieldGenerated (line 787) | func (t *testParserSuite) TestFieldGenerated(c *C) {
method TestTextMax (line 814) | func (t *testParserSuite) TestTextMax(c *C) {
method TestUpdate2Null (line 849) | func (t *testParserSuite) TestUpdate2Null(c *C) {
method TestRemovePrimary (line 879) | func (t *testParserSuite) TestRemovePrimary(c *C) {
method TestThreadID (line 905) | func (t *testParserSuite) TestThreadID(c *C) {
method TestInsert (line 956) | func (t *testParserSuite) TestInsert(c *C) {
method TestGTID (line 1009) | func (t *testParserSuite) TestGTID(c *C) {
method TestJson (line 1075) | func (t *testParserSuite) TestJson(c *C) {
method TestJsonV2 (line 1102) | func (t *testParserSuite) TestJsonV2(c *C) {
method initTableSchema (line 1200) | func (t *testParserSuite) initTableSchema(tableName ...string) {
method createTables (line 1223) | func (t *testParserSuite) createTables(c *C) {
function TestComputePercent (line 1232) | func TestComputePercent(t *testing.T) {
FILE: core/socket.go
function sendMsg (line 34) | func sendMsg(user string, event string, title string, text string, kwarg...
FILE: core/time.go
function timeTrack (line 7) | func timeTrack(start time.Time, name string) {
FILE: main.go
function main (line 5) | func main() {
FILE: parse/bingo2sql.go
type ParseInfo (line 20) | type ParseInfo struct
function TestMiddleware (line 26) | func TestMiddleware(next http.Handler) http.Handler {
function HomeHandler (line 35) | func HomeHandler(w http.ResponseWriter, r *http.Request) {
function GetParseInfo (line 41) | func GetParseInfo(c echo.Context) error {
function GetAllParse (line 65) | func GetAllParse(c echo.Context) error {
function ParseBinlog (line 102) | func ParseBinlog(c echo.Context) error {
function ParseBinlogStop (line 168) | func ParseBinlogStop(c echo.Context) error {
function Download (line 207) | func Download(c echo.Context) error {
FILE: parse/log.go
function init (line 25) | func init() {
type contextHook (line 44) | type contextHook struct
method Fire (line 48) | func (hook *contextHook) Fire(entry *log.Entry) error {
method Levels (line 65) | func (hook *contextHook) Levels() []log.Level {
function isSkippedPackageName (line 70) | func isSkippedPackageName(name string) bool {
function PrintVersion (line 76) | func PrintVersion() {
function GetInfo (line 85) | func GetInfo() string {
FILE: utils/uuid/codec.go
function FromBytes (line 32) | func FromBytes(input []byte) (u UUID, err error) {
function FromBytesOrNil (line 39) | func FromBytesOrNil(input []byte) UUID {
function FromString (line 49) | func FromString(input string) (u UUID, err error) {
function FromStringOrNil (line 56) | func FromStringOrNil(input string) UUID {
method MarshalText (line 66) | func (u UUID) MarshalText() (text []byte, err error) {
method UnmarshalText (line 94) | func (u *UUID) UnmarshalText(text []byte) (err error) {
method decodeCanonical (line 113) | func (u *UUID) decodeCanonical(t []byte) (err error) {
method decodeHashLike (line 138) | func (u *UUID) decodeHashLike(t []byte) (err error) {
method decodeBraced (line 151) | func (u *UUID) decodeBraced(t []byte) (err error) {
method decodeURN (line 164) | func (u *UUID) decodeURN(t []byte) (err error) {
method decodePlain (line 179) | func (u *UUID) decodePlain(t []byte) (err error) {
method MarshalBinary (line 191) | func (u UUID) MarshalBinary() (data []byte, err error) {
method UnmarshalBinary (line 198) | func (u *UUID) UnmarshalBinary(data []byte) (err error) {
FILE: utils/uuid/generator.go
constant epochStart (line 40) | epochStart = 122192928000000000
type epochFunc (line 42) | type epochFunc
type hwAddrFunc (line 43) | type hwAddrFunc
function NewV1 (line 53) | func NewV1() (UUID, error) {
function NewV2 (line 58) | func NewV2(domain byte) (UUID, error) {
function NewV3 (line 63) | func NewV3(ns UUID, name string) UUID {
function NewV4 (line 68) | func NewV4() (UUID, error) {
function NewV5 (line 73) | func NewV5(ns UUID, name string) UUID {
type Generator (line 78) | type Generator interface
type rfc4122Generator (line 87) | type rfc4122Generator struct
method NewV1 (line 110) | func (g *rfc4122Generator) NewV1() (UUID, error) {
method NewV2 (line 135) | func (g *rfc4122Generator) NewV2(domain byte) (UUID, error) {
method NewV3 (line 157) | func (g *rfc4122Generator) NewV3(ns UUID, name string) UUID {
method NewV4 (line 166) | func (g *rfc4122Generator) NewV4() (UUID, error) {
method NewV5 (line 178) | func (g *rfc4122Generator) NewV5(ns UUID, name string) UUID {
method getClockSequence (line 187) | func (g *rfc4122Generator) getClockSequence() (uint64, uint16, error) {
method getHardwareAddr (line 215) | func (g *rfc4122Generator) getHardwareAddr() ([]byte, error) {
method getEpoch (line 239) | func (g *rfc4122Generator) getEpoch() uint64 {
function newRFC4122Generator (line 101) | func newRFC4122Generator() Generator {
function newFromHash (line 244) | func newFromHash(h hash.Hash, ns UUID, name string) UUID {
function defaultHWAddrFunc (line 254) | func defaultHWAddrFunc() (net.HardwareAddr, error) {
FILE: utils/uuid/sql.go
method Value (line 30) | func (u UUID) Value() (driver.Value, error) {
method Scan (line 37) | func (u *UUID) Scan(src interface{}) error {
type NullUUID (line 54) | type NullUUID struct
method Value (line 60) | func (u NullUUID) Value() (driver.Value, error) {
method Scan (line 69) | func (u *NullUUID) Scan(src interface{}) error {
FILE: utils/uuid/uuid.go
constant Size (line 33) | Size = 16
type UUID (line 37) | type UUID
method Version (line 88) | func (u UUID) Version() byte {
method Variant (line 93) | func (u UUID) Variant() byte {
method Bytes (line 109) | func (u UUID) Bytes() []byte {
method String (line 115) | func (u UUID) String() string {
method SetVersion (line 132) | func (u *UUID) SetVersion(v byte) {
method SetVariant (line 137) | func (u *UUID) SetVariant(v byte) {
constant _ (line 41) | _ byte = iota
constant V1 (line 42) | V1
constant V2 (line 43) | V2
constant V3 (line 44) | V3
constant V4 (line 45) | V4
constant V5 (line 46) | V5
constant VariantNCS (line 51) | VariantNCS byte = iota
constant VariantRFC4122 (line 52) | VariantRFC4122
constant VariantMicrosoft (line 53) | VariantMicrosoft
constant VariantFuture (line 54) | VariantFuture
constant DomainPerson (line 59) | DomainPerson = iota
constant DomainGroup (line 60) | DomainGroup
constant DomainOrg (line 61) | DomainOrg
function Equal (line 83) | func Equal(u1 UUID, u2 UUID) bool {
function Must (line 156) | func Must(u UUID, err error) UUID {
Condensed preview — 29 files, each showing its path, character count, and a content snippet. Download the .json file or copy it to your clipboard to get the full structured content (288K chars).
[
{
"path": ".editorconfig",
"chars": 162,
"preview": "[*]\nend_of_line = lf\ninsert_final_newline = true\ncharset = utf-8\n\n# tab_size = 4 spaces\n[*.go]\nindent_style = tab\nindent"
},
{
"path": ".gitignore",
"chars": 83,
"preview": "files\nbin\ncoverage.out\n.idea/\n*.iml\n*.swp\n*.log\n*.fail.go\n.DS_Store\n.vscode/\n*.sql\n"
},
{
"path": "Makefile",
"chars": 6637,
"preview": "PROJECT=bingo2sql\nGOPATH ?= $(shell go env GOPATH)\n\n# Ensure GOPATH is set before running build process.\nifeq \"$(GOPATH)"
},
{
"path": "README.md",
"chars": 2725,
"preview": "# bingo2sql\nMySQL Binlog 解析工具\n\n从MySQL binlog解析出原始SQL,对应的回滚SQL等。\n\n#### 功能说明\n\n- 本地离线解析:指定本地binlog文件和要解析的表结构即可\n- 远程在线解析:指定远"
},
{
"path": "circle.yml",
"chars": 2742,
"preview": "version: 2\n\ngeneral:\n branches:\n ignore:\n - gh-pages\n\njobs:\n build:\n branches:\n ignore: gh-pages\n\n "
},
{
"path": "cmd/bingo2sql/main.go",
"chars": 5211,
"preview": "package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com/hanchuanchuan/bingo2sql/core\"\n\t_ \"github.co"
},
{
"path": "cmd/local.go",
"chars": 1678,
"preview": "/*\nCopyright © 2020 NAME HERE <EMAIL ADDRESS>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may n"
},
{
"path": "cmd/remote.go",
"chars": 1911,
"preview": "/*\nCopyright © 2020 NAME HERE <EMAIL ADDRESS>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may n"
},
{
"path": "cmd/root.go",
"chars": 6163,
"preview": "/*\nCopyright © 2020 NAME HERE <EMAIL ADDRESS>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may n"
},
{
"path": "cmd/server.go",
"chars": 3725,
"preview": "/*\nCopyright © 2020 NAME HERE <EMAIL ADDRESS>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may n"
},
{
"path": "cmd/stats.go",
"chars": 3094,
"preview": "/*\nCopyright © 2020 NAME HERE <EMAIL ADDRESS>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may n"
},
{
"path": "cnf/config.ini",
"chars": 338,
"preview": "[Bingo]\naddr = :8077\nlog = logs/go.log\nhttplog = logs/http.log\nwriteTimeout = 30\nsocketAddr = 127.0.0.1:8090\n# 日志级别: de"
},
{
"path": "core/parseFile.go",
"chars": 3912,
"preview": "package core\n\nimport (\n\t\"bufio\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/go-mysql-org/go-mysql/mysql\"\n\t\"github.com"
},
{
"path": "core/parser.go",
"chars": 58259,
"preview": "package core\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"database/sql/driver\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"os\"\n\t\"strcon"
},
{
"path": "core/parserV2.go",
"chars": 13156,
"preview": "package core\n\nimport (\n\t\"bytes\"\n\t\"database/sql/driver\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-m"
},
{
"path": "core/parser_stats.go",
"chars": 15470,
"preview": "package core\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-mysql-org/"
},
{
"path": "core/parser_test.go",
"chars": 42637,
"preview": "package core_test\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t"
},
{
"path": "core/socket.go",
"chars": 1596,
"preview": "// go-mysqlbinlog: a simple binlog tool to sync remote MySQL binlog.\n// go-mysqlbinlog supports semi-sync mode like face"
},
{
"path": "core/time.go",
"chars": 170,
"preview": "package core\n\nimport (\n\t\"time\"\n)\n\nfunc timeTrack(start time.Time, name string) {\n\t// elapsed := time.Since(start)\n\t// lo"
},
{
"path": "docs/test.md",
"chars": 6225,
"preview": "# 效率测试\n\n测试结果\n\nmysqlbinlog > bingo2sql > binlog_rollback > binlog2sql\n\n**说明:**\n\n该测试结果可能存在的偏差如下:\n\n- 输出格式略有差异\n- 测试所在环境的状态变化"
},
{
"path": "go.mod",
"chars": 3596,
"preview": "module github.com/hanchuanchuan/bingo2sql\n\nreplace github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.2.0\n\ngo 1"
},
{
"path": "go.sum",
"chars": 48388,
"preview": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ngithub.com/BurntSushi/toml v0.3.1 h1:"
},
{
"path": "main.go",
"chars": 94,
"preview": "package main\n\nimport \"github.com/hanchuanchuan/bingo2sql/cmd\"\n\nfunc main() {\n\tcmd.Execute()\n}\n"
},
{
"path": "parse/bingo2sql.go",
"chars": 4767,
"preview": "package parse\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/hanchuanchuan/bingo2sql/core\"\n"
},
{
"path": "parse/log.go",
"chars": 2067,
"preview": "package parse\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\n\tlog \"github.com/sirupsen/logrus\"\n)\n\nvar (\n\t// BuildTS 打包时"
},
{
"path": "utils/uuid/codec.go",
"chars": 6029,
"preview": "// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>\n//\n// Permission is hereby granted, free of charge, to any "
},
{
"path": "utils/uuid/generator.go",
"chars": 6611,
"preview": "// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>\n//\n// Permission is hereby granted, free of charge, to any "
},
{
"path": "utils/uuid/sql.go",
"chars": 2382,
"preview": "// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>\n//\n// Permission is hereby granted, free of charge, to any "
},
{
"path": "utils/uuid/uuid.go",
"chars": 4184,
"preview": "// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>\n//\n// Permission is hereby granted, free of charge, to any "
}
]
About this extraction
This page contains the full source code of the hanchuanchuan/bingo2sql GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 29 files (248.1 KB, approximately 94.2k tokens) together with a symbol index of 226 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.