
Configuring Logstash to Sync MySQL Data to Elasticsearch in Near Real Time

Preface

Syncing multiple tables at once is a common development requirement. I worked out how to do it a while back but never wrote up the details. A reader asked about it online the day before yesterday, which showed my understanding still wasn't thorough enough. So I'm writing it up properly now: first to resolve the reader's question quickly, and second to fix it in memory so it's ready to hand in future product development.

Sync Mechanism

The following was verified successfully on Elasticsearch 5.4.0 with Logstash 5.4.1.

It has also been confirmed to work on the 2.x releases.

Install the Plugins

./bin/logstash-plugin install logstash-input-jdbc
./bin/logstash-plugin install logstash-output-elasticsearch
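To double-check that both plugins are present (in recent Logstash releases logstash-output-elasticsearch is bundled, so the second install may simply report that it is already installed), you can list the installed plugins:

./bin/logstash-plugin list --verbose | grep -E 'jdbc|elasticsearch'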

Modify the Configuration

Configure the Job File

Create a Logstash job configuration file named logstash-mysql-es.conf.

Configuration File Contents

  • The MySQL driver JAR has to be downloaded manually and copied into the Logstash directory; the default path is /usr/share/logstash/logstash-core/lib/jars. If it lives anywhere else, give the absolute path in the configuration. For example:
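The connector JAR can be pulled from Maven Central, as in the sketch below (the "-bin" suffix in the config's jar name comes from the tar.gz distribution; either name works as long as jdbc_driver_library matches the actual file name):

# download the MySQL JDBC driver and copy it into Logstash's default jar path
wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.44/mysql-connector-java-5.1.44.jar
cp mysql-connector-java-5.1.44.jar /usr/share/logstash/logstash-core/lib/jars/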
input {
	jdbc {
		jdbc_driver_library => "mysql-connector-java-5.1.44-bin.jar"
		jdbc_driver_class => "com.mysql.jdbc.Driver"
		jdbc_connection_string => "jdbc:mysql://localhost:3306/db_name?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&useSSL=false&zeroDateTimeBehavior=convertToNull&serverTimezone=Asia/Shanghai"
		jdbc_user => "db_user"
		jdbc_password => "db_password"
		# enable paging
		jdbc_paging_enabled => "true"
		# rows per page
		jdbc_page_size => "1000"
		jdbc_default_timezone => "Asia/Shanghai"
		# refresh schedule; all '*' means run every minute, which is also the
		# minimum sync frequency for MySQL data
		schedule => "* * * * *"
		# the sync query; note the SQL statement must not end with ';'
		statement => "select * from test where updatetime > :sql_last_value"
		# instead of an inline SQL statement you can point to a SQL file,
		# but only one of the two may be set:
		# statement_filepath => "/home/test/test.sql"
		# enable tracking; if true, tracking_column must be specified
		use_column_value => true
		# the incrementing column to track
		tracking_column => "updatetime"
		# tracking column type: only numeric and timestamp are supported;
		# numeric is the default
		tracking_column_type => "timestamp"
		# record the result of the last run
		record_last_run => true
		# where that last-run value is stored
		last_run_metadata_path => "./logstash_jdbc_last_run"
	}
}
output {
	elasticsearch {
		hosts => "localhost:9200"
		user => "elastic"
		password => "es_password"
		index => "employee"
		document_id => "%{id}"
	}
	stdout {
		codec => json_lines
	}
}
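The statement above assumes a source table whose updatetime column is bumped on every write. A hypothetical minimal table matching the query would look like:

CREATE TABLE test (
  id         BIGINT PRIMARY KEY AUTO_INCREMENT,
  name       VARCHAR(64),
  -- maintained by MySQL on INSERT and UPDATE, so incremental sync can track it
  updatetime TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);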

Configure Sync for Multiple Tables

input {
	stdin {}

	jdbc {
		# set the type, used later to route events to the right ES index
		type => "cxx_article_info"
		# mysql jdbc connection string to our database; the path segment after
		# the port is the MySQL database name (here cxxwb)
		jdbc_connection_string => "jdbc:mysql://110.10.15.37:3306/cxxwb"
		# the user we wish to execute our statement as
		jdbc_user => "root"
		jdbc_password => "xxxxx"

		record_last_run => "true"
		use_column_value => "true"
		tracking_column => "id"
		last_run_metadata_path => "/opt/logstash/bin/logstash_xxy/cxx_info"
		clean_run => "false"

		# the path to our downloaded jdbc driver
		jdbc_driver_library => "/opt/elasticsearch/lib/mysql-connector-java-5.1.38.jar"
		# the name of the driver class for mysql
		jdbc_driver_class => "com.mysql.jdbc.Driver"
		jdbc_paging_enabled => "true"
		jdbc_page_size => "500"
		statement => "select * from cxx_article_info where id > :sql_last_value"
		# schedule fields (left to right): minute, hour, day of month, month,
		# day of week; all '*' means run every minute
		schedule => "* * * * *"
	}

	jdbc {
		# set the type for the second table
		type => "cxx_user"
		# mysql jdbc connection string to our database (cxxwb again)
		jdbc_connection_string => "jdbc:mysql://110.10.15.37:3306/cxxwb"
		# the user we wish to execute our statement as
		jdbc_user => "root"
		jdbc_password => "xxxxxx"

		record_last_run => "true"
		use_column_value => "true"
		tracking_column => "id"
		last_run_metadata_path => "/opt/logstash/bin/logstash_xxy/cxx_user_info"
		clean_run => "false"

		# the path to our downloaded jdbc driver
		jdbc_driver_library => "/opt/elasticsearch/lib/mysql-connector-java-5.1.38.jar"
		# the name of the driver class for mysql
		jdbc_driver_class => "com.mysql.jdbc.Driver"
		jdbc_paging_enabled => "true"
		jdbc_page_size => "500"
		statement => "select * from cxx_user_info where id > :sql_last_value"
		# alternatively, the absolute path of a SQL file to execute:
		# statement_filepath => "/opt/logstash/bin/logstash_mysql2es/department.sql"
		# schedule fields (left to right): minute, hour, day of month, month,
		# day of week; all '*' means run every minute
		schedule => "* * * * *"
	}

}

filter {
	mutate {
		convert => ["publish_time", "string"]
	}

	date {
		timezone => "Europe/Berlin"
		match => ["publish_time", "ISO8601", "yyyy-MM-dd HH:mm:ss"]
	}
	# date {
	#	match => ["publish_time", "yyyy-MM-dd HH:mm:ss,SSS"]
	#	remove_field => ["publish_time"]
	# }
	json {
		source => "message"
		remove_field => ["message"]
	}
}

output {

	if [type] == "cxx_article_info" {
		elasticsearch {
			# ES address and port
			hosts => "10.100.11.231:9200"
			# ES index name (your own choice)
			index => "cxx_info_index"
			# use the auto-increment id as the document id
			# document_id => "%{id}"
		}
	}

	if [type] == "cxx_user" {
		elasticsearch {
			# ES address and port
			hosts => "10.100.11.231:9200"
			# ES index name (your own choice)
			index => "cxx_user_index"
			# use the auto-increment id as the document id
			# document_id => "%{id}"
		}
	}

}

Start Logstash

Run in the Foreground

/usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /home/elk/logstash/conf/logstash-mysql-es.conf

Run in the Background

nohup /usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /home/elk/logstash/conf/logstash-mysql-es.conf &
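With nohup, console output lands in nohup.out in the working directory. For a package install, Logstash's own log normally lives at /var/log/logstash/logstash-plain.log (the default path, assuming it hasn't been changed), so progress can be followed with:

tail -f /var/log/logstash/logstash-plain.log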

Successful Sync Output

[2017-07-19T15:08:05,438][INFO ][logstash.pipeline ] Pipeline main started
The stdin plugin is now waiting for input:
[2017-07-19T15:08:05,491][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2017-07-19T15:09:00,721][INFO ][logstash.inputs.jdbc ] (0.007000s) SELECT count(*) AS `count` FROM (select * from cxx_article_info where id > 0) AS `t1` LIMIT 1
[2017-07-19T15:09:00,721][INFO ][logstash.inputs.jdbc ] (0.008000s) SELECT count(*) AS `count` FROM (select * from cxx_user_info where id > 0) AS `t1` LIMIT 1
[2017-07-19T15:09:00,730][INFO ][logstash.inputs.jdbc ] (0.004000s) SELECT * FROM (select * from cxx_user_info where id > 0) AS `t1` LIMIT 500 OFFSET 0
[2017-07-19T15:09:00,731][INFO ][logstash.inputs.jdbc ] (0.007000s) SELECT * FROM (select * from cxx_article_info where id > 0) AS `t1` LIMIT 500 OFFSET 0
[2017-07-19T15:10:00,173][INFO ][logstash.inputs.jdbc ] (0.002000s) SELECT count(*) AS `count` FROM (select * from cxx_article_info where id > 3) AS `t1` LIMIT 1
[2017-07-19T15:10:00,174][INFO ][logstash.inputs.jdbc ] (0.003000s) SELECT count(*) AS `count` FROM (select * from cxx_user_info where id > 2) AS `t1` LIMIT 1
[2017-07-19T15:11:00,225][INFO ][logstash.inputs.jdbc ] (0.001000s) SELECT count(*) AS `count` FROM (select * from cxx_article_info where id > 3) AS `t1` LIMIT 1
[2017-07-19T15:11:00,225][INFO ][logstash.inputs.jdbc ] (0.002000s) SELECT count(*) AS `count` FROM (select * from cxx_user_info where id > 2) AS `t1` LIMIT 1
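Note how the lower bound advances from id > 0 to id > 3 (and id > 2) once rows have been fetched: that bound is :sql_last_value, which the jdbc input persists between runs in the last_run_metadata_path file as a small YAML value. Roughly (the number here is illustrative):

# cat /opt/logstash/bin/logstash_xxy/cxx_info
--- 3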

Extensions

1) Handling more tables is just a matter of adding more jdbc blocks (each with its own type) in input, plus the matching type checks in output. For example:

if [type] == "cxx_user"

2) The type set in each input must match the type tested in the output conditional exactly; this type also corresponds to the type in ES. See the sketch below.
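As a sketch, wiring in a hypothetical third table cxx_comment means one more input block and one more output branch, with the two type strings spelled identically:

input {
	jdbc {
		type => "cxx_comment"
		# ... connection, driver, statement and schedule as in the blocks above ...
	}
}
output {
	if [type] == "cxx_comment" {
		elasticsearch {
			hosts => "10.100.11.231:9200"
			index => "cxx_comment_index"
		}
	}
}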

Configuring Multiple conf Files and Incremental Updates in Logstash

Reference: https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html

Logstash can be configured with multiple pipelines, each with its own settings, including which conf files it reads.

Alternatively, you can configure a single pipeline that reads multiple conf files. Reading multiple conf files effectively merges them into one configuration, so each conf needs to set a type and use it to decide which index an event is written to (a multi-pipeline sketch follows below).
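For the multi-pipeline route, the usual mechanism is config/pipelines.yml, where each entry gets its own id and conf path. A minimal sketch (the ids and file names here are assumptions):

- pipeline.id: d_bzdz
  path.config: "/map/es/soft/logstash-7.6.2/config/myconfig/mlp.conf"
- pipeline.id: other_sync
  path.config: "/map/es/soft/logstash-7.6.2/config/myconfig/other.conf"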

For the single-pipeline variant, modify logstash.yml as follows:

# pipeline.id: main
pipeline.id: d_bzdz

# path.config:
path.config: "/map/es/soft/logstash-7.6.2/config/myconfig/*.conf"

When started this way, the -f conf argument is no longer needed.

Run:

./bin/logstash
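Once it is up, the Logstash monitoring API on port 9600 can confirm which pipelines and conf files were loaded:

curl -s 'http://localhost:9600/_node/pipelines?pretty'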
# Sample Logstash configuration for creating a simple
# Beats -> Logstash -> Elasticsearch pipeline.

input {
	# stdin {}
	jdbc {
		jdbc_connection_string => "jdbc:oracle:thin:@//:1521/GIS"
		jdbc_user => ""
		jdbc_password => ""
		jdbc_driver_library => "/es/soft/logstash-7.6.2/lib/ojdbc7.jar"
		jdbc_driver_class => "Java::oracle.jdbc.driver.OracleDriver"
		# jdbc_default_timezone => "Asia/Shanghai"
		# one consistent time standard is enough, so this stays unset;
		# the Oracle side is already at +00
		# plugin_timezone => "local"
		# 2.28 million rows synced in about 12 minutes with these settings
		jdbc_paging_enabled => "true"
		# rows per batch
		jdbc_page_size => "100000"
		# sql_last_value is the last value queried (a timestamp or an id);
		# before any query has run it is 1970-01-01, i.e. the epoch timestamp.
		# With use_column_value => "true" and a tracking_column, sql_last_value
		# starts at 0 and then holds the latest value of that column.
		# With use_column_value => "false", each run records the Logstash
		# execution time as sql_last_value.
		statement => "select t.systemid .... and t.lastupdatedtime > :sql_last_value order by t.lastupdatedtime asc"
		# true: sql_last_value is the value of tracking_column;
		# false: sql_last_value is the time of the previous run
		use_column_value => "true"
		# whether to persist state between runs
		record_last_run => "true"
		# track the timestamp of the last row, which is why the SQL needs
		# order by t.lastupdatedtime asc
		tracking_column => "lastupdatedtime"
		# only two types exist: numeric and timestamp
		tracking_column_type => "timestamp"
		# file that records sql_last_value
		last_run_metadata_path => "/es/soft/logstash-7.6.2/config/myconfig/mlp_parameter.txt"
		# schedule fields (minute, hour, day of month, month, day of week);
		# all '*' means run every minute; "0 * * * *" runs at minute 0 of every hour
		# cron expression; this one runs once a day at 12:47
		schedule => "47 12 * * *"
		# the type decides which index the event is written to, because one
		# pipeline reads several conf files and effectively merges them
		type => "mlp_jdbc"
	}
}

filter {
	if [type] == "mlp_jdbc" {
		mutate {
			add_field => ["[location][lat]", "%{zxwd}"]
			add_field => ["[location][lon]", "%{zxjd}"]
		}

		# copy @timestamp into a temporary field converted to local time,
		# write it back, then drop the temporary field
		ruby {
			code => "event.set('timestamp', event.get('@timestamp').time.localtime)"
		}

		ruby {
			code => "event.set('@timestamp', event.get('timestamp'))"
		}

		mutate {
			remove_field => ["timestamp"]
		}
	}
}

output {
	if [type] == "mlp_jdbc" {
		elasticsearch {
			hosts => "localhost:9200"
			index => "d_bzdz_mlp"
			document_id => "%{systemid}"
		}
		stdout {
			codec => "json_lines"
			# codec => "rubydebug"
		}
	}
}
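Because the filter assembles [location][lat] / [location][lon] from the zxwd/zxjd columns, the target index should map location as geo_point before the first document arrives; otherwise ES will infer a plain object. A minimal sketch for a 7.x cluster, using the index name from the config above:

curl -X PUT 'http://localhost:9200/d_bzdz_mlp' -H 'Content-Type: application/json' -d '
{
  "mappings": {
    "properties": {
      "location": { "type": "geo_point" }
    }
  }
}'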