Tencent Cloud Distributed Database TDSQL: Online Migration and Data Synchronization

Author: Guo Yijun (郭一军)
Original
Published: 2024-01-15 17:28
##########################################
load_data
##########################################

I. load_data (groupshard)

1. Data source (native MySQL 5.7)
use yunbee;
select * from game_score_log;

select * from game_score_log into outfile '/tmp/yunbee.csv' fields terminated by ',' enclosed by '"' lines terminated by '\n';

[root@mac1 ~]# more /tmp/yunbee.csv

2. Copy the file to the groupshard instance
scp /tmp/yunbee.csv  root@172.17.16.2:/tmp/

3. Make sure the local_infile parameter is enabled on the master and both slaves:
cd /data/tdsql_run/4002/percona-5.7.17/install;./jmysql.sh 4002
set global local_infile=on;
show variables like '%infile%'\G

cd /data/tdsql_run/4003/percona-5.7.17/install;./jmysql.sh 4003
set global local_infile=on;
show variables like '%infile%'\G

4. Create the table with a shardkey in the target database, as follows:

CREATE TABLE `game_score_log` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(10) NOT NULL,
`score` int(11) NOT NULL,
PRIMARY KEY (`id`)
)  shardkey=id;


5. Import the data with load_data

cd /data/tdsql_run/15002/gateway/bin
./load_data mode1 172.17.16.2 15002 yunbee Yunbee123 yunbee.game_score_log auto '/tmp/yunbee.csv' ',' '"'


6. After the import finishes, check the log for errors. The log is written to the directory containing the source file; if the last line reads main:load data sucess, the import succeeded.
[root@mac1 tmp]# cat /tmp/yunbee.csv.log


7. Verify that the target table now contains the data
MySQL [tencent]> select * from game_score_log;



II. load_data (noshard)
1. Set the parameter (on the master and both slaves)
set global local_infile=on;
show variables like 'local_infile'\G


2. Data source (native MySQL 5.7)
[root@mac1 ~]# vim /tmp/yunbee.csv
"10001","王五","100"
"10002","李四","95"
"10003","李四","100"
"10004","李四","75"
"10005","王五","70"
"10006","张三","85"
"10007","张三","65"
"10008","王五","70"


3. Create the table without a shardkey in the target database, as follows:
CREATE TABLE `game_score_log` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(10) NOT NULL,
`score` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=10009 DEFAULT CHARSET=utf8;


4. Import the data with load_data
cd /data/tdsql_run/15002/gateway/bin
./load_data mode1 172.21.32.16 15003 tencent Tencent123 tencent.game_score_log auto '/tmp/yunbee.csv' ',' '"'


[root@mac1 tmp]# more yunbee.csv.log
main:host:172.21.32.16,port:15003,user:tencent,pwd:Tencent123,db_table:tencent.game_score_log,index:auto,file_name:/tmp/yunbee.csv,terminate:,,enclosed:
main:connect to proxy to get router info
main:create_sql:CREATE TABLE `game_score_log` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(10) NOT NULL,
`score` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=10009 DEFAULT CHARSET=utf8
main:shardkey is empty,table:tencent.game_score_log is a noshard table
main:(ERROR):partition_num is zero


III. LOAD DATA (use native MySQL for noshard tables)
Since load_data reports "main:(ERROR):partition_num is zero" for a noshard table, import into noshard tables with MySQL's native LOAD DATA instead.
set global local_infile=on;
show variables like '%infile%'\G

1. Data source (native MySQL 5.7)
[root@mac1 ~]# vim /tmp/yunbee.csv
"10001","王五","100"
"10002","李四","95"
"10003","李四","100"
"10004","李四","75"
"10005","王五","70"
"10006","张三","85"
"10007","张三","65"
"10008","王五","70"


2. Create the table
Create Table: CREATE TABLE `game_score_log` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(10) NOT NULL,
`score` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=10009 DEFAULT CHARSET=utf8


Method 1
3. Import the data with LOAD DATA
cd /data/tdsql_run/4003/percona-5.7.17/install/
./jmysql.sh 4003

load data local infile '/tmp/yunbee.csv' into table game_score_log
CHARACTER SET utf8
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"'
ESCAPED BY '\\'
LINES TERMINATED BY '\n'
(id, name, score);


Method 2:
4. Native mysql client (remote)
tmp_file="/tmp/yunbee.csv"
mysql="mysql -c -h172.21.32.5 -P15003 -utencent -pTencent123 tencent -N"
echo "load data local infile '${tmp_file}' ignore into table tencent.game_score_log fields terminated by ',' lines terminated by '\n'" | ${mysql}


#############################
MySQL--->TDSQL  (NoShard)
#############################
I. MySQL source database
ma 6
use yunbee;
select * from t1;

II. TDSQL target database

1. Environment variable (PATH)
vim /etc/profile
/data/home/tdsql/tdsqlinstall/mysqlagent/bin
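
A minimal sketch of the /etc/profile change, assuming the mydumper/myloader binaries live in the directory above:
export PATH=$PATH:/data/home/tdsql/tdsqlinstall/mysqlagent/bin    # append the tool directory to PATH
source /etc/profile                                               # reload the profile in the current shell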

2. Create the output directory
mkdir -p  /yunbee/mydumper

3. Export the data with mydumper
./mydumper --host=172.17.16.6 --port=3306 --user=gyj --password=Gyj123 --events --routines --triggers --less-locking --ignore-sysdb=1 --chunk-filesize=1024 --outputdir=/yunbee/mydumper --complete-insert

4. Delete the sys-prefixed dump files
cd /yunbee/mydumper
rm -rf  sys*


5. Load the data with myloader
./myloader --host=172.17.16.2 --port=15003 --user=yunbee --password=Yunbee123 --directory=/yunbee/mydumper --enable-binlog


6. Check the binlog position
cd /yunbee/mydumper
more metadata
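
For reference, the metadata file written by mydumper records the source binlog coordinates in roughly this form (the Log/Pos values are the ones reused in the CHANGE MASTER statement of step 7; treat the timestamps as placeholders):
Started dump at: <timestamp>
SHOW MASTER STATUS:
        Log: mysql-bin.000003
        Pos: 21426
Finished dump at: <timestamp>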


7. Set up replication (log in as the super administrator)
cd /data/tdsql_run/4004/percona-5.7.17/install/
./jmysql.sh 4004

change master to
master_host='172.17.16.6',
master_port=3306,
master_user='gyj',
master_password='Gyj123',
master_log_file='mysql-bin.000003',
master_log_pos=21426,
MASTER_AUTO_POSITION = 0 for channel 'c1';


CHANGE REPLICATION FILTER REPLICATE_WILD_IGNORE_TABLE = ('mysql.%%');

start slave;

show slave status\G


8. Confirm that replication is working
1) On the source database
insert into t1 values(4,'DDDDD');
2) On the target database
select * from t1;



III. Check data consistency between source and target
cd /data/tdsql_run/4004/percona-5.7.17/install
tar -xzvf percona-toolkit-2.2.16.tar.gz
cd percona-toolkit-2.2.16/bin
# single table
pt-table-checksum h='172.17.16.6',u='gyj',p='Gyj123',P=3306  -dyunbee -tt1 --nocheck-replication-filters --no-check-binlog-format  --replicate=yunbee.checksum

# single database
pt-table-checksum h='172.17.16.6',u='gyj',p='Gyj123',P=3306  --databases=yunbee --nocheck-replication-filters --no-check-binlog-format  --replicate=yunbee.checksum
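
After the checksum run, a query along these lines on the TDSQL (replica) side lists chunks whose checksums differ from the master, assuming the --replicate table yunbee.checksum used above:
select db, tbl, chunk, this_cnt, master_cnt, this_crc, master_crc
from yunbee.checksum
where master_cnt <> this_cnt
   or master_crc <> this_crc
   or isnull(master_crc) <> isnull(this_crc);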


IV. Stop the incremental synchronization
stop slave;
show slave status\G   -- confirm the replication relationship has been torn down



#################################
Oracle->TDSQL(oncesynctable)
#################################
tdsql1
/data/bin

Import data from Oracle to TDSQL
./oncesynctable --sourceuser=gyj --sourcepass=Gyj123 --srcCharset=AL32UTF8 --sourcehostlist=172.17.16.6_1521 --oracleServiceName=PROD --srctype=oracle --dsttype=mariadb --dsthostlist=172.17.16.2_15002 --dstCharset=UTF8 --dstuser=yunbee --dstpass='Yunbee123' --rule='gyj.*=>yunbee.*' --truncate=0


CREATE TABLE `t1` (
`id` decimal(38,0) NOT NULL,
`name` varchar(10) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 shardkey=id;


hdfs dfs -mv /tdsqlbackup/tdsqlzk/autocoldbackup/sets/set_1601891515_18/xtrabackup/xtrabackup+1601915569+20201006+003249+172.17.16.7+4004+268957348+20201006+003310++xbstream.lz4 \
/tdsqlbackup/tdsqlzk/autocoldbackup/sets/set_1601891515_18/xtrabackup/xtrabackup+1601915569+20201006+003249+172.17.16.7+4004+268957348+20201006+003310+1+xbstream.lz4
#################################################

set global log_output='table';
set global general_log=on;
use mysql;
select event_time,thread_id,left(argument,64) from general_log;
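
The general log grows quickly when written to a table, so switch it back off once the troubleshooting is done, for example:
set global general_log=off;
set global log_output='file';
truncate table mysql.general_log;   -- clear the accumulated log rows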


###############################
Data flashback
###############################

1. Generate some test data
mysql -h172.17.16.216 -P15002 -uyunbee -pyunbee.net

create database tencent;
use tencent;
flush logs;
show master status\G
create table fb_t1 (id int primary key,name varchar(10));
insert into fb_t1 values(1,'AAAAAA');
update fb_t1 set name='BBBBBB' where id=1;
delete from fb_t1 where id=1;


2. Inspect the data changes in the binlog
cd  /data/tdsql_run/4004/percona-5.7.17/bin
./mysqlbinlog --help
./mysqlbinlog -vv /data1/4002/dblogs/bin/binlog.000082 |egrep -i -C 20 'insert'
./mysqlbinlog -vv /data1/4002/dblogs/bin/binlog.000082 |egrep -i -C 20 'update'
./mysqlbinlog -vv /data1/4002/dblogs/bin/binlog.000082 |egrep -i -C 20 'delete'

BEGIN
/*!*/;
# at 20856
#200818 14:48:56 server id 269395106  end_log_pos 20906         Table_map: `tencent`.`fb_t1` mapped to number 324
# at 20906
#200818 14:48:56 server id 269395106  end_log_pos 20962         Update_rows: table id 324 flags: STMT_END_F

BINLOG '
2Hk7XxOipA4QMgAAAKpRAAAAAEQBAAAAAAEAB3RlbmNlbnQABWZiX3QxAAIDDwIeAAI=
2Hk7Xx+ipA4QOAAAAOJRAAAAAEQBAAAAAAEAAgAC///8AQAAAAZBQUFBQUH8AQAAAAZCQkJCQkI=
'/*!*/;
### UPDATE `tencent`.`fb_t1`
### WHERE
###   @1=1 /* INT meta=0 nullable=0 is_null=0 */
###   @2='AAAAAA' /* VARSTRING(30) meta=30 nullable=1 is_null=0 */
### SET
###   @1=1 /* INT meta=0 nullable=0 is_null=0 */
###   @2='BBBBBB' /* VARSTRING(30) meta=30 nullable=1 is_null=0 */
# at 20962
#200818 14:48:56 server id 269395106  end_log_pos 20989         Xid = 3083224
COMMIT/*!*/;
# at 20989


3. Flash back the data
cd /data/tdsql_run/4002/mysqlagent/bin
./mysqlbinlog_flashback_percona --help


./mysqlbinlog_flashback_percona --user="yunbee" --pass="Yunbee123"  --host=/data1/4004/prod/mysql.sock   --port=4004 --start-position=23668 --output-only=1 --output-file="/tmp/flashback.sql"  /data1/4004/dblogs/bin/binlog.000002
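
The generated file contains the reverse statements; a sketch of replaying it, assuming the same connection used in step 1 (adjust host/port/credentials to the instance that holds fb_t1):
mysql -h172.17.16.216 -P15002 -uyunbee -pyunbee.net tencent < /tmp/flashback.sql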



#################################################

Second-level partitioning
CREATE TABLE employees_int (
id INT key NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired date,
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
)
shardkey=id
PARTITION BY RANGE ( year(hired)*100 + month(hired) ) (
PARTITION p0 VALUES LESS THAN (201801),
PARTITION p1 VALUES LESS THAN (201901),
PARTITION p2 VALUES LESS THAN (202001),
PARTITION p3 VALUES LESS THAN (202101)
);

insert into employees_int (id,fname,lname,hired,job_code,store_id) values(1,'yijun','guo',20160101,01,01);
insert into employees_int (id,fname,lname,hired,job_code,store_id) values(2,'jiyan','chen',20180701,01,01);
insert into employees_int (id,fname,lname,hired,job_code,store_id) values(3,'junrui','guo',20190601,01,01);
insert into employees_int (id,fname,lname,hired,job_code,store_id) values(4,'junle','chen',20200801,01,01);
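
To check how the rows were distributed across the range partitions, the partition metadata can be queried (run directly on a data node, since information_schema is per-shard), for example:
select partition_name, table_rows
from information_schema.partitions
where table_schema=database() and table_name='employees_int';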


Sequences:

use yunbee;

CREATE TABLE  `test_sequence_table` (
`id1` int(10) NOT NULL,
`id2` int(10) NOT NULL,
`b` varchar(10)  NOT NULL DEFAULT '',
PRIMARY KEY (`id1`,`id2`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin shardkey=id2;


create tdsql_sequence yunbee.sid1 start with 1 tdsql_minvalue 1 maxvalue 1000  tdsql_increment by 1  tdsql_cycle;
create tdsql_sequence yunbee.sid2 start with 1 tdsql_minvalue 1 maxvalue 1000  tdsql_increment by 1  tdsql_cycle;


SHOW CREATE tdsql_sequence yunbee.sid1;

insert into yunbee.test_sequence_table (id1,id2,b) values (tdsql_nextval(yunbee.sid1),tdsql_nextval(yunbee.sid2),1);
insert into yunbee.test_sequence_table (id1,id2,b) values (tdsql_nextval(yunbee.sid1),tdsql_nextval(yunbee.sid2),2);
insert into yunbee.test_sequence_table (id1,id2,b) values (tdsql_nextval(yunbee.sid1),tdsql_nextval(yunbee.sid2),3);
insert into yunbee.test_sequence_table (id1,id2,b) values (tdsql_nextval(yunbee.sid1),tdsql_nextval(yunbee.sid2),4);
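
To confirm the sequence values were assigned:
select * from yunbee.test_sequence_table order by id1,id2;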


######################
Adding new DB machine resources
######################
I. Create CVM instances on the Tencent Cloud platform

Config: 4 vCPU + 8 GB memory + 100 GB HDD

1. Add three new machines:
172.17.16.4
172.17.16.12
172.17.16.15

2. Set the hostnames
hostnamectl set-hostname tdsql5
hostnamectl set-hostname tdsql6
hostnamectl set-hostname tdsql7

vim /etc/hosts
172.17.16.4      tdsql5
172.17.16.12    tdsql6
172.17.16.15    tdsql7


3. Configure NTP
vim /etc/ntp.conf

server 172.17.16.2
restrict 172.17.16.2  mask 255.255.240.0 nomodify notrap

systemctl restart ntpd.service
ntpdate -u 172.17.16.2
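
To confirm the new node is actually syncing time from 172.17.16.2:
ntpq -p                          # the 172.17.16.2 peer should eventually be marked with '*'
systemctl status ntpd.service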

4. Set up passwordless SSH
ssh-copy-id  172.17.16.8
ssh-copy-id  172.17.16.15
ssh-copy-id  172.17.16.17



mkdir /data
mkdir /data1

II. Add the new DB machine resources

1. Edit the newdb.yml playbook
cd /tdsql/tdsql_install/playbooks
vim tdsql_newdb.yml
- name: install newdb beginning
  hosts: tdsql_newdb
  remote_user: root
  gather_facts: false
  roles:
    - tdsql_beginning

- name: install db_module server
  hosts: tdsql_newdb
  remote_user: root
  gather_facts: false
  roles:
    - tdsql_db_module

- name: install hdfs_single on db
  hosts: tdsql_newdb
  remote_user: root
  gather_facts: false
  roles:
    - tdsql_hdfs_on_db_single


2. Edit the tdsql_hosts inventory file

vim tdsql_hosts

[tdsql_newdb]
tdsql_newdb1 ansible_ssh_host=172.17.16.4
tdsql_newdb2 ansible_ssh_host=172.17.16.12
tdsql_newdb3 ansible_ssh_host=172.17.16.15


3. Run the installation for the new DB nodes
ansible-playbook -i tdsql_hosts playbooks/tdsql_newdb.yml
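
Connectivity to the new host group can be verified with an ad-hoc ping before or after the playbook run:
ansible -i tdsql_hosts tdsql_newdb -m ping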


4. Report the machine resources in the web console (Chitu -> Resource Management)
1) Add the machine model spec
DB-TS40

2) Report them under [Device Management]
172.17.16.4
172.17.16.12
172.17.16.15

3) Report them under [Gateway Resources]
172.17.16.4
172.17.16.12
172.17.16.15


5. Create instances
Instance Management -> Create [Distributed Instance]
Instance Management -> Create [Non-distributed Instance]




##########################

1. groupshard execution plans
create table st1 (id int primary key,name varchar(10),city varchar(20)) shardkey=id;
insert into st1(id,name,city) values(1,'AAAAAA','HZ');
insert into st1(id,name,city) values(2,'BBBBBB','HZ');
insert into st1(id,name,city) values(3,'CCCCCC','BJ');
insert into st1(id,name,city) values(4,'DDDDDD','BJ');
insert into st1(id,name,city) values(5,'EEEEEE','SZ');
insert into st1(id,name,city) values(6,'FFFFFF','SZ');

create table st2 (id int primary key,name varchar(10),city varchar(20)) shardkey=id;
insert into st2(id,name,city) values(1,'AAAAAA','HZ');
insert into st2(id,name,city) values(2,'BBBBBB','HZ');
insert into st2(id,name,city) values(3,'CCCCCC','BJ');
insert into st2(id,name,city) values(4,'DDDDDD','BJ');
insert into st2(id,name,city) values(5,'EEEEEE','SZ');
insert into st2(id,name,city) values(6,'FFFFFF','SZ');

create table st3 (id int primary key,name varchar(10),city varchar(20)) shardkey=id;
insert into st3(id,name,city) values(1,'AAAAAA','HZ');
insert into st3(id,name,city) values(2,'BBBBBB','HZ');
insert into st3(id,name,city) values(3,'CCCCCC','BJ');
insert into st3(id,name,city) values(4,'DDDDDD','BJ');
insert into st3(id,name,city) values(5,'EEEEEE','SZ');
insert into st3(id,name,city) values(6,'FFFFFF','SZ');

create index idx_st1_name on st1(name);
create index idx_st2_name on st2(name);

/*sets:allsets*/ select * from st1;
/*sets:allsets*/ select * from st2;
/*sets:allsets*/ select * from st3;
explain select * from st1 left join st2 on st1.id=st2.id group by st1.id;

2. noshard execution plans
create table nt1 (id int primary key,name varchar(10),city varchar(20));
insert into nt1(id,name,city) values(1,'AAAAAA','HZ');
insert into nt1(id,name,city) values(2,'BBBBBB','HZ');
insert into nt1(id,name,city) values(3,'CCCCCC','BJ');
insert into nt1(id,name,city) values(4,'DDDDDD','BJ');
insert into nt1(id,name,city) values(5,'EEEEEE','SZ');
insert into nt1(id,name,city) values(6,'FFFFFF','SZ');

create table nt2 (id int primary key,name varchar(10),city varchar(20));
insert into nt2(id,name,city) values(1,'AAAAAA','HZ');
insert into nt2(id,name,city) values(2,'BBBBBB','HZ');
insert into nt2(id,name,city) values(3,'CCCCCC','BJ');
insert into nt2(id,name,city) values(4,'DDDDDD','BJ');
insert into nt2(id,name,city) values(5,'EEEEEE','SZ');
insert into nt2(id,name,city) values(6,'FFFFFF','SZ');

create index idx_nt1_name on nt1(name);
create index idx_nt1_city on nt1(city);

create index idx_nt2_name on nt2(name);
create index idx_nt2_city on nt2(city);

select * from nt1;
select * from nt2;

explain select * from nt1 left join nt2 on nt1.id=nt2.id group by nt1.id;

#filesort&temporary
mysql -h172.17.16.6 -P15003 -uyunbee -pYunbee123 yunbee
create table t1(id int primary key,col1 int,col2 varchar(10),key(id,col1));
create table t2(id int primary key,col1 int,col2 varchar(10),key(col1));
explain select * from t1,t2 where t1.id=1 and t1.col1=t2.col2 order by t1.col1;
explain select * from t1 force index(id),t2 where t1.id=1 and t1.col1=t2.col2 order by t1.col1;
explain select * from t1 force index(id),t2 where t1.id=1 and t1.col1=t2.col2 order by t1.col2;



1. Equality & inequality predicates
explain select * from nt1 where name='AAAAAA';
explain select * from nt1 where name!='AAAAAA';
explain select name from nt1 where name!='AAAAAA';


3.like

create index idx_nt1_city on nt1(city);
explain select * from nt1 where city like '%H';
explain select * from nt1 where city like 'H%';


4.join
explain select * from nt1 t1 left join nt2 t2 on t1.id=t2.id;
explain select * from nt1 t1 join nt2 t2  on t1.id=t2.id;


5.order by
explain select * from nt1 order by city;
explain select city from nt1 order by city;
explain select id,city from nt1 order by city;

6.group by
explain select * from nt1 left join nt2 on nt1.id=nt2.id group by nt1.id;
explain select * from nt1 left join nt2 on nt1.id=nt2.id group by nt2.id;


7. Subqueries
explain select * from nt1 where name=(select name from nt2 where id=1);

explain select * from nt1 where city in(select distinct city from nt2);


8. Statistics not updated in time (noshard)
mysql -h172.17.16.2 -P15003 -uyunbee -pYunbee123
explain select * from sbtest1 where k between 100000 and 200000;

explain select * from sbtest1 force index(k_1)  where k between 100000 and 200000;
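
If the optimizer is choosing a full scan only because the index statistics are stale, refreshing them is usually enough and the force index hint becomes unnecessary; a quick check:
analyze table sbtest1;
explain select * from sbtest1 where k between 100000 and 200000;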


9. Function or expression applied to the indexed column
explain select * from nt1 where id=1;
explain select * from nt1 where id+1=2;
explain select * from nt1 where power(id,2)=4;


10. Index prefix not used properly
create table nt3 (
a int,
b int,
c char(20),
d char(20),
e char(20),
primary key (a));

create index idx_nt3_b_c on nt3 (b,c);
create index idx_nt3_e on nt3(e);
insert into nt3  values(10,'15','tom','aaaaa','abcd1');
insert into nt3  values(20,'15','rose','aaaaa','abcd1');
insert into nt3  values(30,'35','jack','aaaaa','dcba1');
insert into nt3  values(40,'35','amida','bbbbb','dcba1');
insert into nt3  values(50,'45','joe','bbbbbb','cbad1');
insert into nt3  values(60,'55','yunbee','bbbbb','cbad1');


explain select * from nt3 where e like 'abc%';
explain select * from nt3 where e like '%abc';
explain select * from nt3 where c='aaaaa';
explain select * from nt3 where b=15 and c='tom';


11. Duplicate & redundant indexes

create index idx_nt3_b on nt3 (b);
create index idx_nt3_a_b_c on nt3 (a,b,c);
create index idx_nt3_b_c_e on nt3 (b,c,e);

show create table nt3\G
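
Judging from the definitions above, idx_nt3_b is a left prefix of idx_nt3_b_c, which in turn is a left prefix of idx_nt3_b_c_e, so the narrower two can usually be dropped, for example:
drop index idx_nt3_b on nt3;
drop index idx_nt3_b_c on nt3;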


12. Gateway pushdown

create table st1 (id int primary key,name varchar(10),city varchar(20)) shardkey=id;
insert into st1(id,name,city) values(1,'AAAAAA','HZ');
insert into st1(id,name,city) values(2,'BBBBBB','HZ');
insert into st1(id,name,city) values(3,'CCCCCC','BJ');
insert into st1(id,name,city) values(4,'DDDDDD','BJ');
insert into st1(id,name,city) values(5,'EEEEEE','SZ');
insert into st1(id,name,city) values(6,'FFFFFF','SZ');

create table st2 (id int primary key,name varchar(10),city varchar(20)) shardkey=id;
insert into st2(id,name,city) values(1,'AAAAAA','HZ');
insert into st2(id,name,city) values(2,'BBBBBB','HZ');
insert into st2(id,name,city) values(3,'CCCCCC','BJ');
insert into st2(id,name,city) values(4,'DDDDDD','BJ');
insert into st2(id,name,city) values(5,'EEEEEE','SZ');
insert into st2(id,name,city) values(6,'FFFFFF','SZ');

create table st3 (id int primary key,name varchar(10),city varchar(20)) shardkey=id;
insert into st3(id,name,city) values(1,'AAAAAA','HZ');
insert into st3(id,name,city) values(2,'BBBBBB','HZ');
insert into st3(id,name,city) values(3,'CCCCCC','BJ');
insert into st3(id,name,city) values(4,'DDDDDD','BJ');
insert into st3(id,name,city) values(5,'EEEEEE','SZ');
insert into st3(id,name,city) values(6,'FFFFFF','SZ');


explain select * from st1 where id=1;

explain select * from st1 where city='HZ';

explain select * from st1,st2 where st1.name=st2.city;


13. Subqueries that can be pushed down

explain select * from st1 where st1.city in (select city from st2);

explain select * from st1 where st1.city in (select name from st2 where st1.city=st2.name);

explain select * from st1,st2 where st1.name=st2.city;

explain select * from st1 where st1.id > (select min(id) from st2);

create table t1(
id int,
col1 int,
col2 varchar(10),
key(id,col1));

create table t2(
id int,
col1 int,
col2 varchar(10),
key(col1));


explain select * from t1 force index(id),t2
where t1.id=1 and t1.col1=t2.col2
order by t1.col1;

explain select * from t1 force index(id),t2
where t1.id=1 and t1.col1=t2.col2
order by t1.col2;


explain select * from t1 force index(id),t2
where t1.id=1 and t1.col1=t2.col2
order by t2.col1;


create table employees(
emp_no int(11) not null,
birth_date date not null,
first_name varchar(10) not null,
last_name  varchar(10) not null,
gender enum('M','F') not null,
hire_date date not null,
primary key(emp_no),
key idx_birth_date (birth_date),
key idx_birth_date_emp (emp_no,birth_date)
);




1.Elasticsearch health status

curl -XGET "http://localhost:9200/_cluster/health?pretty=true"


2. List the cluster nodes
curl 'localhost:9200/_cat/nodes?v'


3. Check index status
curl -XGET "http://localhost:9200/_cat/indices?v"



4. Set the replica count of all indexes to 0
curl -XPUT "http://localhost:9200/_settings" -H 'Content-Type: application/json' -d'
{
"index" : {
"number_of_replicas" : 0
}
}'


5. Set the replica count of a single index
curl -XPUT "http://localhost:9200/my_index/_settings" -H 'Content-Type: application/json' -d'
{
"index" : {
"number_of_replicas" : 1
}
}'


6. Create an index

curl -XPUT 'localhost:9200/gateway-interf-log?pretty'


1. Backup / physical restore
binlog download error

2. KIBANA

set /tdsqlzk/group_1601890264_9/groupconfig {"backup":{"increment_flag":"0","increment_interval_day":"0","mydumper":"0","xtrabackup":"1"},"backup_time":{"ebackuptime":"23:59:59","sbackuptime":"00:00:00"},"beendcnslave":"0","coldbacktokafka":"0","days":"30","dctokafka":"1"}


cat ../log/server.log
/data/application/kafka/bin/kafka-topics.sh --zookeeper 172.17.16.2:2118,172.17.16.5:2118,172.17.16.7:2118/kafka --list
./kafka-console-consumer.sh --bootstrap-server 172.17.16.5:9092 --topic proxy-group_1601890264_9

[root@tdsql2 bin]# vim kafka-server-start.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
fi

./kafka-server-start.sh  -daemon ../config/server.properties

/data/application/kafka/bin/kafka-console-consumer.sh --bootstrap-server 172.17.16.5:9092 --topic proxy-group_1601890264_9


3. Master/slave workflow

Example: system monitoring logs
cd /data/application/clouddba/bin
vim analyze_monitorlog.py
MONITORLOG_PATH = "/data/monitorlog"


Version numbering
TDSQL version number scheme and the links for each version:
e.g. 10.3.14.1.0
a.b.c.d.e
a.b: fixed at 10.3 for TDSQL under the V2 architecture
c: major iteration of the core components
d: sub-version of the standalone deployment release package, auto-incremented
e: platform compatibility
0: x86 + CentOS (including CentOS-like systems such as RedHat, TLinux, NeoKylin)
1: ARM + Ubuntu (including Ubuntu-like systems such as Kylin, UOS)
2: ARM + CentOS (including CentOS-like systems such as RedHat, TLinux, NeoKylin)


I. Upgrade the DB (perform the upgrade as the regular tdsql account)
1. Check the TDSQL DB version
Instance Management -> set-16662622-12 -> DB Monitoring -> Custom Fields -> MySQL Version

2. Upgrade a slave first; locate the slave
ps -ef |grep 4003
cd /data/home/tdsql/tdsqlinstall      # shared directory holding the latest upgrade package
cd /data/tdsql_run/4003              # per-instance directory of the current DB

3. Back up the slave's binaries first
mv percona-5.7.17/   percona-5.7.17_bak_`date +%F`


4. Copy the latest binaries from the shared directory into the current per-instance directory
cp -r /data/home/tdsql/tdsqlinstall/percona-5.7.17  .


5. Copy the configuration files
cp -r percona-5.7.17_bak_20200819/etc/   percona-5.7.17/


6. Confirm the configuration files were copied over
ll  percona-5.7.17/etc/

7. Restart the DB service
cd percona-5.7.17/install/
./restartmysql_cgroup.sh 4003


8. Confirm the version is now the latest
Instance Management -> set-16662622-12 -> DB Monitoring -> Custom Fields -> MySQL Version

9. Perform a master/slave switchover
Instance Management -> set-16662622-12 -> Instance Details -> Master/Slave Switch


10. Then upgrade the other two slaves in the same way.


11. Process auto-start
cd /data/oc_agent/conf
cat 4003_mysqlsafe_v2_proc.xml

ps -ef |grep -w mysql_safe
ps -ef |grep -w mysql_safe |grep -w 'default-file=/data/tdsql_run/4003/percona-5.7.17/etc/my_4003.conf --user=tdsql'
cd /data/home/tdsql/tdsqlinstall;./oc_pull_mysqld.sh 4003 /data1/tdsql_run/4003 /data/tdql_data percona-5.7.17


II. Upgrade the proxy (as the regular tdsql account)
1. Check the proxy version
Instance Management -> set-16662622-12 -> Proxy Monitoring -> Custom Fields -> Proxy Version

2. Locate the proxy directory
ps -ef |grep 15002
cd /data/tdsql_run/15002
ll

3. Back up the proxy
mv gateway/ gateway_bak


4. Copy the latest binaries from the shared directory into the current per-instance directory
cp -r /data/home/tdsql/tdsqlinstall/gateway .


5. Copy the configuration files
cp -r gateway_bak/conf/ gateway/conf


6. Confirm the configuration files were copied over
ll  gateway/conf/

7. Restart the proxy
cat /data/oc_agent/conf/15002_mysqlprox_v2_proc.xml
cd /data/tdsql_run/15002/gateway/bin
./restart.sh instance_15002

ps -ef |grep 15002
#router_update      updates the gateway routing when routes change
#mysql-proxy        main gateway process; handles most of the work: SQL forwarding, authentication, aggregation
#dcagent_tokkafka   ships the proxy logs to ES via Kafka for SQL auditing


8. Confirm the version is now the latest
Instance Management -> set-16662622-12 -> Proxy Monitoring -> Custom Fields -> Proxy Version


9. Then upgrade the other proxies in the same way.




III. Upgrade the agent (as the regular tdsql account)
1. Check the agent version
Instance Management -> set-16662622-12 -> DB Monitoring -> Custom Fields -> agent version

2. Suppress master/slave switchover during the upgrade
Instance Management -> set-16662622-12 -> Instance Details -> Disaster Recovery Settings -> Manual No-Switch - Set
Set permanent no-switch, or no-switch for 1 hour

3. Locate the agent directory
ps -ef |grep mysqlreport|grep  4003

4. Back up the agent
cd /data/tdsql_run/4003
mv mysqlagent/  mysqlagent_bak


5. Copy the latest binaries from the shared directory into the current per-instance directory
cp -r /data/home/tdsql/tdsqlinstall/mysqlagent .


6. Copy the configuration files
cp -r mysqlagent_bak/conf/ mysqlagent/conf


7. Confirm the configuration files were copied over
ll  mysqlagent/conf/


8. Restart mysqlagent
cat /data/oc_agent/conf/4003_mysqlreport_v2_proc.xml
cd /data/tdsql_run/4003/mysqlagent/bin
./restartreport_cgroup.sh ../conf/mysqlagent_4003.xml

9. Confirm the version is now the latest
Instance Management -> set-16662622-12 -> DB Monitoring -> Custom Fields -> mysqlagent


10. Clear the manual no-switch setting
Instance Management -> set-16662622-12 -> Instance Details -> Disaster Recovery Settings -> Delete Manual No-Switch