MyCat Database and Table Sharding: A Worked Example
This article walks through resharding a MyCat logical table. Starting from a user04 table that is hash-sharded across 9 physical databases, we scale out to 18 shards with MyCat's dataMigrate.sh tool, verify the data, then scale back down to 9 shards and verify again.
1. Current Sharding Configuration
schema.xml
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
    <schema name="mycatdb" checkSQLschema="false" sqlMaxLimit="100">
        <table name="user04" dataNode="dn$1-3,dn$13-15,dn$25-27" rule="sharding-by-murmur-user04-id"></table>
    </schema>
    <!-- <dataNode name="dn1$0-743" dataHost="localhost1" database="db$0-743"/> -->
    <dataNode name="dn1" dataHost="node1" database="testdb01"/>
    <dataNode name="dn2" dataHost="node1" database="testdb02"/>
    <dataNode name="dn3" dataHost="node1" database="testdb03"/>
    <dataNode name="dn13" dataHost="node2" database="testdb13"/>
    <dataNode name="dn14" dataHost="node2" database="testdb14"/>
    <dataNode name="dn15" dataHost="node2" database="testdb15"/>
    <dataNode name="dn25" dataHost="node3" database="testdb25"/>
    <dataNode name="dn26" dataHost="node3" database="testdb26"/>
    <dataNode name="dn27" dataHost="node3" database="testdb27"/>
    <!-- <dataNode name="dn4" dataHost="sequoiadb1" database="SAMPLE"/>
         <dataNode name="jdbc_dn1" dataHost="jdbchost" database="db1"/>
         <dataNode name="jdbc_dn2" dataHost="jdbchost" database="db2"/>
         <dataNode name="jdbc_dn3" dataHost="jdbchost" database="db3"/> -->
    <dataHost name="node1" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.31" url="192.168.8.31:3306" user="root" password="mysql"></writeHost>
    </dataHost>
    <dataHost name="node2" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.32" url="192.168.8.32:3306" user="root" password="mysql"></writeHost>
    </dataHost>
    <dataHost name="node3" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.33" url="192.168.8.33:3306" user="root" password="mysql"></writeHost>
    </dataHost>
</mycat:schema>
rule.xml
<tableRule name="sharding-by-murmur-user04-id">
    <rule>
        <columns>id</columns>
        <algorithm>murmur-id</algorithm>
    </rule>
</tableRule>
<function name="murmur-id" class="io.mycat.route.function.PartitionByMurmurHash">
    <property name="seed">0</property>                 <!-- default is 0 -->
    <property name="type">0</property>                 <!-- default is 0, meaning the sharding column is an integer; non-zero means string -->
    <property name="count">9</property>                <!-- number of database nodes to shard across; must be specified, otherwise sharding cannot work -->
    <property name="virtualBucketTimes">160</property> <!-- each physical node is mapped to this many virtual nodes; default 160, i.e. 160x as many virtual nodes as physical nodes -->
</function>
The user04 table is currently hash-partitioned on id into 9 shards, stored in 9 physical databases (three on each MySQL node). A quick per-shard row count is sketched below; the full contents of each shard follow in the next section.
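The loop below is only a hedged convenience sketch for checking the distribution, assuming the root/mysql credentials from schema.xml and the testdbNN database naming shown above.

# Hedged sketch: count rows per shard (assumes the root/mysql credentials from schema.xml).
for host in 192.168.8.31 192.168.8.32 192.168.8.33; do
  case "$host" in
    192.168.8.31) dbs="testdb01 testdb02 testdb03" ;;
    192.168.8.32) dbs="testdb13 testdb14 testdb15" ;;
    192.168.8.33) dbs="testdb25 testdb26 testdb27" ;;
  esac
  for db in $dbs; do
    printf '%s/%s: ' "$host" "$db"
    mysql -h"$host" -P3306 -uroot -pmysql -N -e "select count(*) from $db.user04"
  done
done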
2. Viewing the Current Data
node1
mysql> select * from testdb01.user04;
+----+--------+
| id | name   |
+----+--------+
|  8 | steven |
| 14 | steven |
| 16 | steven |
| 17 | steven |
| 34 | steven |
| 49 | steven |
+----+--------+
6 rows in set (0.00 sec)

mysql> select * from testdb02.user04;
+----+--------+
| id | name   |
+----+--------+
|  9 | steven |
| 10 | steven |
| 44 | steven |
| 45 | steven |
| 46 | steven |
| 48 | steven |
+----+--------+
6 rows in set (0.06 sec)

mysql> select * from testdb03.user04;
+----+--------+
| id | name   |
+----+--------+
| 11 | steven |
| 24 | steven |
| 33 | steven |
| 35 | steven |
| 40 | steven |
+----+--------+
5 rows in set (0.07 sec)
node2
mysql> select * from testdb13.user04;
+----+--------+
| id | name   |
+----+--------+
| 20 | steven |
| 25 | steven |
| 38 | steven |
| 39 | steven |
+----+--------+
4 rows in set (0.07 sec)

mysql> select * from testdb14.user04;
+----+--------+
| id | name   |
+----+--------+
|  1 | steven |
| 41 | steven |
| 50 | steven |
+----+--------+
3 rows in set (0.03 sec)

mysql> select * from testdb15.user04;
+----+--------+
| id | name   |
+----+--------+
| 12 | steven |
| 18 | steven |
| 32 | steven |
| 36 | steven |
+----+--------+
4 rows in set (0.12 sec)
node3
mysql> select * from testdb25.user04;
+----+--------+
| id | name   |
+----+--------+
|  6 | steven |
| 13 | steven |
| 19 | steven |
| 23 | steven |
| 27 | steven |
| 28 | steven |
| 29 | steven |
| 31 | steven |
| 37 | steven |
+----+--------+
9 rows in set (0.05 sec)

mysql> select * from testdb26.user04;
+----+--------+
| id | name   |
+----+--------+
|  4 | steven |
|  5 | steven |
| 15 | steven |
| 22 | steven |
| 42 | steven |
+----+--------+
5 rows in set (0.01 sec)

mysql> select * from testdb27.user04;
+----+--------+
| id | name   |
+----+--------+
|  2 | steven |
|  3 | steven |
|  7 | steven |
| 21 | steven |
| 26 | steven |
| 30 | steven |
| 43 | steven |
| 47 | steven |
+----+--------+
8 rows in set (0.06 sec)
Next, we add 9 more shards (going from 9 to 18) and reconfigure accordingly.
3. Configure schema.xml and rule.xml
Copy schema.xml and rule.xml, rename the copies to newSchema.xml and newRule.xml, and place them in the conf directory. Edit newSchema.xml and newRule.xml so they hold the post-expansion MyCat configuration (the table's dataNodes, data sources, and routing rule).
newSchema.xml
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
    <schema name="mycatdb" checkSQLschema="false" sqlMaxLimit="100">
        <table name="user04" dataNode="dn$1-6,dn$13-18,dn$25-30" rule="sharding-by-murmur-user04-id"></table>
    </schema>
    <!-- <dataNode name="dn1$0-743" dataHost="localhost1" database="db$0-743"/> -->
    <dataNode name="dn1" dataHost="node1" database="testdb01"/>
    <dataNode name="dn2" dataHost="node1" database="testdb02"/>
    <dataNode name="dn3" dataHost="node1" database="testdb03"/>
    <dataNode name="dn4" dataHost="node1" database="testdb04"/>
    <dataNode name="dn5" dataHost="node1" database="testdb05"/>
    <dataNode name="dn6" dataHost="node1" database="testdb06"/>
    <dataNode name="dn13" dataHost="node2" database="testdb13"/>
    <dataNode name="dn14" dataHost="node2" database="testdb14"/>
    <dataNode name="dn15" dataHost="node2" database="testdb15"/>
    <dataNode name="dn16" dataHost="node2" database="testdb16"/>
    <dataNode name="dn17" dataHost="node2" database="testdb17"/>
    <dataNode name="dn18" dataHost="node2" database="testdb18"/>
    <dataNode name="dn25" dataHost="node3" database="testdb25"/>
    <dataNode name="dn26" dataHost="node3" database="testdb26"/>
    <dataNode name="dn27" dataHost="node3" database="testdb27"/>
    <dataNode name="dn28" dataHost="node3" database="testdb28"/>
    <dataNode name="dn29" dataHost="node3" database="testdb29"/>
    <dataNode name="dn30" dataHost="node3" database="testdb30"/>
    <!-- <dataNode name="dn4" dataHost="sequoiadb1" database="SAMPLE"/>
         <dataNode name="jdbc_dn1" dataHost="jdbchost" database="db1"/>
         <dataNode name="jdbc_dn2" dataHost="jdbchost" database="db2"/>
         <dataNode name="jdbc_dn3" dataHost="jdbchost" database="db3"/> -->
    <dataHost name="node1" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.31" url="192.168.8.31:3306" user="root" password="mysql"></writeHost>
    </dataHost>
    <dataHost name="node2" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.32" url="192.168.8.32:3306" user="root" password="mysql"></writeHost>
    </dataHost>
    <dataHost name="node3" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.33" url="192.168.8.33:3306" user="root" password="mysql"></writeHost>
    </dataHost>
</mycat:schema>
newRule.xml
<tableRule name="sharding-by-murmur-user04-id">
    <rule>
        <columns>id</columns>
        <algorithm>murmur-id</algorithm>
    </rule>
</tableRule>
<function name="murmur-id" class="io.mycat.route.function.PartitionByMurmurHash">
    <property name="seed">0</property>                 <!-- default is 0 -->
    <property name="type">0</property>                 <!-- default is 0, meaning the sharding column is an integer; non-zero means string -->
    <property name="count">18</property>               <!-- number of database nodes to shard across; must be specified, otherwise sharding cannot work -->
    <property name="virtualBucketTimes">160</property> <!-- each physical node is mapped to this many virtual nodes; default 160, i.e. 160x as many virtual nodes as physical nodes -->
</function>
The shard count is changed to 18, matching the number of dataNodes now assigned to user04. The new backing databases also have to exist on the MySQL nodes before the migration runs; see the sketch below.
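The article does not show this step, but the new backing databases (testdb04-06 on node1, testdb16-18 on node2, testdb28-30 on node3) must exist before dataMigrate.sh runs. A minimal sketch, assuming the root/mysql credentials from schema.xml; whether the user04 table itself must be pre-created on the new shards depends on whether the migrator's dump includes the CREATE TABLE statement, so creating it up front (same structure as on the existing shards) is the safe choice.

# Hedged sketch: create the new backing databases before running dataMigrate.sh.
mysql -h192.168.8.31 -P3306 -uroot -pmysql -e "create database if not exists testdb04; create database if not exists testdb05; create database if not exists testdb06;"
mysql -h192.168.8.32 -P3306 -uroot -pmysql -e "create database if not exists testdb16; create database if not exists testdb17; create database if not exists testdb18;"
mysql -h192.168.8.33 -P3306 -uroot -pmysql -e "create database if not exists testdb28; create database if not exists testdb29; create database if not exists testdb30;"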
4. Edit migrateTables.properties
#schema1=tb1,tb2,...
#schema2=all   (writing "all", or leaving the table list empty, re-routes every sharded table in this schema whose node set changed)
#...
#sample
#TESTDB=travelrecord,company,goods
mycatdb=user04
5. Re-shard the Data
Review the dataMigrate.sh script in the bin directory and adjust it if needed (its switches are noted in the sketch below), then run it from the conf directory:
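dataMigrate.sh is only a wrapper around io.mycat.util.dataMigrator.DataMigrator; the switches it passes can be seen in the log that follows. The meanings below are inferred from the switch names and the log output, so treat them as assumptions rather than documentation.

# Switches visible in the log below (notes are guesses from the names, not official docs):
#   -tempFileDir=             directory for the intermediate dump files (empty = default under MYCAT_HOME)
#   -isAwaysUseMaster=true    dump from / load into the writeHost during migration
#   -mysqlBin=                path to the local mysql/mysqldump binaries used for dump and import
#   -cmdLength=110*1024       maximum length of a single generated command
#   -charset=utf8             character set used for dump and import
#   -deleteTempFileDir=true   delete the temp files after a successful run
#   -threadCount= / -delThreadCount=   thread counts for migrating and for cleaning redundant data
#   -queryPageSize=           rows fetched per page when scanning the source shards
# To see (or change) what your copy of the script passes:
grep -n "DataMigrator" /usr/local/mycat/bin/dataMigrate.sh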
../bin/dataMigrate.sh
[root@mycat conf]# ../bin/dataMigrate.sh
"/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.161-2.b14.el7.x86_64/jre/bin/java" -DMYCAT_HOME="/usr/local/mycat" -classpath "/usr/local/mycat/conf:/usr/local/mycat/lib/classes:/usr/local/mycat/lib/asm-4.0.jar:/usr/local/mycat/lib/commons-collections-3.2.1.jar:/usr/local/mycat/lib/commons-lang-2.6.jar:/usr/local/mycat/lib/curator-client-2.11.0.jar:/usr/local/mycat/lib/curator-framework-2.11.0.jar:/usr/local/mycat/lib/curator-recipes-2.11.0.jar:/usr/local/mycat/lib/disruptor-3.3.4.jar:/usr/local/mycat/lib/dom4j-1.6.1.jar:/usr/local/mycat/lib/druid-1.0.26.jar:/usr/local/mycat/lib/ehcache-core-2.6.11.jar:/usr/local/mycat/lib/fastjson-1.2.12.jar:/usr/local/mycat/lib/guava-19.0.jar:/usr/local/mycat/lib/hamcrest-core-1.3.jar:/usr/local/mycat/lib/hamcrest-library-1.3.jar:/usr/local/mycat/lib/jline-0.9.94.jar:/usr/local/mycat/lib/joda-time-2.9.3.jar:/usr/local/mycat/lib/jsr305-2.0.3.jar:/usr/local/mycat/lib/kryo-2.10.jar:/usr/local/mycat/lib/leveldb-0.7.jar:/usr/local/mycat/lib/leveldb-api-0.7.jar:/usr/local/mycat/lib/log4j-1.2.17.jar:/usr/local/mycat/lib/log4j-1.2-api-2.5.jar:/usr/local/mycat/lib/log4j-api-2.5.jar:/usr/local/mycat/lib/log4j-core-2.5.jar:/usr/local/mycat/lib/log4j-slf4j-impl-2.5.jar:/usr/local/mycat/lib/mapdb-1.0.7.jar:/usr/local/mycat/lib/minlog-1.2.jar:/usr/local/mycat/lib/mongo-java-driver-2.11.4.jar:/usr/local/mycat/lib/Mycat-server-1.6.6.1-release.jar:/usr/local/mycat/lib/mysql-binlog-connector-java-0.16.1.jar:/usr/local/mycat/lib/mysql-connector-java-5.1.35.jar:/usr/local/mycat/lib/netty-3.7.0.Final.jar:/usr/local/mycat/lib/netty-buffer-4.1.9.Final.jar:/usr/local/mycat/lib/netty-common-4.1.9.Final.jar:/usr/local/mycat/lib/objenesis-1.2.jar:/usr/local/mycat/lib/reflectasm-1.03.jar:/usr/local/mycat/lib/sequoiadb-driver-1.12.jar:/usr/local/mycat/lib/slf4j-api-1.6.1.jar:/usr/local/mycat/lib/univocity-parsers-2.2.1.jar:/usr/local/mycat/lib/velocity-1.7.jar:/usr/local/mycat/lib/wrapper.jar:/usr/local/mycat/lib/zookeeper-3.4.6.jar" -server -Xms2G -Xmx2G -XX:MaxPermSize=64M -XX:+AggressiveOpts -XX:MaxDirectMemorySize=2G io.mycat.util.dataMigrator.DataMigrator -tempFileDir= -isAwaysUseMaster=true -mysqlBin= -cmdLength=110*1024 -charset=utf8 -deleteTempFileDir=true -threadCount= -delThreadCount= -queryPageSize=
OpenJDK 64-Bit Server VM warning: ignoring option MaxPermSize=64M; support was removed in 8.0
2018-11-23 17:25:57:664 [1] -> creating migrator schedule and temp files for migrate...
+---------------------------------[mycatdb:user04] migrate info---------------------------------+
| tableSize      = 50
| migrate before = [dn1, dn2, dn3, dn13, dn14, dn15, dn25, dn26, dn27]
| migrate after  = [dn1, dn2, dn3, dn4, dn5, dn6, dn13, dn14, dn15, dn16, dn17, dn18, dn25, dn26, dn27, dn28, dn29, dn30]
| rule function  = PartitionByMurmurHash
+------------------------------------------------------------------------------------------------+
+----------------[mycatdb:user04] migrate schedule----------------+
| dn13[4] -> [0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
| dn14[3] -> [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0]
| dn15[4] -> [0,0,0,0,0,2,0,0,0,0,1,0,1,0,0,0,0,0]
| dn1[6]  -> [0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,1]
| dn25[9] -> [0,0,0,0,0,0,5,0,0,1,0,1,0,0,1,0,0,1]
| dn26[5] -> [0,0,0,0,0,0,0,1,0,0,1,0,1,0,0,1,1,0]
| dn27[8] -> [0,0,0,0,0,0,0,0,5,0,0,0,1,1,0,0,0,0]
| dn2[6]  -> [0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,1,0,0]
| dn3[5]  -> [0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,1,0]
+------------------------------------------------------------------+
2018-11-23 17:26:10:896 [2] -> start migrate data...
[mycatdb:user04] dn1->dn26 completed in 740ms
[mycatdb:user04] dn1->dn17 completed in 792ms
[mycatdb:user04] dn1->dn30 completed in 472ms
[mycatdb:user04] dn2->dn17 completed in 474ms
[mycatdb:user04] dn2->dn18 completed in 400ms
[mycatdb:user04] dn2->dn25 completed in 458ms
[mycatdb:user04] dn2->dn28 completed in 388ms
[mycatdb:user04] dn3->dn16 completed in 477ms
[mycatdb:user04] dn3->dn25 completed in 410ms
[mycatdb:user04] dn3->dn29 completed in 423ms
[mycatdb:user04] dn13->dn4 completed in 455ms
[mycatdb:user04] dn13->dn17 completed in 483ms
[mycatdb:user04] dn14->dn27 completed in 496ms
[mycatdb:user04] dn14->dn29 completed in 449ms
[mycatdb:user04] dn15->dn6 completed in 399ms
[mycatdb:user04] dn15->dn17 completed in 395ms
[mycatdb:user04] dn15->dn25 completed in 512ms
[mycatdb:user04] dn25->dn13 completed in 486ms
[mycatdb:user04] dn25->dn16 completed in 473ms
[mycatdb:user04] dn25->dn18 completed in 375ms
[mycatdb:user04] dn25->dn30 completed in 395ms
[mycatdb:user04] dn25->dn27 completed in 482ms
[mycatdb:user04] dn26->dn17 completed in 422ms
[mycatdb:user04] dn26->dn14 completed in 495ms
[mycatdb:user04] dn26->dn25 completed in 440ms
[mycatdb:user04] dn26->dn28 completed in 438ms
[mycatdb:user04] dn26->dn29 completed in 424ms
[mycatdb:user04] dn27->dn15 completed in 438ms
[mycatdb:user04] dn27->dn25 completed in 404ms
[mycatdb:user04] dn27->dn26 completed in 396ms
2018-11-23 17:26:18:106 [3] -> cleaning redundant data...
[mycatdb:user04] clean dataNode dn2 completed in 97ms
[mycatdb:user04] clean dataNode dn3 completed in 130ms
[mycatdb:user04] clean dataNode dn2 completed in 201ms
[mycatdb:user04] clean dataNode dn3 completed in 279ms
[mycatdb:user04] clean dataNode dn1 completed in 243ms
[mycatdb:user04] clean dataNode dn1 completed in 274ms
[mycatdb:user04] clean dataNode dn3 completed in 200ms
[mycatdb:user04] clean dataNode dn1 completed in 187ms
[mycatdb:user04] clean dataNode dn2 completed in 199ms
[mycatdb:user04] clean dataNode dn2 completed in 183ms
[mycatdb:user04] clean dataNode dn15 completed in 120ms
[mycatdb:user04] clean dataNode dn15 completed in 146ms
[mycatdb:user04] clean dataNode dn13 completed in 155ms
[mycatdb:user04] clean dataNode dn13 completed in 223ms
[mycatdb:user04] clean dataNode dn14 completed in 166ms
[mycatdb:user04] clean dataNode dn14 completed in 234ms
[mycatdb:user04] clean dataNode dn15 completed in 221ms
[mycatdb:user04] clean dataNode dn25 completed in 111ms
[mycatdb:user04] clean dataNode dn27 completed in 118ms
[mycatdb:user04] clean dataNode dn25 completed in 152ms
[mycatdb:user04] clean dataNode dn26 completed in 186ms
[mycatdb:user04] clean dataNode dn27 completed in 149ms
[mycatdb:user04] clean dataNode dn26 completed in 182ms
[mycatdb:user04] clean dataNode dn25 completed in 183ms
[mycatdb:user04] clean dataNode dn26 completed in 208ms
[mycatdb:user04] clean dataNode dn25 completed in 164ms
[mycatdb:user04] clean dataNode dn27 completed in 207ms
[mycatdb:user04] clean dataNode dn25 completed in 242ms
[mycatdb:user04] clean dataNode dn26 completed in 179ms
[mycatdb:user04] clean dataNode dn26 completed in 129ms
2018-11-23 17:26:21:423 [4] -> validating tables migrate result...
+------migrate result-------+
| [mycatdb:user04] -> success
+---------------------------+
2018-11-23 17:26:22:385 migrate data complete in 24736ms
6. Rename newSchema.xml and newRule.xml
After the expansion succeeds, rename newSchema.xml and newRule.xml to schema.xml and rule.xml, replacing the original files, and restart the MyCat service. The expansion is then complete. A hedged sketch of these steps follows.
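A minimal sketch of the switch-over. The MyCat home /usr/local/mycat comes from the log above; the MyCat host, service port 8066 and login credentials are defined in server.xml, which the article does not show, so they are placeholders here.

# Hedged sketch of the switch-over (mycat-host / 8066 / root / 123456 are placeholders from server.xml).
cd /usr/local/mycat/conf
cp schema.xml schema.xml.bak && cp rule.xml rule.xml.bak   # keep the old config around
mv newSchema.xml schema.xml
mv newRule.xml rule.xml
/usr/local/mycat/bin/mycat restart

# Quick sanity check through MyCat itself: the total should still be 50 rows.
mysql -h<mycat-host> -P8066 -uroot -p123456 -Dmycatdb -e "select count(*) from user04"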
7. Verify the Data
node1
mysql> select * from testdb01.user04;
+----+--------+
| id | name   |
+----+--------+
| 16 | steven |
| 49 | steven |
+----+--------+
2 rows in set (0.01 sec)

mysql> select * from testdb02.user04;
+----+--------+
| id | name   |
+----+--------+
| 46 | steven |
| 48 | steven |
+----+--------+
2 rows in set (0.00 sec)

mysql> select * from testdb03.user04;
+----+--------+
| id | name   |
+----+--------+
| 24 | steven |
| 40 | steven |
+----+--------+
2 rows in set (0.00 sec)

mysql> select * from testdb04.user04;
+----+--------+
| id | name   |
+----+--------+
| 20 | steven |
| 25 | steven |
| 39 | steven |
+----+--------+
3 rows in set (0.00 sec)

mysql> select * from testdb05.user04;
Empty set (0.01 sec)

mysql> select * from testdb06.user04;
+----+--------+
| id | name   |
+----+--------+
| 32 | steven |
| 36 | steven |
+----+--------+
2 rows in set (0.00 sec)
node2
mysql> select * from testdb13.user04;
+----+--------+
| id | name   |
+----+--------+
|  6 | steven |
| 19 | steven |
| 23 | steven |
| 28 | steven |
| 29 | steven |
+----+--------+
5 rows in set (0.01 sec)

mysql> select * from testdb14.user04;
+----+--------+
| id | name   |
+----+--------+
|  5 | steven |
+----+--------+
1 row in set (0.00 sec)

mysql> select * from testdb15.user04;
+----+--------+
| id | name   |
+----+--------+
|  7 | steven |
| 26 | steven |
| 30 | steven |
| 43 | steven |
| 47 | steven |
+----+--------+
5 rows in set (0.01 sec)

mysql> select * from testdb16.user04;
+----+--------+
| id | name   |
+----+--------+
| 33 | steven |
| 37 | steven |
+----+--------+
2 rows in set (0.00 sec)

mysql> select * from testdb17.user04;
+----+--------+
| id | name   |
+----+--------+
| 10 | steven |
| 12 | steven |
| 14 | steven |
| 15 | steven |
| 34 | steven |
| 38 | steven |
+----+--------+
6 rows in set (0.01 sec)

mysql> select * from testdb18.user04;
+----+--------+
| id | name   |
+----+--------+
| 31 | steven |
| 45 | steven |
+----+--------+
2 rows in set (0.00 sec)
node3
mysql> select * from testdb25.user04;
+----+--------+
| id | name   |
+----+--------+
|  2 | steven |
| 18 | steven |
| 22 | steven |
| 35 | steven |
| 44 | steven |
+----+--------+
5 rows in set (0.01 sec)

mysql> select * from testdb26.user04;
+----+--------+
| id | name   |
+----+--------+
|  3 | steven |
| 17 | steven |
+----+--------+
2 rows in set (0.00 sec)

mysql> select * from testdb27.user04;
+----+--------+
| id | name   |
+----+--------+
| 21 | steven |
| 27 | steven |
| 41 | steven |
+----+--------+
3 rows in set (0.00 sec)

mysql> select * from testdb28.user04;
+----+--------+
| id | name   |
+----+--------+
|  9 | steven |
| 42 | steven |
+----+--------+
2 rows in set (0.00 sec)

mysql> select * from testdb29.user04;
+----+--------+
| id | name   |
+----+--------+
|  1 | steven |
|  4 | steven |
| 11 | steven |
| 50 | steven |
+----+--------+
4 rows in set (0.00 sec)

mysql> select * from testdb30.user04;
+----+--------+
| id | name   |
+----+--------+
|  8 | steven |
| 13 | steven |
+----+--------+
2 rows in set (0.00 sec)
As the output shows, user04 has gone from the original 9 shards to 18; the expansion is verified.
Next, we shrink the table back down to 9 shards.
8. Configure schema.xml and rule.xml
Copy schema.xml and rule.xml, rename the copies to newSchema.xml and newRule.xml, and place them in the conf directory. Edit newSchema.xml and newRule.xml so they hold the post-shrink MyCat configuration (the table's dataNodes, data sources, and routing rule); in newRule.xml the murmur function's count goes back to 9.
newSchema.xml
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
    <schema name="mycatdb" checkSQLschema="false" sqlMaxLimit="100">
        <table name="user04" dataNode="dn$1-3,dn$13-15,dn$25-27" rule="sharding-by-murmur-user04-id"></table>
        <table name="user05" dataNode="dn$1-36" rule="sharding-by-date-adddate"></table>
        <table name="user06" dataNode="dn$1-36" rule="sharding-by-month-adddate"></table>
        <table name="user07" dataNode="dn$1-36" rule="sharding-by-hour-adddate"></table>
    </schema>
    <!-- <dataNode name="dn1$0-743" dataHost="localhost1" database="db$0-743"/> -->
    <dataNode name="dn1" dataHost="node1" database="testdb01"/>
    <dataNode name="dn2" dataHost="node1" database="testdb02"/>
    <dataNode name="dn3" dataHost="node1" database="testdb03"/>
    <dataNode name="dn13" dataHost="node2" database="testdb13"/>
    <dataNode name="dn14" dataHost="node2" database="testdb14"/>
    <dataNode name="dn15" dataHost="node2" database="testdb15"/>
    <dataNode name="dn25" dataHost="node3" database="testdb25"/>
    <dataNode name="dn26" dataHost="node3" database="testdb26"/>
    <dataNode name="dn27" dataHost="node3" database="testdb27"/>
    <!-- <dataNode name="dn4" dataHost="sequoiadb1" database="SAMPLE"/>
         <dataNode name="jdbc_dn1" dataHost="jdbchost" database="db1"/>
         <dataNode name="jdbc_dn2" dataHost="jdbchost" database="db2"/>
         <dataNode name="jdbc_dn3" dataHost="jdbchost" database="db3"/> -->
    <dataHost name="node1" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.31" url="192.168.8.31:3306" user="root" password="mysql"></writeHost>
    </dataHost>
    <dataHost name="node2" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.32" url="192.168.8.32:3306" user="root" password="mysql"></writeHost>
    </dataHost>
    <dataHost name="node3" maxCon="1000" minCon="10" balance="1" writeType="0" dbType="mysql"
              dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="192.168.8.33" url="192.168.8.33:3306" user="root" password="mysql"></writeHost>
    </dataHost>
</mycat:schema>
newRule.xml
<tableRule name="sharding-by-murmur-user04-id">
    <rule>
        <columns>id</columns>
        <algorithm>murmur-id</algorithm>
    </rule>
</tableRule>
<function name="murmur-id" class="io.mycat.route.function.PartitionByMurmurHash">
    <property name="seed">0</property>                 <!-- default is 0 -->
    <property name="type">0</property>                 <!-- default is 0, meaning the sharding column is an integer; non-zero means string -->
    <property name="count">9</property>                <!-- number of database nodes to shard across; must be specified, otherwise sharding cannot work -->
    <property name="virtualBucketTimes">160</property> <!-- each physical node is mapped to this many virtual nodes; default 160, i.e. 160x as many virtual nodes as physical nodes -->
</function>
9. Re-shard the Data
As before, review the dataMigrate.sh script in the bin directory and adjust it if needed, then run it from the conf directory:
../bin/dataMigrate.sh
[root@mycat conf]# ../bin/dataMigrate.sh
"/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.161-2.b14.el7.x86_64/jre/bin/java" -DMYCAT_HOME="/usr/local/mycat" -classpath "/usr/local/mycat/conf:/usr/local/mycat/lib/classes:/usr/local/mycat/lib/asm-4.0.jar:/usr/local/mycat/lib/commons-collections-3.2.1.jar:/usr/local/mycat/lib/commons-lang-2.6.jar:/usr/local/mycat/lib/curator-client-2.11.0.jar:/usr/local/mycat/lib/curator-framework-2.11.0.jar:/usr/local/mycat/lib/curator-recipes-2.11.0.jar:/usr/local/mycat/lib/disruptor-3.3.4.jar:/usr/local/mycat/lib/dom4j-1.6.1.jar:/usr/local/mycat/lib/druid-1.0.26.jar:/usr/local/mycat/lib/ehcache-core-2.6.11.jar:/usr/local/mycat/lib/fastjson-1.2.12.jar:/usr/local/mycat/lib/guava-19.0.jar:/usr/local/mycat/lib/hamcrest-core-1.3.jar:/usr/local/mycat/lib/hamcrest-library-1.3.jar:/usr/local/mycat/lib/jline-0.9.94.jar:/usr/local/mycat/lib/joda-time-2.9.3.jar:/usr/local/mycat/lib/jsr305-2.0.3.jar:/usr/local/mycat/lib/kryo-2.10.jar:/usr/local/mycat/lib/leveldb-0.7.jar:/usr/local/mycat/lib/leveldb-api-0.7.jar:/usr/local/mycat/lib/log4j-1.2.17.jar:/usr/local/mycat/lib/log4j-1.2-api-2.5.jar:/usr/local/mycat/lib/log4j-api-2.5.jar:/usr/local/mycat/lib/log4j-core-2.5.jar:/usr/local/mycat/lib/log4j-slf4j-impl-2.5.jar:/usr/local/mycat/lib/mapdb-1.0.7.jar:/usr/local/mycat/lib/minlog-1.2.jar:/usr/local/mycat/lib/mongo-java-driver-2.11.4.jar:/usr/local/mycat/lib/Mycat-server-1.6.6.1-release.jar:/usr/local/mycat/lib/mysql-binlog-connector-java-0.16.1.jar:/usr/local/mycat/lib/mysql-connector-java-5.1.35.jar:/usr/local/mycat/lib/netty-3.7.0.Final.jar:/usr/local/mycat/lib/netty-buffer-4.1.9.Final.jar:/usr/local/mycat/lib/netty-common-4.1.9.Final.jar:/usr/local/mycat/lib/objenesis-1.2.jar:/usr/local/mycat/lib/reflectasm-1.03.jar:/usr/local/mycat/lib/sequoiadb-driver-1.12.jar:/usr/local/mycat/lib/slf4j-api-1.6.1.jar:/usr/local/mycat/lib/univocity-parsers-2.2.1.jar:/usr/local/mycat/lib/velocity-1.7.jar:/usr/local/mycat/lib/wrapper.jar:/usr/local/mycat/lib/zookeeper-3.4.6.jar" -server -Xms2G -Xmx2G -XX:MaxPermSize=64M -XX:+AggressiveOpts -XX:MaxDirectMemorySize=2G io.mycat.util.dataMigrator.DataMigrator -tempFileDir= -isAwaysUseMaster=true -mysqlBin= -cmdLength=110*1024 -charset=utf8 -deleteTempFileDir=true -threadCount= -delThreadCount= -queryPageSize=
OpenJDK 64-Bit Server VM warning: ignoring option MaxPermSize=64M; support was removed in 8.0
2018-11-23 17:47:02:047 [1] -> creating migrator schedule and temp files for migrate...
+---------------------------------[mycatdb:user04] migrate info---------------------------------+
| tableSize      = 50
| migrate before = [dn1, dn2, dn3, dn4, dn5, dn6, dn13, dn14, dn15, dn16, dn17, dn18, dn25, dn26, dn27, dn28, dn29, dn30]
| migrate after  = [dn1, dn2, dn3, dn13, dn14, dn15, dn25, dn26, dn27]
| rule function  = PartitionByMurmurHash
+------------------------------------------------------------------------------------------------+
+--[mycatdb:user04] migrate schedule---+
| dn13[5] -> [0,0,0,0,0,0,5,0,0]
| dn14[1] -> [0,0,0,0,0,0,0,1,0]
| dn15[5] -> [0,0,0,0,0,0,0,0,5]
| dn16[2] -> [0,0,1,0,0,0,1,0,0]
| dn17[6] -> [2,1,0,1,0,1,0,1,0]
| dn18[2] -> [0,1,0,0,0,0,1,0,0]
| dn1[2]  -> [0,0,0,0,0,0,0,0,0]
| dn25[5] -> [0,1,1,0,0,1,0,1,1]
| dn26[2] -> [1,0,0,0,0,0,0,0,1]
| dn27[3] -> [0,0,0,0,1,0,1,0,0]
| dn28[2] -> [0,1,0,0,0,0,0,1,0]
| dn29[4] -> [0,0,1,0,2,0,0,1,0]
| dn2[2]  -> [0,0,0,0,0,0,0,0,0]
| dn30[2] -> [1,0,0,0,0,0,1,0,0]
| dn3[2]  -> [0,0,0,0,0,0,0,0,0]
| dn4[3]  -> [0,0,0,3,0,0,0,0,0]
| dn5[0]  -> [0,0,0,0,0,0,0,0,0]
| dn6[2]  -> [0,0,0,0,0,2,0,0,0]
+--------------------------------------+
2018-11-23 17:47:06:683 [2] -> start migrate data...
[mycatdb:user04] dn6->dn15 completed in 763ms
[mycatdb:user04] dn4->dn13 completed in 835ms
[mycatdb:user04] dn13->dn25 completed in 473ms
[mycatdb:user04] dn14->dn26 completed in 468ms
[mycatdb:user04] dn15->dn27 completed in 414ms
[mycatdb:user04] dn16->dn3 completed in 406ms
[mycatdb:user04] dn16->dn25 completed in 439ms
[mycatdb:user04] dn17->dn1 completed in 474ms
[mycatdb:user04] dn17->dn2 completed in 426ms
[mycatdb:user04] dn17->dn13 completed in 396ms
[mycatdb:user04] dn17->dn15 completed in 408ms
[mycatdb:user04] dn17->dn26 completed in 444ms
[mycatdb:user04] dn18->dn2 completed in 420ms
[mycatdb:user04] dn18->dn25 completed in 508ms
[mycatdb:user04] dn25->dn2 completed in 439ms
[mycatdb:user04] dn25->dn3 completed in 459ms
[mycatdb:user04] dn25->dn15 completed in 422ms
[mycatdb:user04] dn25->dn26 completed in 442ms
[mycatdb:user04] dn25->dn27 completed in 448ms
[mycatdb:user04] dn26->dn1 completed in 412ms
[mycatdb:user04] dn26->dn27 completed in 434ms
[mycatdb:user04] dn27->dn14 completed in 436ms
[mycatdb:user04] dn27->dn25 completed in 442ms
[mycatdb:user04] dn28->dn2 completed in 453ms
[mycatdb:user04] dn28->dn26 completed in 397ms
[mycatdb:user04] dn29->dn3 completed in 381ms
[mycatdb:user04] dn29->dn14 completed in 405ms
[mycatdb:user04] dn29->dn26 completed in 440ms
[mycatdb:user04] dn30->dn1 completed in 437ms
[mycatdb:user04] dn30->dn25 completed in 358ms
2018-11-23 17:47:13:659 [3] -> cleaning redundant data...
[mycatdb:user04] clean dataNode dn6 completed in 267ms
[mycatdb:user04] clean dataNode dn4 completed in 285ms
[mycatdb:user04] clean dataNode dn17 completed in 154ms
[mycatdb:user04] clean dataNode dn17 completed in 228ms
[mycatdb:user04] clean dataNode dn15 completed in 141ms
[mycatdb:user04] clean dataNode dn16 completed in 248ms
[mycatdb:user04] clean dataNode dn18 completed in 241ms
[mycatdb:user04] clean dataNode dn14 completed in 192ms
[mycatdb:user04] clean dataNode dn16 completed in 316ms
[mycatdb:user04] clean dataNode dn17 completed in 254ms
[mycatdb:user04] clean dataNode dn17 completed in 325ms
[mycatdb:user04] clean dataNode dn17 completed in 222ms
[mycatdb:user04] clean dataNode dn13 completed in 170ms
[mycatdb:user04] clean dataNode dn18 completed in 198ms
[mycatdb:user04] clean dataNode dn25 completed in 101ms
[mycatdb:user04] clean dataNode dn29 completed in 195ms
[mycatdb:user04] clean dataNode dn30 completed in 240ms
[mycatdb:user04] clean dataNode dn29 completed in 279ms
[mycatdb:user04] clean dataNode dn27 completed in 172ms
[mycatdb:user04] clean dataNode dn25 completed in 176ms
[mycatdb:user04] clean dataNode dn28 completed in 258ms
[mycatdb:user04] clean dataNode dn25 completed in 162ms
[mycatdb:user04] clean dataNode dn25 completed in 202ms
[mycatdb:user04] clean dataNode dn28 completed in 313ms
[mycatdb:user04] clean dataNode dn26 completed in 185ms
[mycatdb:user04] clean dataNode dn29 completed in 243ms
[mycatdb:user04] clean dataNode dn30 completed in 258ms
[mycatdb:user04] clean dataNode dn27 completed in 161ms
[mycatdb:user04] clean dataNode dn25 completed in 168ms
[mycatdb:user04] clean dataNode dn26 completed in 160ms
2018-11-23 17:47:17:586 [4] -> validating tables migrate result...
+------migrate result-------+
| [mycatdb:user04] -> success
+---------------------------+
2018-11-23 17:47:18:102 migrate data complete in 16057ms
10. Rename newSchema.xml and newRule.xml
After the shrink succeeds, rename newSchema.xml and newRule.xml to schema.xml and rule.xml, replacing the original files (the same steps as in section 6), and restart the MyCat service. The shrink is then complete.
11. Verify the Data
node1
mysql> select * from testdb01.user04;
+----+--------+
| id | name   |
+----+--------+
|  8 | steven |
| 14 | steven |
| 16 | steven |
| 17 | steven |
| 34 | steven |
| 49 | steven |
+----+--------+
6 rows in set (0.00 sec)

mysql> select * from testdb02.user04;
+----+--------+
| id | name   |
+----+--------+
|  9 | steven |
| 10 | steven |
| 44 | steven |
| 45 | steven |
| 46 | steven |
| 48 | steven |
+----+--------+
6 rows in set (0.00 sec)

mysql> select * from testdb03.user04;
+----+--------+
| id | name   |
+----+--------+
| 11 | steven |
| 24 | steven |
| 33 | steven |
| 35 | steven |
| 40 | steven |
+----+--------+
5 rows in set (0.01 sec)

mysql> select * from testdb04.user04;
Empty set (0.00 sec)

mysql> select * from testdb05.user04;
Empty set (0.01 sec)

mysql> select * from testdb06.user04;
Empty set (0.00 sec)
node2
mysql> select * from testdb13.user04;
+----+--------+
| id | name   |
+----+--------+
| 20 | steven |
| 25 | steven |
| 38 | steven |
| 39 | steven |
+----+--------+
4 rows in set (0.00 sec)

mysql> select * from testdb14.user04;
+----+--------+
| id | name   |
+----+--------+
|  1 | steven |
| 41 | steven |
| 50 | steven |
+----+--------+
3 rows in set (0.00 sec)

mysql> select * from testdb15.user04;
+----+--------+
| id | name   |
+----+--------+
| 12 | steven |
| 18 | steven |
| 32 | steven |
| 36 | steven |
+----+--------+
4 rows in set (0.00 sec)

mysql> select * from testdb16.user04;
Empty set (0.00 sec)

mysql> select * from testdb17.user04;
Empty set (0.00 sec)

mysql> select * from testdb18.user04;
Empty set (0.00 sec)
node3
mysql> select * from testdb25.user04;
+----+--------+
| id | name   |
+----+--------+
|  6 | steven |
| 13 | steven |
| 19 | steven |
| 23 | steven |
| 27 | steven |
| 28 | steven |
| 29 | steven |
| 31 | steven |
| 37 | steven |
+----+--------+
9 rows in set (0.01 sec)

mysql> select * from testdb26.user04;
+----+--------+
| id | name   |
+----+--------+
|  4 | steven |
|  5 | steven |
| 15 | steven |
| 22 | steven |
| 42 | steven |
+----+--------+
5 rows in set (0.01 sec)

mysql> select * from testdb27.user04;
+----+--------+
| id | name   |
+----+--------+
|  2 | steven |
|  3 | steven |
|  7 | steven |
| 21 | steven |
| 26 | steven |
| 30 | steven |
| 43 | steven |
| 47 | steven |
+----+--------+
8 rows in set (0.01 sec)

mysql> select * from testdb28.user04;
Empty set (0.00 sec)

mysql> select * from testdb29.user04;
Empty set (0.00 sec)

mysql> select * from testdb30.user04;
Empty set (0.00 sec)
After shrinking, all of user04's data is back on 9 shards and the other (now unused) shards hold no rows; the shrink is verified. The empty databases can optionally be dropped afterwards, as sketched below.
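Optional cleanup, as a hedged sketch: drop the now-empty databases only after confirming that schema.xml no longer references dn4-dn6, dn16-dn18 and dn28-dn30, and that they really contain no rows. Credentials are assumed to be the root/mysql pair from schema.xml.

# Hedged sketch: drop the empty backing databases left over after the shrink.
mysql -h192.168.8.31 -P3306 -uroot -pmysql -e "drop database if exists testdb04; drop database if exists testdb05; drop database if exists testdb06;"
mysql -h192.168.8.32 -P3306 -uroot -pmysql -e "drop database if exists testdb16; drop database if exists testdb17; drop database if exists testdb18;"
mysql -h192.168.8.33 -P3306 -uroot -pmysql -e "drop database if exists testdb28; drop database if exists testdb29; drop database if exists testdb30;"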
Problems encountered:
Because this environment had previously been used for a one-master, two-slave replication test, gtid_mode was enabled on the backend MySQL instances, which produced the following error during migration:
ERROR 1840 (HY000) at line 3 in file: '/usr/local/mycat/temp/mycatdb-user04/dn1(old)-dn17(new)/user04.sql':
@@GLOBAL.GTID_PURGED can only be set when @@GLOBAL.GTID_EXECUTED is empty.
-> mysql -h192.168.8.32 -P3306 -uroot -pmysql -Dtestdb17 -f --default-character-set=utf8 -e "source /usr/local/mycat/temp/mycatdb-user04/dn1(old)-dn17(new)/user04.sql"
Solution: disable gtid_mode on the backend MySQL instances. A minimal sketch of one way to do this on a test environment follows.
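Since this is a test environment, the simplest fix is to turn GTIDs off in my.cnf and restart each backend MySQL node; the config file path and service name below are assumptions, and a production system would instead step gtid_mode down online (ON -> ON_PERMISSIVE -> OFF_PERMISSIVE -> OFF).

# Hedged sketch: on each backend node, add the following under [mysqld] in /etc/my.cnf
# (path is an assumption; adjust to your layout):
#   gtid_mode = OFF
#   enforce_gtid_consistency = OFF
# Then restart the instance and verify:
systemctl restart mysqld
mysql -uroot -pmysql -e "show variables like 'gtid_mode'"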
Alternatively, expansion can also be done with downtime: back up all data of the logical table, switch to the new sharding configuration, and then re-import the backup so the rows are redistributed. A rough sketch of that approach follows.
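The commands below are only a sketch of that idea, not something the article ran: application writes are assumed to be stopped first, <mycat-host>, port 8066 and the login are placeholders from server.xml, and single-row INSERT statements are generated (assuming MyCat pushes the concat() down to each shard and merges the results) so that every row can be re-routed individually after the configuration change.

# 1) Back up the logical table through MyCat as single-row INSERT statements.
mysql -h<mycat-host> -P8066 -uroot -p123456 -Dmycatdb -N \
  -e "select concat('insert into user04 (id, name) values (', id, ', ''', name, ''');') from user04" \
  > /tmp/user04_backup.sql

# 2) Switch to the new schema.xml/rule.xml and restart MyCat (see section 6), then clear
#    the table across all shards and replay the backup so every row is re-routed:
mysql -h<mycat-host> -P8066 -uroot -p123456 -Dmycatdb -e "truncate table user04"
mysql -h<mycat-host> -P8066 -uroot -p123456 -Dmycatdb < /tmp/user04_backup.sql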