Mongodb集群部署测试
MongoDB 的数据分块称为chunk。每个chunk 都是Collection 中一段连续的数据记录,默认最大尺寸是64MB(早期版本默认为200MB),超出则分裂生成新的数据块。
要构建一个 MongoDB Sharding Cluster,需要三种角色:
Shard Server: mongod 实例,用于存储实际的数据块。
Config Server: mongod 实例,存储了整个Cluster Metadata,其中包括chunk 信息。
Route Server: mongos 实例,前端路由,客户端由此接入,且让整个集群看上去像单一进程数据库。
Route 转发请求到实际的目标服务进程,并将多个结果合并回传给客户端。Route 本身并不存储任何数据和状态,仅在启动时从Config Server 获取信息。Config Server 上的任何变动都会传递给所有的Route Process。
在实际使用中,为了获取高可用、高性能的集群方案,我们会将Shard Server 部署成Replica Sets,然后用LVS 部署多个Route。
##########################################################################################
以下为测试过程。
在一台机器上部署。
分两片
每片两份
##########################################################################################
#第一个分片 shard1 复制两份
./mongod -shardsvr -replSet shard_liyan1 -port 16001 -dbpath /home/liyan/data/mongodata/mutildata/shard11 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard11.log -logappend -fork
./mongod -shardsvr -replSet shard_liyan1 -port 16003 -dbpath /home/liyan/data/mongodata/mutildata/shard12 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard12.log -logappend -fork
#初始化shard_liyan1的replica set
#用mongo连接其中一个mongod,执行:
config={_id:'shard_liyan1',members:[
{_id:0,host:'127.0.0.1:16001'},
{_id:1,host:'127.0.0.1:16003'}
]
}
rs.initiate(config);
##########################################################################################
#第二个分片 shard2 复制两份
./mongod -shardsvr -replSet shard_liyan2 -port 16002 -dbpath /home/liyan/data/mongodata/mutildata/shard21 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard21.log -logappend -fork
./mongod -shardsvr -replSet shard_liyan2 -port 16004 -dbpath /home/liyan/data/mongodata/mutildata/shard22 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard22.log -logappend -fork
#初始化shard_liyan2的replica set
#用mongo连接其中一个mongod,执行:
config={_id:'shard_liyan2',members:[
{_id:0,host:'127.0.0.1:16002'},
{_id:1,host:'127.0.0.1:16004'}
]
}
rs.initiate(config);
##########################################################################################
#配置三个 config Server(数量必须是一个或三个,否则mongos启动时报错。原因:多台config server之间通过两阶段提交保证元数据一致,只支持1台或3台。)
./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config1 -port 16005 -logpath /home/liyan/data/mongodata/logs/config1.log -logappend -fork
./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config2 -port 16006 -logpath /home/liyan/data/mongodata/logs/config2.log -logappend -fork
./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config3 -port 16007 -logpath /home/liyan/data/mongodata/logs/config3.log -logappend -fork
##########################################################################################
#配置mongos (生产环境需要在上层加上LVS)
#在server1,server2上分别执行:指定mongos操作的config server
./mongos -configdb 127.0.0.1:16005,127.0.0.1:16006,127.0.0.1:16007 -port 16008 -chunkSize 5 -logpath /home/liyan/data/mongodata/logs/mongos1.log -logappend -fork
./mongos -configdb 127.0.0.1:16005,127.0.0.1:16006,127.0.0.1:16007 -port 16009 -chunkSize 5 -logpath /home/liyan/data/mongodata/logs/mongos2.log -logappend -fork
#mongos不需要dbpath
##########################################################################################
#指定mongos对应的shard
#连接到其中一个mongos进程,并切换到admin数据库做以下配置
#1. 连接到mongos,并切换到admin
./mongo 127.0.0.1:16008/admin
>db
admin
#2. 加入shards
db.runCommand( { addshard : "shard_liyan1/127.0.0.1:16001,127.0.0.1:16003",name:"s1",maxsize:20480} );
db.runCommand( { addshard : "shard_liyan2/127.0.0.1:16002,127.0.0.1:16004",name:"s2",maxsize:20480} );
#3. 可选参数
#Name:用于指定每个shard的名字,不指定的话系统将自动分配
#maxSize:指定各个shard可使用的最大磁盘空间,单位megabytes
#########################################################################################
#4. Listing shards
>db.runCommand( { listshards : 1 } )
#如果列出了以上两个你添加的shards,表示shards已经配置成功,输出如下:
#{
# "shards" : [
# {
# "_id" : "s1",
# "host" : "shard_liyan1/127.0.0.1:16001,127.0.0.1:16003"
# },
# {
# "_id" : "s2",
# "host" : "shard_liyan2/127.0.0.1:16002,127.0.0.1:16004"
# }
# ],
# "ok" : 1
#}
#
##########################################################################################
#5. 激活数据库分片
#命令:
> db.runCommand( { enablesharding : "ISYDB" } );
#通过执行以上命令,可以让数据库跨shard,如果不执行这步,数据库只会存放在一个shard,一旦激活数据库分片,数据库中不同的collection将被存放在不同的shard上,但一个collection仍旧存放在同一个shard上,要使单个collection也分片,还需单独对collection作些操作
##########################################################################################
#Collecton分片
#要使单个collection也分片存储,需要给collection指定一个分片key,通过以下命令操作:
#> db.runCommand( { shardcollection : "<namespace>", key : <shardkeypatternobject> });
#注:
# a. 分片的collection系统会自动创建一个索引(也可用户提前创建好)
# b. 分片的collection只能有一个在分片key上的唯一索引,其它唯一索引不被允许
>db.runCommand( { shardcollection : "ISYDB.VIDEO",key : {_id: 1} } )
#>for (var i = 1; i <= 100; i++) db.VIDEO.save({_id:i,value:"liyanTest"});
#> db.VIDEO.stats()
##########################################################################################
以下是所有启动的脚本日志
[liyan@cctv226 bin]$ ./mongod -shardsvr -replSet shard_liyan1 -port 16001 -dbpath /home/liyan/data/mongodata/mutildata/shard11 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard11.log -logappend -fork
./mongod -shardsvr -replSet shard_liyan1 -port 16003 -dbpath /home/liyan/data/mongodata/mutildata/shard12 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard12.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/shard11.log
forked process: 21328
[liyan@cctv226 bin]$
[liyan@cctv226 bin]$ ./mongod -shardsvr -replSet shard_liyan1 -port 16003 -dbpath /home/liyan/data/mongodata/mutildata/shard12 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard12.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/shard12.log
forked process: 21334
[liyan@cctv226 bin]$ ./mongo -port 16001
MongoDB shell version: 1.8.3-rc1
connecting to: 127.0.0.1:16001/test
> config={_id:'shard_liyan1',members:[
... {_id:0,host:'127.0.0.1:16001'},
... {_id:1,host:'127.0.0.1:16003'}
... ]
... }
{
"_id" : "shard_liyan1",
"members" : [
{
"_id" : 0,
"host" : "127.0.0.1:16001"
},
{
"_id" : 1,
"host" : "127.0.0.1:16003"
}
]
}
> rs.initiate(config);
{
"info" : "Config now saved locally. Should come online in about a minute.",
"ok" : 1
}
> exit
bye
[liyan@cctv226 bin]$ ./mongod -shardsvr -replSet shard_liyan2 -port 16002 -dbpath /home/liyan/data/mongodata/mutildata/shard21 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard21.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/shard21.log
forked process: 21385
[liyan@cctv226 bin]$ ./mongod -shardsvr -replSet shard_liyan2 -port 16004 -dbpath /home/liyan/data/mongodata/mutildata/shard22 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard22.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/shard22.log
forked process: 21391
[liyan@cctv226 bin]$ ./mongo -port 16002
MongoDB shell version: 1.8.3-rc1
connecting to: 127.0.0.1:16002/test
> config={_id:'shard_liyan2',members:[
... {_id:0,host:'127.0.0.1:16002'},
... {_id:1,host:'127.0.0.1:16004'}
... ]
... }
{
"_id" : "shard_liyan2",
"members" : [
{
"_id" : 0,
"host" : "127.0.0.1:16002"
},
{
"_id" : 1,
"host" : "127.0.0.1:16004"
}
]
}
> rs.initiate(config);
{
"info" : "Config now saved locally. Should come online in about a minute.",
"ok" : 1
}
>
shard_liyan2:PRIMARY> exit
bye
[liyan@cctv226 bin]$ ./mongod -shardsvr -replSet shard_liyan3 -port 16010 -dbpath /home/liyan/data/mongodata/mutildata/shard31 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard31.log -logappend -fork
./mongod -shardsvr -replSet shard_liyan3 -port 16011 -dbpath /home/liyan/data/mongodata/mutildata/shard32 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard32.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/shard31.log
forked process: 21443
[liyan@cctv226 bin]$ ./mongod -shardsvr -replSet shard_liyan3 -port 16011 -dbpath /home/liyan/data/mongodata/mutildata/shard32 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard32.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/shard32.log
forked process: 21449
[liyan@cctv226 bin]$ ./mongo -port 16010
MongoDB shell version: 1.8.3-rc1
connecting to: 127.0.0.1:16010/test
> config={_id:'shard_liyan3',members:[
... {_id:0,host:'127.0.0.1:16010'},
... {_id:1,host:'127.0.0.1:16011'}
... ]
... }
{
"_id" : "shard_liyan3",
"members" : [
{
"_id" : 0,
"host" : "127.0.0.1:16010"
},
{
"_id" : 1,
"host" : "127.0.0.1:16011"
}
]
}
> rs.initiate(config);
{
"info" : "Config now saved locally. Should come online in about a minute.",
"ok" : 1
}
> exit
bye
[liyan@cctv226 bin]$ ./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config1 -port 16005 -logpath /home/liyan/data/mongodata/logs/config1.log -logappend -fork
./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config2 -port 16006 -logpath /home/liyan/data/mongodata/logs/config2.log -logappend -fork
./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config3 -port 16007 -logpath /home/liyan/data/mongodata/logs/config3.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/config1.log
forked process: 21497
[liyan@cctv226 bin]$
[liyan@cctv226 bin]$ ./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config2 -port 16006 -logpath /home/liyan/data/mongodata/logs/config2.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/config2.log
forked process: 21503
[liyan@cctv226 bin]$
[liyan@cctv226 bin]$ ./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config3 -port 16007 -logpath /home/liyan/data/mongodata/logs/config3.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/config3.log
forked process: 21509
[liyan@cctv226 bin]$ ./mongos -configdb 127.0.0.1:16005,127.0.0.1:16006,127.0.0.1:16007 -port 16008 -chunkSize 5 -logpath /home/liyan/data/mongodata/logs/mongos1.log -logappend -fork
./mongos -configdb 127.0.0.1:16005,127.0.0.1:16006,127.0.0.1:16007 -port 16009 -chunkSize 5 -logpath /home/liyan/data/mongodata/logs/mongos2.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/mongos1.log
forked process: 21531
[liyan@cctv226 bin]$ ./mongos -configdb 127.0.0.1:16005,127.0.0.1:16006,127.0.0.1:16007 -port 16009 -chunkSize 5 -logpath /home/liyan/data/mongodata/logs/mongos2.log -logappend -fork
all output going to: /home/liyan/data/mongodata/logs/mongos2.log
forked process: 21534
[liyan@cctv226 bin]$
[liyan@cctv226 bin]$ ps -ef|grep liyan
root 20935 19810 0 10:33 ? 00:00:00 sshd: liyan [priv]
liyan 20937 20935 0 10:33 ? 00:00:00 sshd:
liyan 20938 20937 0 10:33 pts/3 00:00:00 -bash
liyan 21328 1 0 13:46 ? 00:00:00 ./mongod -shardsvr -replSet shard_liyan1 -port 16001 -dbpath /home/liyan/data/mongodata/mutildata/shard11 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard11.log -logappend -fork
liyan 21334 1 0 13:46 ? 00:00:00 ./mongod -shardsvr -replSet shard_liyan1 -port 16003 -dbpath /home/liyan/data/mongodata/mutildata/shard12 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard12.log -logappend -fork
liyan 21385 1 0 13:47 ? 00:00:00 ./mongod -shardsvr -replSet shard_liyan2 -port 16002 -dbpath /home/liyan/data/mongodata/mutildata/shard21 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard21.log -logappend -fork
liyan 21391 1 0 13:47 ? 00:00:00 ./mongod -shardsvr -replSet shard_liyan2 -port 16004 -dbpath /home/liyan/data/mongodata/mutildata/shard22 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard22.log -logappend -fork
liyan 21443 1 0 13:49 ? 00:00:00 ./mongod -shardsvr -replSet shard_liyan3 -port 16010 -dbpath /home/liyan/data/mongodata/mutildata/shard31 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard31.log -logappend -fork
liyan 21449 1 0 13:49 ? 00:00:00 ./mongod -shardsvr -replSet shard_liyan3 -port 16011 -dbpath /home/liyan/data/mongodata/mutildata/shard32 -oplogSize 100 -logpath /home/liyan/data/mongodata/logs/shard32.log -logappend -fork
liyan 21497 1 1 13:49 ? 00:00:00 ./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config1 -port 16005 -logpath /home/liyan/data/mongodata/logs/config1.log -logappend -fork
liyan 21503 1 1 13:49 ? 00:00:00 ./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config2 -port 16006 -logpath /home/liyan/data/mongodata/logs/config2.log -logappend -fork
liyan 21509 1 1 13:49 ? 00:00:00 ./mongod -configsvr -dbpath /home/liyan/data/mongodata/mutildata/config3 -port 16007 -logpath /home/liyan/data/mongodata/logs/config3.log -logappend -fork
liyan 21531 1 0 13:50 ? 00:00:00 ./mongos -configdb 127.0.0.1:16005,127.0.0.1:16006,127.0.0.1:16007 -port 16008 -chunkSize 5 -logpath /home/liyan/data/mongodata/logs/mongos1.log -logappend -fork
liyan 21534 1 0 13:50 ? 00:00:00 ./mongos -configdb 127.0.0.1:16005,127.0.0.1:16006,127.0.0.1:16007 -port 16009 -chunkSize 5 -logpath /home/liyan/data/mongodata/logs/mongos2.log -logappend -fork
liyan 21560 20938 0 13:50 pts/3 00:00:00 ps -ef
liyan 21561 20938 0 13:50 pts/3 00:00:00 grep liyan
[liyan@cctv226 bin]$ ./mongo -port 16008
MongoDB shell version: 1.8.3-rc1
connecting to: 127.0.0.1:16008/test
> use admin
switched to db admin
> db.runCommand( { addshard : "shard_liyan1/127.0.0.1:16001,127.0.0.1:16003",name:"s1",maxsize:20480} );
{ "shardAdded" : "s1", "ok" : 1 }
> db.runCommand( { addshard : "shard_liyan2/127.0.0.1:16002,127.0.0.1:16004",name:"s2",maxsize:20480} );
{ "shardAdded" : "s2", "ok" : 1 }
> db.runCommand( { addshard : "shard_liyan3/127.0.0.1:16010,127.0.0.1:16011",name:"s3",maxsize:20480} );
{ "shardAdded" : "s3", "ok" : 1 }
> db.runCommand( { listshards : 1 } )
{
"shards" : [
{
"_id" : "s1",
"host" : "shard_liyan1/127.0.0.1:16001,127.0.0.1:16003"
},
{
"_id" : "s2",
"host" : "shard_liyan2/127.0.0.1:16002,127.0.0.1:16004"
},
{
"_id" : "s3",
"host" : "shard_liyan3/127.0.0.1:16010,127.0.0.1:16011"
}
],
"ok" : 1
}
> db.runCommand( { enablesharding : "ISYDB" } );
{ "ok" : 1 }
> db.runCommand( { shardcollection : "ISYDB.VIDEO",key : {_id: 1} } )
{ "collectionsharded" : "ISYDB.VIDEO", "ok" : 1 }
>