master-server,gateway-server,console这3个sharkstore组件是由go语言编写的,
各组件目录结构类似,main函数分别存在各组件目录的cmd下。
data-server是由c++编写的,目录结构和go语言组件不同。下面分别描述下编译各组件的命令:
cd sharkstore/master-server/cmd && sh ./build.sh
cd sharkstore/proxy/gateway-server/cmd && sh ./build.sh
cd sharkstore/console/cmd && sh ./build.sh
编译前,首先安装data-server依赖,详细查看data-server目录中INSTALL.md,
依赖安装之后执行下面命令进行编译:
mkdir build && cd build
cmake ..
make -j 4
使用下面的配置可以快速完成sharkstore在本地的环境搭建:
mkdir -p sharkstore/bin/{ms,gw,cs,ds}
cp sharkstore/master-server/cmd/master-server sharkstore/bin/ms
cp sharkstore/proxy/gateway-server/cmd/gateway-server sharkstore/bin/gw
cp sharkstore/console/cmd/sharkstore-console sharkstore/bin/cs
在sharkstore/bin/ms目录中创建如下配置ms.conf
touch sharkstore/bin/ms/ms.conf
#MS Configuration.
node-id = 1
name = "ms"
#process role is master or metric
role = "master"
version = "v1"
#secret key for master, leaves it empty will ignore http request signature verification
secret-key = "test"
#cluster meta data store path
data-dir = "sharkstore/bin/ms/data"
[cluster]
cluster-id = 1
[[cluster.peer]]
id = 1
host = "127.0.0.1"
http-port = 9887
rpc-port = 38887
raft-ports = [9877,9867]
[raft]
heartbeat-interval = "500ms"
retain-logs-count = 100
[log]
dir = "sharkstore/bin/ms/log"
module = "master"
level = "debug"
[metric]
interval = "15s"
address = ""
[schedule]
max-snapshot-count = 10000000
max-node-down-time = "1h"
leader-schedule-limit = 64
region-schedule-limit = 120
replica-schedule-limit = 120
max-task-timeout = "300s"
max-range-down-time = "360s"
node-range-balance-time = "120s"
storage-available-threshold = 25
[replication]
max-replicas = 1
location-labels = []
在sharkstore/bin/gw目录中创建如下配置gw.conf
touch sharkstore/bin/gw/gw.conf
#GS Configuration.
http-port = 18080
lock-port = 18090
mysql-port = 4360
max-clients = 10000
max-record-limit = 10000
user = "test"
password = "123456"
charset = "utf8"
[performance]
max-work-num = 100
#task queue size
max-task-queue-len = 10000
#keep connect size for each ds
grpc-pool-size = 10
#128 KB
grpc-win-size = 131072
slow-insert = "20ms"
slow-select = "100ms"
slow-delete = "20ms"
[cluster]
id = 1
address = ["127.0.0.1:38887"]
token = "test"
[log]
dir = "./log"
module = "gateway"
level = "debug"
[metric]
interval = "15s"
address = ""
在sharkstore/bin/cs目录中创建如下配置cs.conf
touch sharkstore/bin/cs/cs.conf
project.home.dir=sharkstore/console
http.port=10081
#gin framework
gin.log.file=./gin.log
#debug, release, test
gin.mode=debug
#gateway mysql api
mysql.host=127.0.0.1
mysql.port=4360
mysql.user=test
mysql.passwd=123456
#log
log.dir = ./logs
log.module= console
log.level = warn
#grafana monitor domain
monitor.domain = http://sharkstore.grafana.com
#online
app.name = source
app.token = 123456789
app.domain.name = test.sharkstore.console.com
app.address = http://test.sharkstore.console.com:10081
lock.cluster.id=1
在sharkstore/bin/ds目录中创建如下配置ds.conf
touch sharkstore/bin/ds/ds.conf
base_path = sharkstore/bin/ds
[rocksdb]
#rocksdb path
path = sharkstore/bin/ds/db
#rocksdb block cache size, default 1024MB, max unit: MB
block_cache_size = 10240MB
#time unit: ms
#set task default timeout if request does not specify one
#default value is 3000 ms
task_timeout = 3000
#thread stack size, should >= 64KB
#default value is 64KB
thread_stack_size = 256KB
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group =
#unix username to run this program,
#not set (empty) means run by current user
run_by_user =
[heartbeat]
#master's ip_addr and port
#may be multiple different master
master_host = 127.0.0.1:38887
#the number of the above master_host
master_num = 1
#time unit: s
#default value is 10 s
node_heartbeat_interval = 10
#time unit: s
#default value is 10 s
range_heartbeat_interval = 10
[log]
#if log path is not set then use base_path
#log path = $log_path + /logs
log_path= sharkstore/bin/ds
#sync log buff to disk every interval seconds
#default value is 10 seconds
sync_log_buff_interval = 10
#if rotate the error log every day
#default value is false
rotate_error_log = true
#keep days of the log files
#0 means do not delete old log files
#default value is 0
log_file_keep_days = 7
#standard log level as syslog, case insensitive, value list:
###emerg for emergency
###alert
###crit for critical
###error
###warn for warning
###notice
###info
###debug
log_level=debug
[socket]
#connect timeout in seconds
#default value is 30s
connect_timeout = 3
#network timeout in seconds
#default value is 30s
network_timeout = 30
#epoll wait timeout
#default value is 30ms
epoll_timeout = 30
#socket keep time
#default value is 30m
socket_keep_time = 1800
#max concurrent connections this server supported
#default value is 256
max_connections = 100000
#default value is 16K
max_pkg_size = 256KB
#default value is 64KB
min_buff_size = 16KB
#default value is 64KB
max_buff_size = 256KB
[worker]
#ip_addr = 127.0.0.1
#listen port of recv data
port = 7180
#socket accept thread number
#default value is 1
accept_threads = 1
#epoll recv event thread number
#no default value and must be configured
event_recv_threads = 4
#epoll send event thread number
#no default value and must be configured
event_send_threads = 2
#thread only handle fast tasks. eg. RawGet
fast_worker = 4
#thread only handle slow tasks. eg. select
slow_worker = 8
#max count of recv worker pkg
max_recv_pkg = 10000000
[manager]
#ip_addr = 127.0.0.1
#listen port of recv data
port = 26180
#socket accept thread number
#default value is 1
accept_threads = 1
#epoll recv event thread number
#no default value and must be configured
event_recv_threads = 1
#epoll send event thread number
#no default value and must be configured
event_send_threads = 1
#the number of threads dealing with the recved queue
#no default value and must be configured
worker_threads = 2
#max count of recv manager pkg
max_recv_pkg = 1000
[range]
#the range real_size is calculated
#if statis_size is greater than check_size
#default value is 32MB
check_size = 32MB
#range split threshold
#default value is 64MB
split_size = 64MB
#default value is 128MB
max_size = 128MB
#range real size statis thread num
worker_threads = 1
#0 sql, 1 redis, default=0
access_mode = 0
[raft]
#ports used by the raft protocol
port = 48887
#raft log path
log_path = sharkstore/bin/ds/raft
#log_file_size = 16777216
#max_log_files = 5
#consensus_threads = 4
#consensus_queue = 100000
#apply_threads = 4
#apply_queue = 100000
#grpc_send_threads = 4
#grpc_recv_threads = 4
[metric]
#metric report ip
ip_addr =
#metric report port
port = 9887
#epoll send event thread number
#no default value and must be configured
event_send_threads = 1
max_recv_pkg = 10
#metric report interval
#default value is 60s
interval = 60
#which cluster to belong to
cluster_id = 1
#metric report name_space
name_space = ds
#metric report uri
uri = /metric/tcp/process
[client]
ip_addr =
port = 7180
event_recv_threads = 1
event_send_threads = 1
worker_threads = 0
#max count of recv manager pkg
max_recv_pkg = 10000000
cd sharkstore/bin/ms
setsid ./master-server -config=ms.conf &
cd sharkstore/bin/gw
setsid ./gateway-server -config=gw.conf &
cd sharkstore/bin/cs
setsid ./sharkstore-console -config=cs.conf &
cd sharkstore/bin/ds
./data-server ds.conf
访问console的域名http://test.sharkstore.console.com:10081,
在左边导航选择"集群初始化",并填写master-server中相应的集群id和http服务地址:
选择左边导航中的"元数据管理"->"创建集群",并填写gateway http服务地址和sql服务地址:
创建成功后,可以在"系统首页"查看到所有集群,并且可以在"元数据"中创建该集群的库和表:
选择集群"元数据"->"创建db",并填写库名(例:db_test),:
选择"创建表",并填写表名(例:table_test)和表结构(例:col_test):
输入gateway-server的用户和密码,并写入一条记录:
用户可以通过访问gateway-server来读写创建的表,
gateway-server提供sql和rest接口,详细查看sql接口和http接口。