Deploying a Ceph Distributed Object Storage Cluster

I. Installation Environment

node1 10.0.40.133
node2 10.0.40.134
node3 10.0.40.135
Hardware: 1 CPU / 1 GB RAM; each machine must also have at least two extra disks attached.

II. Environment Preparation (run on every node)

(1) Disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
(2) Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
(3) Disable NetworkManager:
systemctl disable NetworkManager && systemctl stop NetworkManager
(4) Add the hostname-to-IP mappings:
vim /etc/hosts
10.0.40.133 node1 
10.0.40.134 node2 
10.0.40.135 node3 
(5) Set the hostname (run the matching command on each node):
hostnamectl set-hostname node1 
hostnamectl set-hostname node2
hostnamectl set-hostname node3
(6) Sync network time and set the timezone:
echo '*/2 * * * * /usr/sbin/ntpdate cn.pool.ntp.org &>/dev/null' >> /var/spool/cron/root
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
(7) Raise the open file descriptor limit:
ulimit -n 65535
cat >> /etc/security/limits.conf << EOF
* soft nofile 65535
* hard nofile 65535
EOF
# the limits.conf change applies to new login sessions; sysctl -p is not required for this setting
(8) On node1, set up passwordless SSH to node1, node2 and node3:
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa   # generate a key pair first if none exists
ssh-copy-id root@node1
ssh-copy-id root@node2
ssh-copy-id root@node3
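
A quick check that key-based login works from node1 (a minimal sanity check, assuming the /etc/hosts entries above are in place):

# should print each hostname without prompting for a password
for h in node1 node2 node3; do ssh root@$h hostname; done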

III. Configure the yum repositories (run on every node)

[root@node1 ~]# yum install epel-release   -y
[root@node1 ~]# cat /etc/yum.repos.d/ceph.repo   # create this file on every node with the following content
[ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/$basearch
enabled=1
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/noarch
enabled=1
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-luminous/el7/SRPMS
enabled=0
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
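
After writing the repo file, it is worth refreshing the yum metadata and confirming the packages resolve (an optional sanity check):

yum clean all && yum makecache
yum list ceph-deploy ceph   # both should resolve from the ceph repos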

IV. Deploy the RADOS cluster (mon, mgr, osd, rgw); the following steps are run from the /etc/ceph directory on node1

1. Create the Ceph cluster and generate the mon configuration

yum install ceph-deploy   -y
mkdir /etc/ceph
cd /etc/ceph
ceph-deploy new node1 node2 node3 # generate the initial mon configuration

If ceph-deploy reports a Python distribute/setuptools error at this step, one workaround is to install the distribute package manually (also mirrored at https://pan.baidu.com/s/1ShQK4lGke0m6hcDvXP1jWA, extraction code: 5elt):

wget https://pypi.python.org/packages/source/d/distribute/distribute-0.7.3.zip --no-check-certificate
unzip distribute-0.7.3.zip
cd distribute-0.7.3
python setup.py install

2. Install the Ceph packages

ceph-deploy install --no-adjust-repos node1 node2 node3

3. Generate the initial monitors and the keys used by the cluster

ceph-deploy mon create-initial
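
Since the working directory here is /etc/ceph, create-initial leaves the admin keyring in place and the cluster can already be queried (a quick sanity check):

ceph -s   # the mon line should show a quorum of node1,node2,node3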

4. Edit the ceph.conf configuration

vim /etc/ceph/ceph.conf
[global]
fsid = b697e78a-2687-4291-93bf-42739e967bec
mon_initial_members = node1, node2, node3
mon_host = 10.0.40.133,10.0.40.134,10.0.40.135
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

# Note: keep at least one blank line at the very end of this file
mon clock drift allowed = 2
mon clock drift warn backoff = 30

# Public/cluster network separation (optional; worth configuring when the nodes have multiple NICs)
public_network = 10.0.40.0/24
#cluster_network = 10.4.41.0/24
#depends on your amount of PGs
#maximum number of PGs allowed per OSD
mon_max_pg_per_osd = 2000
#default is 2, try to set at least 5; the hard limit is mon_max_pg_per_osd * osd_max_pg_per_osd_hard_ratio
osd_max_pg_per_osd_hard_ratio = 10
#without it you can't remove a pool
mon_allow_pool_delete = true
#number of seconds Ceph waits after an OSD daemon is marked "down" before marking it "out"
mon_osd_down_out_interval = 86400
# number of replicas in the storage cluster (set this to 2 if you only have 2 OSDs, otherwise the cluster will WARN)
osd_pool_default_size = 2
# allow a single replica while the cluster is in a degraded state
osd_pool_default_min_size = 1
#to limit the speed of PG splitting, Ceph provides mon_osd_max_split_count: the maximum number of splits allowed per OSD during a split
mon_osd_max_split_count = 100

#if there are fewer than 5 OSDs, set pg_num to 128
#5-10 OSDs: set pg_num to 512
#10-50 OSDs: set pg_num to 4096
#more than 50 OSDs: use pgcalc to work out a value
#pool PG count: Total PGs = ((Total_number_of_OSD * 100) / max_replication_count) / pool_count, rounded up to the nearest power of two
# for this cluster (3 nodes x 2 OSDs = 6 OSDs, size 2) this works out to 512 -- see the worked example after this file
osd_pool_default_pg_num = 512
# pgp_num should match pg_num
osd_pool_default_pgp_num = 512


# bucket type used when CRUSH rules call chooseleaf; default 1
osd_crush_chooseleaf_type = 1
#disable automatic bucket index resharding
rgw_dynamic_resharding = false
# number of index shards per bucket (can be raised later for individual buckets as needed)
rgw_override_bucket_index_max_shards = 100
rgw_max_chunk_size = 1048576
rgw_cache_lru_size = 1000000
rgw_bucket_default_quota_max_objects = -1

# with large disks the warning threshold can be raised from 0.85 to 0.9
osd_failsafe_full_ratio = 0.98
mon_osd_full_ratio = 0.95
mon_osd_backfillfull_ratio = 0.93
mon_osd_nearfull_ratio = 0.9

[osd]
#default 2; interval in seconds between heartbeats an OSD sends to other OSDs
osd_heartbeat_interval = 15
#default 7; how long an OSD can go without heartbeats before the cluster considers it down
osd_heartbeat_grace = 60
# number of threads handling peering and similar requests
osd_op_threads = 8
# number of threads handling snap trim, replica trim, scrub, etc.
osd_disk_threads = 4
# if set, Ceph raises the OS-level maximum number of open file descriptors when the OSD starts, which keeps OSD processes from running out of file descriptors; the default is 0 and a 64-bit integer can be given
max_open_files = 10485760
# set this to true for the initial deployment and restart all OSDs, otherwise newly created pools report "100.000% pgs unknown"; once all OSDs have joined and the cluster is OK, set it back to false and restart all OSDs again
osd_crush_update_on_start = true

# BlueStore tuning for small-object workloads
#bluestore_cache_autotune = 0
bluestore_cache_size_hdd = 3221225472  #3G
bluestore_cache_kv_ratio = 0.6
bluestore_cache_meta_ratio = 0.4
bluestore_cache_kv_max = 1073741824  #1G
bluestore_csum_type = none
bluestore extent map shard max size = 200
bluestore extent map shard min size = 50
bluestore extent map shard target size = 100
bluestore rocksdb options = compression=kNoCompression,max_write_buffer_number=32,min_write_buffer_number_to_merge=2,recycle_log_file_num=32,compaction_style=kCompactionStyleLevel,write_buffer_size=67108864,target_file_size_base=67108864,max_background_compactions=31,level0_file_num_compaction_trigger=8,level0_slowdown_writes_trigger=32,level0_stop_writes_trigger=64,max_bytes_for_level_base=536870912,compaction_threads=32,max_bytes_for_level_multiplier=8,flusher_threads=8,compaction_readahead_size=2MB
osd map share max epochs = 100
osd max backfills = 5
osd memory target = 4294967296
osd op num shards = 8
osd op num threads per shard = 2
osd min pg log entries = 10
osd max pg log entries = 10
osd pg log dups tracked = 10
osd pg log trim min = 10

# Scrub tuning
osd scrub begin hour = 0
osd scrub end hour = 7
osd scrub chunk min = 1
osd scrub chunk max = 1
osd scrub sleep = 3
osd deep scrub interval = 241920
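
As a quick sanity check of the pg_num formula above for this cluster (3 nodes x 2 OSDs = 6 OSDs, replication size 2, one main pool):

# Total PGs = (6 * 100) / 2 / 1 = 300, rounded up to the next power of two = 512
echo $(( 6 * 100 / 2 / 1 ))   # prints 300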

5. Adjust the keyring permissions and push ceph.conf and the cluster keys to all nodes

[root@node1 ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
[root@node1 ceph]# ceph-deploy --overwrite-conf admin node1 node2 node3

6. Deploy mgr daemons to manage the cluster

[root@node1 ceph]# ceph-deploy mgr create node1 node2 node3

7. Enable the dashboard (on any mon node) and push ceph.conf to all nodes

[root@node1 ceph]# echo -e "\n[mgr]\nmgr modules = dashboard\n" >> /etc/ceph/ceph.conf
[root@node1 ceph]# ceph mgr dump # dump the current mgrmap
[root@node1 ceph]# ceph mgr module enable dashboard
[root@node1 ceph]# ceph mgr dump # dump the mgrmap again to confirm the dashboard module is enabled

[root@node1 ceph]# ss -anpt|grep 7000
LISTEN     0      5         [::]:7000                  [::]:*                   users:(("ceph-mgr",pid=2154,fd=26))
[root@node1 ceph]# ceph-deploy --overwrite-conf config push node1 node2 node3
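
With the dashboard listening on port 7000, a quick reachability check (a sketch, assuming node1 currently holds the active mgr, as the ss output above suggests):

curl -s -o /dev/null -w '%{http_code}\n' http://node1:7000   # expect 200 from the active mgr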

8. Prepare the disks (on all three nodes: node1, node2, node3; see the loop sketch after these commands)

#initialize the disks with a GPT label
parted /dev/sdb mklabel gpt -s
parted /dev/sdc mklabel gpt -s
#wipe any existing data and LVM metadata from the disks (ceph-volume lvm zap is destructive)
ceph-volume lvm zap /dev/sdb
ceph-volume lvm zap /dev/sdc
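
As noted in the heading, both steps must run on node1, node2 and node3. A loop from node1 saves some typing (a sketch, assuming all three nodes use /dev/sdb and /dev/sdc for Ceph; lvm zap destroys any data on those disks):

for h in node1 node2 node3; do
  ssh root@$h "parted -s /dev/sdb mklabel gpt; parted -s /dev/sdc mklabel gpt; ceph-volume lvm zap /dev/sdb; ceph-volume lvm zap /dev/sdc"
done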

9. Add the OSDs

ceph-deploy osd create --data /dev/sdb node1
ceph-deploy osd create --data /dev/sdb node2 
ceph-deploy osd create --data /dev/sdb node3 
ceph-deploy osd create --data /dev/sdc node1 
ceph-deploy osd create --data /dev/sdc node2 
ceph-deploy osd create --data /dev/sdc node3 
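
Once all six OSDs are created, the layout and health can be verified with the standard status commands:

ceph osd tree   # expect 6 OSDs, 2 per node, all up/in
ceph -s         # health should settle to HEALTH_OK once the PGs finish peering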

10. Install and configure RGW

[root@node1 ceph]# ceph-deploy rgw create node1 node2 node3
[root@node1 ceph]# cat >> /etc/ceph/ceph.conf << EOF
[rgw]
rgw_lifecycle_work_time = "00:00-24:00"
rgw_lc_debug_interval = 10
[client.rgw.node1]
rgw_frontends = "civetweb port=8899 num_threads=5000"
[client.rgw.node2]
rgw_frontends = "civetweb port=8899 num_threads=5000"
[client.rgw.node3]
rgw_frontends = "civetweb port=8899 num_threads=5000"

EOF
[root@node1 ceph]# ceph-deploy --overwrite-conf config push node1 node2 node3
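
The new rgw_frontends setting only takes effect after the RGW daemons are restarted; the systemd unit follows the ceph-radosgw@rgw.<hostname> naming that ceph-deploy uses by default (a sketch):

for h in node1 node2 node3; do ssh root@$h "systemctl restart ceph-radosgw@rgw.$h"; done
# an anonymous GET against civetweb should return a ListAllMyBucketsResult XML document
curl http://node1:8899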

11. Create an RGW user and grant it caps

[root@node1 ceph]# cd /etc/ceph
[root@node1 ceph]# radosgw-admin user create --uid="admin" --display-name="admin"
[root@node1 ceph]# radosgw-admin caps add --uid=admin --caps="users=*"
[root@node1 ceph]# radosgw-admin caps add --uid=admin --caps="buckets=*"
[root@node1 ceph]# radosgw-admin caps add --uid=admin --caps="metadata=*"
[root@node1 ceph]# radosgw-admin caps add --uid=admin --caps="usage=*"
[root@node1 ceph]# radosgw-admin caps add --uid=admin --caps="zone=*"
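
The same caps can also be granted in one call, since radosgw-admin accepts a semicolon-separated caps list (equivalent to the five commands above):

radosgw-admin caps add --uid=admin --caps="users=*;buckets=*;metadata=*;usage=*;zone=*"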

12. Check the storage pools and run an upload/download test

ceph df

#generate a test file
[root@node1 ceph]# echo '231' > 1.txt
#create a test pool
[root@node1 ceph]# ceph osd pool create test-pool 32
#upload a file to the test pool
[root@node1 ceph]# rados put hjj 1.txt --pool=test-pool
#list the objects in the pool
[root@node1 ceph]# rados -p test-pool ls
hjj
#download the object
[root@node1 ceph]# rados get hjj 1.txt --pool=test-pool
#remove the object from the pool
[root@node1 ceph]# rados rm hjj --pool=test-pool
#the pool name must be entered twice; note that a pool with a cache tier attached cannot be deleted this way
ceph osd pool rm test-pool test-pool --yes-i-really-really-mean-it

V. Configure s3cmd

1. Install s3cmd

yum -y install s3cmd

2. Look up the user's access keys

radosgw-admin user info --uid=admin | grep -w keys -A 6

3. Configure the s3cmd configuration file

vim /root/.s3cfg

#change the following four lines; replace the secret_key and access_key values with the keys returned by "radosgw-admin user info" above
secret_key = 3Oo8H8kru44aQr9BuUYoU5h8qPObAZKsbhsIyDVk
access_key = CDZANF35GM19G84W9MF3
host_base = 10.0.40.133:8899
host_bucket = 10.0.40.133:8899

[default]
access_key = CDZANF35GM19G84W9MF3
access_token = 
add_encoding_exts = 
add_headers = 
bucket_location = US
ca_certs_file = 
cache_file = 
check_ssl_certificate = True
check_ssl_hostname = True
cloudfront_host = cloudfront.amazonaws.com
default_mime_type = binary/octet-stream
delay_updates = False
delete_after = False
delete_after_fetch = False
delete_removed = False
dry_run = False
enable_multipart = True
encrypt = False
expiry_date = 
expiry_days = 
expiry_prefix = 
follow_symlinks = False
force = False
get_continue = False
gpg_command = /usr/bin/gpg
gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_passphrase = 
guess_mime_type = True
host_base = 10.0.40.133:8899
host_bucket = 10.0.40.133:8899
human_readable_sizes = False
invalidate_default_index_on_cf = False
invalidate_default_index_root_on_cf = True
invalidate_on_cf = False
kms_key = 
limit = -1
limitrate = 0
list_md5 = False
log_target_prefix = 
long_listing = False
max_delete = -1
mime_type = 
multipart_chunk_size_mb = 15
multipart_max_chunks = 10000
preserve_attrs = True
progress_meter = True
proxy_host = 
proxy_port = 0
put_continue = False
recursive = False
recv_chunk = 65536
reduced_redundancy = False
requester_pays = False
restore_days = 1
restore_priority = Standard
secret_key = 3Oo8H8kru44aQr9BuUYoU5h8qPObAZKsbhsIyDVk
send_chunk = 65536
server_side_encryption = False
signature_v2 = False
signurl_use_https = False
simpledb_host = sdb.amazonaws.com
skip_existing = False
socket_timeout = 300
stats = False
stop_on_error = False
storage_class = 
urlencoding_mode = normal
use_http_expect = False
use_https = False
use_mime_magic = True
verbosity = WARNING
website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
website_error = 
website_index = index.html

4. Create a bucket

[root@node1 ceph]# s3cmd mb s3://test1
[root@node1 ceph]# s3cmd ls
2021-07-19 14:26  s3://test1

5. Write the test1.json lifecycle file

[root@node1 ceph]# vim test1.json

Note: these are the two main items to adjust:
<ID>test1</ID> <!-- rule ID (named after the bucket here) -->
<Days>180</Days> <!-- expiration policy: objects older than 180 days expire -->

<?xml version="1.0" ?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <Rule>
                <ID>test1</ID>  
                <Prefix/>
                <Status>Enabled</Status>
                <Expiration>
                        <Days>180</Days> 
                </Expiration>
        </Rule>
</LifecycleConfiguration>

6. Apply the test1 lifecycle rule to the bucket with s3cmd

s3cmd setlifecycle test1.json s3://test1

7. View the test1 rule

s3cmd getlifecycle s3://test1
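
Because rgw_lc_debug_interval = 10 was set in ceph.conf, each lifecycle "day" is shortened to roughly 10 seconds, so the 180-day expiration rule becomes testable in about half an hour. The lifecycle processing state can be inspected on the RGW side:

radosgw-admin lc list   # shows per-bucket lifecycle status (e.g. UNINITIAL, PROCESSING, COMPLETE)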

8. Test the bucket

[root@node1 ceph]# echo '543' >1.txt
[root@node1 ceph]# s3cmd put 1.txt s3://test1 # upload
upload: '1.txt' -> 's3://test1/1.txt'  [1 of 1]
 4 of 4   100% in    0s    73.45 B/s  done
[root@node1 ceph]# rm -f 1.txt
[root@node1 ceph]# s3cmd get s3://test1/1.txt  # download
download: 's3://test1/1.txt' -> './1.txt'  [1 of 1]
 4 of 4   100% in    0s    92.84 B/s  done
[root@node1 ceph]# ls 1.txt 
1.txt
[root@node1 ceph]#

Success.
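
To clean up the test data afterwards (standard s3cmd object and bucket removal):

s3cmd del s3://test1/1.txt   # delete the object
s3cmd rb s3://test1          # remove the now-empty bucket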
