ceph ceph.conf configuration walkthrough

This post looks at two questions:

1. How does a setting in ceph.conf take effect, and how can a change be applied to a running cluster immediately? (A sketch follows the status output below.)

2. What configuration options does ceph.conf contain, and what does each of them mean?

The comment character in ceph.conf is ';' (a leading '#' also starts a comment).
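A minimal illustrative fragment (not from the cluster below) showing the comment syntax, and that spaces and underscores are interchangeable in option names:

; a comment introduced with a semicolon
# a hash works as well
[global]
debug ms = 1      ; the same option as "debug_ms = 1"
debug_osd = 25    # the same option as "debug osd = 25"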

The test cluster below was built from the ceph git repository and launched with vstart.sh; -n creates a fresh cluster, -d enables debug output, -x enables cephx authentication, and -l binds all daemons to localhost:

[harvis@centos7 build]$ ../src/vstart.sh -d -n -x -l
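To tear the test cluster down again there is a companion script next to vstart.sh (a hedged note: re-running vstart.sh without -n should then reuse the existing cluster data rather than recreate it):

[harvis@centos7 build]$ ../src/stop.sh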

Below is the cluster status, captured three times as the cluster comes up, followed by the contents of the generated ceph.conf.

Immediately after startup the cluster reports HEALTH_WARN: no mgr has registered yet and the placement groups are still being created and activated:

[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_WARN
            no active mgr
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e2: 0/0/1 up
        mgr no daemons active
     osdmap e16: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v21: 24 pgs, 3 pools, 0 bytes data, 0 objects
            75710 MB used, 22542 MB / 98253 MB avail
                  16 creating+activating
                   8 activating

A moment later an MDS has gone active and all 24 PGs are active+clean, but health stays HEALTH_WARN because no mgr daemon is active yet:

[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_WARN
            no active mgr
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e7: 1/1/1 up {0=b=up:active}, 2 up:standby
        mgr no daemons active
     osdmap e18: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v26: 24 pgs, 3 pools, 2238 bytes data, 21 objects
            75742 MB used, 22510 MB / 98253 MB avail
                  24 active+clean
  client io 926 B/s wr, 0 op/s rd, 4 op/s wr

Once the mgr daemon (x) registers as active, the cluster reaches HEALTH_OK:

[harvis@centos7 build]$ ./bin/ceph -s
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
    cluster d439bdca-809f-4bbe-af91-c67647317172
     health HEALTH_OK
     monmap e2: 3 mons at {a=127.0.0.1:40165/0,b=127.0.0.1:40166/0,c=127.0.0.1:40167/0}
            election epoch 6, quorum 0,1,2 a,b,c
      fsmap e7: 1/1/1 up {0=b=up:active}, 2 up:standby
        mgr active: x
     osdmap e18: 3 osds: 3 up, 3 in
            flags sortbitwise,require_jewel_osds,require_kraken_osds,require_luminous_osds
      pgmap v34: 24 pgs, 3 pools, 2238 bytes data, 21 objects
            75753 MB used, 22499 MB / 98253 MB avail
                  24 active+clean
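Back to question 1: a changed value can be applied to the running cluster without restarting any daemon. A minimal sketch against the vstart cluster above, using the generic injectargs mechanism and the per-daemon admin socket (both standard in releases of this era). Values applied this way live only in memory, so the change must also be made in ceph.conf to survive a restart:

# Push a runtime override to every OSD through the monitors:
[harvis@centos7 build]$ ./bin/ceph tell osd.* injectargs '--debug-osd 20 --debug-ms 1'
# Or talk to a single daemon directly over its admin socket
# (the socket paths are the "admin socket" entries in ceph.conf below):
[harvis@centos7 build]$ ./bin/ceph daemon osd.0 config set debug_osd 20
[harvis@centos7 build]$ ./bin/ceph daemon osd.0 config get debug_osd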

[harvis@centos7 build]$ cat ceph.conf
; generated by vstart.sh on Sun Apr 9 03:40:11 CST 2017
[client.vstart.sh]
        num mon = 3
        num osd = 3
        num mds = 3
        num mgr = 1
        num rgw = 0

[global]
        fsid = d439bdca-809f-4bbe-af91-c67647317172
        osd pg bits = 3
        osd pgp bits = 5 ; (invalid, but ceph should cope!)
        osd pool default size = 3
        osd crush chooseleaf type = 0
        osd pool default min size = 1
        osd failsafe full ratio = .99
        mon osd reporter subtree level = osd
        mon osd full ratio = .99
        mon data avail warn = 10
        mon data avail crit = 1
        erasure code dir = /CEPH/build/lib
        plugin dir = /CEPH/build/lib
        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
        rgw frontends = civetweb port=8000
        filestore fd cache size = 32
        run dir = /CEPH/build/out
        enable experimental unrecoverable data corrupting features = *
        lockdep = true
        auth cluster required = cephx
        auth service required = cephx
        auth client required = cephx

[client]
        keyring = /CEPH/build/keyring
        log file = /CEPH/build/out/$name.$pid.log
        admin socket = /CEPH/build/out/$name.$pid.asok

[mds]
        log file = /CEPH/build/out/$name.log
        admin socket = /CEPH/build/out/$name.asok
        chdir = ""
        pid file = /CEPH/build/out/$name.pid
        heartbeat file = /CEPH/build/out/$name.heartbeat
        debug ms = 1
        debug mds = 20
        debug auth = 20
        debug monc = 20
        debug mgrc = 20
        mds debug scatterstat = true
        mds verify scatter = true
        mds log max segments = 2
        mds debug frag = true
        mds debug auth pins = true
        mds debug subtrees = true
        mds data = /CEPH/build/dev/mds.$id
        mds root ino uid = 1000
        mds root ino gid = 1000

[mgr]
        mgr modules = rest fsstatus
        mgr data = /CEPH/build/dev/mgr.$id
        mgr module path = /CEPH/src/pybind/mgr
        log file = /CEPH/build/out/$name.log
        admin socket = /CEPH/build/out/$name.asok
        chdir = ""
        pid file = /CEPH/build/out/$name.pid
        heartbeat file = /CEPH/build/out/$name.heartbeat
        debug ms = 1
        debug monc = 20
        debug mgr = 20

[osd]
        log file = /CEPH/build/out/$name.log
        admin socket = /CEPH/build/out/$name.asok
        chdir = ""
        pid file = /CEPH/build/out/$name.pid
        heartbeat file = /CEPH/build/out/$name.heartbeat
        osd_check_max_object_name_len_on_startup = false
        osd data = /CEPH/build/dev/osd$id
        osd journal = /CEPH/build/dev/osd$id/journal
        osd journal size = 100
        osd class tmp = out
        osd class dir = /CEPH/build/lib
        osd class load list = *
        osd class default list = *
        osd scrub load threshold = 2000.0
        osd debug op order = true
        osd debug misdirected ops = true
        filestore wbthrottle xfs ios start flusher = 10
        filestore wbthrottle xfs ios hard limit = 20
        filestore wbthrottle xfs inodes hard limit = 30
        filestore wbthrottle btrfs ios start flusher = 10
        filestore wbthrottle btrfs ios hard limit = 20
        filestore wbthrottle btrfs inodes hard limit = 30
        osd copyfrom max chunk = 524288
        bluestore fsck on mount = true
        bluestore block create = true
        bluestore block db size = 67108864
        bluestore block db create = true
        bluestore block wal size = 1048576000
        bluestore block wal create = true
        debug ms = 1
        debug osd = 25
        debug objecter = 20
        debug monc = 20
        debug mgrc = 20
        debug journal = 20
        debug filestore = 20
        debug bluestore = 30
        debug bluefs = 20
        debug rocksdb = 10
        debug bdev = 20
        debug rgw = 20
        debug objclass = 20

[mon]
        mon pg warn min per osd = 3
        mon osd allow primary affinity = true
        mon osd allow pg remap = true
        mon reweight min pgs per osd = 4
        mon osd prime pg temp = true
        crushtool = /CEPH/build/bin/crushtool
        mon allow pool delete = true
        log file = /CEPH/build/out/$name.log
        admin socket = /CEPH/build/out/$name.asok
        chdir = ""
        pid file = /CEPH/build/out/$name.pid
        heartbeat file = /CEPH/build/out/$name.heartbeat
        debug mon = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1
        mon cluster log file = /CEPH/build/out/cluster.mon.$id.log

[global]

[mon.a]
        host = centos7
        mon data = /CEPH/build/dev/mon.a
        mon addr = 127.0.0.1:40165
[mon.b]
        host = centos7
        mon data = /CEPH/build/dev/mon.b
        mon addr = 127.0.0.1:40166
[mon.c]
        host = centos7
        mon data = /CEPH/build/dev/mon.c
        mon addr = 127.0.0.1:40167
[osd.0]
        host = centos7
[osd.1]
        host = centos7
[osd.2]
        host = centos7
[mds.a]
        host = centos7
[mds.b]
        host = centos7
[mds.c]
        host = centos7
[mgr.x]
        host = centos7
[harvis@centos7 build]$
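Two things help when reading the file above. First, $name, $id, $pid and $host are metavariables that each daemon expands at startup: $name becomes type.id (e.g. osd.0 or mds.a), $id is the daemon id, $pid the process id, and $host the hostname. Second, to see which options a running daemon has set away from their compiled-in defaults, the admin socket offers a diff view; a minimal sketch (assuming these admin-socket commands are available in this build, as in contemporary releases):

# Options that differ from their defaults, per daemon:
[harvis@centos7 build]$ ./bin/ceph daemon mon.a config diff
# The full effective configuration of a daemon:
[harvis@centos7 build]$ ./bin/ceph daemon osd.0 config show | grep debug_osd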
