CPU Usage on Nodes
Pages: 1 2
ufm
46 Posts
April 18, 2020, 1:13 pm — Quote from ufm on April 18, 2020, 1:13 pm
As I can see - loop by ceph config generate-minimal-conf -> ceph config assimilate-conf -i /etc/ceph/ceph.conf
root@S-26-4-2-4:~# ps aux|grep ceph
root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr]
ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph
ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph
ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph
ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph
root 2354360 0.0 0.0 4628 860 ? S 16:09 0:00 /bin/sh -c ceph config generate-minimal-conf > /etc/ceph/ceph.conf.new
root 2354361 0.0 0.1 798424 44276 ? Sl 16:09 0:00 /usr/bin/python2.7 /usr/bin/ceph config generate-minimal-conf
root 2354385 0.0 0.0 13136 988 pts/0 S+ 16:09 0:00 grep --color=auto ceph
root@S-26-4-2-4:~# ps aux|grep ceph
root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr]
ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph
ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph
ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph
ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph
root 2354543 0.0 0.0 13136 1032 pts/0 S+ 16:09 0:00 grep --color=auto ceph
root@S-26-4-2-4:~# ps aux|grep ceph
root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr]
ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph
ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph
ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph
ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph
root 2354723 0.0 0.0 4628 736 ? S 16:09 0:00 /bin/sh -c ceph config assimilate-conf -i /etc/ceph/ceph.conf
root 2354724 0.0 0.1 796588 41884 ? Rl 16:09 0:00 /usr/bin/python2.7 /usr/bin/ceph config assimilate-conf -i /etc/ceph/ceph.conf
root 2354748 0.0 0.0 13136 1056 pts/0 S+ 16:09 0:00 grep --color=auto ceph
As I can see - loop by ceph config generate-minimal-conf -> ceph config assimilate-conf -i /etc/ceph/ceph.conf
root@S-26-4-2-4:~# ps aux|grep ceph
root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr]
ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph
ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph
ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph
ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph
root 2354360 0.0 0.0 4628 860 ? S 16:09 0:00 /bin/sh -c ceph config generate-minimal-conf > /etc/ceph/ceph.conf.new
root 2354361 0.0 0.1 798424 44276 ? Sl 16:09 0:00 /usr/bin/python2.7 /usr/bin/ceph config generate-minimal-conf
root 2354385 0.0 0.0 13136 988 pts/0 S+ 16:09 0:00 grep --color=auto ceph
root@S-26-4-2-4:~# ps aux|grep ceph
root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr]
ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph
ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph
ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph
ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph
root 2354543 0.0 0.0 13136 1032 pts/0 S+ 16:09 0:00 grep --color=auto ceph
root@S-26-4-2-4:~# ps aux|grep ceph
root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr]
ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph
ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph
ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph
ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph
root 2354723 0.0 0.0 4628 736 ? S 16:09 0:00 /bin/sh -c ceph config assimilate-conf -i /etc/ceph/ceph.conf
root 2354724 0.0 0.1 796588 41884 ? Rl 16:09 0:00 /usr/bin/python2.7 /usr/bin/ceph config assimilate-conf -i /etc/ceph/ceph.conf
root 2354748 0.0 0.0 13136 1056 pts/0 S+ 16:09 0:00 grep --color=auto ceph
admin
2,930 Posts
April 18, 2020, 2:47 pm — Quote from admin on April 18, 2020, 2:47 pm: Thanks for the help.
If you do have a node with 20-30% cpu, it will help to show its listing.
When you say loop, do you mean you are seeing different process IDs each time? Is this after rebooting?
Thanks for the help.
If you do have a node with 20-30% cpu, it will help to show its listing.
When you say loop, do you mean you are seeing different process IDs each time? Is this after rebooting?
ufm
46 Posts
April 18, 2020, 8:53 pm — Quote from ufm on April 18, 2020, 8:53 pm: Hmm. I thought you were helping me. 🙂
Yes, I am seeing different process IDs each time (it is visible in the previous post).
No, after reboot the node is working fine.
Now I have two nodes working normally (after rebooting) and four with 20-30% load and constantly restarting
/bin/sh -c ceph config generate-minimal-conf > /etc/ceph/ceph.conf.new
/bin/sh -c ceph config assimilate-conf -i /etc/ceph/ceph.conf
Hmm. I thought you were helping me. 🙂
Yes, I am seeing different process IDs each time (it is visible in the previous post).
No, after reboot the node is working fine.
Now I have two nodes working normally (after rebooting) and four with 20-30% load and constantly restarting
/bin/sh -c ceph config generate-minimal-conf > /etc/ceph/ceph.conf.new
/bin/sh -c ceph config assimilate-conf -i /etc/ceph/ceph.conf
admin
2,930 Posts
April 18, 2020, 9:57 pm — Quote from admin on April 18, 2020, 9:57 pm: Thanks for your help. You can reboot the nodes; we will test and try to reproduce this.
Thanks for your help. You can reboot the nodes; we will test and try to reproduce this.
Pages: 1 2
CPU Usage on Nodes
ufm
46 Posts
Quote from ufm on April 18, 2020, 1:13 pm
As I can see - loop by ceph config generate-minimal-conf -> ceph config assimilate-conf -i /etc/ceph/ceph.conf root@S-26-4-2-4:~# ps aux|grep ceph root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr] ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph root 2354360 0.0 0.0 4628 860 ? S 16:09 0:00 /bin/sh -c ceph config generate-minimal-conf > /etc/ceph/ceph.conf.new root 2354361 0.0 0.1 798424 44276 ? Sl 16:09 0:00 /usr/bin/python2.7 /usr/bin/ceph config generate-minimal-conf root 2354385 0.0 0.0 13136 988 pts/0 S+ 16:09 0:00 grep --color=auto ceph root@S-26-4-2-4:~# ps aux|grep ceph root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr] ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph root 2354543 0.0 0.0 13136 1032 pts/0 S+ 16:09 0:00 grep --color=auto ceph root@S-26-4-2-4:~# ps aux|grep ceph root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr] ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph ceph 70914 0.1 0.2 982240 72988 ? 
Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph root 2354723 0.0 0.0 4628 736 ? S 16:09 0:00 /bin/sh -c ceph config assimilate-conf -i /etc/ceph/ceph.conf root 2354724 0.0 0.1 796588 41884 ? Rl 16:09 0:00 /usr/bin/python2.7 /usr/bin/ceph config assimilate-conf -i /etc/ceph/ceph.conf root 2354748 0.0 0.0 13136 1056 pts/0 S+ 16:09 0:00 grep --color=auto ceph
As I can see - loop by ceph config generate-minimal-conf -> ceph config assimilate-conf -i /etc/ceph/ceph.conf root@S-26-4-2-4:~# ps aux|grep ceph root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr] ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph root 2354360 0.0 0.0 4628 860 ? S 16:09 0:00 /bin/sh -c ceph config generate-minimal-conf > /etc/ceph/ceph.conf.new root 2354361 0.0 0.1 798424 44276 ? Sl 16:09 0:00 /usr/bin/python2.7 /usr/bin/ceph config generate-minimal-conf root 2354385 0.0 0.0 13136 988 pts/0 S+ 16:09 0:00 grep --color=auto ceph root@S-26-4-2-4:~# ps aux|grep ceph root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr] ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph ceph 70914 0.1 0.2 982240 72988 ? Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph root 2354543 0.0 0.0 13136 1032 pts/0 S+ 16:09 0:00 grep --color=auto ceph root@S-26-4-2-4:~# ps aux|grep ceph root 1074 0.0 0.0 0 0 ? S< 04:00 0:00 [ceph-msgr] ceph 64416 0.1 0.2 981276 72624 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 12 --setuser ceph --setgroup ceph ceph 70914 0.1 0.2 982240 72988 ? 
Ssl 05:07 0:48 /usr/bin/ceph-osd -f --cluster ceph --id 13 --setuser ceph --setgroup ceph ceph 78196 0.1 0.2 983384 75188 ? Ssl 05:07 0:46 /usr/bin/ceph-osd -f --cluster ceph --id 14 --setuser ceph --setgroup ceph ceph 85177 0.1 0.2 981212 71192 ? Ssl 05:08 0:44 /usr/bin/ceph-osd -f --cluster ceph --id 15 --setuser ceph --setgroup ceph root 2354723 0.0 0.0 4628 736 ? S 16:09 0:00 /bin/sh -c ceph config assimilate-conf -i /etc/ceph/ceph.conf root 2354724 0.0 0.1 796588 41884 ? Rl 16:09 0:00 /usr/bin/python2.7 /usr/bin/ceph config assimilate-conf -i /etc/ceph/ceph.conf root 2354748 0.0 0.0 13136 1056 pts/0 S+ 16:09 0:00 grep --color=auto ceph
admin
2,930 Posts
Quote from admin on April 18, 2020, 2:47 pm: Thanks for the help.
If you do have a node with 20-30% cpu, it will help to show its listing.
When you say loop, do you mean you are seeing different process IDs each time? Is this after rebooting?
Thanks for the help.
If you do have a node with 20-30% cpu, it will help to show its listing.
When you say loop, do you mean you are seeing different process IDs each time? Is this after rebooting?
ufm
46 Posts
Quote from ufm on April 18, 2020, 8:53 pm: Hmm. I thought you were helping me. 🙂
Yes, I am seeing different process IDs each time (it is visible in the previous post).
No, after reboot the node is working fine.
Now I have two nodes working normally (after rebooting) and four with 20-30% load and constantly restarting
/bin/sh -c ceph config generate-minimal-conf > /etc/ceph/ceph.conf.new
/bin/sh -c ceph config assimilate-conf -i /etc/ceph/ceph.conf
Hmm. I thought you were helping me. 🙂
Yes, I am seeing different process IDs each time (it is visible in the previous post).
No, after reboot the node is working fine.
Now I have two nodes working normally (after rebooting) and four with 20-30% load and constantly restarting
/bin/sh -c ceph config generate-minimal-conf > /etc/ceph/ceph.conf.new
/bin/sh -c ceph config assimilate-conf -i /etc/ceph/ceph.conf
admin
2,930 Posts
Quote from admin on April 18, 2020, 9:57 pm: Thanks for your help. You can reboot the nodes; we will test and try to reproduce this.
Thanks for your help. You can reboot the nodes; we will test and try to reproduce this.