changeset 17:170f04992ccb default tip

add work log with count time measurement results
author taiki
date Wed, 14 Jan 2015 19:30:55 +0900
parents 731e0901d41d
children
files benchmark_gfs2.txt memo.txt on_ext4.txt on_gfs2.txt on_two_node_gfs2.txt on_zfs.txt plot.sh
diffstat 7 files changed, 176 insertions(+), 20 deletions(-) [+]
line wrap: on
line diff
--- a/benchmark_gfs2.txt	Tue Jan 13 15:22:56 2015 +0900
+++ b/benchmark_gfs2.txt	Wed Jan 14 19:30:55 2015 +0900
@@ -198,3 +198,51 @@
 wrtfile1             16832ops      280ops/s  34.5mb/s      1.3ms/op     2731us/op-cpu [0ms - 386ms]
 createfile1          16840ops      281ops/s   0.0mb/s      3.7ms/op     2748us/op-cpu [0ms - 1741ms]
    1039: 90.588: IO Summary: 185007 ops, 3082.561 ops/s, (280/561 r/w),  72.7mb/s,    439us cpu/op,  23.8ms latency
+
+
+* 2015 1/14 GFS2 / fileserver / 60 seconds / VM bldsv10 / qcow2 / FC
+   statfile1            15995ops      266ops/s   0.0mb/s      0.2ms/op     2216us/op-cpu [0ms - 1842ms]
+   deletefile1          16001ops      267ops/s   0.0mb/s      4.9ms/op     5053us/op-cpu [0ms - 2983ms]
+   closefile3           16011ops      267ops/s   0.0mb/s      0.0ms/op     2162us/op-cpu [0ms - 6ms]
+   readfile1            16017ops      267ops/s  33.8mb/s     23.3ms/op     4302us/op-cpu [0ms - 2322ms]
+   openfile2            16020ops      267ops/s   0.0mb/s      0.3ms/op     2232us/op-cpu [0ms - 1842ms]
+   closefile2           16023ops      267ops/s   0.0mb/s      0.0ms/op     2148us/op-cpu [0ms - 4ms]
+   appendfilerand1      16025ops      267ops/s   2.1mb/s     38.2ms/op     6767us/op-cpu [0ms - 2407ms]
+   openfile1            16031ops      267ops/s   0.0mb/s      0.1ms/op     2205us/op-cpu [0ms - 68ms]
+   closefile1           16033ops      267ops/s   0.0mb/s      0.0ms/op     2132us/op-cpu [0ms - 6ms]
+   wrtfile1             16035ops      267ops/s  32.9mb/s      1.3ms/op     2710us/op-cpu [0ms - 781ms]
+   createfile1          16041ops      267ops/s   0.0mb/s     11.4ms/op     2653us/op-cpu [0ms - 2284ms]
+    1037: 95.813: IO Summary: 176232 ops, 2936.273 ops/s, (267/534 r/w),  68.8mb/s,    433us cpu/op,  26.5ms latency
+
+
+* 2015 1/14 GFS2 / fileserver / 60 seconds / VM bldsv10 bldsv09  / qcow2 / FC
+
+bldsv10_guest
+
+statfile1            14297ops      238ops/s   0.0mb/s      0.1ms/op     2154us/op-cpu [0ms - 52ms]
+deletefile1          14297ops      238ops/s   0.0mb/s      7.0ms/op     5717us/op-cpu [0ms - 2275ms]
+closefile3           14297ops      238ops/s   0.0mb/s      0.0ms/op     2059us/op-cpu [0ms - 8ms]
+readfile1            14297ops      238ops/s  30.4mb/s     30.9ms/op     4364us/op-cpu [0ms - 5291ms]
+openfile2            14316ops      239ops/s   0.0mb/s      0.2ms/op     2193us/op-cpu [0ms - 72ms]
+closefile2           14316ops      239ops/s   0.0mb/s      0.0ms/op     2144us/op-cpu [0ms - 8ms]
+appendfilerand1      14316ops      239ops/s   1.9mb/s     47.9ms/op     7242us/op-cpu [0ms - 4896ms]
+openfile1            14347ops      239ops/s   0.0mb/s      0.1ms/op     2192us/op-cpu [0ms - 89ms]
+closefile1           14347ops      239ops/s   0.0mb/s      0.0ms/op     2159us/op-cpu [0ms - 12ms]
+wrtfile1             14347ops      239ops/s  29.7mb/s      1.9ms/op     2786us/op-cpu [0ms - 600ms]
+createfile1          14347ops      239ops/s   0.0mb/s      5.2ms/op     2361us/op-cpu [0ms - 3196ms]
+ 1950: 70.157: IO Summary: 157524 ops, 2624.795 ops/s, (238/478 r/w),  62.0mb/s,    442us cpu/op,  31.1ms latency
+
+bldsv09_guest
+
+statfile1            13237ops      221ops/s   0.0mb/s      0.1ms/op     2123us/op-cpu [0ms - 175ms]
+deletefile1          13239ops      221ops/s   0.0mb/s      6.8ms/op     5756us/op-cpu [0ms - 2830ms]
+closefile3           13242ops      221ops/s   0.0mb/s      0.0ms/op     2106us/op-cpu [0ms - 30ms]
+readfile1            13247ops      221ops/s  28.0mb/s     33.4ms/op     4085us/op-cpu [0ms - 9190ms]
+openfile2            13253ops      221ops/s   0.0mb/s      0.3ms/op     2255us/op-cpu [0ms - 331ms]
+closefile2           13257ops      221ops/s   0.0mb/s      0.0ms/op     2171us/op-cpu [0ms - 6ms]
+appendfilerand1      13260ops      221ops/s   1.7mb/s     59.4ms/op     7233us/op-cpu [0ms - 9181ms]
+openfile1            13265ops      221ops/s   0.0mb/s      0.1ms/op     2164us/op-cpu [0ms - 330ms]
+closefile1           13269ops      221ops/s   0.0mb/s      0.0ms/op     2170us/op-cpu [0ms - 6ms]
+wrtfile1             13272ops      221ops/s  27.6mb/s      1.2ms/op     2544us/op-cpu [0ms - 402ms]
+createfile1          13279ops      221ops/s   0.0mb/s      0.7ms/op     2370us/op-cpu [0ms - 339ms]
+ 1858: 64.261: IO Summary: 145820 ops, 2429.379 ops/s, (221/442 r/w),  57.3mb/s,    441us cpu/op,  34.0ms latency
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/memo.txt	Wed Jan 14 19:30:55 2015 +0900
@@ -0,0 +1,104 @@
+## Measurements from the two-node VM setup
+
+Under /media/fcs, create
+
+    gateway/fedora20_09.xml
+    gateway/fedora20_09.img
+
+    gateway2/fedora20_10.xml
+    gateway2/fedora20_10.img
+
+(one pair for each node).
+
+Then run them on each node.
+
+Example on gateway2:
+    virsh define /media/fcs/gateway2/fedora20_10.xml
+    virsh start fedora20_10
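+
+Presumably the same on gateway (assuming the domain defined by the XML above is
+named fedora20_09):
+
+    virsh define /media/fcs/gateway/fedora20_09.xml
+    virsh start fedora20_09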
+
+
+Create sshconfig.counttime on the host that runs ansible.
+Put all of the keys on the client that uses ansible;
+this file goes on the client as well.
+
+    Host gateway
+        HostName gateway
+        IdentityFile ~/.ssh/bldsv
+        User xxxxx
+    Host gateway2
+        HostName gateway2
+        IdentityFile ~/.ssh/bldsv
+        User xxxxx
+    Host 192.168.122.18
+        User xxx
+        IdentityFile counttime
+        ProxyCommand ssh -F sshconfig.counttime gateway nc -w 120 %h %p
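+
+A quick way to check the config before touching ansible (a sketch; assumes the
+counttime key and sshconfig.counttime sit in the client's working directory):
+
+    ssh -F sshconfig.counttime 192.168.122.18 hostname
+    # ansible can probably pick up the same config via the environment
+    # ("hosts" is a hypothetical inventory file):
+    ANSIBLE_SSH_ARGS="-F sshconfig.counttime" ansible all -i hosts -m ping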
+
+
+We want to know each VM's IP address.
+
+    /var/lib/libvirt/dnsmasq/default.leases
+
+It can be obtained from there.
+The contents look like this:
+
+    1421178594 52:54:00:44:02:54 192.168.122.18 * 01:52:54:00:44:02:54
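+
+A one-liner to pull just the addresses out of that file (sketch):
+
+    awk '{ print $3 }' /var/lib/libvirt/dnsmasq/default.leases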
+
+Come to think of it, these images were raw…
+
+    qemu-img convert -O qcow2 fedora20_09.img fedora20_09.qcow2
+
+So, convert them to qcow2.
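+
+After converting, the disk in each domain XML presumably also has to be pointed
+at the .qcow2 file with driver type qcow2, e.g.:
+
+    # adjust the disk <driver type=...> and <source file=...> by hand (assumption)
+    virsh edit fedora20_09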
+
+To make the filebench results come out right, run
+
+    echo 0 > /proc/sys/kernel/randomize_va_space
+
+beforehand.
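+
+The same thing via sysctl, and persistently if wanted (untested sketch; the
+file name under /etc/sysctl.d is made up):
+
+    sysctl -w kernel.randomize_va_space=0
+    echo "kernel.randomize_va_space = 0" > /etc/sysctl.d/90-filebench.conf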
+
+In the meantime, install hydra on such-and-such:
+    yum install hydra
+
+Measurement results for the qcow2 format on gateway2.
+They don't seem to differ much from raw.
+
+    statfile1            15995ops      266ops/s   0.0mb/s      0.2ms/op     2216us/op-cpu [0ms - 1842ms]
+    deletefile1          16001ops      267ops/s   0.0mb/s      4.9ms/op     5053us/op-cpu [0ms - 2983ms]
+    closefile3           16011ops      267ops/s   0.0mb/s      0.0ms/op     2162us/op-cpu [0ms - 6ms]
+    readfile1            16017ops      267ops/s  33.8mb/s     23.3ms/op     4302us/op-cpu [0ms - 2322ms]
+    openfile2            16020ops      267ops/s   0.0mb/s      0.3ms/op     2232us/op-cpu [0ms - 1842ms]
+    closefile2           16023ops      267ops/s   0.0mb/s      0.0ms/op     2148us/op-cpu [0ms - 4ms]
+    appendfilerand1      16025ops      267ops/s   2.1mb/s     38.2ms/op     6767us/op-cpu [0ms - 2407ms]
+    openfile1            16031ops      267ops/s   0.0mb/s      0.1ms/op     2205us/op-cpu [0ms - 68ms]
+    closefile1           16033ops      267ops/s   0.0mb/s      0.0ms/op     2132us/op-cpu [0ms - 6ms]
+    wrtfile1             16035ops      267ops/s  32.9mb/s      1.3ms/op     2710us/op-cpu [0ms - 781ms]
+    createfile1          16041ops      267ops/s   0.0mb/s     11.4ms/op     2653us/op-cpu [0ms - 2284ms]
+     1037: 95.813: IO Summary: 176232 ops, 2936.273 ops/s, (267/534 r/w),  68.8mb/s,    433us cpu/op,  26.5ms latency
+
+Run it on gateway as well.
+
+Look up the IP address:
+    192.168.122.18
+
+Create a key pair:
+    .ssh/counttime
+    .ssh/counttime.pub
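+
+For example (assuming an RSA key with no passphrase):
+
+    ssh-keygen -t rsa -N "" -f ~/.ssh/counttime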
+
+On gateway and gateway2, run
+    scp ~/.ssh/counttime.pub xxx@192.168.122.18:
+
+to send the public key to the VM.
+
+On the fedora20_XX VMs on both gateway and gateway2, do the usual:
+
+    mkdir .ssh
+    chmod 700 .ssh
+    mv counttime.pub .ssh
+    touch .ssh/authorized_keys
+    cat .ssh/counttime.pub > .ssh/authorized_keys
+    chmod 600 .ssh/authorized_keys
+
+Now we can log in with the key, without a password.
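+
+A quick check from gateway / gateway2 (sketch; assumes the private key lives at
+~/.ssh/counttime there):
+
+    ssh -o BatchMode=yes -i ~/.ssh/counttime xxx@192.168.122.18 true && echo OK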
+
+
+
--- a/on_ext4.txt	Tue Jan 13 15:22:56 2015 +0900
+++ b/on_ext4.txt	Wed Jan 14 19:30:55 2015 +0900
@@ -1,3 +1,3 @@
-VM    81.6
-SSD    181.3
-docker    196.9
+VM    3461.861
+SSD    7608.630
+docker    8255.094
--- a/on_gfs2.txt	Tue Jan 13 15:22:56 2015 +0900
+++ b/on_gfs2.txt	Wed Jan 14 19:30:55 2015 +0900
@@ -1,3 +1,3 @@
-VM    72.7
-FibreChannel    188.1
-docker    183.2
+VM    2872.431
+FibreChannel    7917.701
+docker    7695.120
--- a/on_two_node_gfs2.txt	Tue Jan 13 15:22:56 2015 +0900
+++ b/on_two_node_gfs2.txt	Wed Jan 14 19:30:55 2015 +0900
@@ -1,3 +1,3 @@
-BladeServer1node    188.1
-BladeServer2node-1    12.2
-BladeServer2node-2    21.1
+BladeServer1node    7917.701
+BladeServer2node-1    522.771
+BladeServer2node-2    902.538
--- a/on_zfs.txt	Tue Jan 13 15:22:56 2015 +0900
+++ b/on_zfs.txt	Wed Jan 14 19:30:55 2015 +0900
@@ -1,3 +1,3 @@
-VM    93.1
-SSD    169.9 
-docker    157.6
+VM    3914.076
+SSD    7134.431
+docker    6627.001
--- a/plot.sh	Tue Jan 13 15:22:56 2015 +0900
+++ b/plot.sh	Wed Jan 14 19:30:55 2015 +0900
@@ -1,19 +1,23 @@
 #!/bin/sh
+
+fsname="two_node_gfs2"
+fstitle="Two node GFS2"
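+# To plot another dataset, change fsname/fstitle above; the script reads
+# on_${fsname}.txt (label and ops-per-second columns) and writes ${fsname}.pdf via epstopdf.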
+
 gnuplot <<EOF
 # specify PostScript (eps) output format
 set terminal postscript eps color enhanced
 # "sample.eps"として出力
-#set output "two_node_gfs2.pdf"
-set output "| epstopdf -f -o=two_node_gfs2.pdf"
-# set the x-axis label to "SN ration"
+#set output "${fsname}.pdf"
+set output "| epstopdf -f -o=${fsname}.pdf"
+# set the x-axis label
 set xlabel "Environment"
-# set the y-axis label to "SER"
-set ylabel "mb/s"
+# set the y-axis label
+set ylabel "ops/s"
 # set the title
-set title "two node gfs2 benchmark"
+set title "${fstitle} benchmark"
 # what range to use for the x and y axes
 # set xrange [ 0 : 200 ]
-set yrange [ 0 : 200 ]
+set yrange [ 0 : 10000 ]
 # set yrange [ 0 : 0.25]
 # tick spacing for the x and y axes
 # set xtics 1
@@ -21,5 +25,5 @@
 # plot  "error_rate_graph.data" w l
 set boxwidth 0.5 relative
 set style fill solid border lc rgb "black"
-plot "on_two_node_gfs2.txt" using 0:2:xtic(1) with boxes notitle
+plot "on_${fsname}.txt" using 0:2:xtic(1) with boxes notitle
 EOF