We’re in the office this weekend (as we have been for the last few weekends!), continuing to hammer on ZFS as we get ever closer to S10U2. Building a bit on the work I did last time just to see how well ZFS plays with others, I thought I’d throw a few more variables into the mix.

As expected, it all just works – <sigh>. Of course, I wouldn’t expect anyone to ever run such a configuration: the sharp-eyed amongst you will notice that this pool is actually pretty vulnerable to a single device failure, and besides, now that you’ve got ZFS, why would you bother with SVM or Veritas? Still, it’s nice to see that this sort of thing can be set up at all. Do yourself a favour, though: don’t! ;-)
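
For the record, a pool like this goes together roughly as follows. This is a sketch reconstructed from the device names in the output below, not the exact commands I typed, and each of the building blocks (the metadevice, the VxVM volume, the lofi device and the files) obviously has to exist first; sketches of those are further down.

# SVM metadevice plus VxVM volume as the first two top-level vdevs
zpool create idliketobuytheworldacoke /dev/md/dsk/d1 \
    /dev/vx/dsk/default/defaultvol
# a two-way mirror and a two-disk raidz as further top-level vdevs;
# -f is needed because zpool (quite sensibly) objects to mixing
# replication levels within one pool
zpool add -f idliketobuytheworldacoke mirror c5t8d0 c5t9d0
zpool add -f idliketobuytheworldacoke raidz c5t10d0 c5t11d0
# and finally a bare disk, a lofi device and a plain file
zpool add -f idliketobuytheworldacoke c5t12d0 /dev/lofi/1 /ufs/file2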

root@usuki[1] zpool list
NAME                    SIZE    USED   AVAIL    CAP  HEALTH     ALTROOT
idliketobuytheworldacoke    744G   32.5G    711G     4%  ONLINE     -
root@usuki[2] zfs list
NAME                    USED  AVAIL  REFER  MOUNTPOINT
idliketobuytheworldacoke  33.3G   704G  32.3G  /idliketobuytheworldacoke
idliketobuytheworldacoke/andkeepitcompany   200M   705G   200M  -
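
The andkeepitcompany dataset is an emulated volume (a zvol) rather than a filesystem, which is why its mountpoint shows up as "-". Something along these lines would have created it (the 200MB size is inferred from the zfs list output above):

# carve a 200MB emulated volume out of the pool
zfs create -V 200m idliketobuytheworldacoke/andkeepitcompany
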
root@usuki[3] mount -v | grep vxfs
/dev/zvol/dsk/idliketobuytheworldacoke/andkeepitcompany
on /itstherealthingcokeiswhattheworldwantstoday
type vxfs read/write/setuid/devices/delaylog/largefiles/ioerror=mwdisable/dev=2d40001
on Sat Mar 11 15:03:51 2006
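
And yes, that’s VxFS sitting on top of the zvol. There’s nothing clever going on; you just point mkfs and mount at the emulated device, roughly like so (a sketch, not the exact invocation; the mount options shown above are simply the VxFS defaults):

# make a VxFS filesystem on the zvol's raw device and mount it
mkfs -F vxfs /dev/zvol/rdsk/idliketobuytheworldacoke/andkeepitcompany
mkdir /itstherealthingcokeiswhattheworldwantstoday
mount -F vxfs /dev/zvol/dsk/idliketobuytheworldacoke/andkeepitcompany \
    /itstherealthingcokeiswhattheworldwantstoday
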
root@usuki[4] zpool iostat -v
                                       capacity     operations    bandwidth
pool                                used  avail   read  write   read  write
--------------------------------  -----  -----  -----  -----  -----  -----
idliketobuytheworldacoke          32.6G   711G     69     49  5.17M  3.17M
  /dev/md/dsk/d1                  4.75G   199G      8      6   854K   510K
  /dev/vx/dsk/default/defaultvol  4.73G   199G     11      8  1.11M   686K
  mirror                          4.56G  63.4G     23     16  1.04M   664K
    c5t8d0                            -      -      8      8   953K   665K
    c5t9d0                            -      -      7      8   702K   665K
  raidz                           4.93G   131G     10      8   576K   362K
    c5t10d0                           -      -      4      4   440K   362K
    c5t11d0                           -      -      4      4   449K   362K
  c5t12d0                         4.59G  63.4G     10      8  1.07M   667K
  /dev/lofi/1                     4.53G  27.2G     14     10  1.46M   911K
  /ufs/file2                      4.55G  27.2G     23     12  1.42M   914K
--------------------------------  -----  -----  -----  -----  -----  -----
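
The lofi and file vdevs are about as low-tech as they look. The file backing /dev/lofi/1 doesn’t appear in any of the output, so the path below is made up, and the 32GB size is a guess from the capacity column; the recipe is roughly:

# a file-backed block device via lofi (hypothetical backing-file path)
mkfile 32g /ufs/file1
lofiadm -a /ufs/file1     # prints the device it attached, e.g. /dev/lofi/1
# and a plain file handed straight to zpool as a vdev
mkfile 32g /ufs/file2
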
root@usuki[5] zpool status -v
  pool: idliketobuytheworldacoke
 state: ONLINE
 scrub: scrub in progress, 21.17% done, 0h16m to go
config:

        NAME                              STATE     READ WRITE CKSUM
        idliketobuytheworldacoke          ONLINE       0     0     0
          /dev/md/dsk/d1                  ONLINE       0     0     0
          /dev/vx/dsk/default/defaultvol  ONLINE       0     0     0
          mirror                          ONLINE       0     0     0
            c5t8d0                        ONLINE       0     0     0
            c5t9d0                        ONLINE       0     0     0
          raidz                           ONLINE       0     0     0
            c5t10d0                       ONLINE       0     0     0
            c5t11d0                       ONLINE       0     0     0
          c5t12d0                         ONLINE       0     0     0
          /dev/lofi/1                     ONLINE       0     0     0
          /ufs/file2                      ONLINE       0     0     0

errors: No known data errors
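
Scrubs don’t start themselves, by the way; the one in progress above was kicked off by hand, and doing so is a single command:

zpool scrub idliketobuytheworldacoke
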
root@usuki[6] vxdg list default
Group:     default
dgid:      1142081013.24.usuki
import-id: 1024.23
flags:
version:   120
alignment: 8192 (bytes)
ssb:            on
detach-policy: global
dg-fail-policy: dgdisable
copies:    nconfig=default nlog=default
config:    seqno=0.1044 permlen=1479 free=1473 templen=3 loglen=224
config disk c5t3d0 copy 1 len=1479 state=clean online
config disk c5t4d0 copy 1 len=1479 state=clean online
config disk c5t5d0 copy 1 len=1479 state=clean online
log disk c5t3d0 copy 1 len=224
log disk c5t4d0 copy 1 len=224
log disk c5t5d0 copy 1 len=224
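
For completeness, the VxVM side: defaultvol lives in a disk group built on c5t3d0 through c5t5d0, and would have gone together roughly like this (a sketch only; the 200g figure is inferred from the pool capacity above, and your path to the VxVM setup utilities may differ):

# initialise the disks for VxVM use and create the disk group
/etc/vx/bin/vxdisksetup -i c5t3d0
/etc/vx/bin/vxdisksetup -i c5t4d0
/etc/vx/bin/vxdisksetup -i c5t5d0
vxdg init default c5t3d0 c5t4d0 c5t5d0
# carve a volume out of the group for ZFS to use
vxassist -g default make defaultvol 200g
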
root@usuki[7] metastat
d1: Concat/Stripe
    Size: 429948928 blocks (205 GB)
    Stripe 0: (interlace: 32 blocks)
        Device     Start Block  Dbase   Reloc
        c5t0d0s0      32768     Yes     Yes
        c5t1d0s0          0     No      Yes
        c5t2d0s0          0     No      Yes

Device Relocation Information:
Device   Reloc  Device ID
c5t0d0   Yes    id1,sd@SSEAGATE_ST373307LSUN72G_3HZ9EZY20000751670H9
c5t1d0   Yes    id1,sd@SSEAGATE_ST373307LSUN72G_3HZ9E7L300007516ZTM4
c5t2d0   Yes    id1,sd@SSEAGATE_ST373307LSUN72G_3HZ9FV25000075179V9B
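
And the SVM piece: d1 is a plain three-wide stripe with a 32-block interlace across c5t0d0, c5t1d0 and c5t2d0, so metainit builds it in one line (assuming slice 0 on each disk, as metastat shows, and that the usual metadb state database replicas are already in place):

# three-way stripe, 32-block interlace, on slice 0 of each disk
metainit d1 1 3 c5t0d0s0 c5t1d0s0 c5t2d0s0 -i 32b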