
zpool create tank mirror c1t1d0s0 c1t0d0s0 cache c1t2d0s0 -- 10/09

zpool iostat -v pool
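A hedged end-to-end example tying the cache device and the iostat check together (device names are made up):
zpool create tank mirror c1t1d0 c1t2d0 cache c1t3d0   -- cache device needs the 10/09 release or later
zpool status tank                                     -- the cache disk shows up under a separate "cache" section
zpool iostat -v tank 5                                -- per-vdev I/O, refreshed every 5 seconds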


10/08 (u6) -- zfs boot
zfs send
zfs rollback -f
zfs send -I pool/fs@snapA pool/fs@snapB > /snap/fscombo -- incremental
zfs send -I pool/fs@snap1 > /snap/fsclonesnap-I
zfs receive -F pool/clone < /snap/fsclonesnap-I
refquota -- hard limit on the dataset's own data (excludes snapshots/descendants)
quota -- additional limit that also counts snapshots and descendants
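For example (dataset name assumed), refquota caps only the dataset's own data while quota also counts snapshots and descendants:
zfs set refquota=10g pool1/home/user1     -- hard limit, excludes snapshots/descendants
zfs set quota=20g pool1/home/user1        -- overall limit, includes them
zfs get refquota,quota pool1/home/user1   -- verify both properties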
zfs get all pool1
failmode --> wait / continue / panic -- behavior on device failure
zpool create -o failmode=continue users mirror ctd ctd
zpool history
zpool history -l poolname
zfs upgrade
zpool set delegation=off poolname -- zfs allow/unallow
set mountpoint=
autoreplace=on --> automatic replacement without running zpool replace
usb -- configured automatically, otherwise cfgadm -c configure
zfs snapshot -r users/home@today
zfs rename -r users/home@today @yesterday
default (compression=on) -- lzjb compression
gzip is also available
set compression=gzip
set copies=1,2,3
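A small sketch of the compression and copies properties (filesystem name is made up):
zfs create pool/docs
zfs set compression=gzip pool/docs                    -- gzip instead of the default lzjb
zfs set copies=2 pool/docs                            -- store two copies of each block
zfs get compression,copies,compressratio pool/docs    -- verify the settings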
zfs set shareiscsi=on
iscsiadm list target
raidz2 -- 11/06
zfs promote
zpool clear --> to clear the fault/error counters
fsstat zfs
https://hostname:6789/zfs
/usr/sbin/smcwebserver start
/usr/sbin/smcwebserver enable
minimum: 6/06 (first Solaris 10 release with ZFS)
zfs set sharenfs=on pool/fs
new acl -- NFSv4 style
old acl --> POSIX-draft
efi label -- slice 8 --> 8 MB reserved
mirror ctds ctds mirror ctds ctds
raid 5/4/6/RAID-DP -- RAID-5 write hole --> if only part of a RAID-5 stripe is written and power is lost before all blocks have made it to disk, the parity remains out of date and is therefore useless
raidz -- variable-width RAID stripe -- only full-stripe writes -- the metadata has enough info about the underlying data redundancy

first software solution to the write-hole issue


2 disks minimum for raidz
3 disks minimum for raidz2
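Matching the minimums above, a sketch with placeholder disks (3-disk raidz shown; more disks are typical in practice):
zpool create rz raidz c2t0d0 c2t1d0 c2t2d0        -- single-parity raidz
zpool create rz2 raidz2 c3t0d0 c3t1d0 c3t2d0      -- double-parity raidz2
zpool status rz rz2                               -- check both layouts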
hybrid pool -- unified storage
zfs root -- SMI label
zpool attach
zpool create data ctds log ctds
no mirror or raidz for cache devices
zpool create -n mypool mirror ctds ctds
zpool create -m /export/home home c1t0d0 -- sets a mount point other than the default
zpool add pool mirror <> <>
zpool add pool log mirror ctds ctds
zpool add pool cache ctds
zpool attach -- changing to a different (mirrored) configuration
zpool create a mirror <> <>
zpool attach a <old><new>
zpool detach a <>
zpool online/offline pool <ctds>
zpool clear pool <ctds>
zpool replace pool <ctds>
zpool replace pool <ctds> <ctds> -- total pool / no redundancy
zpool create z mirror <> <> spare <>
zpool add pool spare <>
zpool remove pool c2t3d0 --only for spare and cache
zpool status -x
zpool replace pool <old> <new>
zpool replace pool <>
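Putting the spare and replace commands above together with placeholder devices:
zpool create z mirror c2t0d0 c2t1d0 spare c2t3d0   -- mirror with a hot spare
zpool status -x                                    -- quick health summary
zpool replace z c2t0d0 c2t4d0                      -- replace a failing disk with a new one
zpool remove z c2t3d0                              -- remove only works for spares and cache devices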
altroot -- alternate root mount point
cachefile -- cache file in a different location
zpool import -c <file>
failmode=wait --> stop all I/O until the device has been restored
continue --> allow reads from healthy devices, but stop writes
panic --> crash dump
zpool export pool
zpool import /zpool import <pool>
zpool import <old name> <new name>
zpool import -d <dir> <pool>
zpool destroy pool
zpool import -D
zpool import -D <pool>
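A sketch of the export/import variants above, using an assumed pool named users:
zpool export users               -- release the pool before moving it
zpool import                     -- list pools that can be imported
zpool import users               -- import under the same name
zpool import users users2        -- ...or import it under a new name
zpool import -d /dev/dsk users   -- search a specific device directory
zpool import -D                  -- list destroyed pools
zpool import -D users            -- recover a destroyed pool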
zpool upgrade -a
768 MB memory -- minimum for a ZFS root pool
zpool attach rootpool <old> <newdisk> -- mirroring
lucreate -n zfs1009BE
boot -L -- to see the available BEs
boot -Z <rootpool> rpool/ROOT/newbe
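A minimal Live Upgrade sequence to the new ZFS BE might look like this (-p names the target ZFS pool when migrating from UFS; BE and pool names are the ones assumed above):
lucreate -n zfs1009BE -p rpool   -- create the new BE in the ZFS root pool
luactivate zfs1009BE             -- make it the BE for the next boot
init 6                           -- reboot into the new BE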
for zone ->set zonepath=/zonepool/pool1
**** increase swap ****
zfs create -V 3g -b 8k rpool/swap
swap -d /dev/zvol/dsk/pool/swap
zfs set volsize=2g rpool/swap
swap -a /dev/zvol/dsk/pool/swap
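To confirm the swap change took effect (paths as above):
swap -l                        -- list active swap devices and their sizes
zfs get volsize rpool/swap     -- confirm the new volume size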

install boot: installboot -F zfs /usr/platform/`uname -i`/lib/fs/zfs/bootblk /dev/rdsk/c1t1d


x86 # installgrub /boot/grub/stage1 /boot/grub/stage2 /dev/rdsk/c1t1d0s0
zfs inherit -r mountpoint rpool/ROOT/s10u6 &&
zfs set mountpoint=/ rpool/ROOT/s10u6
root disk replacement
disk should be labeled SMI
zpool replace / zpool attach .. once resilvering completes, the boot block needs to be installed
snapshot
zfs create rpool/snapspace --> on any system (then share it)
zfs snapshot -r rpool@0810
zfs send -Rv rpool@0810 > /net/host/pool/snap<name>
restoration
boot cdrom -s
mount -F nfs host:/rpool/snapspace /mnt
recreate the root pool
zpool create -f -o failmode=..... rpool disk
restore
cat /mnt/rpool.0810 | zfs receive -Fdu rpool
set boot fs
zpool set bootfs=rpool/ROOT/zfs1009BE rpool
installboot / installgrub -- run as above
reboot
roll back using a local snapshot
boot -F failsafe
select the rpool option
zfs rollback rpool@0810 -- old snapshot
zfs rollback rpool/ROOT@0810
zfs rollback rpool/ROOT/zfs..BE@0810
init 6
zfs rename old new
set mountpoint=legacy -- manage through vfstab & mount/umount
vfstab entry:
pool/fs  -  /mnt  zfs  -  yes  -

zfs mount
zfs mount -a
zfs unmount
zfs unshare / zfs share -a   or { legacy -- /etc/dfs/dfstab }
userquota@student=10g pool/fs -- student is the user name

groupquota@<group>=<size>
zfs groupspace/userspace pool/fs

quota -v <uname>
zfs set reservation=10g --> that space is guaranteed to be available for the dataset
snapshots
zfs snapshot pool/fs@date
zfs snapshot -r pool/fs@now
zfs destroy <snapshot>
zfs list -t snapshot
zfs list -o space

roll back
zfs rollback pool/home/fs@abc

clone
zfs snapshot pool/fs@today
zfs clone pool/fs@today pool/abc/clone
restore from clone
zfs snapshot pool/fs@today
zfs clone pool/fs@today pool/abc/clone
zfs promote pool/abc/clone
send / receive
zfs send pool/fs-snap@today | zfs receive pool1/abc
zfs send pool/fs@today | ssh host2 zfs receive pool/abc
zfs send -i pool/fs@wed pool/fs@fri | zfs receive pool2/new --> incremental
or zfs send pool/fs@today | gzip > a.zip
zfs receive pool/fs@ys < /bkup/snap (already sent)
zfs recv -- the short form is enough
combined send/receive
zfs send -I pool/fs@week1 pool/fs@week2 > /bkup/fs@all-I
zfs destroy pool/fs@week1 (and all the other snapshots)
restore
zfs recv -d -F pool/fs < /bkup/fs@all-I
ACL
zfs set aclmode=discard pool/fs
aclinherit=noallow
delegation
zfs allow <uname> create,destroy,mount,snapshot pool/fs1

zfs allow pool/fs


zfs allow mr destroy pool
zfs allow -s @<setname> create,destroy pool/fs -- using a permission set
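A hedged permission-set workflow (user 'student' and set '@basic' are made-up names):
zfs allow -s @basic create,destroy,mount,snapshot pool/fs1   -- define the permission set
zfs allow student @basic pool/fs1                            -- grant the set to a user
zfs allow pool/fs1                                           -- show delegated permissions
zfs unallow student @basic pool/fs1                          -- revoke it again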
zfs volume --block level
zfs create -V 5gb tank/vol
zones
add fs
set type=zfs
set special=pool/fs
set dir=/abc
end
adding a dataset
zonecfg > add dataset
set name=pool/abc
end
it will show up inside the zone
adding volume
zonecfg>add device
set match=/dev/zvol/dsk/pool/vol
end
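One complete zonecfg session combining the dataset and device pieces above (zone name zone1 is an assumption):
zonecfg -z zone1
zonecfg:zone1> add dataset
zonecfg:zone1:dataset> set name=pool/abc
zonecfg:zone1:dataset> end
zonecfg:zone1> add device
zonecfg:zone1:device> set match=/dev/zvol/dsk/pool/vol
zonecfg:zone1:device> end
zonecfg:zone1> verify
zonecfg:zone1> commit
zonecfg:zone1> exit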
maintain the volume in a separate slice
zpool import -R /mnt poolname --> importing with an alternate root
fsck -- repair and validation
zpool scrub pool -- >explicit checking
zpool status -v pool
zpool scrub -s pool -- >stop
ISCSI
#zfs create -V 500m(size) mypool/test(poolname/filesystem)
#iscsitadm list target
#zfs set shareiscsi=on mypool/test
#iscsitadm list target
Then you need to install the iSCSI initiator on the other server and configure iSCSI.
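On the initiator host, discovery would look roughly like this (the target IP is a placeholder):
iscsiadm add discovery-address 192.168.1.10:3260   -- point the initiator at the target host
iscsiadm modify discovery --sendtargets enable     -- turn on SendTargets discovery
devfsadm -i iscsi                                  -- create device nodes for the new LUN
format                                             -- the iSCSI LUN appears as a new disk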
How to upgrade the ZFS version on the server?
bash-3.00# zfs upgrade
This system is currently running ZFS filesystem version 3.
All filesystems are formatted with the current version.
If you want to configure ZFS on an older release (Solaris 10 6/06), you need to install
the packages below.
ZFS package names:

SUNWzfsr
SUNWzfsu
ZFS NFS:
#zpool create mypool c1t0d0 c1t1d0
#zfs create mypool/test
#zfs set sharenfs=on mypool/test
or
#sharemgr add-share -s mypool/test
# sharemgr show -vp zfs
zfs nfs=()
zfs/mypool/test
mypool/test
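From an NFS client the share can then be mounted (hostname host1 and mount point are assumptions):
showmount -e host1                       -- confirm the export is visible
mount -F nfs host1:/mypool/test /mnt     -- Solaris client mount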
