====== zfs ======

=== To check zpool status ===
<code bash>
zpool status [<volume name>]
</code>
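A hedged day-to-day example against the hermes pool used later on this page; the -v flag adds verbose per-device error detail.
<code bash>
# verbose status for a single pool (pool name taken from this page; adjust to yours)
zpool status -v hermes
</code>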
=== To clear errors on a drive that you believe were reported wrongly ===
<code bash>
zpool clear <volume name>
</code>
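zpool clear also accepts a specific device, so error counters can be reset on one drive without touching the rest of the pool; a minimal sketch assuming the hermes pool and a made-up device name.
<code bash>
# clear error counters for a single device (da5 is a hypothetical example)
zpool clear hermes da5
</code>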
=== To add Zeus Drive ===
<code bash>
zpool add performance log <drive name>
</code>
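Log (SLOG) devices are often mirrored so that losing a single Zeus drive cannot lose in-flight synchronous writes; a hedged sketch with placeholder drive names.
<code bash>
# add a mirrored log vdev to the "performance" pool (drive names are placeholders)
zpool add performance log mirror <drive 1> <drive 2>
</code>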
=== Adding spare to volume ===
<code bash>
zpool add performance spare <drive name>
</code>
=== Remove attached spare drive from volume ===
<code bash>
zpool detach <volume name> <drive name>
</code>
=== Removing drives from volume (required to remove spare drives after the RAID has rebuilt) ===
<code bash>
zpool remove <volume name> <drive name>
</code>
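A hedged sketch of the spare cleanup flow implied above, assuming the performance pool: once zpool status shows the resilver is complete, detach the spare from the vdev it joined, then drop it from the pool's spare list.
<code bash>
# drive names are placeholders; run only after the resilver has finished
zpool detach performance <spare drive>
zpool remove performance <spare drive>
</code>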
| + | |||
| + | === List all devices / drive === | ||
| + | |||
| + | <code bash> | ||
| + | sas2ircu 0 DISPLAY | ||
| + | </ | ||
| + | |||
| + | === ZFS Display drive information by serial number === | ||
| + | |||
| + | <code bash> | ||
| + | sas2ircu 0 DISPLAY | grep -B 9 -A 4 <Serial Number> | ||
| + | </ | ||
| + | |||
| + | === Freenas Blink Drives === | ||
| + | |||
| + | <code bash> | ||
| + | sas2ircu 0 locate 3:7 ON | ||
| + | </ | ||
| + | |||
| + | === Offline Drive === | ||
| + | |||
| + | <code bash> | ||
| + | zpool offline hermes 15935140517898495532 | ||
| + | </ | ||
| + | |||
| + | === Replace disk in ZFS Pool === | ||
| + | |||
| + | <code bash> | ||
| + | zpool replace hermes 15935140517898495532 / | ||
| + | </ | ||
| + | |||
| + | ==== Nexenta ==== | ||
| + | === Disk / Lun === | ||
| + | To blink drive use following command in NMC | ||
| + | <code bash> | ||
| + | show lun c8t5000C5005785138Bd0 blink -y | ||
| + | </ | ||
=== Nexenta Mgmt. ===

To create a Nexenta collector report, run the following command from the root prompt:
<code bash>
nexenta-collector --no-upload
</code>
Printing each JBOD slotmap:
<code bash>
nmc -c "show jbod jbod:1 slotmap"
</code>
Check HA status from the command line:
<code bash>
/...
</code>
Synchronize disk location:
<code bash>
nmc -c "..."
</code>
Migrate pool to another server:
<code bash>
/...
</code>

==== Solaris ====

List all faults on the system:
<code bash>
fmadm faulty
</code>
Clearing faults:
<code bash>
fmadm repair <uuid>
</code>
Checking service status:
<code bash>
svcs nm{s,v,cd} dbus rmvolmgr nmdtrace
</code>
Restarting services:
<code bash>
svcadm restart nms
</code>
Loading / unloading the zfs-diagnosis module:
<code bash>
fmadm load /...
</code>
Cleaning dangling /dev links:
<code bash>
devfsadm -Cv
</code>
| + | |||
| + | ==== Check status of ARC and L2 ==== | ||
| + | <code bash> | ||
| + | kstat -p zfs: | ||
| + | </ | ||
| + | |||
| + | |||